diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..e43b0f9
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+.DS_Store
diff --git a/README.md b/README.md
index 170af6f..1b792a8 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,62 @@
-# ha-unraid
-Unraid integration for Home Assistant
+# Unraid Integration for Home Assistant
+
+This custom integration allows you to monitor and control your Unraid server from Home Assistant.
+
+## Features
+
+- Monitor CPU, RAM, boot device, cache, individual array disks, and overall array usage
+- Monitor a UPS connected to Unraid
+- Control Docker containers
+- Manage VMs
+- Execute shell commands
+- Manage user scripts
+
+## Installation
+
+1. Copy the `unraid` folder into your `custom_components` directory.
+2. Restart Home Assistant.
+3. Go to Settings > Devices & Services.
+4. Click the "+ ADD INTEGRATION" button.
+5. Search for "Unraid" and select it.
+6. Follow the configuration steps.
+
+## Configuration
+
+During setup, you'll need to provide:
+
+- Host: The IP address or hostname of your Unraid server
+- Username: Your Unraid username (usually 'root')
+- Password: Your Unraid password
+- Port: SSH port (usually 22)
+- Ping Interval: How often to check if the server is online (in seconds)
+- Update Interval: How often to update sensor data (in seconds)
+
+## Sensors
+
+- CPU Usage
+- RAM Usage
+- Array Usage
+- Individual Array Disks
+- Cache Usage
+- Boot Usage
+- UPS Status
+- Uptime
+
+## Switches
+
+- Docker Containers: Turn on/off Docker containers
+- VMs: Turn on/off Virtual Machines
+
+## Services
+
+- `unraid.execute_command`: Execute a shell command on the Unraid server
+- `unraid.execute_in_container`: Execute a command in a Docker container
+- `unraid.execute_user_script`: Execute a user script
+- `unraid.stop_user_script`: Stop a running user script
+
+## Examples
+
+### Execute a shell command
+
+```yaml
+service: unraid.execute_command
+data:
+  entry_id: YOUR_ENTRY_ID
+  command: "echo 'Hello from Home Assistant' > /boot/config/plugins/user.scripts/scripts/ha_test.sh"
+```
\ No newline at end of file
diff --git a/custom_components/unraid/__init__.py b/custom_components/unraid/__init__.py
new file mode 100644
index 0000000..a547b92
--- /dev/null
+++ b/custom_components/unraid/__init__.py
@@ -0,0 +1,94 @@
+"""The Unraid integration."""
+from __future__ import annotations
+
+import voluptuous as vol
+
+from homeassistant.config_entries import ConfigEntry
+from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD, CONF_PORT
+from homeassistant.core import HomeAssistant, ServiceCall
+from homeassistant.exceptions import ConfigEntryNotReady
+import homeassistant.helpers.config_validation as cv
+
+from .const import DOMAIN, PLATFORMS
+from .coordinator import UnraidDataUpdateCoordinator
+from .unraid import UnraidAPI
+
+async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
+    """Set up Unraid from a config entry."""
+    api = UnraidAPI(
+        host=entry.data[CONF_HOST],
+        username=entry.data[CONF_USERNAME],
+        password=entry.data[CONF_PASSWORD],
+        port=entry.data[CONF_PORT],
+    )
+
+    try:
+        await api.connect()
+    except Exception as err:
+        await api.disconnect()
+        raise ConfigEntryNotReady from err
+
+    coordinator = UnraidDataUpdateCoordinator(hass, api, entry)
+    await coordinator.async_config_entry_first_refresh()
+
+    hass.data.setdefault(DOMAIN, {})[entry.entry_id] = coordinator
+
+    register_services(hass)  # Register the integration's services
+
+    await coordinator.start_ping_task()  # Start the ping task
+
+    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
+
+    return True
+
+async def async_unload_entry(hass: 
HomeAssistant, entry: ConfigEntry) -> bool: + """Unload a config entry.""" + coordinator = hass.data[DOMAIN][entry.entry_id] + await coordinator.stop_ping_task() # Stop the ping task + + if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS): + hass.data[DOMAIN].pop(entry.entry_id) + await coordinator.api.disconnect() + + return unload_ok + +def register_services(hass: HomeAssistant): + """Register services for Unraid.""" + + async def execute_command(call: ServiceCall): + """Execute a command on Unraid.""" + entry_id = call.data.get("entry_id") + command = call.data.get("command") + coordinator: UnraidDataUpdateCoordinator = hass.data[DOMAIN][entry_id] + result = await coordinator.api.execute_command(command) + return {"result": result} + + async def execute_in_container(call: ServiceCall): + """Execute a command in a Docker container.""" + entry_id = call.data.get("entry_id") + container = call.data.get("container") + command = call.data.get("command") + detached = call.data.get("detached", False) + coordinator: UnraidDataUpdateCoordinator = hass.data[DOMAIN][entry_id] + result = await coordinator.api.execute_in_container(container, command, detached) + return {"result": result} + + async def execute_user_script(call: ServiceCall): + """Execute a user script.""" + entry_id = call.data.get("entry_id") + script_name = call.data.get("script_name") + background = call.data.get("background", False) + coordinator: UnraidDataUpdateCoordinator = hass.data[DOMAIN][entry_id] + result = await coordinator.api.execute_user_script(script_name, background) + return {"result": result} + + async def stop_user_script(call: ServiceCall): + """Stop a user script.""" + entry_id = call.data.get("entry_id") + script_name = call.data.get("script_name") + coordinator: UnraidDataUpdateCoordinator = hass.data[DOMAIN][entry_id] + result = await coordinator.api.stop_user_script(script_name) + return {"result": result} + + hass.services.async_register(DOMAIN, "execute_command", execute_command) + hass.services.async_register(DOMAIN, "execute_in_container", execute_in_container) + hass.services.async_register(DOMAIN, "execute_user_script", execute_user_script) + hass.services.async_register(DOMAIN, "stop_user_script", stop_user_script) \ No newline at end of file diff --git a/custom_components/unraid/config_flow.py b/custom_components/unraid/config_flow.py new file mode 100644 index 0000000..19db249 --- /dev/null +++ b/custom_components/unraid/config_flow.py @@ -0,0 +1,63 @@ +"""Config flow for Unraid integration.""" +from __future__ import annotations + +import voluptuous as vol +from homeassistant import config_entries +from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD, CONF_PORT +from homeassistant.core import HomeAssistant +from homeassistant.data_entry_flow import FlowResult +from homeassistant.exceptions import HomeAssistantError + +from .unraid import UnraidAPI +from .const import DOMAIN, DEFAULT_PORT, DEFAULT_PING_INTERVAL, DEFAULT_CHECK_INTERVAL + +STEP_USER_DATA_SCHEMA = vol.Schema( + { + vol.Required(CONF_HOST): str, + vol.Required(CONF_USERNAME): str, + vol.Required(CONF_PASSWORD): str, + vol.Optional(CONF_PORT, default=DEFAULT_PORT): int, + vol.Optional("ping_interval", default=DEFAULT_PING_INTERVAL): int, + vol.Optional("check_interval", default=DEFAULT_CHECK_INTERVAL): int, + } +) + +async def validate_input(hass: HomeAssistant, data: dict[str, Any]) -> dict[str, Any]: + """Validate the user input allows us to connect.""" + api = 
UnraidAPI(data[CONF_HOST], data[CONF_USERNAME], data[CONF_PASSWORD], data[CONF_PORT]) + + try: + await api.connect() + await api.disconnect() + except Exception as err: + raise CannotConnect from err + + # Return info that you want to store in the config entry. + return {"title": f"Unraid Server ({data[CONF_HOST]})"} + +class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): + """Handle a config flow for Unraid.""" + + VERSION = 1 + + async def async_step_user( + self, user_input: dict[str, Any] | None = None + ) -> FlowResult: + """Handle the initial step.""" + errors: dict[str, str] = {} + if user_input is not None: + try: + info = await validate_input(self.hass, user_input) + except CannotConnect: + errors["base"] = "cannot_connect" + except Exception: # pylint: disable=broad-except + errors["base"] = "unknown" + else: + return self.async_create_entry(title=info["title"], data=user_input) + + return self.async_show_form( + step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors + ) + +class CannotConnect(HomeAssistantError): + """Error to indicate we cannot connect.""" \ No newline at end of file diff --git a/custom_components/unraid/const.py b/custom_components/unraid/const.py new file mode 100644 index 0000000..abfa7b9 --- /dev/null +++ b/custom_components/unraid/const.py @@ -0,0 +1,9 @@ +"""Constants for the Unraid integration.""" +from homeassistant.const import Platform + +DOMAIN = "unraid" +DEFAULT_PORT = 22 +DEFAULT_PING_INTERVAL = 60 +DEFAULT_CHECK_INTERVAL = 300 + +PLATFORMS = [Platform.SENSOR, Platform.SWITCH] \ No newline at end of file diff --git a/custom_components/unraid/coordinator.py b/custom_components/unraid/coordinator.py new file mode 100644 index 0000000..f86d6ec --- /dev/null +++ b/custom_components/unraid/coordinator.py @@ -0,0 +1,82 @@ +"""DataUpdateCoordinator for Unraid.""" +import asyncio +from datetime import timedelta +import logging + +from homeassistant.config_entries import ConfigEntry +from homeassistant.core import HomeAssistant +from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed + +from .const import DOMAIN +from .unraid import UnraidAPI + +_LOGGER = logging.getLogger(__name__) + +class UnraidDataUpdateCoordinator(DataUpdateCoordinator): + """Class to manage fetching Unraid data.""" + + def __init__(self, hass: HomeAssistant, api: UnraidAPI, entry: ConfigEntry) -> None: + """Initialize the data update coordinator.""" + self.api = api + self.entry = entry + self.ping_interval = entry.data["ping_interval"] + self._is_online = True + self._ping_task = None + + super().__init__( + hass, + _LOGGER, + name=DOMAIN, + update_interval=timedelta(seconds=entry.data["check_interval"]), + ) + + async def _async_update_data(self): + """Fetch data from Unraid.""" + if not self._is_online: + raise UpdateFailed("Unraid server is offline") + + try: + system_stats = await self.api.get_system_stats() + docker_containers = await self.api.get_docker_containers() + vms = await self.api.get_vms() + user_scripts = await self.api.get_user_scripts() + + return { + "system_stats": system_stats, + "docker_containers": docker_containers, + "vms": vms, + "user_scripts": user_scripts, + } + except Exception as err: + raise UpdateFailed(f"Error communicating with Unraid: {err}") from err + + async def ping_unraid(self): + """Ping the Unraid server to check if it's online.""" + while True: + try: + await self.api.ping() + if not self._is_online: + _LOGGER.info("Unraid server is back online") + self._is_online = True + await 
self.async_request_refresh() + except Exception: + if self._is_online: + _LOGGER.warning("Unraid server is offline") + self._is_online = False + + await asyncio.sleep(self.ping_interval) + + async def start_ping_task(self): + """Start the ping task.""" + if self._ping_task is None: + self._ping_task = self.hass.async_create_task(self.ping_unraid()) + + async def stop_ping_task(self): + """Stop the ping task.""" + if self._ping_task is not None: + self._ping_task.cancel() + try: + await self._ping_task + except asyncio.CancelledError: + pass + self._ping_task = None \ No newline at end of file diff --git a/custom_components/unraid/manifest.json b/custom_components/unraid/manifest.json new file mode 100644 index 0000000..1da552e --- /dev/null +++ b/custom_components/unraid/manifest.json @@ -0,0 +1,15 @@ +{ + "domain": "unraid", + "name": "UNRAID", + "codeowners": ["@domalab"], + "config_flow": true, + "dependencies": [], + "documentation": "https://github.com/domalab/ha-unraid/wiki", + "homekit": {}, + "iot_class": "local_polling", + "issue_tracker": "https://github.com/domalab/ha-unraid/issues", + "requirements": [], + "ssdp": [], + "version": "0.1.0", + "zeroconf": [] +} \ No newline at end of file diff --git a/custom_components/unraid/sensor.py b/custom_components/unraid/sensor.py new file mode 100644 index 0000000..22fc9df --- /dev/null +++ b/custom_components/unraid/sensor.py @@ -0,0 +1,354 @@ +"""Sensor platform for Unraid.""" +from __future__ import annotations + +from homeassistant.components.sensor import ( + SensorDeviceClass, + SensorEntity, + SensorStateClass, +) +from homeassistant.config_entries import ConfigEntry +from homeassistant.core import HomeAssistant +from homeassistant.helpers.entity_platform import AddEntitiesCallback +from homeassistant.helpers.update_coordinator import CoordinatorEntity + +from datetime import datetime, timedelta +from homeassistant.util import dt as dt_util + +from .const import DOMAIN +from .coordinator import UnraidDataUpdateCoordinator + +def format_size(size_in_bytes: float) -> str: + """Format size to appropriate unit.""" + units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB'] + size = float(size_in_bytes) + unit_index = 0 + while size >= 1024 and unit_index < len(units) - 1: + size /= 1024 + unit_index += 1 + return f"{size:.2f} {units[unit_index]}" + +async def async_setup_entry( + hass: HomeAssistant, + entry: ConfigEntry, + async_add_entities: AddEntitiesCallback, +) -> None: + """Set up Unraid sensor based on a config entry.""" + coordinator: UnraidDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id] + + sensors = [ + UnraidCPUUsageSensor(coordinator), + UnraidRAMUsageSensor(coordinator), + UnraidArrayUsageSensor(coordinator), + UnraidCacheUsageSensor(coordinator), + UnraidBootUsageSensor(coordinator), + UnraidUptimeSensor(coordinator), + UnraidUPSSensor(coordinator), + ] + # Add individual disk sensors + for disk in coordinator.data["system_stats"].get("individual_disks", []): + sensors.append(UnraidIndividualDiskSensor(coordinator, disk["name"])) + + async_add_entities(sensors) + +class UnraidSensorBase(CoordinatorEntity, SensorEntity): + """Base class for Unraid sensors.""" + + def __init__( + self, + coordinator: UnraidDataUpdateCoordinator, + key: str, + name: str, + icon: str, + device_class: SensorDeviceClass | None = None, + state_class: SensorStateClass | None = None, + ) -> None: + """Initialize the sensor.""" + super().__init__(coordinator) + self._key = key + self._attr_name = f"Unraid {name}" + self._attr_unique_id = 
f"{coordinator.entry.entry_id}_{key}" + self._attr_icon = icon + self._attr_device_class = device_class + self._attr_state_class = state_class + + @property + def device_info(self): + """Return device information about this Unraid server.""" + return { + "identifiers": {(DOMAIN, self.coordinator.config_entry.entry_id)}, + "name": f"Unraid Server ({self.coordinator.config_entry.data['host']})", + "manufacturer": "Lime Technology", + "model": "Unraid Server", + } + + @property + def native_value(self): + """Return the state of the sensor.""" + return self.coordinator.data["system_stats"].get(self._key) + +class UnraidCPUUsageSensor(UnraidSensorBase): + """Representation of Unraid CPU usage sensor.""" + + def __init__(self, coordinator: UnraidDataUpdateCoordinator) -> None: + """Initialize the sensor.""" + super().__init__( + coordinator, + "cpu_usage", + "CPU Usage", + "mdi:cpu-64-bit", + device_class=SensorDeviceClass.POWER_FACTOR, + state_class=SensorStateClass.MEASUREMENT, + ) + + @property + def native_unit_of_measurement(self): + """Return the unit of measurement.""" + return "%" + +class UnraidRAMUsageSensor(UnraidSensorBase): + """Representation of Unraid RAM usage sensor.""" + + def __init__(self, coordinator: UnraidDataUpdateCoordinator) -> None: + """Initialize the sensor.""" + super().__init__( + coordinator, + "memory_usage", + "RAM Usage", + "mdi:memory", + device_class=SensorDeviceClass.POWER_FACTOR, + state_class=SensorStateClass.MEASUREMENT, + ) + + @property + def native_value(self): + """Return the state of the sensor.""" + percentage = self.coordinator.data["system_stats"].get("memory_usage", {}).get("percentage") + if percentage is not None: + return round(percentage, 1) # Round to one decimal place + return None + + @property + def native_unit_of_measurement(self): + """Return the unit of measurement.""" + return "%" + +class UnraidArrayUsageSensor(UnraidSensorBase): + """Representation of Unraid Array usage sensor.""" + + def __init__(self, coordinator: UnraidDataUpdateCoordinator) -> None: + """Initialize the sensor.""" + super().__init__( + coordinator, + "array_usage", + "Array Usage", + "mdi:harddisk", + device_class=SensorDeviceClass.POWER_FACTOR, + state_class=SensorStateClass.MEASUREMENT, + ) + + @property + def native_value(self): + """Return the state of the sensor.""" + percentage = self.coordinator.data["system_stats"].get("array_usage", {}).get("percentage") + return round(percentage, 1) if percentage is not None else None + + @property + def native_unit_of_measurement(self): + """Return the unit of measurement.""" + return "%" + + @property + def extra_state_attributes(self): + """Return the state attributes.""" + array_usage = self.coordinator.data["system_stats"].get("array_usage", {}) + return { + "total_size": format_size(array_usage.get("total", 0)), + "used_space": format_size(array_usage.get("used", 0)), + "free_space": format_size(array_usage.get("free", 0)), + } + +class UnraidIndividualDiskSensor(UnraidSensorBase): + """Representation of an individual Unraid disk usage sensor.""" + + def __init__(self, coordinator: UnraidDataUpdateCoordinator, disk_name: str) -> None: + """Initialize the sensor.""" + super().__init__( + coordinator, + f"disk_{disk_name}_usage", + f"Disk {disk_name} Usage", + "mdi:harddisk", + device_class=SensorDeviceClass.POWER_FACTOR, + state_class=SensorStateClass.MEASUREMENT, + ) + self._disk_name = disk_name + + @property + def native_value(self): + """Return the state of the sensor.""" + for disk in 
self.coordinator.data["system_stats"].get("individual_disks", []): + if disk["name"] == self._disk_name: + return disk["percentage"] + return None + + @property + def native_unit_of_measurement(self): + """Return the unit of measurement.""" + return "%" + + @property + def extra_state_attributes(self): + """Return the state attributes.""" + for disk in self.coordinator.data["system_stats"].get("individual_disks", []): + if disk["name"] == self._disk_name: + return { + "total_size": format_size(disk["total"]), + "used_space": format_size(disk["used"]), + "free_space": format_size(disk["free"]), + "mount_point": disk["mount_point"], + } + return {} + +class UnraidCacheUsageSensor(UnraidSensorBase): + """Representation of Unraid Cache usage sensor.""" + + def __init__(self, coordinator: UnraidDataUpdateCoordinator) -> None: + """Initialize the sensor.""" + super().__init__( + coordinator, + "cache_usage", + "Cache Usage", + "mdi:harddisk", + device_class=SensorDeviceClass.POWER_FACTOR, + state_class=SensorStateClass.MEASUREMENT, + ) + + @property + def native_value(self): + """Return the state of the sensor.""" + percentage = self.coordinator.data["system_stats"].get("cache_usage", {}).get("percentage") + return round(percentage, 1) if percentage is not None else None + + @property + def native_unit_of_measurement(self): + """Return the unit of measurement.""" + return "%" + + @property + def extra_state_attributes(self): + """Return the state attributes.""" + cache_usage = self.coordinator.data["system_stats"].get("cache_usage", {}) + return { + "total_size": format_size(cache_usage.get("total", 0)), + "used_space": format_size(cache_usage.get("used", 0)), + "free_space": format_size(cache_usage.get("free", 0)), + } + +class UnraidBootUsageSensor(UnraidSensorBase): + """Representation of Unraid Boot device usage sensor.""" + + def __init__(self, coordinator: UnraidDataUpdateCoordinator) -> None: + """Initialize the sensor.""" + super().__init__( + coordinator, + "boot_usage", + "Boot Usage", + "mdi:usb-flash-drive", + device_class=SensorDeviceClass.POWER_FACTOR, + state_class=SensorStateClass.MEASUREMENT, + ) + + @property + def native_value(self): + """Return the state of the sensor.""" + return self.coordinator.data["system_stats"].get("boot_usage", {}).get("percentage") + + @property + def native_unit_of_measurement(self): + """Return the unit of measurement.""" + return "%" + + @property + def extra_state_attributes(self): + """Return the state attributes.""" + boot_usage = self.coordinator.data["system_stats"].get("boot_usage", {}) + return { + "total_size": format_size(boot_usage.get("total", 0)), + "used_space": format_size(boot_usage.get("used", 0)), + "free_space": format_size(boot_usage.get("free", 0)), + } + +class UnraidUptimeSensor(UnraidSensorBase): + """Representation of Unraid Uptime sensor.""" + + def __init__(self, coordinator: UnraidDataUpdateCoordinator) -> None: + """Initialize the sensor.""" + super().__init__( + coordinator, + "uptime", + "Uptime", + "mdi:clock-outline", + device_class=None, + state_class=None, + ) + + @property + def native_value(self) -> str: + """Return the formatted uptime as the main value.""" + uptime_seconds = self.coordinator.data["system_stats"].get("uptime", 0) + days, remainder = divmod(int(uptime_seconds), 86400) + hours, remainder = divmod(remainder, 3600) + minutes, _ = divmod(remainder, 60) + return f"{days}d {hours}h {minutes}m" + + @property + def extra_state_attributes(self): + """Return the state attributes.""" + uptime_seconds = 
self.coordinator.data["system_stats"].get("uptime", 0) + days, remainder = divmod(int(uptime_seconds), 86400) + hours, remainder = divmod(remainder, 3600) + minutes, _ = divmod(remainder, 60) + return { + "days": days, + "hours": hours, + "minutes": minutes, + "timestamp": dt_util.utcnow() - timedelta(seconds=uptime_seconds) + } + + @property + def available(self) -> bool: + """Return True if entity is available.""" + return self.coordinator.last_update_success and "uptime" in self.coordinator.data["system_stats"] + +class UnraidUPSSensor(UnraidSensorBase): + """Representation of Unraid UPS sensor.""" + + def __init__(self, coordinator: UnraidDataUpdateCoordinator) -> None: + """Initialize the sensor.""" + super().__init__( + coordinator, + "ups_status", + "UPS Status", + "mdi:battery-medium", + device_class=None, + state_class=None, + ) + + @property + def native_value(self): + """Return the state of the sensor.""" + ups_info = self.coordinator.data["system_stats"].get("ups_info", {}) + return ups_info.get("STATUS", "Unknown") + + @property + def extra_state_attributes(self): + """Return the state attributes.""" + ups_info = self.coordinator.data["system_stats"].get("ups_info", {}) + return { + "model": ups_info.get("MODEL", "Unknown"), + "ups_load": ups_info.get("LOADPCT", "Unknown"), + "battery_charge": ups_info.get("BCHARGE", "Unknown"), + "runtime_left": ups_info.get("TIMELEFT", "Unknown"), + "nominal_power": ups_info.get("NOMPOWER", "Unknown"), + "line_voltage": ups_info.get("LINEV", "Unknown"), + "battery_voltage": ups_info.get("BATTV", "Unknown"), + } \ No newline at end of file diff --git a/custom_components/unraid/switch.py b/custom_components/unraid/switch.py new file mode 100644 index 0000000..ad3c89b --- /dev/null +++ b/custom_components/unraid/switch.py @@ -0,0 +1,120 @@ +"""Switch platform for Unraid.""" +from __future__ import annotations + +from homeassistant.components.switch import SwitchEntity +from homeassistant.config_entries import ConfigEntry +from homeassistant.core import HomeAssistant, callback +from homeassistant.helpers.entity_platform import AddEntitiesCallback +from homeassistant.helpers.update_coordinator import CoordinatorEntity + +from .const import DOMAIN +from .coordinator import UnraidDataUpdateCoordinator + +async def async_setup_entry( + hass: HomeAssistant, + entry: ConfigEntry, + async_add_entities: AddEntitiesCallback, +) -> None: + """Set up Unraid switch based on a config entry.""" + coordinator: UnraidDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id] + + if "docker_containers" not in coordinator.data or "vms" not in coordinator.data: + return + + switches = [] + + if coordinator.data["docker_containers"]: + for container in coordinator.data["docker_containers"]: + switches.append(UnraidDockerContainerSwitch(coordinator, container["name"])) + + if coordinator.data["vms"]: + for vm in coordinator.data["vms"]: + switches.append(UnraidVMSwitch(coordinator, vm["name"])) + + if switches: + async_add_entities(switches) + +class UnraidSwitchBase(CoordinatorEntity, SwitchEntity): + """Base class for Unraid switches.""" + + @property + def device_info(self): + """Return device information about this Unraid server.""" + return { + "identifiers": {(DOMAIN, self.coordinator.config_entry.entry_id)}, + "name": f"Unraid Server ({self.coordinator.config_entry.data['host']})", + "manufacturer": "Lime Technology", + "model": "Unraid Server", + } + + @property + def entity_registry_enabled_default(self) -> bool: + return True + + @property + def 
available(self) -> bool: + """Return True if entity is available.""" + return self.coordinator.last_update_success + + @callback + def _handle_coordinator_update(self) -> None: + """Handle updated data from the coordinator.""" + self.async_write_ha_state() + +class UnraidDockerContainerSwitch(UnraidSwitchBase): + """Representation of an Unraid Docker container switch.""" + + def __init__(self, coordinator: UnraidDataUpdateCoordinator, container_name: str) -> None: + """Initialize the switch.""" + super().__init__(coordinator) + self._container_name = container_name + self._attr_name = f"Unraid Docker {container_name}" + self._attr_unique_id = f"{coordinator.config_entry.entry_id}_docker_{container_name}" + self._attr_icon = "mdi:docker" + + @property + def is_on(self) -> bool: + """Return true if the container is running.""" + for container in self.coordinator.data["docker_containers"]: + if container["name"] == self._container_name: + return container["status"].lower() == "running" + return False + + async def async_turn_on(self, **kwargs) -> None: + """Turn the container on.""" + await self.coordinator.api.start_container(self._container_name) + await self.coordinator.async_request_refresh() + + async def async_turn_off(self, **kwargs) -> None: + """Turn the container off.""" + await self.coordinator.api.stop_container(self._container_name) + await self.coordinator.async_request_refresh() + +class UnraidVMSwitch(UnraidSwitchBase): + """Representation of an Unraid VM switch.""" + + def __init__(self, coordinator: UnraidDataUpdateCoordinator, vm_name: str) -> None: + """Initialize the switch.""" + super().__init__(coordinator) + self._vm_name = vm_name + self._attr_name = f"Unraid VM {vm_name}" + self._attr_unique_id = f"{coordinator.config_entry.entry_id}_vm_{vm_name}" + self._attr_icon = "mdi:virtual-machine" + + @property + def is_on(self) -> bool: + """Return true if the VM is running.""" + for vm in self.coordinator.data["vms"]: + if vm["name"] == self._vm_name: + return vm["status"].lower() == "running" + return False + + async def async_turn_on(self, **kwargs) -> None: + """Turn the VM on.""" + await self.coordinator.api.start_vm(self._vm_name) + await self.coordinator.async_request_refresh() + + async def async_turn_off(self, **kwargs) -> None: + """Turn the VM off.""" + await self.coordinator.api.stop_vm(self._vm_name) + await self.coordinator.async_request_refresh() \ No newline at end of file diff --git a/custom_components/unraid/translations/en.json b/custom_components/unraid/translations/en.json new file mode 100644 index 0000000..e355df9 --- /dev/null +++ b/custom_components/unraid/translations/en.json @@ -0,0 +1,96 @@ +{ + "config": { + "step": { + "user": { + "title": "Connect to Unraid", + "data": { + "host": "Host", + "username": "Username", + "password": "Password", + "port": "Port", + "ping_interval": "Ping Interval (seconds)", + "check_interval": "Update Interval (seconds)", + "mac_address": "MAC Address (optional)" + } + } + }, + "error": { + "cannot_connect": "Failed to connect to Unraid server", + "invalid_auth": "Invalid authentication", + "unknown": "Unexpected error occurred" + }, + "abort": { + "already_configured": "This Unraid server is already configured" + } + }, + "services": { + "execute_command": { + "name": "Execute Command", + "description": "Execute a shell command on the Unraid server.", + "fields": { + "entry_id": { + "name": "Unraid Instance", + "description": "The Unraid instance to use for this command." 
+ }, + "command": { + "name": "Command", + "description": "The shell command to execute." + } + } + }, + "execute_in_container": { + "name": "Execute in Container", + "description": "Execute a command in a Docker container.", + "fields": { + "entry_id": { + "name": "Unraid Instance", + "description": "The Unraid instance to use for this command." + }, + "container": { + "name": "Container", + "description": "The name of the Docker container." + }, + "command": { + "name": "Command", + "description": "The command to execute in the container." + }, + "detached": { + "name": "Detached", + "description": "Run the command in detached mode." + } + } + }, + "execute_user_script": { + "name": "Execute User Script", + "description": "Execute a user script on the Unraid server.", + "fields": { + "entry_id": { + "name": "Unraid Instance", + "description": "The Unraid instance to use for this command." + }, + "script_name": { + "name": "Script Name", + "description": "The name of the user script to execute." + }, + "background": { + "name": "Background", + "description": "Run the script in the background." + } + } + }, + "stop_user_script": { + "name": "Stop User Script", + "description": "Stop a running user script on the Unraid server.", + "fields": { + "entry_id": { + "name": "Unraid Instance", + "description": "The Unraid instance to use for this command." + }, + "script_name": { + "name": "Script Name", + "description": "The name of the user script to stop." + } + } + } + } +} \ No newline at end of file diff --git a/custom_components/unraid/unraid.py b/custom_components/unraid/unraid.py new file mode 100644 index 0000000..7411498 --- /dev/null +++ b/custom_components/unraid/unraid.py @@ -0,0 +1,393 @@ +"""API client for Unraid.""" +import asyncssh +from typing import Dict, List, Any, Optional +import re +import logging + +_LOGGER = logging.getLogger(__name__) + +class UnraidAPI: + def __init__(self, host: str, username: str, password: str, port: int = 22): + self.host = host + self.username = username + self.password = password + self.port = port + self.conn = None + + async def connect(self): + self.conn = await asyncssh.connect( + self.host, + username=self.username, + password=self.password, + port=self.port, + known_hosts=None + ) + + async def disconnect(self): + if self.conn: + self.conn.close() + await self.conn.wait_closed() + + async def ping(self) -> bool: + try: + result = await self.execute_command("echo 'ping'") + return result.exit_status == 0 + except Exception: + return False + + async def execute_command(self, command: str) -> asyncssh.SSHCompletedProcess: + if not self.conn: + await self.connect() + return await self.conn.run(command) + + async def get_system_stats(self) -> Dict[str, Any]: + cpu_usage = await self._get_cpu_usage() + memory_usage = await self._get_memory_usage() + array_usage = await self._get_array_usage() + individual_disks = await self.get_individual_disk_usage() + cache_usage = await self._get_cache_usage() + boot_usage = await self._get_boot_usage() + uptime = await self._get_uptime() + ups_info = await self.get_ups_info() + + return { + "cpu_usage": cpu_usage, + "memory_usage": memory_usage, + "array_usage": array_usage, + "individual_disks": individual_disks, + "cache_usage": cache_usage, + "boot_usage": boot_usage, + "uptime": uptime, + "ups_info": ups_info, + } + + async def _get_cpu_usage(self) -> Optional[float]: + try: + result = await self.execute_command("top -bn1 | grep 'Cpu(s)' | awk '{print $2 + $4}'") + if result.exit_status != 0: + 
_LOGGER.error(f"CPU usage command failed with exit status {result.exit_status}") + return None + + match = re.search(r'(\d+(\.\d+)?)', result.stdout) + if match: + return round(float(match.group(1)), 2) + else: + _LOGGER.error(f"Failed to parse CPU usage from output: {result.stdout}") + return None + except Exception as e: + _LOGGER.error(f"Error getting CPU usage: {e}") + return None + + async def _get_memory_usage(self) -> Dict[str, Optional[float]]: + try: + result = await self.execute_command("free | awk '/Mem:/ {print $3/$2 * 100.0}'") + if result.exit_status != 0: + _LOGGER.error(f"Memory usage command failed with exit status {result.exit_status}") + return {"percentage": None} + + match = re.search(r'(\d+(\.\d+)?)', result.stdout) + if match: + return {"percentage": float(match.group(1))} + else: + _LOGGER.error(f"Failed to parse memory usage from output: {result.stdout}") + return {"percentage": None} + except Exception as e: + _LOGGER.error(f"Error getting memory usage: {e}") + return {"percentage": None} + + async def _get_array_usage(self) -> Dict[str, Optional[float]]: + try: + result = await self.execute_command("df -k /mnt/user | awk 'NR==2 {print $2,$3,$4}'") + if result.exit_status != 0: + _LOGGER.error(f"Array usage command failed with exit status {result.exit_status}") + return {"percentage": None, "total": None, "used": None, "free": None} + + total, used, free = map(int, result.stdout.strip().split()) + percentage = (used / total) * 100 + + return { + "percentage": round(percentage, 2), + "total": total * 1024, # Convert to bytes + "used": used * 1024, # Convert to bytes + "free": free * 1024 # Convert to bytes + } + except Exception as e: + _LOGGER.error(f"Error getting array usage: {e}") + return {"percentage": None, "total": None, "used": None, "free": None} + + async def get_individual_disk_usage(self) -> List[Dict[str, Any]]: + try: + result = await self.execute_command("df -k /mnt/disk* | awk 'NR>1 {print $6,$2,$3,$4}'") + if result.exit_status != 0: + _LOGGER.error(f"Individual disk usage command failed with exit status {result.exit_status}") + return [] + + disks = [] + for line in result.stdout.splitlines(): + mount_point, total, used, free = line.split() + disk_name = mount_point.split('/')[-1] + total = int(total) * 1024 # Convert to bytes + used = int(used) * 1024 # Convert to bytes + free = int(free) * 1024 # Convert to bytes + percentage = (used / total) * 100 if total > 0 else 0 + + disks.append({ + "name": disk_name, + "mount_point": mount_point, + "percentage": round(percentage, 2), + "total": total, + "used": used, + "free": free + }) + + return disks + except Exception as e: + _LOGGER.error(f"Error getting individual disk usage: {e}") + return [] + + async def _get_cache_usage(self) -> Dict[str, Optional[float]]: + try: + result = await self.execute_command("df -k /mnt/cache | awk 'NR==2 {print $2,$3,$4}'") + if result.exit_status != 0: + _LOGGER.error(f"Cache usage command failed with exit status {result.exit_status}") + return {"percentage": None, "total": None, "used": None, "free": None} + + total, used, free = map(int, result.stdout.strip().split()) + percentage = (used / total) * 100 + + return { + "percentage": round(percentage, 2), + "total": total * 1024, # Convert to bytes + "used": used * 1024, # Convert to bytes + "free": free * 1024 # Convert to bytes + } + except Exception as e: + _LOGGER.error(f"Error getting cache usage: {e}") + return {"percentage": None, "total": None, "used": None, "free": None} + + async def _get_boot_usage(self) 
-> Dict[str, Optional[float]]: + try: + result = await self.execute_command("df -k /boot | awk 'NR==2 {print $2,$3,$4}'") + if result.exit_status != 0: + _LOGGER.error(f"Boot usage command failed with exit status {result.exit_status}") + return {"percentage": None, "total": None, "used": None, "free": None} + + total, used, free = map(int, result.stdout.strip().split()) + percentage = (used / total) * 100 + + return { + "percentage": round(percentage, 2), + "total": total * 1024, # Convert to bytes + "used": used * 1024, # Convert to bytes + "free": free * 1024 # Convert to bytes + } + except Exception as e: + _LOGGER.error(f"Error getting boot usage: {e}") + return {"percentage": None, "total": None, "used": None, "free": None} + + def _convert_to_bytes(self, size_str: str) -> float: + """Convert a size string (e.g., '1.5P', '800T', '100G') to bytes.""" + units = { + 'B': 1, + 'K': 1024, + 'M': 1024**2, + 'G': 1024**3, + 'T': 1024**4, + 'P': 1024**5, + 'E': 1024**6, + } + match = re.match(r"([\d.]+)\s*([BKMGTPE])?", size_str, re.I) + if not match: + return 0.0 + + number, unit = match.groups() + number = float(number) + unit = (unit or 'B').upper() # Default to bytes if no unit is specified + + return number * units[unit] + + async def _get_uptime(self) -> Optional[float]: + try: + result = await self.execute_command("awk '{print $1}' /proc/uptime") + if result.exit_status != 0: + _LOGGER.error(f"Uptime command failed with exit status {result.exit_status}") + return None + + match = re.search(r'(\d+(\.\d+)?)', result.stdout) + if match: + return float(match.group(1)) + else: + _LOGGER.error(f"Failed to parse uptime from output: {result.stdout}") + return None + except Exception as e: + _LOGGER.error(f"Error getting uptime: {e}") + return None + + async def get_ups_info(self) -> Dict[str, Any]: + try: + result = await self.execute_command("apcaccess status") + if result.exit_status != 0: + _LOGGER.error("UPS info command failed") + return {} + + ups_data = {} + for line in result.stdout.splitlines(): + if ':' in line: + key, value = line.split(':', 1) + ups_data[key.strip()] = value.strip() + return ups_data + except Exception as e: + _LOGGER.error(f"Error getting UPS info: {e}") + return {} + + async def get_docker_containers(self) -> List[Dict[str, Any]]: + try: + result = await self.execute_command("docker ps -a --format '{{.Names}}|{{.State}}'") + if result.exit_status != 0: + _LOGGER.error(f"Docker container list command failed with exit status {result.exit_status}") + return [] + + containers = [] + for line in result.stdout.splitlines(): + parts = line.split('|') + if len(parts) == 2: + containers.append({"name": parts[0], "status": parts[1]}) + else: + _LOGGER.warning(f"Unexpected format in docker container output: {line}") + return containers + except Exception as e: + _LOGGER.error(f"Error getting docker containers: {e}") + return [] + + async def start_container(self, container_name: str) -> bool: + try: + result = await self.execute_command(f"docker start {container_name}") + return result.exit_status == 0 and container_name in result.stdout + except Exception as e: + _LOGGER.error(f"Error starting container {container_name}: {e}") + return False + + async def stop_container(self, container_name: str) -> bool: + try: + result = await self.execute_command(f"docker stop {container_name}") + return result.exit_status == 0 and container_name in result.stdout + except Exception as e: + _LOGGER.error(f"Error stopping container {container_name}: {e}") + return False + + async def 
execute_in_container(self, container_name: str, command: str, detached: bool = False) -> str:
+        try:
+            docker_command = f"docker exec {'--detach ' if detached else ''}{container_name} {command}"
+            result = await self.execute_command(docker_command)
+            if result.exit_status != 0:
+                _LOGGER.error(f"Command in container {container_name} failed with exit status {result.exit_status}")
+                return ""
+            return result.stdout
+        except Exception as e:
+            _LOGGER.error(f"Error executing command in container {container_name}: {e}")
+            return ""
+
+    async def get_user_scripts(self) -> List[Dict[str, Any]]:
+        try:
+            result = await self.execute_command("ls -1 /boot/config/plugins/user.scripts/scripts")
+            if result.exit_status != 0:
+                _LOGGER.error(f"User scripts list command failed with exit status {result.exit_status}")
+                return []
+            return [{"name": script.strip()} for script in result.stdout.splitlines()]
+        except Exception as e:
+            _LOGGER.error(f"Error getting user scripts: {e}")
+            return []
+
+    async def execute_user_script(self, script_name: str, background: bool = False) -> str:
+        try:
+            command = f"/usr/local/emhttp/plugins/user.scripts/scripts/{script_name}"
+            if background:
+                command += " > /dev/null 2>&1 &"  # discard output, then run the script in the background
+            result = await self.execute_command(command)
+            if result.exit_status != 0:
+                _LOGGER.error(f"User script {script_name} failed with exit status {result.exit_status}")
+                return ""
+            return result.stdout
+        except Exception as e:
+            _LOGGER.error(f"Error executing user script {script_name}: {e}")
+            return ""
+
+    async def stop_user_script(self, script_name: str) -> str:
+        try:
+            result = await self.execute_command(f"pkill -f '{script_name}'")
+            if result.exit_status != 0:
+                _LOGGER.error(f"Stopping user script {script_name} failed with exit status {result.exit_status}")
+                return ""
+            return result.stdout
+        except Exception as e:
+            _LOGGER.error(f"Error stopping user script {script_name}: {e}")
+            return ""
+
+    async def get_vms(self) -> List[Dict[str, Any]]:
+        try:
+            result = await self.execute_command("virsh list --all --name")
+            if result.exit_status != 0:
+                _LOGGER.error(f"VM list command failed with exit status {result.exit_status}")
+                return []
+
+            vms = []
+            for line in result.stdout.splitlines():
+                if line.strip():
+                    name = line.strip()
+                    status = await self._get_vm_status(name)
+                    vms.append({"name": name, "status": status})
+            return vms
+        except Exception as e:
+            _LOGGER.error(f"Error getting VMs: {e}")
+            return []
+
+    async def _get_vm_status(self, vm_name: str) -> str:
+        try:
+            result = await self.execute_command(f"virsh domstate {vm_name}")
+            if result.exit_status != 0:
+                _LOGGER.error(f"VM status command for {vm_name} failed with exit status {result.exit_status}")
+                return "unknown"
+            return result.stdout.strip()
+        except Exception as e:
+            _LOGGER.error(f"Error getting VM status for {vm_name}: {e}")
+            return "unknown"
+
+    async def start_vm(self, vm_name: str) -> bool:
+        try:
+            result = await self.execute_command(f"virsh start {vm_name}")
+            return result.exit_status == 0 and "started" in result.stdout.lower()
+        except Exception as e:
+            _LOGGER.error(f"Error starting VM {vm_name}: {e}")
+            return False
+
+    async def stop_vm(self, vm_name: str) -> bool:
+        try:
+            result = await self.execute_command(f"virsh shutdown {vm_name}")
+            return result.exit_status == 0 and "shutting down" in result.stdout.lower()
+        except Exception as e:
+            _LOGGER.error(f"Error stopping VM {vm_name}: {e}")
+            return False
+
+    async def pause_vm(self, vm_name: str) -> bool:
+        try:
+            result = await self.execute_command(f"virsh suspend {vm_name}")
+            return result.exit_status == 0 and "suspended" in result.stdout.lower()
+        except Exception as e:
+            _LOGGER.error(f"Error pausing VM {vm_name}: {e}")
+            return False
+
+    async def resume_vm(self, vm_name: str) -> bool:
+        try:
+            result = await self.execute_command(f"virsh resume {vm_name}")
+            return result.exit_status == 0 and "resumed" in result.stdout.lower()
+        except Exception as e:
+            _LOGGER.error(f"Error resuming VM {vm_name}: {e}")
+            return False
+
+    async def reboot_vm(self, vm_name: str) -> bool:
+        try:
+            result = await self.execute_command(f"virsh reboot {vm_name}")
+            return result.exit_status == 0 and "rebooted" in result.stdout.lower()
+        except Exception as e:
+            _LOGGER.error(f"Error rebooting VM {vm_name}: {e}")
+            return False
\ No newline at end of file
diff --git a/hacs.json b/hacs.json
new file mode 100644
index 0000000..8ce9314
--- /dev/null
+++ b/hacs.json
@@ -0,0 +1,7 @@
+{
+  "name": "UNRAID",
+  "render_readme": true,
+  "country": [],
+  "homeassistant": "2024.9.0",
+  "hacs": "1.34.0"
+}
\ No newline at end of file
diff --git a/info.md b/info.md
new file mode 100644
index 0000000..1b792a8
--- /dev/null
+++ b/info.md
@@ -0,0 +1,62 @@
+# Unraid Integration for Home Assistant
+
+This custom integration allows you to monitor and control your Unraid server from Home Assistant.
+
+## Features
+
+- Monitor CPU, RAM, boot device, cache, individual array disks, and overall array usage
+- Monitor a UPS connected to Unraid
+- Control Docker containers
+- Manage VMs
+- Execute shell commands
+- Manage user scripts
+
+## Installation
+
+1. Copy the `unraid` folder into your `custom_components` directory.
+2. Restart Home Assistant.
+3. Go to Settings > Devices & Services.
+4. Click the "+ ADD INTEGRATION" button.
+5. Search for "Unraid" and select it.
+6. Follow the configuration steps.
+
+## Configuration
+
+During setup, you'll need to provide:
+
+- Host: The IP address or hostname of your Unraid server
+- Username: Your Unraid username (usually 'root')
+- Password: Your Unraid password
+- Port: SSH port (usually 22)
+- Ping Interval: How often to check if the server is online (in seconds)
+- Update Interval: How often to update sensor data (in seconds)
+
+## Sensors
+
+- CPU Usage
+- RAM Usage
+- Array Usage
+- Individual Array Disks
+- Cache Usage
+- Boot Usage
+- UPS Status
+- Uptime
+
+## Switches
+
+- Docker Containers: Turn on/off Docker containers
+- VMs: Turn on/off Virtual Machines
+
+## Services
+
+- `unraid.execute_command`: Execute a shell command on the Unraid server
+- `unraid.execute_in_container`: Execute a command in a Docker container
+- `unraid.execute_user_script`: Execute a user script
+- `unraid.stop_user_script`: Stop a running user script
+
+## Examples
+
+### Execute a shell command
+
+```yaml
+service: unraid.execute_command
+data:
+  entry_id: YOUR_ENTRY_ID
+  command: "echo 'Hello from Home Assistant' > /boot/config/plugins/user.scripts/scripts/ha_test.sh"
+```
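+
+### Execute a user script
+
+A minimal example for the user-script service. The script name should match an entry under `/boot/config/plugins/user.scripts/scripts` on your server; `my_backup_script` below is only a placeholder:
+
+```yaml
+service: unraid.execute_user_script
+data:
+  entry_id: YOUR_ENTRY_ID
+  script_name: my_backup_script
+  background: true
+```
+
+### Execute a command in a Docker container
+
+A sketch of a container command; replace `my_container` with the name of one of your containers:
+
+```yaml
+service: unraid.execute_in_container
+data:
+  entry_id: YOUR_ENTRY_ID
+  container: my_container
+  command: "ls -la /config"
+  detached: false
+```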