mirror of
https://github.com/outscale/zabbix-super-vision.git
synced 2025-06-22 01:53:41 +02:00
New supervision
- using fastAPI - offline/online working - warning if ZABBIX API is too long - showing settings - showing last ack message - showing procedure - menu split by SU team
This commit is contained in:
43
utils/__init__.py
Normal file
43
utils/__init__.py
Normal file
@ -0,0 +1,43 @@
|
||||
import json
|
||||
import os
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Any, Dict
|
||||
|
||||
import aiofiles
|
||||
import aiofiles.os as async_os
|
||||
|
||||
|
||||
async def write_json_file(file_path: str, data: Any) -> None:
    """Serialize *data* to JSON and write it to *file_path* asynchronously.

    Creates the parent directory if it does not exist. A bare filename
    (no directory component) is also supported.

    Args:
        file_path: Destination path for the JSON document.
        data: Any ``json.dumps``-serializable object.
    """
    parent_dir = os.path.dirname(file_path)
    # makedirs("") raises FileNotFoundError, so only create a directory
    # when the path actually contains one.
    if parent_dir:
        await async_os.makedirs(parent_dir, exist_ok=True)
    json_data = json.dumps(data, indent=4)
    async with aiofiles.open(file_path, mode="w") as file:
        await file.write(json_data)
|
||||
|
||||
|
||||
async def read_json_file(filepath: str) -> Dict[str, Any]:
    """Read *filepath* asynchronously and return its parsed JSON content.

    Returns an empty dict when the file does not exist; any JSON parse
    error propagates to the caller.
    """
    try:
        async with aiofiles.open(filepath, "r") as file:
            raw = await file.read()
    except FileNotFoundError:
        return {}
    return json.loads(raw)
|
||||
|
||||
|
||||
def time_since_event(event_time: datetime) -> str:
    """Return a coarse human-readable elapsed time since *event_time*.

    Picks the largest non-zero unit among days, hours, minutes and
    seconds, e.g. ``"3 hour(s)"``. Uses the local naive clock, matching
    how event times are produced elsewhere in this package.
    """
    elapsed: timedelta = datetime.now() - event_time

    if elapsed.days > 0:
        return f"{elapsed.days} day(s)"

    remaining: int = elapsed.seconds
    full_hours: int = remaining // 3600
    if full_hours > 0:
        return f"{full_hours} hour(s)"

    full_minutes: int = (remaining % 3600) // 60
    if full_minutes > 0:
        return f"{full_minutes} minute(s)"

    return f"{remaining} second(s)"
|
47
utils/background_tasks.py
Normal file
47
utils/background_tasks.py
Normal file
@ -0,0 +1,47 @@
|
||||
import asyncio
|
||||
import socket
|
||||
from typing import Dict, Tuple
|
||||
|
||||
from schemas import ServerStatuses, ZabbixServer
|
||||
from settings import settings
|
||||
from utils import write_json_file
|
||||
from utils.log import logger
|
||||
|
||||
|
||||
def socket_connect(ip: str, port: int) -> int:
    """Attempt a TCP connection to (*ip*, *port*) with a 5-second timeout.

    Returns:
        0 on success, a non-zero errno value when the connection is
        refused/times out, or -1 when the socket layer itself raises
        (e.g. name-resolution failure).
    """
    try:
        # Context manager guarantees the descriptor is closed even when
        # settimeout/connect_ex raise (the original leaked it in that case).
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(5)
            return sock.connect_ex((ip, port))
    except Exception:
        return -1  # sentinel error code expected by callers
|
||||
|
||||
|
||||
async def check_server(server: ZabbixServer) -> Tuple[str, bool]:
    """Probe one Zabbix server's TCP port without blocking the event loop.

    Returns ``(ip, reachable)``; any socket-level exception is logged and
    reported as unreachable.
    """
    try:
        code = await asyncio.to_thread(socket_connect, server.ip, server.port)
    except Exception as e:
        logger.info(f"[ERR] - Connection Failed: {server.ip}, Error: {e}")
        return (server.ip, False)

    if code != 0:
        logger.info(f"[ERR] - Port {server.port} KO: {server.ip}")
        return (server.ip, False)

    logger.info(f"[INFO] - Port {server.port} OK: {server.ip}")
    return (server.ip, True)
|
||||
|
||||
|
||||
async def check_servers() -> None:
    """Background task: poll every configured Zabbix server once a minute.

    Probes all servers concurrently and persists the resulting
    reachability map to the JSON status file consumed by the UI.
    Runs forever; intended to be scheduled as an asyncio task.
    """
    while True:
        probes = [check_server(server) for server in settings.ZABBIX_SERVERS_CHECK]
        # check_server yields (ip, status) pairs, so the gathered results
        # feed straight into a dict.
        servers: Dict[str, bool] = dict(await asyncio.gather(*probes))
        server_statuses = ServerStatuses(servers)
        await write_json_file(
            f"{settings.DATA_DIR}/{settings.ZABBIX_SERVERS_JSON}", server_statuses.root
        )
        await asyncio.sleep(60)
|
27
utils/hostgroups.py
Normal file
27
utils/hostgroups.py
Normal file
@ -0,0 +1,27 @@
|
||||
import random
|
||||
from typing import List
|
||||
|
||||
from schemas import HostGroupRequest
|
||||
from schemas.zabbix_client import ZabbixClient
|
||||
from settings import settings
|
||||
|
||||
|
||||
def get_hostgroup_color(hostgroup: str) -> str:
    """Return the display colour for *hostgroup*, generating one if needed.

    Unknown groups get a random ``#RRGGBB`` colour which is memoised in
    ``settings.COLOR_TEAM`` so the group keeps the same colour afterwards.
    """
    color = settings.COLOR_TEAM.get(hostgroup)
    if color is None:
        # NOTE(review): the original appended a stray ";" to generated
        # colours only, making them inconsistent with configured values —
        # emit a plain hex colour instead.
        color = f"#{random.randint(0, 0xFFFFFF):06x}"
        settings.COLOR_TEAM[hostgroup] = color
    return color
|
||||
|
||||
|
||||
async def get_hostgroups(zabbix_client: ZabbixClient) -> List[str]:
    """Resolve the configured team host-group names against Zabbix.

    Issues one ``hostgroup.get`` search per configured team entry and
    returns every matching group name, in query order.
    """
    configured = [team for teams in settings.TEAMS.values() for team in teams]
    resolved: List[str] = []
    for name in configured:
        request = HostGroupRequest(search={"name": name})
        response = await zabbix_client.call(
            request=request.model_dump(), method="hostgroup.get"
        )
        # Collect every group name returned for this search term.
        resolved.extend(item["name"] for item in response["result"])
    return resolved
|
8
utils/log.py
Normal file
8
utils/log.py
Normal file
@ -0,0 +1,8 @@
|
||||
import logging

# Logger configuration: reuse aiohttp's access logger name so messages
# share its channel, and mirror everything at INFO level to stdio.
logger = logging.getLogger("aiohttp.access")
logger.setLevel(logging.INFO)

stdio_handler = logging.StreamHandler()
stdio_handler.setLevel(logging.INFO)
logger.addHandler(stdio_handler)
|
127
utils/problems.py
Normal file
127
utils/problems.py
Normal file
@ -0,0 +1,127 @@
|
||||
import time
|
||||
from datetime import datetime, timedelta
|
||||
from typing import List
|
||||
|
||||
from async_lru import alru_cache
|
||||
|
||||
from schemas.alerts import Event, Trigger
|
||||
from schemas.zabbix_client import ZabbixClient
|
||||
from settings import settings
|
||||
from utils import time_since_event
|
||||
from utils.hostgroups import get_hostgroup_color, get_hostgroups
|
||||
from utils.log import logger
|
||||
|
||||
|
||||
@alru_cache
async def get_problems(zabbix_client: ZabbixClient, ttl_hash=None) -> List[dict]:
    """Collect the current Zabbix problems for all configured teams.

    *ttl_hash* exists only as a cache-busting key for ``alru_cache``;
    callers pass a value that changes whenever the cache should expire.
    NOTE(review): the cache is unbounded, so ever-changing ttl_hash
    values grow it without limit — consider ``alru_cache(maxsize=...)``.

    Returns:
        A list of problem dicts as built by ``process_trigger_data``.
    """
    problems: List[dict] = []
    limit = settings.ZABBIX_API_LIMIT
    groups = [group.lower() for group in await get_hostgroups(zabbix_client)]
    team_list = [team for teams in settings.TEAMS.values() for team in teams]

    groupids = await get_group_ids(zabbix_client, team_list)
    trigger_data = await get_triggers(zabbix_client, limit, groupids)
    # model_validate is the pydantic v2 spelling; the file already uses
    # v2's model_dump elsewhere and parse_obj is deprecated under v2.
    triggers = [Trigger.model_validate(trigger) for trigger in trigger_data]

    # Fetch the latest event (with acknowledges) for every trigger.
    event_ids = [trigger.lastEvent.eventid for trigger in triggers]
    request_event = {
        "eventids": event_ids,
        "select_acknowledges": "extend",
        "select_related_users": "extend",
    }
    # The API returns raw dicts here (consumers index them by key),
    # not parsed Event models — the original annotation was misleading.
    events: List[dict] = (
        await zabbix_client.call(request=request_event, method="event.get")
    )["result"]

    for trigger in triggers:
        if trigger.hosts:
            # Match by host group membership...
            for group in trigger.groups:
                if int(group.groupid) in groupids:
                    problems += await process_trigger_data(
                        zabbix_client, trigger, events, group.name, groups
                    )

            # ...and additionally by team-valued tags.
            for tag in trigger.tags:
                if tag.value.lower() in groups:
                    problems += await process_trigger_data(
                        zabbix_client, trigger, events, tag.value, groups
                    )

    logger.info(f"[INFO] - {datetime.now()}: Refresh...")
    return problems
|
||||
|
||||
|
||||
async def get_group_ids(zabbix_client, team_list):
    """Map configured host-group names to their numeric Zabbix group ids.

    Performs one wildcard-enabled ``hostgroup.get`` lookup per name and
    concatenates every matching id, in lookup order.
    """
    groupids = []
    for hostgroup_name in team_list:
        lookup = {
            "output": "extend",
            "search": {"name": hostgroup_name},
            "searchWildcardsEnabled": 1,
        }
        reply = await zabbix_client.call(request=lookup, method="hostgroup.get")
        for group in reply["result"]:
            groupids.append(int(group["groupid"]))
    return groupids
|
||||
|
||||
|
||||
async def get_triggers(zabbix_client, limit, groupids):
    """Fetch active, unacknowledged triggers for the given host groups.

    Asks ``trigger.get`` for monitored problem triggers (value=1) at or
    above the configured severity, highest-priority/newest first,
    including hosts, groups, tags and each trigger's last event.

    Args:
        zabbix_client: Client exposing an async ``call(request, method)``.
        limit: Maximum number of triggers to return.
        groupids: Numeric host-group ids to restrict the query to.
    """
    request = {
        "limit": limit,
        "groupids": groupids,
        "monitored": 1,
        "maintenance": False,
        "active": 1,
        "min_severity": settings.SEVERITY,
        # "output" appeared twice in the original literal; duplicate dict
        # keys are silently collapsed, so a single entry is equivalent.
        "output": "extend",
        "expandData": 1,
        "selectHosts": "extend",
        "selectGroups": "extend",
        "expandDescription": 1,
        "only_true": 1,
        "skipDependent": 1,
        "withUnacknowledgedEvents": 1,
        "withLastEventUnacknowledged": 1,
        "selectTags": "extend",
        "filter": {"value": 1},
        "sortfield": ["priority", "lastchange"],
        "sortorder": ["DESC"],
        "selectLastEvent": "extend",
    }
    return (await zabbix_client.call(request=request, method="trigger.get"))["result"]
|
||||
|
||||
|
||||
async def process_trigger_data(
    zabbix_client: ZabbixClient,
    trigger: Trigger,
    events: List[dict],
    hostgroup_name: str,
    groups: List[str],
) -> List[dict]:
    """Build the display record for one trigger/host-group pairing.

    *zabbix_client* and *groups* are currently unused but kept for
    signature compatibility with callers. *events* holds the raw
    ``event.get`` dicts fetched for all triggers; the one matching this
    trigger's last event supplies the acknowledge author/message
    (both ``None`` when the event has no acknowledges).
    """
    color = get_hostgroup_color(hostgroup_name)
    # Reconstruct the event's datetime from the trigger's epoch lastchange.
    time_difference = int(time.time()) - int(trigger.lastchange)
    event_time = datetime.fromtimestamp(time.time()) - timedelta(
        seconds=time_difference
    )
    since = time_since_event(event_time)

    event_data = next(
        (event for event in events if event["eventid"] == trigger.lastEvent.eventid),
        None,
    )
    username = message = None
    if event_data:
        # The original indexed [0] unguarded and caught only IndexError;
        # a missing/None "acknowledges" key raised an uncaught TypeError.
        acknowledges = event_data.get("acknowledges") or []
        if acknowledges:
            username = acknowledges[0].get("alias")
            message = acknowledges[0].get("message")

    return [
        {
            "description": trigger.description,
            "host": trigger.hosts[0].host,
            "priority": trigger.priority,
            "triggerid": trigger.triggerid,
            "since": since,
            "hostgroup": hostgroup_name,
            "color": color,
            "lastchange": trigger.lastchange,
            "url": trigger.url,
            "acknowledge": {
                "username": username,
                "message": message,
            },
        }
    ]
|
Reference in New Issue
Block a user