Linux ip-148-66-134-25.ip.secureserver.net 3.10.0-1160.119.1.el7.tuxcare.els10.x86_64 #1 SMP Fri Oct 11 21:40:41 UTC 2024 x86_64
Apache
: 148.66.134.25 | : 3.139.72.152
66 Domain
8.0.30
amvm
www.github.com/MadExploits
Terminal
AUTO ROOT
Adminer
Backdoor Destroyer
Linux Exploit
Lock Shell
Lock File
Create User
CREATE RDP
PHP Mailer
BACKCONNECT
UNLOCK SHELL
HASH IDENTIFIER
CPANEL RESET
BLACK DEFEND!
README
+ Create Folder
+ Create File
/
usr /
lib /
fm-agent /
plugins /
[ HOME SHELL ]
Name
Size
Permission
Action
__pycache__
[ DIR ]
drwxr-xr-x
__init__.py
0
B
-rw-r--r--
apache.py
19.06
KB
-rw-r--r--
apache_kafka.py
12.9
KB
-rw-r--r--
apache_zookeeper.py
6.26
KB
-rw-r--r--
bandwidth.py
21.3
KB
-rw-r--r--
cassandra.py
9.21
KB
-rw-r--r--
cert.py
2.78
KB
-rw-r--r--
couch.py
9.5
KB
-rw-r--r--
cpu_usage.py
33
KB
-rw-r--r--
dem_plugin.py
6.08
KB
-rw-r--r--
disk.py
16.48
KB
-rw-r--r--
docker.py
38.41
KB
-rw-r--r--
elasticsearch.py
2.83
KB
-rw-r--r--
entropy.py
900
B
-rw-r--r--
exim.py
1.01
KB
-rw-r--r--
file_presence.py
5
KB
-rw-r--r--
haproxy.py
13.37
KB
-rw-r--r--
io_stats.py
13.41
KB
-rw-r--r--
jboss.py
13.46
KB
-rw-r--r--
jmx.py
8.02
KB
-rw-r--r--
linux_logs.py
3.4
KB
-rw-r--r--
lm_sensors.py
2.51
KB
-rw-r--r--
logstash_forwarder.py
1.58
KB
-rw-r--r--
memcache.py
5.99
KB
-rw-r--r--
memory_usage.py
26.11
KB
-rw-r--r--
mongo.py
15.96
KB
-rw-r--r--
mysql.py
19.74
KB
-rw-r--r--
nagios.py
5.36
KB
-rw-r--r--
nginx.py
11.96
KB
-rw-r--r--
nodejs.py
6.29
KB
-rw-r--r--
ntp.py
1.98
KB
-rw-r--r--
opcache.py
2.26
KB
-rw-r--r--
oracle.py
15.15
KB
-rw-r--r--
package_upgrade.py
8.08
KB
-rw-r--r--
phpfpm.py
5.51
KB
-rw-r--r--
ping.py
2.45
KB
-rw-r--r--
postfix.py
1.98
KB
-rw-r--r--
postgresql.py
19.13
KB
-rw-r--r--
process.py
16.32
KB
-rw-r--r--
rabbitmq.py
19.33
KB
-rw-r--r--
redis.py
11.19
KB
-rw-r--r--
sendmail.py
2.39
KB
-rw-r--r--
sysctl.py
1.46
KB
-rw-r--r--
tcp.py
6.26
KB
-rw-r--r--
template.py
3.28
KB
-rw-r--r--
tomcat.py
6.79
KB
-rw-r--r--
tomcat_jmx.py
15.82
KB
-rw-r--r--
unbound_dns.py
4.54
KB
-rw-r--r--
uptime.py
3.46
KB
-rw-r--r--
users.py
1.09
KB
-rw-r--r--
uwsgi.py
4.57
KB
-rw-r--r--
varnish.py
4.79
KB
-rw-r--r--
weblogic.py
13.38
KB
-rw-r--r--
weblogic12c.py
18.75
KB
-rw-r--r--
Delete
Unzip
Zip
${this.title}
Close
Code Editor : disk.py
"""Disk usage plugin: collects block/inode usage via df or findmnt.

Parses `df`/`findmnt` output into per-mountpoint tables and exposes
usage.percent_used, usage.kb_available, filesystem.mounted and the
inode.* metrics to the monitoring agent.
"""
import agent_util
import sys
import os
import platform
from agent_util import float  # project compat shim; intentionally shadows builtin float
import json

# Filesystem types treated as network-backed (reported via is_network flag).
NETWORK_FS = ['ncpfs', 'nfs', 'ntfs', 'smb', 'vfat', 'smb2', 'cifs', 'nfs4']

# Timeout after 10 seconds, so we don't get hung on remote filesystems
TIMEOUT_LIMIT = 10


def get_findmnt_cmd(extra_args=""):
    """Build the findmnt command line, wrapped in `timeout` when available."""
    timeout = ""
    if agent_util.which("timeout"):
        timeout = "timeout %s " % TIMEOUT_LIMIT
    return "%sfindmnt --fstab --df --bytes --raw --evaluate --all %s" % (timeout, extra_args)


def get_df_cmd(extra_arg=""):
    """Build the platform-appropriate `df` (block usage) command line."""
    timeout = ""
    if agent_util.which("timeout"):
        timeout = "timeout %s " % TIMEOUT_LIMIT
    df_cmd = 'df -PkT'
    if "vmware" in sys.platform:
        df_cmd = 'df -kT'
    elif 'sunos' in sys.platform:
        df_cmd = 'df -kt'
    elif "darwin" in sys.platform or "aix" in sys.platform or 'freebsd' in sys.platform:
        df_cmd = "df -Pk"
    return "%s%s %s" % (timeout, df_cmd, extra_arg)


def get_idf_cmd(extra_arg=""):
    """Build the platform-appropriate `df -i` (inode usage) command line."""
    timeout = ""
    if agent_util.which("timeout"):
        timeout = "timeout %s " % TIMEOUT_LIMIT
    idf_cmd = 'df -iPT'
    if 'sunos' in sys.platform or "vmware" in sys.platform:
        idf_cmd = 'df -iT'
    elif 'aix' == sys.platform:
        idf_cmd = 'df -ik'
    elif 'darwin' == sys.platform or 'freebsd' in sys.platform:
        idf_cmd = 'df -Pik'
    return "%s%s %s" % (timeout, idf_cmd, extra_arg)


class DiskDFParser:
    """Parses df/findmnt tabular output into per-mountpoint dictionaries."""

    def __init__(self, log, config):
        self.log = log
        # Pseudo/virtual filesystems skipped by default; config may override.
        self.device_ignore_list = ("tmpfs", "devtmpfs", "none", "proc", "swap",
                                   "devices", "cgroup", "/dev/loop")
        self.mountpoint_excludes = ()
        cfg_device_list = config.get("device_ignore_list", None)
        if cfg_device_list is not None:
            self.device_ignore_list = self.parse_ignore_list(cfg_device_list)
        if "aix" in sys.platform or "sunos" in sys.platform:
            self.device_ignore_list = self.device_ignore_list + ("/proc", "/swap", "/ahafs")
        if 'darwin' == sys.platform:
            # Xcode simulator volumes come and go; never report them.
            self.mountpoint_excludes = ('/Library/Developer/CoreSimulator/Volumes', )
        mpe = config.get('mountpoint_excludes', None)
        if mpe is not None:
            self.mountpoint_excludes = self.parse_ignore_list(mpe)

    def __str__(self):
        return "Disk df parser"

    def parse_ignore_list(self, device_list):
        """Normalize a config-supplied ignore list to a tuple of strings.

        Accepts an actual tuple, or a string of the form '("a", "b")'.
        Returns an empty tuple on any unrecognized/unparseable input.
        """
        try:
            dl_type = type(device_list)
            # BUGFIX: was `type(tuple) == dl_type`, which compares against the
            # metaclass `type`, so real tuples were never recognized.
            if tuple == dl_type:
                return device_list
            if type("") == dl_type:
                if '(' in device_list and ')' in device_list:
                    device_list_items = device_list.replace('(', '').replace(')', '').split(',')
                    items = [d.strip().strip('"') for d in device_list_items]
                    return tuple(items)
        except Exception:
            self.log.error('Error parsing device list {}'.format(device_list))
        return ()

    def parse_df_output(self, output):
        """Parse df-style output into {mountpoint: {header: value}}.

        The mount point may contain spaces, so everything from its column
        onward is rejoined into a single string.
        """
        outlines = output.splitlines()
        if not outlines:
            # BUGFIX: empty command output previously raised IndexError below.
            return {}
        headers = self.build_header_data(outlines[0])
        df_table = {}
        for df_line in outlines[1:]:
            df_line = df_line.strip().split()
            mount_point = None
            mount_point_idx = headers.get('mounted on', None)
            # BUGFIX: `if mount_point_idx:` treated a legitimate index of 0
            # as "missing"; compare against None explicitly.
            if mount_point_idx is not None:
                mount_point = ' '.join(df_line[mount_point_idx:])
            if not mount_point:
                self.log.warning('No mount point in {}'.format(df_line))
                continue
            df_table[mount_point] = {}
            for entry in headers.keys():
                val = df_line[headers[entry]]
                if 'mounted on' == entry:
                    val = mount_point
                df_table[mount_point][entry] = val
        return df_table

    def build_header_data(self, header_line):
        """Map normalized header names to their column index.

        'Mounted on' is two whitespace-separated tokens, so the trailing
        'on' is skipped and 'mounted' becomes the single key 'mounted on'.
        Inode-percent and available columns are normalized across platforms.
        """
        hdr_idx = 0
        headers = {}
        for hdr in header_line.split():
            #
            # For lines that end with 'Mounted on' - skip the last split
            #
            hdr = hdr.lower()
            if 'on' == hdr:
                continue
            if 'mounted' == hdr:
                hdr = 'mounted on'
            if hdr in ['iuse%', '%iused']:
                hdr = 'iuse_pct'
            elif hdr in ['available', 'avail']:
                hdr = 'available'
            headers[hdr] = hdr_idx
            hdr_idx += 1
        return headers

    def get_device_data(self, output, key_map, custom_network_fs):
        """Extract reportable devices and their available capacity.

        key_map maps logical names ('device', 'fs_type', 'mountpoint',
        'available') to the parsed table's header keys, since df and
        findmnt use different column names.

        Returns (devices, max_disk): a list of option dicts and a
        {resource_description: available} mapping.
        """
        df_table = self.parse_df_output(output)
        devices = []
        max_disk = {}
        for mountpoint_table in df_table.values():
            try:
                device_key = key_map.get('device')
                device = mountpoint_table[device_key]
                fs_type_key = key_map.get('fs_type')
                filesystem = mountpoint_table.get(fs_type_key, '')
                skip_device = False
                for test_device in self.device_ignore_list:
                    if device.startswith(test_device) or filesystem.startswith(test_device):
                        self.log.debug("Skipping metadata for device %s" % device)
                        skip_device = True
                        break
                if skip_device:
                    continue
                mounted_key = key_map.get('mountpoint')
                mounted = mountpoint_table.get(mounted_key, None)
                if not mounted:
                    continue
                skip_mp = False
                for mp in self.mountpoint_excludes:
                    if mounted.startswith(mp):
                        self.log.debug('Skipping mountpoint {}'.format(mounted))
                        skip_mp = True
                        break
                if skip_mp:
                    continue
                desc = "%s mounted at %s" % (device, mounted)
                devices.append(
                    {
                        "device": device,
                        "mountpoint": mounted,
                        "filesystem": filesystem,
                        "resource": desc,
                        "is_network": filesystem in NETWORK_FS or filesystem in custom_network_fs
                    }
                )
                available_key = key_map.get('available')
                available = mountpoint_table.get(available_key, None)
                if available is not None:
                    max_disk[desc] = available
            except Exception:
                # Best-effort: a malformed row must not abort the whole scan.
                self.log.error("Unable to parse df output")
                continue
        return devices, max_disk


class DiskUsagePlugin(agent_util.Plugin):
    """Agent plugin exposing disk block and inode usage metrics."""

    textkey = "disk"
    label = "Disk"
    darwin_fstype_excludes = 'nullfs,nodev,devfs,autofs'
    # adding min for disk usage
    min_capacity = 0
    # os.uname() reports 'AIX' as sysname there; normalize sys.platform so the
    # platform checks throughout this module behave.
    if "AIX" in os.uname():
        sys.platform = "aix"

    @classmethod
    def dump_disk_output(self, config, cmd, raw_output):
        """Log raw command output when debug mode is enabled in config."""
        if config.get("debug", False):
            self.log.debug('#####################################################')
            self.log.debug("Disk command '%s' output :" % cmd)
            self.log.debug(raw_output)
            self.log.debug('#####################################################')

    @classmethod
    def get_metadata(self, config):
        """Discover mounted filesystems and describe available metrics.

        Returns a dict of metric textkey -> schema (options, status, units).
        Inode metrics are marked unsupported on sunos/hp-ux/aix and removed
        entirely on vmware.
        """
        status = agent_util.SUPPORTED
        msg = None
        if not agent_util.which("df", exc=False):
            self.log.warning("df binary not found")
            status = agent_util.UNSUPPORTED
        if agent_util.SUPPORTED != status:
            return {}

        # See if there are custom DF flags specified in the config file
        extra_df_arg = self.gather_extra_df_arg(config=config)

        # See if the config file specifies to use findmnt to identify expected disks
        use_findmnt = config.get("use_findmnt") and agent_util.which("findmnt")
        extra_findmnt_arg = config.get("extra_findmnt_arg", "")
        custom_network_fs = config.get('network_fs', [])
        if custom_network_fs:
            custom_network_fs = custom_network_fs.split(',')

        # Logical-name -> header-key mapping; df and findmnt label columns
        # differently, so the parser is driven by this table.
        table_keys = {
            'device': 'filesystem',
            'fs_type': 'type',
            'mountpoint': 'mounted on',
            'available': 'available',
        }
        if use_findmnt:
            table_keys = {
                'device': 'source',
                'fs_type': 'fstype',
                'mountpoint': 'target',
                'available': 'avail',
            }
            block_query = get_findmnt_cmd(extra_findmnt_arg)
            inode_query = block_query
        else:
            block_query = get_df_cmd(extra_df_arg)
            inode_query = get_idf_cmd(extra_df_arg)

        parser = DiskDFParser(self.log, config)
        ret_code, block_result = agent_util.execute_command(
            block_query, cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT)
        if 0 != ret_code:
            devices = []
            max_disk = {}
            msg = 'Command exit status {}'.format(ret_code)
            self.log.error('{} exit status {}'.format(block_query, ret_code))
            status = agent_util.UNSUPPORTED
        else:
            self.dump_disk_output(config, block_query, block_result)
            devices, max_disk = parser.get_device_data(
                block_result, table_keys, custom_network_fs
            )

        inode_status = agent_util.SUPPORTED
        idevices = []
        imax_disk = {}
        inode_status_msg = None
        if 'sunos' in sys.platform or 'hp-ux' in sys.platform or 'aix' in sys.platform:
            inode_status = agent_util.UNSUPPORTED
            inode_status_msg = 'Unsupported on this platform'
        else:
            ret_code, inode_result = agent_util.execute_command(
                inode_query, cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT)
            if 0 != ret_code:
                inode_status_msg = 'Command exit status {}'.format(ret_code)
                self.log.error('{} exit status {}'.format(inode_query, ret_code))
                inode_status = agent_util.UNSUPPORTED
            else:
                if not use_findmnt:
                    # df -i reports free inodes under 'ifree'.
                    table_keys['available'] = 'ifree'
                self.dump_disk_output(config, inode_query, inode_result)
                idevices, imax_disk = parser.get_device_data(
                    inode_result, table_keys, custom_network_fs
                )

        options_schema = {
            'device': 'string',
            'mountpoint': 'string',
            'filesystem': 'string',
            'resource': 'string',
            'is_network': 'boolean'
        }
        data = {
            "usage.percent_used": {
                "label": "Percentage of disk used",
                "options": devices,
                "options_schema": options_schema,
                "status": status,
                "error_message": msg,
                "unit": "percent",
                "min_value": 0,
                "max_value": 100,
            },
            "usage.kb_available": {
                "label": "Disk space available",
                "options": devices,
                "options_schema": options_schema,
                "status": status,
                "error_message": msg,
                "unit": "kB",
                "min_value": 0,
                "max_value": max_disk,
            },
            "filesystem.mounted": {
                "label": "Filesystem mounted",
                "options": devices,
                "options_schema": options_schema,
                "status": status,
                "error_message": msg,
            },
            "inode.percent_used": {
                "label": "Inodes percent used",
                "options": idevices,
                "options_schema": options_schema,
                "status": inode_status,
                "error_message": inode_status_msg,
                "unit": "percent",
                "min_value": 0,
                "max_value": 100,
            },
            "inode.used": {
                "label": "Inode used",
                "options": idevices,
                "options_schema": options_schema,
                "status": inode_status,
                "error_message": inode_status_msg,
                "unit": "Inodes",
                "min_value": 0,
                "max_value": imax_disk,
            },
            "inode.available": {
                "label": "Inodes Available",
                "options": idevices,
                "options_schema": options_schema,
                "status": inode_status,
                "error_message": inode_status_msg,
                "unit": "Inodes",
                "min_value": 0,
                "max_value": imax_disk,
            },
        }
        # no inodes for vmware
        to_del = []
        if 'vmware' in sys.platform:
            for k in data.keys():
                if 'inode' in k:
                    to_del.append(k)
        for d in to_del:
            del data[d]
        return data

    def collect_vmware(self, textkey, mounted):
        """Collect a metric via `stat -f` on VMware hosts (no usable df)."""
        ret, output = agent_util.execute_command(
            "stat -f %s" % mounted, cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT)
        # make sure it's mounted first
        if ret != 0 and textkey != 'filesystem.mounted':
            self.log.error("Unable to find disk %s, is it mounted?!" % mounted)
            self.log.error(output)
            return None
        elif ret != 0 and textkey == 'filesystem.mounted':
            return 0
        # BUGFIX: stat succeeded, so the filesystem is mounted; previously this
        # fell through to metrics[textkey] and raised KeyError for this textkey.
        if textkey == 'filesystem.mounted':
            return 1
        block_size = 0
        metrics = {}
        for line in output.split('\n'):
            l = str(line).strip().lower()
            if l.startswith('file:') or l.startswith('id:'):
                continue
            elif l.startswith('block size:'):
                block_size = l.split()[-1]
            if l.startswith('blocks:'):
                try:
                    btext, ttext, total_size, ftext, free_size, atext, avail_size = l.split()
                except Exception:
                    self.log.error("Unable to parse disk output!")
                    self.log.error(output)
                    return None
                metrics['usage.percent_used'] = 100. - ((float(free_size) / float(total_size)) * 100)
                metrics['usage.kb_available'] = float(free_size) * float(block_size)
        return metrics[str(textkey)]

    @classmethod
    def gather_extra_df_arg(self, config):
        """Assemble extra df arguments; on macOS, exclude pseudo fstypes."""
        extra_df_arg = config.get("extra_df_arg", "")
        if 'darwin' in sys.platform:
            configKey = 'ignore_fstypes'
            ignores = self.darwin_fstype_excludes
            if config.get(configKey, None):
                ignores = '{},{}'.format(ignores, config.get(configKey))
            extra_df_arg = '{} -T no{}'.format(extra_df_arg, ignores)
        return extra_df_arg

    def check(self, textkey, dev_mount, config):
        """Return the current value for one metric on one mountpoint.

        dev_mount is the 'resource' description built in get_device_data
        ("<device> mounted at <mountpoint>"); tokens from index 3 onward
        reconstruct the (possibly space-containing) mountpoint.
        """
        dev_mount = dev_mount.split()
        mounted = ' '.join(dev_mount[3:])
        extra_df_arg = self.gather_extra_df_arg(config)
        if "vmware" in sys.platform:
            return self.collect_vmware(textkey, mounted)

        # inode.* textkeys are the only ones starting with 'i'.
        is_inode_query = False
        if textkey.startswith("i"):
            df_cmd = get_idf_cmd(extra_df_arg)
            is_inode_query = True
        else:
            df_cmd = get_df_cmd(extra_df_arg)
        rc, output = agent_util.execute_command(
            df_cmd, cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT)
        if 0 != rc:
            return None
        self.log.debug(u"%s output: %s" % (df_cmd, output))
        parser = DiskDFParser(self.log, config)
        df_data = parser.parse_df_output(output)
        mountpoint_data = df_data.get(mounted, None)
        if not mountpoint_data:
            self.log.error("Mountpoint %r not found" % mounted)
            if textkey == "filesystem.mounted":
                return False
            return None

        def convert_capacity_field(capacity):
            # df prints '-' for unavailable values; treat as 0.
            if capacity is None:
                return None
            if capacity == '-':
                return 0
            else:
                return int(capacity.rstrip('%'))

        if 'filesystem.mounted' == textkey:
            return True
        if textkey in ['usage.percent_used', "inode.percent_used"]:
            key = 'capacity'
            if is_inode_query:
                key = 'iuse_pct'
            return convert_capacity_field(mountpoint_data.get(key, None))
        key = None
        if "inode.used" == textkey:
            key = 'iused'
        elif "inode.available" == textkey:
            key = 'ifree'
        elif 'usage.kb_available' == textkey:
            key = 'available'
        if not key:
            return None
        mv = mountpoint_data.get(key, None)
        if mv is None:
            return None
        if '-' == mv:
            return 0
        return int(mv)
Close