Linux ip-148-66-134-25.ip.secureserver.net 3.10.0-1160.119.1.el7.tuxcare.els10.x86_64 #1 SMP Fri Oct 11 21:40:41 UTC 2024 x86_64
Apache
: 148.66.134.25 | : 3.144.100.252
66 Domain
8.0.30
amvm
www.github.com/MadExploits
Terminal
AUTO ROOT
Adminer
Backdoor Destroyer
Linux Exploit
Lock Shell
Lock File
Create User
CREATE RDP
PHP Mailer
BACKCONNECT
UNLOCK SHELL
HASH IDENTIFIER
CPANEL RESET
BLACK DEFEND!
README
+ Create Folder
+ Create File
/
usr /
lib /
fm-agent /
library /
[ HOME SHELL ]
Name
Size
Permission
Action
__pycache__
[ DIR ]
drwxr-xr-x
__init__.py
0
B
-rw-r--r--
agent.py
96.8
KB
-rw-r--r--
agent_exceptions.py
110
B
-rw-r--r--
agent_util.py
8.58
KB
-rw-r--r--
aggregator.py
14.89
KB
-rw-r--r--
anomaly.py
2.19
KB
-rw-r--r--
blacklister.py
809
B
-rw-r--r--
container_discovery.py
3.3
KB
-rw-r--r--
display.py
2.06
KB
-rw-r--r--
forticlient_helper.py
2.59
KB
-rw-r--r--
inspector.py
15.7
KB
-rw-r--r--
iperf3.py
2.12
KB
-rw-r--r--
log_matcher.py
4.27
KB
-rw-r--r--
maintenance.py
3.61
KB
-rw-r--r--
pickle_database.py
1.28
KB
-rw-r--r--
plugin_driver.py
4.78
KB
-rw-r--r--
plugin_manager.py
11.04
KB
-rw-r--r--
process_manager.py
851
B
-rw-r--r--
progress_printer.py
837
B
-rw-r--r--
result_queue.py
1.99
KB
-rw-r--r--
schedule.py
3.19
KB
-rw-r--r--
threshold.py
1.5
KB
-rw-r--r--
Delete
Unzip
Zip
${this.title}
Close
Code Editor : agent_util.py
""" This file is a library of some essential classes for our agent's use.""" import time import logging import os import itertools import subprocess import sys import signal import locale import socket try: import json except ImportError: import simplejson as json try: # Python 2.x import httplib except: import http.client as httplib if sys.version[0] == '3': from io import BytesIO as StringIO else: from StringIO import StringIO # Set the locale for LC numeric to prevent issue with weird languages like de_DE # and wrap that in a custom float function that converts int to str so atof # doesnt break. If we don't have an LC_ALL environment variable, set one to avoid # an exception from locale if "LC_ALL" not in os.environ: os.environ["LC_ALL"] = "C" locale.resetlocale(locale.LC_NUMERIC) def custom_float(value): try: return locale.atof(str(value)) except Exception: if type(__builtins__) == type({}): return __builtins__['float'](value) return __builtins__.float(value) float = custom_float LOCAL_CACHE_RESULTS = {} DEFAULT_CACHE_TIMEOUT = 60 # Needed to support older version so of Python def any(iterable): for element in iterable: if element: return True return False def all(iterable): for element in iterable: if not element: return False return True def total_seconds(timedelta): return (timedelta.microseconds + (timedelta.seconds + timedelta.days * 24 * 3600) * 10**6) / 10**6 # statuses for the individual resource textkeys SUPPORTED = 0 UNSUPPORTED = 1 MISCONFIGURED = 2 # these are additional paths that which() should search, that may not be on the # user's path. 
# these are additional paths that which() should search, that may not be on the
# user's path. some of these contain binaries that the plugins need to run,
# for example, apachectl on centos lives in /usr/sbin
ADDITIONAL_SEARCH_PATHS = ["/usr/local/sbin", "/usr/local/bin", "/usr/sbin", "/usr/bin", "/sbin", "/bin"]


def which(program, exc=False):
    """Locate an executable, like the shell's `which`.

    If `program` contains a directory component, only that exact path is
    checked. Otherwise every directory in $PATH plus ADDITIONAL_SEARCH_PATHS
    is searched in order. Returns the full path to the executable, or None
    when not found; raises Exception instead when exc=True.
    """
    def is_exe(fpath):
        # must exist AND carry the execute bit for this user
        return os.path.exists(fpath) and os.access(fpath, os.X_OK)

    fpath, fname = os.path.split(program)
    if fpath:
        if is_exe(program):
            return program
    else:
        if "PATH" not in os.environ:
            return None
        for path in itertools.chain(os.environ["PATH"].split(os.pathsep), ADDITIONAL_SEARCH_PATHS):
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file
    if exc:
        raise Exception("%r not found" % program)
    else:
        return None


def execute_command(cmd, cwd=None, shell=True, kill_tree=True, timeout=15, env=None, block=True, cache_timeout=None):
    """Run a command line call with a timeout after which it will be forcibly killed.

    Returns a (retcode, output) tuple; (-9, 'Timeout exceeded, process
    killed') on timeout; None when block=False (fire-and-forget). stderr is
    substituted when stdout is empty. When cache_timeout is set, results are
    memoized in LOCAL_CACHE_RESULTS for that many seconds, keyed on cmd.
    """
    # NOTE(review): when shell is False the command is split into a list,
    # but a list is unhashable, so combining shell=False with cache_timeout
    # would raise TypeError on the cache lookup — confirm callers avoid this.
    if shell is False:
        cmd = cmd.split()
    log = logging.getLogger("execute_command")
    if cache_timeout:
        # We search first for the cached result for that specific command.
        cached_result = LOCAL_CACHE_RESULTS.get(cmd)
        if cached_result and time.time() - cached_result.get('timestamp') < cache_timeout:
            log.debug('Retrieved information from the local cache timeout for %s' % cmd)
            return cached_result.get('retcode'), cached_result.get('output')
    p = None
    if 'darwin' == sys.platform.lower():
        # macOS path: use subprocess's own timeout support instead of SIGALRM
        p = subprocess.Popen(cmd, shell=shell, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
        if not block:
            return
        try:
            stdout, stderr = p.communicate(timeout=timeout)
        except subprocess.TimeoutExpired as te:
            pids = [p.pid]
            if kill_tree:
                pids.extend(get_process_children(p.pid))
            for pid in pids:
                # process might have died before getting to this line
                # so wrap to avoid OSError: no such process
                try:
                    os.kill(pid, signal.SIGKILL)
                except OSError:
                    pass
            return -9, 'Timeout exceeded, process killed'
    else:
        # Everywhere else: implement the timeout with SIGALRM, which also
        # works on Pythons whose communicate() lacks a timeout argument.
        class Alarm(Exception):
            pass

        def alarm_handler(signum, frame):
            raise Alarm

        # Kick off the command, and exit if we're not running in blocking mode waiting for a response
        p = subprocess.Popen(cmd, shell=shell, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
        if not block:
            return
        # a timeout of -1 or None means wait forever
        if timeout != -1 and timeout != None:
            signal.signal(signal.SIGALRM, alarm_handler)
            signal.alarm(timeout)
        try:
            stdout, stderr = p.communicate()
            if timeout != -1 and timeout != None:
                signal.alarm(0)  # cancel the pending alarm on success
        except Alarm:
            pids = [p.pid]
            if kill_tree:
                pids.extend(get_process_children(p.pid))
            for pid in pids:
                # process might have died before getting to this line
                # so wrap to avoid OSError: no such process
                try:
                    os.kill(pid, signal.SIGKILL)
                except OSError:
                    pass
            return -9, 'Timeout exceeded, process killed'
    retcode = p.returncode
    if not stdout:
        # fall back to stderr so the caller always gets some output to parse
        stdout = stderr
    output = stdout.decode("utf8")
    if cache_timeout:
        # Create the cache for this result so subsequent calls use it
        # instead of making the same calls.
        log.debug('Created cache for cmd %s' % cmd)
        LOCAL_CACHE_RESULTS[cmd] = {'retcode': retcode, 'output': output, 'timestamp': time.time()}
    log.debug("%s: %s %s" % (cmd, retcode, output))
    return (retcode, output)


def get_process_children(pid):
    """Helper method for killing off child processes when they timeout"""
    # Linux-style `ps`; lists the pids of the direct children of `pid`.
    p = subprocess.Popen('ps --no-headers -o pid --ppid %d' % pid, shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    # NOTE(review): the comprehension variable shadows the Popen object `p`
    return [int(p) for p in stdout.split()]


def get_container_ip(container):
    """Return the IPAddress of the first network in a container-inspect dict.

    Logs and re-raises on any lookup failure (missing keys, no networks).
    """
    try:
        networks = container["NetworkSettings"]["Networks"]
        # assumes the first attached network is the one we want -- TODO
        # confirm behavior for multi-network containers
        network = list(networks.values())[0]
        return network["IPAddress"]
    except Exception:
        # sys.exc_info keeps this compatible with both Python 2 and 3 syntax
        t, e = sys.exc_info()[:2]
        logging.exception(e)
        raise e


def json_loads(val, **kwargs):
    """json.loads that accepts bytes as well as str."""
    try:
        return json.loads(val, **kwargs)
    except TypeError:
        # Python 3.5 json module does not support bytes
        return json.loads(val.decode(), **kwargs)


# Subclass of HTTPConnection that allows connecting to a UNIX socket
# Adapted from uhttplib
class UnixHTTPConnection(httplib.HTTPConnection):
    def __init__(self, path, host='localhost', **kwargs):
        # Can't use super() as httplib.HTTPConnection is not new-style
        httplib.HTTPConnection.__init__(self, host, **kwargs)
        self.path = path  # filesystem path of the UNIX socket

    def connect(self):
        # Replace the TCP connect with an AF_UNIX one; the rest of
        # HTTPConnection works unchanged on top of this socket.
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(self.path)
        self.sock = sock


# XXX Move this into library!
# Base class for our plugins
class Plugin(object):
    # a unique textkey to identify this plugin
    textkey = "undefined"

    # One-line human-readable description of the plugin
    description = ""

    # class-level fallback logger; replaced per-instance in __init__
    log = logging

    def __init__(self, schedule):
        # schedule owns the shared cached_results dict used below
        self.schedule = schedule
        self.log = logging.getLogger("plugin %r" % self.textkey)

    # saves some check data in a temporary location in the db, for use by a
    # future call, for figuring things like N per second.
    def cache_result(self, textkey, option, value, replace=False):
        now = time.time()
        cache = self.schedule.cached_results
        tcache = cache.setdefault(textkey, {})
        if replace:
            # drop any previously stored samples for this option
            tcache[option] = []
        results = tcache.setdefault(option, [])
        results.append((now, value))
        # don't let our data structure get too big. this number is arbitrary.
        # we could use a collections.deque, but its maxlen property is not
        # supported before 2.5
        while len(results) > 1000:
            results.pop(0)

    # fetches some cached data from a previous check call.
    # arg is passed in because each call to check receives a different
    # arg (if that resource textkey has multiple args), so the cached results
    # are specific to that arg.
    def get_cache_results(self, textkey, option, num=1):
        now = time.time()
        cache = self.schedule.cached_results
        tcache = cache.setdefault(textkey, {})
        results = tcache.setdefault(option, [])
        ret = []
        # newest first; each entry is (age_in_seconds, value)
        for stored, result in results[-num:]:
            ret.append((now - stored, result))
        ret.reverse()
        return ret

    def get_metadata(self, config):
        # base implementation reports nothing; presumably overridden by
        # subclasses to describe per-resource support status
        return {}

    def check(self, textkey, data, config):
        # base no-op returning 0; subclasses override with the real check
        return 0
Close