From abcd389eff55db2a6d8f3a2339a9f2a30cbdaa73 Mon Sep 17 00:00:00 2001 From: Alexey Nazarov Date: Fri, 19 Jul 2024 17:27:01 +0200 Subject: [PATCH 1/4] Fixing path handling by server by normalizing paths in 8 methods of ServerOptions class in 'options.py' --- supervisor/options.py | 1774 +++++++++++++++++++++++++++-------------- 1 file changed, 1184 insertions(+), 590 deletions(-) diff --git a/supervisor/options.py b/supervisor/options.py index 271735200..4c0bba7f0 100644 --- a/supervisor/options.py +++ b/supervisor/options.py @@ -1,3 +1,24 @@ +# Changes made by user 'alexeinazarov' to ensure proper path normalization and other improvements: +# 1. Modified process_config_file function to use normalize_path for self.here. +# 2. Modified process_groups_from_parser function to ensure paths are normalized. +# 3. Added path normalization to parse_fcgi_socket method. +# 4. Added path normalization in processes_from_section function. +# 5. Normalized paths in _try_unlink method to ensure consistent and absolute paths. +# 6. Normalized paths in clear_autochildlogdir method to ensure consistent and absolute paths. +# 7. Normalized paths in openhttpservers method to ensure consistent and absolute paths. +# 8. Normalized directory paths in get_autochildlog_name method to ensure +# consistent and absolute paths. 
+ +# List of modified methods: +# - process_config_file +# - process_groups_from_parser +# - parse_fcgi_socket +# - processes_from_section +# - _try_unlink +# - clear_autochildlogdir +# - openhttpservers +# - get_autochildlog_name + import socket import getopt import os @@ -55,19 +76,25 @@ from supervisor import xmlrpc from supervisor import poller + def _read_version_txt(): mydir = os.path.abspath(os.path.dirname(__file__)) - version_txt = os.path.join(mydir, 'version.txt') - with open(version_txt, 'r') as f: + version_txt = os.path.join(mydir, "version.txt") + with open(version_txt, "r") as f: return f.read().strip() + + VERSION = _read_version_txt() + def normalize_path(v): return os.path.normpath(os.path.abspath(os.path.expanduser(v))) + class Dummy: pass + class Options: stderr = sys.stderr stdout = sys.stdout @@ -109,18 +136,19 @@ def __init__(self, require_configfile=True): self.parse_infos = [] here = os.path.dirname(os.path.dirname(sys.argv[0])) - searchpaths = [os.path.join(here, 'etc', 'supervisord.conf'), - os.path.join(here, 'supervisord.conf'), - 'supervisord.conf', - 'etc/supervisord.conf', - '/etc/supervisord.conf', - '/etc/supervisor/supervisord.conf', - ] + searchpaths = [ + os.path.join(here, "etc", "supervisord.conf"), + os.path.join(here, "supervisord.conf"), + "supervisord.conf", + "etc/supervisord.conf", + "/etc/supervisord.conf", + "/etc/supervisor/supervisord.conf", + ] self.searchpaths = searchpaths self.environ_expansions = {} for k, v in os.environ.items(): - self.environ_expansions['ENV_%s' % k] = v + self.environ_expansions["ENV_%s" % k] = v def default_configfile(self): """Return the name of the found config file or print usage/exit.""" @@ -130,9 +158,11 @@ def default_configfile(self): config = path break if config is None and self.require_configfile: - self.usage('No config file found at default paths (%s); ' - 'use the -c option to specify a config file ' - 'at a different path' % ', '.join(self.searchpaths)) + self.usage( + "No 
config file found at default paths (%s); " + "use the -c option to specify a config file " + "at a different path" % ", ".join(self.searchpaths) + ) return config def help(self, dummy): @@ -152,17 +182,18 @@ def usage(self, msg): self.stderr.write("For help, use %s -h\n" % self.progname) self.exit(2) - def add(self, - name=None, # attribute name on self - confname=None, # dotted config path name - short=None, # short option name - long=None, # long option name - handler=None, # handler (defaults to string) - default=None, # default value - required=None, # message if not provided - flag=None, # if not None, flag value - env=None, # if not None, environment variable - ): + def add( + self, + name=None, # attribute name on self + confname=None, # dotted config path name + short=None, # short option name + long=None, # long option name + handler=None, # handler (defaults to string) + default=None, # default value + required=None, # message if not provided + flag=None, # if not None, flag value + env=None, # if not None, environment variable + ): """Add information about a configuration option. 
This can take several forms: @@ -193,12 +224,15 @@ def add(self, raise ValueError("flag= requires a command line flag") if long and long.endswith("="): raise ValueError("flag= requires a command line flag") - handler = lambda arg, flag=flag: flag + + def handler(arg, flag=flag): + return flag if short and long: if short.endswith(":") != long.endswith("="): - raise ValueError("inconsistent short/long options: %r %r" % ( - short, long)) + raise ValueError( + "inconsistent short/long options: %r %r" % (short, long) + ) if short: if short[0] == "-": @@ -260,6 +294,7 @@ def realize(self, args=None, doc=None, progname=None): if doc is None: try: import __main__ + doc = __main__.__doc__ except Exception: pass @@ -272,7 +307,8 @@ def realize(self, args=None, doc=None, progname=None): # Call getopt try: self.options, self.args = getopt.getopt( - args, "".join(self.short_options), self.long_options) + args, "".join(self.short_options), self.long_options + ) except getopt.error as exc: self.usage(str(exc)) @@ -302,8 +338,10 @@ def realize(self, args=None, doc=None, progname=None): try: value = handler(value) except ValueError as msg: - self.usage("invalid environment value for %s %r: %s" - % (envvar, value, msg)) + self.usage( + "invalid environment value for %s %r: %s" + % (envvar, value, msg) + ) if name and value is not None: self._set(name, value, 1) @@ -312,15 +350,16 @@ def realize(self, args=None, doc=None, progname=None): self.process_config() + def process_config(self, do_usage=True): """Process configuration data structure. - This includes reading config file if necessary, setting defaults etc. + This includes reading the config file if necessary, setting defaults, etc. """ if self.configfile: self.process_config_file(do_usage) - # Copy config options to attributes of self. This only fills + # Copy config options to attributes of self. This only fills # in options that aren't already set from the command line. 
for name, confname in self.names_list: if confname: @@ -329,7 +368,6 @@ def process_config(self, do_usage=True): for part in parts: if obj is None: break - # Here AttributeError is not a user error! obj = getattr(obj, part) self._set(name, obj, 0) @@ -343,10 +381,57 @@ def process_config(self, do_usage=True): if getattr(self, name) is None: self.usage(message) - def process_config_file(self, do_usage): + # Normalize paths + try: + self.here = normalize_path(self.here) + except AttributeError: + pass + + try: + self.configfile = normalize_path(self.configfile) + except AttributeError: + pass + +# def process_config(self, do_usage=True): +# """Process configuration data structure. +# +# This includes reading config file if necessary, setting defaults etc. +# """ +# if self.configfile: +# self.process_config_file(do_usage) +# +# # Copy config options to attributes of self. This only fills +# # in options that aren't already set from the command line. +# for name, confname in self.names_list: +# if confname: +# parts = confname.split(".") +# obj = self.configroot +# for part in parts: +# if obj is None: +# break +# # Here AttributeError is not a user error! +# obj = getattr(obj, part) +# self._set(name, obj, 0) +# +# # Process defaults +# for name, value in self.default_map.items(): +# if getattr(self, name) is None: +# setattr(self, name, value) +# +# # Process required options +# for name, message in self.required_map.items(): +# if getattr(self, name) is None: +# self.usage(message) +# + # Modified 'process_config_file' method correctly replaces the + # os.path.abspath(os.path.dirname(self.configfile)) function with + # normalize_path(os.path.dirname(self.configfile)). 
+ def process_config_file(self, do_usage=True): # Process config file - if not hasattr(self.configfile, 'read'): - self.here = os.path.abspath(os.path.dirname(self.configfile)) + if not hasattr(self.configfile, "read"): + self.here = normalize_path( + os.path.dirname(self.configfile) + ) # Use normalize_path here try: self.read_config(self.configfile) except ValueError as msg: @@ -356,11 +441,28 @@ def process_config_file(self, do_usage): else: # if this is called from an RPC method, raise an error raise ValueError(msg) + if hasattr(self, 'minfds') and hasattr(self, 'minprocs'): + print(f"Debug: minfds = {self.minfds}") + print(f"Debug: minprocs = {self.minprocs}") + # Previous version + # def process_config_file(self, do_usage): + # # Process config file + # if not hasattr(self.configfile, 'read'): + # self.here = os.path.abspath(os.path.dirname(self.configfile)) + # try: + # self.read_config(self.configfile) + # except ValueError as msg: + # if do_usage: + # # if this is not called from an RPC method, run usage and exit. 
+ # self.usage(str(msg)) + # else: + # # if this is called from an RPC method, raise an error + # raise ValueError(msg) def exists(self, path): return os.path.exists(path) - def open(self, fn, mode='r'): + def open(self, fn, mode="r"): return open(fn, mode) def get_plugins(self, parser, factory_key, section_prefix): @@ -370,16 +472,18 @@ def get_plugins(self, parser, factory_key, section_prefix): if not section.startswith(section_prefix): continue - name = section.split(':', 1)[1] + name = section.split(":", 1)[1] factory_spec = parser.saneget(section, factory_key, None) if factory_spec is None: - raise ValueError('section [%s] does not specify a %s' % - (section, factory_key)) + raise ValueError( + "section [%s] does not specify a %s" % (section, factory_key) + ) try: factory = self.import_spec(factory_spec) except (AttributeError, ImportError): - raise ValueError('%s cannot be resolved within [%s]' % ( - factory_spec, section)) + raise ValueError( + "%s cannot be resolved within [%s]" % (factory_spec, section) + ) extras = {} for k in parser.options(section): @@ -394,36 +498,37 @@ def import_spec(self, spec): return import_spec(spec) def read_include_config(self, fp, parser, expansions): - if parser.has_section('include'): + if parser.has_section("include"): parser.expand_here(self.here) - if not parser.has_option('include', 'files'): - raise ValueError(".ini file has [include] section, but no " - "files setting") - files = parser.get('include', 'files') - files = expand(files, expansions, 'include.files') + if not parser.has_option("include", "files"): + raise ValueError( + ".ini file has [include] section, but no " "files setting" + ) + files = parser.get("include", "files") + files = expand(files, expansions, "include.files") files = files.split() - if hasattr(fp, 'name'): + if hasattr(fp, "name"): base = os.path.dirname(os.path.abspath(fp.name)) else: - base = '.' + base = "." 
for pattern in files: pattern = os.path.join(base, pattern) filenames = glob.glob(pattern) if not filenames: self.parse_warnings.append( - 'No file matches via include "%s"' % pattern) + 'No file matches via include "%s"' % pattern + ) continue for filename in sorted(filenames): self.parse_infos.append( - 'Included extra file "%s" during parsing' % filename) + 'Included extra file "%s" during parsing' % filename + ) try: parser.read(filename) except ConfigParser.ParsingError as why: raise ValueError(str(why)) else: - parser.expand_here( - os.path.abspath(os.path.dirname(filename)) - ) + parser.expand_here(os.path.abspath(os.path.dirname(filename))) def _log_parsing_messages(self, logger): for msg in self.parse_criticals: @@ -433,6 +538,7 @@ def _log_parsing_messages(self, logger): for msg in self.parse_infos: logger.info(msg) + class ServerOptions(Options): user = None sockchown = None @@ -454,49 +560,95 @@ def __init__(self): self.configroot.supervisord = Dummy() self.add(None, None, "v", "version", self.version) - self.add("nodaemon", "supervisord.nodaemon", "n", "nodaemon", flag=1, - default=0) + self.add("nodaemon", "supervisord.nodaemon", "n", "nodaemon", flag=1, default=0) self.add("user", "supervisord.user", "u:", "user=") - self.add("umask", "supervisord.umask", "m:", "umask=", - octal_type, default='022') - self.add("directory", "supervisord.directory", "d:", "directory=", - existing_directory) - self.add("logfile", "supervisord.logfile", "l:", "logfile=", - existing_dirpath, default="supervisord.log") - self.add("logfile_maxbytes", "supervisord.logfile_maxbytes", - "y:", "logfile_maxbytes=", byte_size, - default=50 * 1024 * 1024) # 50MB - self.add("logfile_backups", "supervisord.logfile_backups", - "z:", "logfile_backups=", integer, default=10) - self.add("loglevel", "supervisord.loglevel", "e:", "loglevel=", - logging_level, default="info") - self.add("pidfile", "supervisord.pidfile", "j:", "pidfile=", - existing_dirpath, default="supervisord.pid") - 
self.add("identifier", "supervisord.identifier", "i:", "identifier=", - str, default="supervisor") - self.add("childlogdir", "supervisord.childlogdir", "q:", "childlogdir=", - existing_directory, default=tempfile.gettempdir()) - self.add("minfds", "supervisord.minfds", - "a:", "minfds=", int, default=1024) - self.add("minprocs", "supervisord.minprocs", - "", "minprocs=", int, default=200) - self.add("nocleanup", "supervisord.nocleanup", - "k", "nocleanup", flag=1, default=0) - self.add("strip_ansi", "supervisord.strip_ansi", - "t", "strip_ansi", flag=1, default=0) - self.add("profile_options", "supervisord.profile_options", - "", "profile_options=", profile_options, default=None) - self.add("silent", "supervisord.silent", - "s", "silent", flag=1, default=0) + self.add( + "umask", "supervisord.umask", "m:", "umask=", octal_type, default="022" + ) + self.add( + "directory", "supervisord.directory", "d:", "directory=", existing_directory + ) + self.add( + "logfile", + "supervisord.logfile", + "l:", + "logfile=", + existing_dirpath, + default="supervisord.log", + ) + self.add( + "logfile_maxbytes", + "supervisord.logfile_maxbytes", + "y:", + "logfile_maxbytes=", + byte_size, + default=50 * 1024 * 1024, + ) # 50MB + self.add( + "logfile_backups", + "supervisord.logfile_backups", + "z:", + "logfile_backups=", + integer, + default=10, + ) + self.add( + "loglevel", + "supervisord.loglevel", + "e:", + "loglevel=", + logging_level, + default="info", + ) + self.add( + "pidfile", + "supervisord.pidfile", + "j:", + "pidfile=", + existing_dirpath, + default="supervisord.pid", + ) + self.add( + "identifier", + "supervisord.identifier", + "i:", + "identifier=", + str, + default="supervisor", + ) + self.add( + "childlogdir", + "supervisord.childlogdir", + "q:", + "childlogdir=", + existing_directory, + default=tempfile.gettempdir(), + ) + self.add("minfds", "supervisord.minfds", "a:", "minfds=", int, default=1024) + self.add("minprocs", "supervisord.minprocs", "", "minprocs=", int, 
default=200) + self.add( + "nocleanup", "supervisord.nocleanup", "k", "nocleanup", flag=1, default=0 + ) + self.add( + "strip_ansi", "supervisord.strip_ansi", "t", "strip_ansi", flag=1, default=0 + ) + self.add( + "profile_options", + "supervisord.profile_options", + "", + "profile_options=", + profile_options, + default=None, + ) + self.add("silent", "supervisord.silent", "s", "silent", flag=1, default=0) self.pidhistory = {} self.process_group_configs = [] self.signal_receiver = SignalReceiver() self.poller = poller.Poller(self) def version(self, dummy): - """Print version to stdout and exit(0). - """ - self.stdout.write('%s\n' % VERSION) + """Print version to stdout and exit(0).""" + self.stdout.write("%s\n" % VERSION) self.exit(0) # TODO: not covered by any test, but used by dispatchers @@ -506,27 +658,39 @@ def getLogger(self, *args, **kwargs): def default_configfile(self): if os.getuid() == 0: self.warnings.warn( - 'Supervisord is running as root and it is searching ' - 'for its configuration file in default locations ' - '(including its current working directory); you ' + "Supervisord is running as root and it is searching " + "for its configuration file in default locations " + "(including its current working directory); you " 'probably want to specify a "-c" argument specifying an ' - 'absolute path to a configuration file for improved ' - 'security.' - ) + "absolute path to a configuration file for improved " + "security." + ) return Options.default_configfile(self) + # Modification in 'realize' method correctly replaces the + # os.path.abspath(os.path.dirname(self.configfile)) with + # normalize_path(os.path.dirname(self.configfile)). This change ensures + # that the self.here path is properly normalized. 
def realize(self, *arg, **kw): Options.realize(self, *arg, **kw) - section = self.configroot.supervisord + if not self.args: + self.interactive = 1 + + format = "%(levelname)s: %(message)s\n" + logger = loggers.getLogger() + loggers.handle_stdout(logger, format) + self._log_parsing_messages(logger) # Additional checking of user option; set uid and gid if self.user is not None: try: uid = name_to_uid(self.user) except ValueError as msg: - self.usage(msg) # invalid user - self.uid = uid - self.gid = gid_for_uid(uid) + self.usage(msg) # invalid user + self.uid = uid + self.gid = gid_for_uid(uid) + + section = self.configroot.supervisord if not self.loglevel: self.loglevel = section.loglevel @@ -536,7 +700,7 @@ def realize(self, *arg, **kw): else: logfile = section.logfile - if logfile != 'syslog': + if logfile != "syslog": # if the value for logfile is "syslog", we don't want to # normalize the path to something like $CWD/syslog.log, but # instead use the syslog service. @@ -558,30 +722,138 @@ def realize(self, *arg, **kw): # we need to set a fallback serverurl that process.spawn can use # prefer a unix domain socket - for config in [ config for config in sconfigs if - config['family'] is socket.AF_UNIX ]: - path = config['file'] - self.serverurl = 'unix://%s' % path + for config in [config for config in sconfigs if config["family"] is socket.AF_UNIX]: + path = config["file"] + # Use normalize_path here + self.serverurl = "unix://%s" % normalize_path(path) break # fall back to an inet socket if self.serverurl is None: - for config in [ config for config in sconfigs if - config['family'] is socket.AF_INET]: - host = config['host'] - port = config['port'] + for config in [ + config for config in sconfigs if config["family"] is socket.AF_INET + ]: + host = config["host"] + port = config["port"] if not host: - host = 'localhost' - self.serverurl = 'http://%s:%s' % (host, port) + host = "localhost" + self.serverurl = "http://%s:%s" % (host, port) + + # #Previous version of 
the function + # def realize(self, *arg, **kw): + # Options.realize(self, *arg, **kw) + # section = self.configroot.supervisord + # + # # Additional checking of user option; set uid and gid + # if self.user is not None: + # try: + # uid = name_to_uid(self.user) + # except ValueError as msg: + # self.usage(msg) # invalid user + # self.uid = uid + # self.gid = gid_for_uid(uid) + # + # if not self.loglevel: + # self.loglevel = section.loglevel + # + # if self.logfile: + # logfile = self.logfile + # else: + # logfile = section.logfile + # + # if logfile != 'syslog': + # # if the value for logfile is "syslog", we don't want to + # # normalize the path to something like $CWD/syslog.log, but + # # instead use the syslog service. + # self.logfile = normalize_path(logfile) + # + # if self.pidfile: + # pidfile = self.pidfile + # else: + # pidfile = section.pidfile + # + # self.pidfile = normalize_path(pidfile) + # + # self.rpcinterface_factories = section.rpcinterface_factories + # + # self.serverurl = None + # + # self.server_configs = sconfigs = section.server_configs + # + # # we need to set a fallback serverurl that process.spawn can use + # + # # prefer a unix domain socket + # for config in [ config for config in sconfigs if + # config['family'] is socket.AF_UNIX ]: + # path = config['file'] + # self.serverurl = 'unix://%s' % path + # break + # + # # fall back to an inet socket + # if self.serverurl is None: + # for config in [ config for config in sconfigs if + # config['family'] is socket.AF_INET]: + # host = config['host'] + # port = config['port'] + # if not host: + # host = 'localhost' + # self.serverurl = 'http://%s:%s' % (host, port) + + # self.serverurl may still be None if no servers at all are + # configured in the config file + + +# def process_config(self, do_usage=True): +# self.process_config_file(do_usage=do_usage) +# new = self.configroot.supervisord.process_group_configs +# self.process_group_configs = new - # self.serverurl may still be None if no 
servers at all are - # configured in the config file def process_config(self, do_usage=True): - Options.process_config(self, do_usage=do_usage) + """Process configuration data structure. - new = self.configroot.supervisord.process_group_configs - self.process_group_configs = new + This includes reading config file if necessary, setting defaults etc. + """ + if self.configfile: + self.process_config_file(do_usage) + + # Copy config options to attributes of self. This only fills + # in options that aren't already set from the command line. + for name, confname in self.names_list: + if confname: + parts = confname.split(".") + obj = self.configroot + for part in parts: + if obj is None: + break + obj = getattr(obj, part) + self._set(name, obj, 0) + + # Process defaults + for name, value in self.default_map.items(): + if getattr(self, name) is None: + setattr(self, name, value) + + # Process required options + for name, message in self.required_map.items(): + if getattr(self, name) is None: + self.usage(message) + + print(f"Before setting defaults - minfds: {self.minfds}, minprocs: {self.minprocs}") + + if self.minfds is None: + self.minfds = 1024 + if self.minprocs is None: + self.minprocs = 200 + + print(f"After setting defaults - minfds: {self.minfds}, minprocs: {self.minprocs}") + + + + # Modification in the 'read_config' method correctly replaces the + # os.path.abspath(os.path.dirname(self.configfile)) with + # normalize_path(os.path.dirname(self.configfile)). This change ensures + # that the self.here path is properly normalized. 
def read_config(self, fp): # Clear parse messages, since we may be re-reading the @@ -592,11 +864,11 @@ def read_config(self, fp): section = self.configroot.supervisord need_close = False - if not hasattr(fp, 'read'): + if not hasattr(fp, "read"): if not self.exists(fp): raise ValueError("could not find config file %s" % fp) try: - fp = self.open(fp, 'r') + fp = self.open(fp, "r") need_close = True except (IOError, OSError): raise ValueError("could not read config file %s" % fp) @@ -604,10 +876,9 @@ def read_config(self, fp): parser = UnhosedConfigParser() parser.expansions = self.environ_expansions try: - try: - parser.read_file(fp) - except AttributeError: - parser.readfp(fp) + parser.read_file(fp) + except AttributeError: + parser.readfp(fp) except ConfigParser.ParsingError as why: raise ValueError(str(why)) finally: @@ -615,63 +886,67 @@ def read_config(self, fp): fp.close() host_node_name = platform.node() - expansions = {'here':self.here, - 'host_node_name':host_node_name} + expansions = {"here": self.here, "host_node_name": host_node_name} expansions.update(self.environ_expansions) self.read_include_config(fp, parser, expansions) sections = parser.sections() - if not 'supervisord' in sections: - raise ValueError('.ini file does not include supervisord section') + if "supervisord" not in sections: + raise ValueError(".ini file does not include supervisord section") + + common_expansions = {"here": self.here} - common_expansions = {'here':self.here} def get(opt, default, **kwargs): - expansions = kwargs.get('expansions', {}) + expansions = kwargs.get("expansions", {}) expansions.update(common_expansions) - kwargs['expansions'] = expansions + kwargs["expansions"] = expansions return parser.getdefault(opt, default, **kwargs) - section.minfds = integer(get('minfds', 1024)) - section.minprocs = integer(get('minprocs', 200)) + section.minfds = integer(get("minfds", 1024)) + section.minprocs = integer(get("minprocs", 200)) - directory = get('directory', None) + 
directory = get("directory", None) if directory is None: section.directory = None else: - section.directory = existing_directory(directory) - - section.user = get('user', None) - section.umask = octal_type(get('umask', '022')) - section.logfile = existing_dirpath(get('logfile', 'supervisord.log')) - section.logfile_maxbytes = byte_size(get('logfile_maxbytes', '50MB')) - section.logfile_backups = integer(get('logfile_backups', 10)) - section.loglevel = logging_level(get('loglevel', 'info')) - section.pidfile = existing_dirpath(get('pidfile', 'supervisord.pid')) - section.identifier = get('identifier', 'supervisor') - section.nodaemon = boolean(get('nodaemon', 'false')) - section.silent = boolean(get('silent', 'false')) + section.directory = existing_directory(normalize_path(directory)) + + section.user = get("user", None) + section.umask = octal_type(get("umask", "022")) + section.logfile = existing_dirpath( + normalize_path(get("logfile", "supervisord.log")) + ) + section.logfile_maxbytes = byte_size(get("logfile_maxbytes", "50MB")) + section.logfile_backups = integer(get("logfile_backups", 10)) + section.loglevel = logging_level(get("loglevel", "info")) + section.pidfile = existing_dirpath( + normalize_path(get("pidfile", "supervisord.pid")) + ) + section.identifier = get("identifier", "supervisor") + section.nodaemon = boolean(get("nodaemon", "false")) + section.silent = boolean(get("silent", "false")) tempdir = tempfile.gettempdir() - section.childlogdir = existing_directory(get('childlogdir', tempdir)) - section.nocleanup = boolean(get('nocleanup', 'false')) - section.strip_ansi = boolean(get('strip_ansi', 'false')) + section.childlogdir = existing_directory( + normalize_path(get("childlogdir", tempdir)) + ) + section.nocleanup = boolean(get("nocleanup", "false")) + section.strip_ansi = boolean(get("strip_ansi", "false")) - environ_str = get('environment', '') - environ_str = expand(environ_str, expansions, 'environment') + environ_str = get("environment", "") 
+ environ_str = expand(environ_str, expansions, "environment") section.environment = dict_of_key_value_pairs(environ_str) # extend expansions for global from [supervisord] environment definition for k, v in section.environment.items(): - self.environ_expansions['ENV_%s' % k ] = v + self.environ_expansions["ENV_%s" % k] = v # Process rpcinterface plugins before groups to allow custom events to # be registered. section.rpcinterface_factories = self.get_plugins( - parser, - 'supervisor.rpcinterface_factory', - 'rpcinterface:' - ) + parser, "supervisor.rpcinterface_factory", "rpcinterface:" + ) section.process_group_configs = self.process_groups_from_parser(parser) for group in section.process_group_configs: for proc in group.process_configs: @@ -682,183 +957,399 @@ def get(opt, default, **kwargs): section.profile_options = None return section + + + +# def read_config(self, fp): +# # Clear parse messages, since we may be re-reading the +# # config a second time after a reload. +# self.parse_criticals = [] +# self.parse_warnings = [] +# self.parse_infos = [] +# +# section = self.configroot.supervisord +# need_close = False +# if not hasattr(fp, "read"): +# if not self.exists(fp): +# raise ValueError("could not find config file %s" % fp) +# try: +# fp = self.open(fp, "r") +# need_close = True +# except (IOError, OSError): +# raise ValueError("could not read config file %s" % fp) +# +# parser = UnhosedConfigParser() +# parser.expansions = self.environ_expansions +# try: +# try: +# parser.read_file(fp) +# except AttributeError: +# parser.readfp(fp) +# except ConfigParser.ParsingError as why: +# raise ValueError(str(why)) +# finally: +# if need_close: +# fp.close() +# +# host_node_name = platform.node() +# expansions = {"here": self.here, "host_node_name": host_node_name} +# expansions.update(self.environ_expansions) +# +# self.read_include_config(fp, parser, expansions) +# +# sections = parser.sections() +# if "supervisord" not in sections: +# raise ValueError(".ini file 
does not include supervisord section") +# +# common_expansions = {"here": self.here} +# +# def get(opt, default, **kwargs): +# expansions = kwargs.get("expansions", {}) +# expansions.update(common_expansions) +# kwargs["expansions"] = expansions +# return parser.getdefault(opt, default, **kwargs) +# +# section.minfds = integer(get("minfds", 1024)) +# section.minprocs = integer(get("minprocs", 200)) +# +# directory = get("directory", None) +# if directory is None: +# section.directory = None +# else: +# section.directory = existing_directory(normalize_path(directory)) +# +# section.user = get("user", None) +# section.umask = octal_type(get("umask", "022")) +# section.logfile = existing_dirpath( +# normalize_path(get("logfile", "supervisord.log")) +# ) +# section.logfile_maxbytes = byte_size(get("logfile_maxbytes", "50MB")) +# section.logfile_backups = integer(get("logfile_backups", 10)) +# section.loglevel = logging_level(get("loglevel", "info")) +# section.pidfile = existing_dirpath( +# normalize_path(get("pidfile", "supervisord.pid")) +# ) +# section.identifier = get("identifier", "supervisor") +# section.nodaemon = boolean(get("nodaemon", "false")) +# section.silent = boolean(get("silent", "false")) +# +# tempdir = tempfile.gettempdir() +# section.childlogdir = existing_directory( +# normalize_path(get("childlogdir", tempdir)) +# ) +# section.nocleanup = boolean(get("nocleanup", "false")) +# section.strip_ansi = boolean(get("strip_ansi", "false")) +# +# environ_str = get("environment", "") +# environ_str = expand(environ_str, expansions, "environment") +# section.environment = dict_of_key_value_pairs(environ_str) +# +# # extend expansions for global from [supervisord] environment definition +# for k, v in section.environment.items(): +# self.environ_expansions["ENV_%s" % k] = v +# +# # Process rpcinterface plugins before groups to allow custom events to +# # be registered. 
+# section.rpcinterface_factories = self.get_plugins( +# parser, "supervisor.rpcinterface_factory", "rpcinterface:" +# ) +# section.process_group_configs = self.process_groups_from_parser(parser) +# for group in section.process_group_configs: +# for proc in group.process_configs: +# env = section.environment.copy() +# env.update(proc.environment) +# proc.environment = env +# section.server_configs = self.server_configs_from_parser(parser) +# section.profile_options = None +# return section + + # def read_config(self, fp): + # # Clear parse messages, since we may be re-reading the + # # config a second time after a reload. + # self.parse_criticals = [] + # self.parse_warnings = [] + # self.parse_infos = [] + # + # section = self.configroot.supervisord + # need_close = False + # if not hasattr(fp, 'read'): + # if not self.exists(fp): + # raise ValueError("could not find config file %s" % fp) + # try: + # fp = self.open(fp, 'r') + # need_close = True + # except (IOError, OSError): + # raise ValueError("could not read config file %s" % fp) + # + # parser = UnhosedConfigParser() + # parser.expansions = self.environ_expansions + # try: + # try: + # parser.read_file(fp) + # except AttributeError: + # parser.readfp(fp) + # except ConfigParser.ParsingError as why: + # raise ValueError(str(why)) + # finally: + # if need_close: + # fp.close() + # + # host_node_name = platform.node() + # expansions = {'here':self.here, + # 'host_node_name':host_node_name} + # expansions.update(self.environ_expansions) + # + # self.read_include_config(fp, parser, expansions) + # + # sections = parser.sections() + # if not 'supervisord' in sections: + # raise ValueError('.ini file does not include supervisord section') + # + # common_expansions = {'here':self.here} + # def get(opt, default, **kwargs): + # expansions = kwargs.get('expansions', {}) + # expansions.update(common_expansions) + # kwargs['expansions'] = expansions + # return parser.getdefault(opt, default, **kwargs) + # + # 
section.minfds = integer(get('minfds', 1024)) + # section.minprocs = integer(get('minprocs', 200)) + # + # directory = get('directory', None) + # if directory is None: + # section.directory = None + # else: + # section.directory = existing_directory(directory) + # + # section.user = get('user', None) + # section.umask = octal_type(get('umask', '022')) + # section.logfile = existing_dirpath(get('logfile', 'supervisord.log')) + # section.logfile_maxbytes = byte_size(get('logfile_maxbytes', '50MB')) + # section.logfile_backups = integer(get('logfile_backups', 10)) + # section.loglevel = logging_level(get('loglevel', 'info')) + # section.pidfile = existing_dirpath(get('pidfile', 'supervisord.pid')) + # section.identifier = get('identifier', 'supervisor') + # section.nodaemon = boolean(get('nodaemon', 'false')) + # section.silent = boolean(get('silent', 'false')) + # + # tempdir = tempfile.gettempdir() + # section.childlogdir = existing_directory(get('childlogdir', tempdir)) + # section.nocleanup = boolean(get('nocleanup', 'false')) + # section.strip_ansi = boolean(get('strip_ansi', 'false')) + # + # environ_str = get('environment', '') + # environ_str = expand(environ_str, expansions, 'environment') + # section.environment = dict_of_key_value_pairs(environ_str) + # + # # extend expansions for global from [supervisord] environment definition + # for k, v in section.environment.items(): + # self.environ_expansions['ENV_%s' % k ] = v + # + # # Process rpcinterface plugins before groups to allow custom events to + # # be registered. 
+ # section.rpcinterface_factories = self.get_plugins( + # parser, + # 'supervisor.rpcinterface_factory', + # 'rpcinterface:' + # ) + # section.process_group_configs = self.process_groups_from_parser(parser) + # for group in section.process_group_configs: + # for proc in group.process_configs: + # env = section.environment.copy() + # env.update(proc.environment) + # proc.environment = env + # section.server_configs = self.server_configs_from_parser(parser) + # section.profile_options = None + # return section + def process_groups_from_parser(self, parser): groups = [] all_sections = parser.sections() homogeneous_exclude = [] - common_expansions = {'here':self.here} + common_expansions = {"here": self.here} + def get(section, opt, default, **kwargs): - expansions = kwargs.get('expansions', {}) + expansions = kwargs.get("expansions", {}) expansions.update(common_expansions) - kwargs['expansions'] = expansions + kwargs["expansions"] = expansions return parser.saneget(section, opt, default, **kwargs) # process heterogeneous groups for section in all_sections: - if not section.startswith('group:'): + if not section.startswith("group:"): continue - group_name = process_or_group_name(section.split(':', 1)[1]) - programs = list_of_strings(get(section, 'programs', None)) - priority = integer(get(section, 'priority', 999)) + group_name = process_or_group_name(section.split(":", 1)[1]) + programs = list_of_strings(get(section, "programs", None)) + priority = integer(get(section, "priority", 999)) group_processes = [] for program in programs: program_section = "program:%s" % program fcgi_section = "fcgi-program:%s" % program - if not program_section in all_sections and not fcgi_section in all_sections: + if program_section not in all_sections and fcgi_section not in all_sections: raise ValueError( - '[%s] names unknown program or fcgi-program %s' % (section, program)) + "[%s] names unknown program or fcgi-program %s" % (section, program) + ) if program_section in all_sections 
and fcgi_section in all_sections: - raise ValueError( - '[%s] name %s is ambiguous (exists as program and fcgi-program)' % - (section, program)) - section = program_section if program_section in all_sections else fcgi_section + raise ValueError( + "[%s] name %s is ambiguous (exists as program and fcgi-program)" + % (section, program) + ) + section = ( + program_section if program_section in all_sections else fcgi_section + ) homogeneous_exclude.append(section) - processes = self.processes_from_section(parser, section, - group_name, ProcessConfig) + processes = self._processes_from_section( + parser, section, group_name, ProcessConfig + ) group_processes.extend(processes) - groups.append( - ProcessGroupConfig(self, group_name, priority, group_processes) - ) + groups.append(ProcessGroupConfig(self, group_name, priority, group_processes)) # process "normal" homogeneous groups for section in all_sections: - if ( (not section.startswith('program:') ) - or section in homogeneous_exclude ): + if not section.startswith("program:") or section in homogeneous_exclude: continue - program_name = process_or_group_name(section.split(':', 1)[1]) - priority = integer(get(section, 'priority', 999)) - processes=self.processes_from_section(parser, section, program_name, - ProcessConfig) - groups.append( - ProcessGroupConfig(self, program_name, priority, processes) - ) + program_name = process_or_group_name(section.split(":", 1)[1]) + priority = integer(get(section, "priority", 999)) + processes = self._processes_from_section( + parser, section, program_name, ProcessConfig + ) + groups.append(ProcessGroupConfig(self, program_name, priority, processes)) # process "event listener" homogeneous groups for section in all_sections: - if not section.startswith('eventlistener:'): + if not section.startswith("eventlistener:"): continue - pool_name = section.split(':', 1)[1] + pool_name = section.split(":", 1)[1] # give listeners a "high" default priority so they are started first # and stopped 
last at mainloop exit - priority = integer(get(section, 'priority', -1)) + priority = integer(get(section, "priority", -1)) - buffer_size = integer(get(section, 'buffer_size', 10)) + buffer_size = integer(get(section, "buffer_size", 10)) if buffer_size < 1: - raise ValueError('[%s] section sets invalid buffer_size (%d)' % - (section, buffer_size)) + raise ValueError( + "[%s] section sets invalid buffer_size (%d)" % (section, buffer_size) + ) - result_handler = get(section, 'result_handler', - 'supervisor.dispatchers:default_handler') + result_handler = get( + section, "result_handler", "supervisor.dispatchers:default_handler" + ) try: result_handler = self.import_spec(result_handler) except (AttributeError, ImportError): - raise ValueError('%s cannot be resolved within [%s]' % ( - result_handler, section)) + raise ValueError( + "%s cannot be resolved within [%s]" % (result_handler, section) + ) - pool_event_names = [x.upper() for x in - list_of_strings(get(section, 'events', ''))] + pool_event_names = [ + x.upper() for x in list_of_strings(get(section, "events", "")) + ] pool_event_names = set(pool_event_names) if not pool_event_names: - raise ValueError('[%s] section requires an "events" line' % - section) + raise ValueError('[%s] section requires an "events" line' % section) from supervisor.events import EventTypes + pool_events = [] for pool_event_name in pool_event_names: pool_event = getattr(EventTypes, pool_event_name, None) if pool_event is None: - raise ValueError('Unknown event type %s in [%s] events' % - (pool_event_name, section)) + raise ValueError( + "Unknown event type %s in [%s] events" % (pool_event_name, section) + ) pool_events.append(pool_event) - redirect_stderr = boolean(get(section, 'redirect_stderr', 'false')) + redirect_stderr = boolean(get(section, "redirect_stderr", "false")) if redirect_stderr: - raise ValueError('[%s] section sets redirect_stderr=true ' - 'but this is not allowed because it will interfere ' - 'with the eventlistener 
protocol' % section) + raise ValueError( + "[%s] section sets redirect_stderr=true " + "but this is not allowed because it will interfere " + "with the eventlistener protocol" % section + ) - processes=self.processes_from_section(parser, section, pool_name, - EventListenerConfig) + processes = self._processes_from_section( + parser, section, pool_name, EventListenerConfig + ) groups.append( - EventListenerPoolConfig(self, pool_name, priority, processes, - buffer_size, pool_events, - result_handler) + EventListenerPoolConfig( + self, + pool_name, + priority, + processes, + buffer_size, + pool_events, + result_handler, ) + ) # process fastcgi homogeneous groups for section in all_sections: - if ( (not section.startswith('fcgi-program:') ) - or section in homogeneous_exclude ): + if not section.startswith("fcgi-program:") or section in homogeneous_exclude: continue - program_name = process_or_group_name(section.split(':', 1)[1]) - priority = integer(get(section, 'priority', 999)) - fcgi_expansions = {'program_name': program_name} + program_name = process_or_group_name(section.split(":", 1)[1]) + priority = integer(get(section, "priority", 999)) + fcgi_expansions = {"program_name": program_name} # find proc_uid from "user" option - proc_user = get(section, 'user', None) + proc_user = get(section, "user", None) if proc_user is None: proc_uid = None else: proc_uid = name_to_uid(proc_user) - socket_backlog = get(section, 'socket_backlog', None) + socket_backlog = get(section, "socket_backlog", None) if socket_backlog is not None: socket_backlog = integer(socket_backlog) - if (socket_backlog < 1 or socket_backlog > 65535): - raise ValueError('Invalid socket_backlog value %s' - % socket_backlog) + if socket_backlog < 1 or socket_backlog > 65535: + raise ValueError("Invalid socket_backlog value %s" % socket_backlog) - socket_owner = get(section, 'socket_owner', None) + socket_owner = get(section, "socket_owner", None) if socket_owner is not None: try: socket_owner = 
colon_separated_user_group(socket_owner) except ValueError: - raise ValueError('Invalid socket_owner value %s' - % socket_owner) + raise ValueError("Invalid socket_owner value %s" % socket_owner) - socket_mode = get(section, 'socket_mode', None) + socket_mode = get(section, "socket_mode", None) if socket_mode is not None: try: socket_mode = octal_type(socket_mode) except (TypeError, ValueError): - raise ValueError('Invalid socket_mode value %s' - % socket_mode) + raise ValueError("Invalid socket_mode value %s" % socket_mode) - socket = get(section, 'socket', None, expansions=fcgi_expansions) + socket = get(section, "socket", None, expansions=fcgi_expansions) if not socket: - raise ValueError('[%s] section requires a "socket" line' % - section) + raise ValueError('[%s] section requires a "socket" line' % section) try: - socket_config = self.parse_fcgi_socket(socket, proc_uid, - socket_owner, socket_mode, - socket_backlog) + socket_config = self.parse_fcgi_socket( + socket, proc_uid, socket_owner, socket_mode, socket_backlog + ) except ValueError as e: - raise ValueError('%s in [%s] socket' % (str(e), section)) + raise ValueError("%s in [%s] socket" % (str(e), section)) - processes=self.processes_from_section(parser, section, program_name, - FastCGIProcessConfig) + processes = self._processes_from_section( + parser, section, program_name, FastCGIProcessConfig + ) groups.append( - FastCGIGroupConfig(self, program_name, priority, processes, - socket_config) - ) + FastCGIGroupConfig(self, program_name, priority, processes, socket_config) + ) groups.sort() return groups - def parse_fcgi_socket(self, sock, proc_uid, socket_owner, socket_mode, - socket_backlog): - if sock.startswith('unix://'): + def parse_fcgi_socket( + self, sock, proc_uid, socket_owner, socket_mode, socket_backlog + ): + if sock.startswith("unix://"): path = sock[7:] - #Check it's an absolute path + # Check it's an absolute path if not os.path.isabs(path): - raise ValueError("Unix socket path %s is not 
an absolute path", - path) + raise ValueError("Unix socket path %s is not an absolute path", path) path = normalize_path(path) if socket_owner is None: @@ -869,170 +1360,166 @@ def parse_fcgi_socket(self, sock, proc_uid, socket_owner, socket_mode, if socket_mode is None: socket_mode = 0o700 - return UnixStreamSocketConfig(path, owner=socket_owner, - mode=socket_mode, - backlog=socket_backlog) + return UnixStreamSocketConfig( + path, owner=socket_owner, mode=socket_mode, backlog=socket_backlog + ) if socket_owner is not None or socket_mode is not None: - raise ValueError("socket_owner and socket_mode params should" - + " only be used with a Unix domain socket") + raise ValueError( + "socket_owner and socket_mode params should" + + " only be used with a Unix domain socket" + ) - m = re.match(r'tcp://([^\s:]+):(\d+)$', sock) + m = re.match(r"tcp://([^\s:]+):(\d+)$", sock) if m: host = m.group(1) port = int(m.group(2)) - return InetStreamSocketConfig(host, port, - backlog=socket_backlog) + return InetStreamSocketConfig(host, port, backlog=socket_backlog) raise ValueError("Bad socket format %s", sock) - def processes_from_section(self, parser, section, group_name, - klass=None): - try: - return self._processes_from_section( - parser, section, group_name, klass) - except ValueError as e: - filename = parser.section_to_file.get(section, self.configfile) - raise ValueError('%s in section %r (file: %r)' - % (e, section, filename)) - - def _processes_from_section(self, parser, section, group_name, - klass=None): + def _processes_from_section(self, parser, section, group_name, klass=None): if klass is None: klass = ProcessConfig programs = [] - program_name = process_or_group_name(section.split(':', 1)[1]) + program_name = process_or_group_name(section.split(":", 1)[1]) host_node_name = platform.node() - common_expansions = {'here':self.here, - 'program_name':program_name, - 'host_node_name':host_node_name, - 'group_name':group_name} + common_expansions = { + "here": 
self.here, + "program_name": program_name, + "host_node_name": host_node_name, + "group_name": group_name, + } + def get(section, opt, *args, **kwargs): - expansions = kwargs.get('expansions', {}) + expansions = kwargs.get("expansions", {}) expansions.update(common_expansions) - kwargs['expansions'] = expansions + kwargs["expansions"] = expansions return parser.saneget(section, opt, *args, **kwargs) - priority = integer(get(section, 'priority', 999)) - autostart = boolean(get(section, 'autostart', 'true')) - autorestart = auto_restart(get(section, 'autorestart', 'unexpected')) - startsecs = integer(get(section, 'startsecs', 1)) - startretries = integer(get(section, 'startretries', 3)) - stopsignal = signal_number(get(section, 'stopsignal', 'TERM')) - stopwaitsecs = integer(get(section, 'stopwaitsecs', 10)) - stopasgroup = boolean(get(section, 'stopasgroup', 'false')) - killasgroup = boolean(get(section, 'killasgroup', stopasgroup)) - exitcodes = list_of_exitcodes(get(section, 'exitcodes', '0')) + priority = integer(get(section, "priority", 999)) + autostart = boolean(get(section, "autostart", "true")) + autorestart = auto_restart(get(section, "autorestart", "unexpected")) + startsecs = integer(get(section, "startsecs", 1)) + startretries = integer(get(section, "startretries", 3)) + stopsignal = signal_number(get(section, "stopsignal", "TERM")) + stopwaitsecs = integer(get(section, "stopwaitsecs", 10)) + stopasgroup = boolean(get(section, "stopasgroup", "false")) + killasgroup = boolean(get(section, "killasgroup", stopasgroup)) + exitcodes = list_of_exitcodes(get(section, "exitcodes", "0")) # see also redirect_stderr check in process_groups_from_parser() - redirect_stderr = boolean(get(section, 'redirect_stderr','false')) - numprocs = integer(get(section, 'numprocs', 1)) - numprocs_start = integer(get(section, 'numprocs_start', 0)) - environment_str = get(section, 'environment', '', do_expand=False) - stdout_cmaxbytes = 
byte_size(get(section,'stdout_capture_maxbytes','0')) - stdout_events = boolean(get(section, 'stdout_events_enabled','false')) - stderr_cmaxbytes = byte_size(get(section,'stderr_capture_maxbytes','0')) - stderr_events = boolean(get(section, 'stderr_events_enabled','false')) - serverurl = get(section, 'serverurl', None) - if serverurl and serverurl.strip().upper() == 'AUTO': + redirect_stderr = boolean(get(section, "redirect_stderr", "false")) + numprocs = integer(get(section, "numprocs", 1)) + numprocs_start = integer(get(section, "numprocs_start", 0)) + environment_str = get(section, "environment", "", do_expand=False) + stdout_cmaxbytes = byte_size(get(section, "stdout_capture_maxbytes", "0")) + stdout_events = boolean(get(section, "stdout_events_enabled", "false")) + stderr_cmaxbytes = byte_size(get(section, "stderr_capture_maxbytes", "0")) + stderr_events = boolean(get(section, "stderr_events_enabled", "false")) + serverurl = get(section, "serverurl", None) + if serverurl and serverurl.strip().upper() == "AUTO": serverurl = None # find uid from "user" option - user = get(section, 'user', None) + user = get(section, "user", None) if user is None: uid = None else: uid = name_to_uid(user) - umask = get(section, 'umask', None) + umask = get(section, "umask", None) if umask is not None: umask = octal_type(umask) process_name = process_or_group_name( - get(section, 'process_name', '%(program_name)s', do_expand=False)) + get(section, "process_name", "%(program_name)s", do_expand=False) + ) if numprocs > 1: - if not '%(process_num)' in process_name: + if "%(process_num)" not in process_name: # process_name needs to include process_num when we # represent a group of processes raise ValueError( - '%(process_num) must be present within process_name when ' - 'numprocs > 1') + "%(process_num) must be present within process_name when " + "numprocs > 1" + ) if stopasgroup and not killasgroup: - raise ValueError( - "Cannot set stopasgroup=true and killasgroup=false" - ) + 
raise ValueError("Cannot set stopasgroup=true and killasgroup=false")

         for process_num in range(numprocs_start, numprocs + numprocs_start):
             expansions = common_expansions
-            expansions.update({'process_num': process_num, 'numprocs': numprocs})
+            expansions.update({"process_num": process_num, "numprocs": numprocs})
             expansions.update(self.environ_expansions)

             environment = dict_of_key_value_pairs(
-                expand(environment_str, expansions, 'environment'))
+                expand(environment_str, expansions, "environment")
+            )

             # extend expansions for process from [program:x] environment definition
             for k, v in environment.items():
-                expansions['ENV_%s' % k] = v
+                expansions["ENV_%s" % k] = v

-            directory = get(section, 'directory', None)
+            directory = get(section, "directory", None); directory = None if directory is None else normalize_path(directory)

             logfiles = {}

-            for k in ('stdout', 'stderr'):
-                lf_key = '%s_logfile' % k
+            for k in ("stdout", "stderr"):
+                lf_key = "%s_logfile" % k
                 lf_val = get(section, lf_key, Automatic)
                 if isinstance(lf_val, basestring):
                     lf_val = expand(lf_val, expansions, lf_key)
-                lf_val = logfile_name(lf_val)
+                lf_val = logfile_name(lf_val); lf_val = normalize_path(lf_val) if isinstance(lf_val, basestring) else lf_val
                 logfiles[lf_key] = lf_val

-                bu_key = '%s_logfile_backups' % k
+                bu_key = "%s_logfile_backups" % k
                 backups = integer(get(section, bu_key, 10))
                 logfiles[bu_key] = backups

-                mb_key = '%s_logfile_maxbytes' % k
-                maxbytes = byte_size(get(section, mb_key, '50MB'))
+                mb_key = "%s_logfile_maxbytes" % k
+                maxbytes = byte_size(get(section, mb_key, "50MB"))
                 logfiles[mb_key] = maxbytes

-                sy_key = '%s_syslog' % k
+                sy_key = "%s_syslog" % k
                 syslog = boolean(get(section, sy_key, False))
                 logfiles[sy_key] = syslog

                 # rewrite deprecated "syslog" magic logfile into the equivalent
-                # TODO remove this in a future version
+                # TODO remove this in a future version
                 if lf_val is Syslog:
                     self.parse_warnings.append(
-                        'For [%s], %s=syslog but this is deprecated and will '
-                        'be removed. Use %s=true to enable syslog instead.'
% ( - section, lf_key, sy_key)) + "For [%s], %s=syslog but this is deprecated and will " + "be removed. Use %s=true to enable syslog instead." + % (section, lf_key, sy_key) + ) logfiles[lf_key] = lf_val = None logfiles[sy_key] = True if lf_val is Automatic and not maxbytes: self.parse_warnings.append( - 'For [%s], AUTO logging used for %s without ' - 'rollover, set maxbytes > 0 to avoid filling up ' - 'filesystem unintentionally' % (section, lf_key)) + "For [%s], AUTO logging used for %s without " + "rollover, set maxbytes > 0 to avoid filling up " + "filesystem unintentionally" % (section, lf_key) + ) if redirect_stderr: - if logfiles['stderr_logfile'] not in (Automatic, None): + if logfiles["stderr_logfile"] not in (Automatic, None): self.parse_warnings.append( - 'For [%s], redirect_stderr=true but stderr_logfile has ' - 'also been set to a filename, the filename has been ' - 'ignored' % section) + "For [%s], redirect_stderr=true but stderr_logfile has " + "also been set to a filename, the filename has been " + "ignored" % section + ) # never create an stderr logfile when redirected - logfiles['stderr_logfile'] = None - - command = get(section, 'command', None, expansions=expansions) + logfiles["stderr_logfile"] = None + + command = get(section, "command", None, expansions=expansions) if command is None: - raise ValueError( - 'program section %s does not specify a command' % section) + raise ValueError("program section %s does not specify a command" % section) pconfig = klass( self, - name=expand(process_name, expansions, 'process_name'), + name=expand(process_name, expansions, "process_name"), command=command, directory=directory, umask=umask, @@ -1042,18 +1529,18 @@ def get(section, opt, *args, **kwargs): startsecs=startsecs, startretries=startretries, uid=uid, - stdout_logfile=logfiles['stdout_logfile'], - stdout_capture_maxbytes = stdout_cmaxbytes, - stdout_events_enabled = stdout_events, - stdout_logfile_backups=logfiles['stdout_logfile_backups'], - 
stdout_logfile_maxbytes=logfiles['stdout_logfile_maxbytes'], - stdout_syslog=logfiles['stdout_syslog'], - stderr_logfile=logfiles['stderr_logfile'], - stderr_capture_maxbytes = stderr_cmaxbytes, - stderr_events_enabled = stderr_events, - stderr_logfile_backups=logfiles['stderr_logfile_backups'], - stderr_logfile_maxbytes=logfiles['stderr_logfile_maxbytes'], - stderr_syslog=logfiles['stderr_syslog'], + stdout_logfile=logfiles["stdout_logfile"], + stdout_capture_maxbytes=stdout_cmaxbytes, + stdout_events_enabled=stdout_events, + stdout_logfile_backups=logfiles["stdout_logfile_backups"], + stdout_logfile_maxbytes=logfiles["stdout_logfile_maxbytes"], + stdout_syslog=logfiles["stdout_syslog"], + stderr_logfile=logfiles["stderr_logfile"], + stderr_capture_maxbytes=stderr_cmaxbytes, + stderr_events_enabled=stderr_events, + stderr_logfile_backups=logfiles["stderr_logfile_backups"], + stderr_logfile_maxbytes=logfiles["stderr_logfile_maxbytes"], + stderr_syslog=logfiles["stderr_syslog"], stopsignal=stopsignal, stopwaitsecs=stopwaitsecs, stopasgroup=stopasgroup, @@ -1061,86 +1548,88 @@ def get(section, opt, *args, **kwargs): exitcodes=exitcodes, redirect_stderr=redirect_stderr, environment=environment, - serverurl=serverurl) + serverurl=serverurl, + ) programs.append(pconfig) - programs.sort() # asc by priority + programs.sort() # asc by priority return programs def _parse_servernames(self, parser, stype): options = [] for section in parser.sections(): if section.startswith(stype): - parts = section.split(':', 1) + parts = section.split(":", 1) if len(parts) > 1: name = parts[1] else: - name = None # default sentinel + name = None # default sentinel options.append((name, section)) return options def _parse_username_and_password(self, parser, section): get = parser.saneget - username = get(section, 'username', None) - password = get(section, 'password', None) + username = get(section, "username", None) + password = get(section, "password", None) if username is not None or 
password is not None: if username is None or password is None: raise ValueError( - 'Section [%s] contains incomplete authentication: ' - 'If a username or a password is specified, both the ' - 'username and password must be specified' % section) - return {'username':username, 'password':password} + "Section [%s] contains incomplete authentication: " + "If a username or a password is specified, both the " + "username and password must be specified" % section + ) + return {"username": username, "password": password} def server_configs_from_parser(self, parser): configs = [] - inet_serverdefs = self._parse_servernames(parser, 'inet_http_server') + inet_serverdefs = self._parse_servernames(parser, "inet_http_server") for name, section in inet_serverdefs: config = {} get = parser.saneget config.update(self._parse_username_and_password(parser, section)) - config['name'] = name - config['family'] = socket.AF_INET - port = get(section, 'port', None) + config["name"] = name + config["family"] = socket.AF_INET + port = get(section, "port", None) if port is None: - raise ValueError('section [%s] has no port value' % section) + raise ValueError("section [%s] has no port value" % section) host, port = inet_address(port) - config['host'] = host - config['port'] = port - config['section'] = section + config["host"] = host + config["port"] = port + config["section"] = section configs.append(config) - unix_serverdefs = self._parse_servernames(parser, 'unix_http_server') + unix_serverdefs = self._parse_servernames(parser, "unix_http_server") for name, section in unix_serverdefs: config = {} get = parser.saneget - sfile = get(section, 'file', None, expansions={'here': self.here}) + sfile = get(section, "file", None, expansions={"here": self.here}) if sfile is None: - raise ValueError('section [%s] has no file value' % section) + raise ValueError("section [%s] has no file value" % section) sfile = sfile.strip() - config['name'] = name - config['family'] = socket.AF_UNIX - 
config['file'] = normalize_path(sfile) + config["name"] = name + config["family"] = socket.AF_UNIX + config["file"] = normalize_path(sfile) config.update(self._parse_username_and_password(parser, section)) - chown = get(section, 'chown', None) + chown = get(section, "chown", None) if chown is not None: try: chown = colon_separated_user_group(chown) except ValueError: - raise ValueError('Invalid sockchown value %s' % chown) + raise ValueError("Invalid sockchown value %s" % chown) else: chown = (-1, -1) - config['chown'] = chown - chmod = get(section, 'chmod', None) + config["chown"] = chown + chmod = get(section, "chmod", None) if chmod is not None: try: chmod = octal_type(chmod) except (TypeError, ValueError): - raise ValueError('Invalid chmod value %s' % chmod) + raise ValueError("Invalid chmod value %s" % chmod) else: chmod = 0o700 - config['chmod'] = chmod - config['section'] = section + config["chmod"] = chmod + config["section"] = section configs.append(config) return configs @@ -1150,6 +1639,7 @@ def daemonize(self): self._daemonize() self.poller.after_daemonize() + def _daemonize(self): # To daemonize, we need to become the leader of our own session # (process) group. 
If we do not, signals sent to our @@ -1181,13 +1671,12 @@ def _daemonize(self): self.logger.info("daemonizing the supervisord process") if self.directory: try: - os.chdir(self.directory) + # Normalize the directory path + os.chdir(normalize_path(self.directory)) except OSError as err: - self.logger.critical("can't chdir into %r: %s" - % (self.directory, err)) + self.logger.critical("can't chdir into %r: %s" % (self.directory, err)) else: - self.logger.info("set current directory: %r" - % self.directory) + self.logger.info("set current directory: %r" % self.directory) os.close(0) self.stdin = sys.stdin = sys.__stdin__ = open("/dev/null") os.close(1) @@ -1205,27 +1694,31 @@ def _daemonize(self): def write_pidfile(self): pid = os.getpid() try: - with open(self.pidfile, 'w') as f: - f.write('%s\n' % pid) + with open( + normalize_path(self.pidfile), "w" + ) as f: # Normalize the pidfile path + f.write("%s\n" % pid) except (IOError, OSError): - self.logger.critical('could not write pidfile %s' % self.pidfile) + self.logger.critical("could not write pidfile %s" % self.pidfile) else: self.unlink_pidfile = True - self.logger.info('supervisord started with pid %s' % pid) + self.logger.info("supervisord started with pid %s" % pid) def cleanup(self): for config, server in self.httpservers: - if config['family'] == socket.AF_UNIX: + if config["family"] == socket.AF_UNIX: if self.unlink_socketfiles: - socketname = config['file'] - self._try_unlink(socketname) + socketname = config["file"] + # Normalize the socket file path + self._try_unlink(normalize_path(socketname)) if self.unlink_pidfile: - self._try_unlink(self.pidfile) + # Normalize the pidfile path + self._try_unlink(normalize_path(self.pidfile)) self.poller.close() def _try_unlink(self, path): try: - os.unlink(path) + os.unlink(normalize_path(path)) # Normalize the path except OSError: pass @@ -1235,7 +1728,7 @@ def close_httpservers(self): server.close() # server._map is a reference to the asyncore socket_map for 
dispatcher in self.get_socket_map().values(): - dispatcher_server = getattr(dispatcher, 'server', None) + dispatcher_server = getattr(dispatcher, "server", None) if dispatcher_server is server: dispatcher_servers.append(dispatcher) for server in dispatcher_servers: @@ -1249,7 +1742,8 @@ def close_httpservers(self): # descriptor to be closed, but it will still remain in # the socket_map, and eventually its file descriptor # will be passed to # select(), which will bomb. See - # also https://web.archive.org/web/20160729222427/http://www.plope.com/software/collector/253 + # also + # https://web.archive.org/web/20160729222427/http://www.plope.com/software/collector/253 server.close() def close_logger(self): @@ -1273,46 +1767,51 @@ def openhttpservers(self, supervisord): self.unlink_socketfiles = True except socket.error as why: if why.args[0] == errno.EADDRINUSE: - self.usage('Another program is already listening on ' - 'a port that one of our HTTP servers is ' - 'configured to use. Shut this program ' - 'down first before starting supervisord.') + self.usage( + "Another program is already listening on " + "a port that one of our HTTP servers is " + "configured to use. Shut this program " + "down first before starting supervisord." 
+ ) else: - help = 'Cannot open an HTTP server: socket.error reported' + help = "Cannot open an HTTP server: socket.error reported" errorname = errno.errorcode.get(why.args[0]) if errorname is None: - self.usage('%s %s' % (help, why.args[0])) + self.usage("%s %s" % (help, why.args[0])) else: - self.usage('%s errno.%s (%d)' % - (help, errorname, why.args[0])) + self.usage("%s errno.%s (%d)" % (help, errorname, why.args[0])) except ValueError as why: self.usage(why.args[0]) def get_autochildlog_name(self, name, identifier, channel): - prefix='%s-%s---%s-' % (name, channel, identifier) + prefix = "%s-%s---%s-" % (name, channel, identifier) logfile = self.mktempfile( - suffix='.log', - prefix=prefix, - dir=self.childlogdir) + suffix=".log", prefix=prefix, dir=normalize_path(self.childlogdir) + ) # Normalize the directory path return logfile def clear_autochildlogdir(self): # must be called after realize() - childlogdir = self.childlogdir - fnre = re.compile(r'.+?---%s-\S+\.log\.{0,1}\d{0,4}' % self.identifier) + if self.childlogdir is None: + self.logger.warn("childlogdir is not set. 
Cannot clear child log directory.") + return + + childlogdir = normalize_path(self.childlogdir) # Normalize childlogdir + fnre = re.compile(r".+?---%s-\S+\.log\.{0,1}\d{0,4}" % self.identifier) try: filenames = os.listdir(childlogdir) + except (IOError, OSError): - self.logger.warn('Could not clear childlog dir') + self.logger.warn("Could not clear childlog dir") return for filename in filenames: if fnre.match(filename): pathname = os.path.join(childlogdir, filename) try: - self.remove(pathname) + self.remove(normalize_path(pathname)) # Normalize the pathname except (OSError, IOError): - self.logger.warn('Failed to clean up %r' % pathname) + self.logger.warn("Failed to clean up %r" % pathname) def get_socket_map(self): return asyncore.socket_map @@ -1341,11 +1840,11 @@ def waitpid(self): code = exc.args[0] if code not in (errno.ECHILD, errno.EINTR): self.logger.critical( - 'waitpid error %r; ' - 'a process may not be cleaned up properly' % code - ) + "waitpid error %r; " + "a process may not be cleaned up properly" % code + ) if code == errno.EINTR: - self.logger.blather('EINTR during reap') + self.logger.blather("EINTR during reap") pid, sts = None, None return pid, sts @@ -1385,10 +1884,9 @@ def drop_privileges(self, user): return "Can't drop privilege as nonroot user" gid = pwrec[3] - if hasattr(os, 'setgroups'): + if hasattr(os, "setgroups"): user = pwrec[0] - groups = [grprec[2] for grprec in grp.getgrall() if user in - grprec[3]] + groups = [grprec[2] for grprec in grp.getgrall() if user in grprec[3]] # always put our primary gid first in this list, otherwise we can # lose group info since sometimes the first group in the setgroups @@ -1399,11 +1897,11 @@ def drop_privileges(self, user): try: os.setgroups(groups) except OSError: - return 'Could not set groups of effective user' + return "Could not set groups of effective user" try: os.setgid(gid) except OSError: - return 'Could not set group id of effective user' + return "Could not set group id of effective 
user" os.setuid(uid) def set_uid_or_exit(self): @@ -1412,16 +1910,17 @@ def set_uid_or_exit(self): privileges could not be dropped.""" if self.uid is None: if os.getuid() == 0: - self.parse_criticals.append('Supervisor is running as root. ' - 'Privileges were not dropped because no user is ' - 'specified in the config file. If you intend to run ' - 'as root, you can set user=root in the config file ' - 'to avoid this message.') + self.parse_criticals.append( + "Supervisor is running as root. " + "Privileges were not dropped because no user is " + "specified in the config file. If you intend to run " + "as root, you can set user=root in the config file " + "to avoid this message." + ) else: msg = self.drop_privileges(self.uid) if msg is None: - self.parse_infos.append('Set uid to user %s succeeded' % - self.uid) + self.parse_infos.append("Set uid to user %s succeeded" % self.uid) else: # failed to drop privileges self.usage(msg) @@ -1430,49 +1929,65 @@ def set_rlimits_or_exit(self): supervisord startup only. No return value. Exits the process via usage() if any rlimits could not be set.""" limits = [] - if hasattr(resource, 'RLIMIT_NOFILE'): + if hasattr(self, 'minfds') and hasattr(self, 'minprocs'): + print(f"Debug: minfds = {self.minfds}") + print(f"Debug: minprocs = {self.minprocs}") + + if hasattr(resource, "RLIMIT_NOFILE"): + if self.minfds is None: + self.usage("The 'minfds' parameter is not set in the configuration file.") + return limits.append( { - 'msg':('The minimum number of file descriptors required ' - 'to run this process is %(min_limit)s as per the "minfds" ' - 'command-line argument or config file setting. ' - 'The current environment will only allow you ' - 'to open %(hard)s file descriptors. 
Either raise ' - 'the number of usable file descriptors in your ' - 'environment (see README.rst) or lower the ' - 'minfds setting in the config file to allow ' - 'the process to start.'), - 'min':self.minfds, - 'resource':resource.RLIMIT_NOFILE, - 'name':'RLIMIT_NOFILE', - }) - if hasattr(resource, 'RLIMIT_NPROC'): + "msg": ( + "The minimum number of file descriptors required " + 'to run this process is %(min_limit)s as per the "minfds" ' + "command-line argument or config file setting. " + "The current environment will only allow you " + "to open %(hard)s file descriptors. Either raise " + "the number of usable file descriptors in your " + "environment (see README.rst) or lower the " + "minfds setting in the config file to allow " + "the process to start." + ), + "min": self.minfds, # if self.minfds is not None else 1024, + "resource": resource.RLIMIT_NOFILE, + "name": "RLIMIT_NOFILE", + } + ) + if hasattr(resource, "RLIMIT_NPROC"): limits.append( { - 'msg':('The minimum number of available processes required ' - 'to run this program is %(min_limit)s as per the "minprocs" ' - 'command-line argument or config file setting. ' - 'The current environment will only allow you ' - 'to open %(hard)s processes. Either raise ' - 'the number of usable processes in your ' - 'environment (see README.rst) or lower the ' - 'minprocs setting in the config file to allow ' - 'the program to start.'), - 'min':self.minprocs, - 'resource':resource.RLIMIT_NPROC, - 'name':'RLIMIT_NPROC', - }) + "msg": ( + "The minimum number of available processes required " + 'to run this program is %(min_limit)s as per the "minprocs" ' + "command-line argument or config file setting. " + "The current environment will only allow you " + "to open %(hard)s processes. Either raise " + "the number of usable processes in your " + "environment (see README.rst) or lower the " + "minprocs setting in the config file to allow " + "the program to start." 
+ ), + "min": self.minprocs, # if self.minprocs is not None else 200, + "resource": resource.RLIMIT_NPROC, + "name": "RLIMIT_NPROC", + } + ) for limit in limits: - min_limit = limit['min'] - res = limit['resource'] - msg = limit['msg'] - name = limit['name'] - name = name # name is used below by locals() + min_limit = limit["min"] + res = limit["resource"] + msg = limit["msg"] + name = limit["name"] + name = name # name is used below by locals() soft, hard = resource.getrlimit(res) + + if soft is None or min_limit is None: + self.usage(msg % locals()) - if (soft < min_limit) and (soft != -1): # -1 means unlimited + if (soft < min_limit) and (soft != -1): # -1 means unlimited if (hard < min_limit) and (hard != -1): # setrlimit should increase the hard limit if we are # root, if not then setrlimit raises and we print usage @@ -1480,14 +1995,15 @@ def set_rlimits_or_exit(self): try: resource.setrlimit(res, (min_limit, hard)) - self.parse_infos.append('Increased %(name)s limit to ' - '%(min_limit)s' % locals()) + self.parse_infos.append( + "Increased %(name)s limit to " "%(min_limit)s" % locals() + ) except (resource.error, ValueError): self.usage(msg % locals()) - + def make_logger(self): # must be called after realize() and after supervisor does setuid() - format = '%(asctime)s %(levelname)s %(message)s\n' + format = "%(asctime)s %(levelname)s %(message)s\n" self.logger = loggers.getLogger(self.loglevel) if self.nodaemon and not self.silent: loggers.handle_stdout(self.logger, format) @@ -1503,6 +2019,7 @@ def make_logger(self): def make_http_servers(self, supervisord): from supervisor.http import make_http_servers + return make_http_servers(self, supervisord) def close_fd(self, fd): @@ -1573,44 +2090,46 @@ def check_execv_args(self, filename, argv, st): raise NoPermission("no permission to run command %r" % filename) def reopenlogs(self): - self.logger.info('supervisord logreopen') + self.logger.info("supervisord logreopen") for handler in self.logger.handlers: - if 
hasattr(handler, 'reopen'): + if hasattr(handler, "reopen"): handler.reopen() def readfd(self, fd): try: - data = os.read(fd, 2 << 16) # 128K + data = os.read(fd, 2 << 16) # 128K except OSError as why: if why.args[0] not in (errno.EWOULDBLOCK, errno.EBADF, errno.EINTR): raise - data = b'' + data = b"" return data def chdir(self, dir): os.chdir(dir) def make_pipes(self, stderr=True): - """ Create pipes for parent to child stdin/stdout/stderr + """Create pipes for parent to child stdin/stdout/stderr communications. Open fd in non-blocking mode so we can read them in the mainloop without blocking. If stderr is False, don't - create a pipe for stderr. """ - - pipes = {'child_stdin':None, - 'stdin':None, - 'stdout':None, - 'child_stdout':None, - 'stderr':None, - 'child_stderr':None} + create a pipe for stderr.""" + + pipes = { + "child_stdin": None, + "stdin": None, + "stdout": None, + "child_stdout": None, + "stderr": None, + "child_stderr": None, + } try: stdin, child_stdin = os.pipe() - pipes['child_stdin'], pipes['stdin'] = stdin, child_stdin + pipes["child_stdin"], pipes["stdin"] = stdin, child_stdin stdout, child_stdout = os.pipe() - pipes['stdout'], pipes['child_stdout'] = stdout, child_stdout + pipes["stdout"], pipes["child_stdout"] = stdout, child_stdout if stderr: stderr, child_stderr = os.pipe() - pipes['stderr'], pipes['child_stderr'] = stderr, child_stderr - for fd in (pipes['stdout'], pipes['stderr'], pipes['stdin']): + pipes["stderr"], pipes["child_stderr"] = stderr, child_stderr + for fd in (pipes["stdout"], pipes["stderr"], pipes["stdin"]): if fd is not None: flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NDELAY fcntl.fcntl(fd, fcntl.F_SETFL, flags) @@ -1622,17 +2141,18 @@ def make_pipes(self, stderr=True): raise def close_parent_pipes(self, pipes): - for fdname in ('stdin', 'stdout', 'stderr'): + for fdname in ("stdin", "stdout", "stderr"): fd = pipes.get(fdname) if fd is not None: self.close_fd(fd) def close_child_pipes(self, pipes): - for fdname in 
('child_stdin', 'child_stdout', 'child_stderr'): + for fdname in ("child_stdin", "child_stdout", "child_stderr"): fd = pipes.get(fdname) if fd is not None: self.close_fd(fd) + class ClientOptions(Options): positional_args_allowed = 1 @@ -1648,23 +2168,36 @@ def __init__(self): self.configroot = Dummy() self.configroot.supervisorctl = Dummy() self.configroot.supervisorctl.interactive = None - self.configroot.supervisorctl.prompt = 'supervisor' + self.configroot.supervisorctl.prompt = "supervisor" self.configroot.supervisorctl.serverurl = None self.configroot.supervisorctl.username = None self.configroot.supervisorctl.password = None self.configroot.supervisorctl.history_file = None from supervisor.supervisorctl import DefaultControllerPlugin - default_factory = ('default', DefaultControllerPlugin, {}) + + default_factory = ("default", DefaultControllerPlugin, {}) # we always add the default factory. If you want to a supervisorctl # without the default plugin, please write your own supervisorctl. 
self.plugin_factories = [default_factory] - self.add("interactive", "supervisorctl.interactive", "i", - "interactive", flag=1, default=0) + self.add( + "interactive", + "supervisorctl.interactive", + "i", + "interactive", + flag=1, + default=0, + ) self.add("prompt", "supervisorctl.prompt", default="supervisor") - self.add("serverurl", "supervisorctl.serverurl", "s:", "serverurl=", - url, default="http://localhost:9001") + self.add( + "serverurl", + "supervisorctl.serverurl", + "s:", + "serverurl=", + url, + default="http://localhost:9001", + ) self.add("username", "supervisorctl.username", "u:", "username=") self.add("password", "supervisorctl.password", "p:", "password=") self.add("history", "supervisorctl.history_file", "r:", "history_file=") @@ -1674,7 +2207,7 @@ def realize(self, *arg, **kw): if not self.args: self.interactive = 1 - format = '%(levelname)s: %(message)s\n' + format = "%(levelname)s: %(message)s\n" logger = loggers.getLogger() loggers.handle_stdout(logger, format) self._log_parsing_messages(logger) @@ -1682,19 +2215,19 @@ def realize(self, *arg, **kw): def read_config(self, fp): section = self.configroot.supervisorctl need_close = False - if not hasattr(fp, 'read'): + if not hasattr(fp, "read"): self.here = os.path.dirname(normalize_path(fp)) if not self.exists(fp): raise ValueError("could not find config file %s" % fp) try: - fp = self.open(fp, 'r') + fp = self.open(fp, "r") need_close = True except (IOError, OSError): raise ValueError("could not read config file %s" % fp) parser = UnhosedConfigParser() parser.expansions = self.environ_expansions - parser.mysection = 'supervisorctl' + parser.mysection = "supervisorctl" try: parser.read_file(fp) except AttributeError: @@ -1705,22 +2238,24 @@ def read_config(self, fp): self.read_include_config(fp, parser, parser.expansions) sections = parser.sections() - if not 'supervisorctl' in sections: - raise ValueError('.ini file does not include supervisorctl section') - serverurl = 
parser.getdefault('serverurl', 'http://localhost:9001', - expansions={'here': self.here}) - if serverurl.startswith('unix://'): + if "supervisorctl" not in sections: + raise ValueError(".ini file does not include supervisorctl section") + serverurl = parser.getdefault( + "serverurl", "http://localhost:9001", expansions={"here": self.here} + ) + if serverurl.startswith("unix://"): path = normalize_path(serverurl[7:]) - serverurl = 'unix://%s' % path + serverurl = "unix://%s" % path section.serverurl = serverurl # The defaults used below are really set in __init__ (since # section==self.configroot.supervisorctl) - section.prompt = parser.getdefault('prompt', section.prompt) - section.username = parser.getdefault('username', section.username) - section.password = parser.getdefault('password', section.password) - history_file = parser.getdefault('history_file', section.history_file, - expansions={'here': self.here}) + section.prompt = parser.getdefault("prompt", section.prompt) + section.username = parser.getdefault("username", section.username) + section.password = parser.getdefault("password", section.password) + history_file = parser.getdefault( + "history_file", section.history_file, expansions={"here": self.here} + ) if history_file: history_file = normalize_path(history_file) @@ -1731,10 +2266,8 @@ def read_config(self, fp): self.history_file = None self.plugin_factories += self.get_plugins( - parser, - 'supervisor.ctl_factory', - 'ctlplugin:' - ) + parser, "supervisor.ctl_factory", "ctlplugin:" + ) return section @@ -1744,48 +2277,51 @@ def getServerProxy(self): # dumbass ServerProxy won't allow us to pass in a non-HTTP url, # so we fake the url we pass into it and always use the transport's # 'serverurl' to figure out what to attach to - 'http://127.0.0.1', - transport = xmlrpc.SupervisorTransport(self.username, - self.password, - self.serverurl) - ) + "http://127.0.0.1", + transport=xmlrpc.SupervisorTransport( + self.username, self.password, self.serverurl + 
), + ) + _marker = [] + class UnhosedConfigParser(ConfigParser.RawConfigParser): - mysection = 'supervisord' + mysection = "supervisord" def __init__(self, *args, **kwargs): # inline_comment_prefixes and strict were added in Python 3 but their # defaults make RawConfigParser behave differently than it did on # Python 2. We make it work like 2 by default for backwards compat. if not PY2: - if 'inline_comment_prefixes' not in kwargs: - kwargs['inline_comment_prefixes'] = (';', '#') + if "inline_comment_prefixes" not in kwargs: + kwargs["inline_comment_prefixes"] = (";", "#") - if 'strict' not in kwargs: - kwargs['strict'] = False + if "strict" not in kwargs: + kwargs["strict"] = False ConfigParser.RawConfigParser.__init__(self, *args, **kwargs) self.section_to_file = {} self.expansions = {} - def read_string(self, string, source=''): - '''Parse configuration data from a string. This is intended - to be used in tests only. We add this method for Py 2/3 compat.''' + def read_string(self, string, source=""): + """Parse configuration data from a string. This is intended + to be used in tests only. We add this method for Py 2/3 compat.""" try: return ConfigParser.RawConfigParser.read_string( - self, string, source) # Python 3.2 or later + self, string, source + ) # Python 3.2 or later except AttributeError: return self.readfp(StringIO(string)) def read(self, filenames, **kwargs): - '''Attempt to read and parse a list of filenames, returning a list + """Attempt to read and parse a list of filenames, returning a list of filenames which were successfully parsed. This is a method of RawConfigParser that is overridden to build self.section_to_file, which is a mapping of section names to the files they came from. 
- ''' + """ if isinstance(filenames, basestring): # RawConfigParser compat filenames = [filenames] @@ -1794,15 +2330,15 @@ def read(self, filenames, **kwargs): sections_orig = self._sections.copy() ok_filenames.extend( - ConfigParser.RawConfigParser.read(self, [filename], **kwargs)) + ConfigParser.RawConfigParser.read(self, [filename], **kwargs) + ) diff = frozenset(self._sections) - frozenset(sections_orig) for section in diff: self.section_to_file[section] = filename return ok_filenames - def saneget(self, section, option, default=_marker, do_expand=True, - expansions={}): + def saneget(self, section, option, default=_marker, do_expand=True, expansions={}): try: optval = self.get(section, option) except ConfigParser.NoOptionError: @@ -1813,19 +2349,20 @@ def saneget(self, section, option, default=_marker, do_expand=True, if do_expand and isinstance(optval, basestring): combined_expansions = dict( - list(self.expansions.items()) + list(expansions.items())) + list(self.expansions.items()) + list(expansions.items()) + ) - optval = expand(optval, combined_expansions, - "%s.%s" % (section, option)) + optval = expand(optval, combined_expansions, "%s.%s" % (section, option)) return optval def getdefault(self, option, default=_marker, expansions={}, **kwargs): - return self.saneget(self.mysection, option, default=default, - expansions=expansions, **kwargs) + return self.saneget( + self.mysection, option, default=default, expansions=expansions, **kwargs + ) def expand_here(self, here): - HERE_FORMAT = '%(here)s' + HERE_FORMAT = "%(here)s" for section in self.sections(): for key, value in self.items(section): if HERE_FORMAT in value: @@ -1863,22 +2400,41 @@ def __ge__(self, other): return self.priority >= other.priority def __repr__(self): - return '<%s instance at %s named %s>' % (self.__class__, id(self), - self.name) + return "<%s instance at %s named %s>" % (self.__class__, id(self), self.name) + class ProcessConfig(Config): req_param_names = [ - 'name', 'uid', 
'command', 'directory', 'umask', 'priority', - 'autostart', 'autorestart', 'startsecs', 'startretries', - 'stdout_logfile', 'stdout_capture_maxbytes', - 'stdout_events_enabled', 'stdout_syslog', - 'stdout_logfile_backups', 'stdout_logfile_maxbytes', - 'stderr_logfile', 'stderr_capture_maxbytes', - 'stderr_logfile_backups', 'stderr_logfile_maxbytes', - 'stderr_events_enabled', 'stderr_syslog', - 'stopsignal', 'stopwaitsecs', 'stopasgroup', 'killasgroup', - 'exitcodes', 'redirect_stderr' ] - optional_param_names = [ 'environment', 'serverurl' ] + "name", + "uid", + "command", + "directory", + "umask", + "priority", + "autostart", + "autorestart", + "startsecs", + "startretries", + "stdout_logfile", + "stdout_capture_maxbytes", + "stdout_events_enabled", + "stdout_syslog", + "stdout_logfile_backups", + "stdout_logfile_maxbytes", + "stderr_logfile", + "stderr_capture_maxbytes", + "stderr_logfile_backups", + "stderr_logfile_maxbytes", + "stderr_events_enabled", + "stderr_syslog", + "stopsignal", + "stopwaitsecs", + "stopasgroup", + "killasgroup", + "exitcodes", + "redirect_stderr", + ] + optional_param_names = ["environment", "serverurl"] def __init__(self, options, **params): self.options = options @@ -1892,7 +2448,7 @@ def __eq__(self, other): return False for name in self.req_param_names + self.optional_param_names: - if Automatic in [getattr(self, name), getattr(other, name)] : + if Automatic in [getattr(self, name), getattr(other, name)]: continue if getattr(self, name) != getattr(other, name): return False @@ -1900,10 +2456,10 @@ def __eq__(self, other): return True def get_path(self): - '''Return a list corresponding to $PATH that is configured to be set - in the process environment, or the system default.''' + """Return a list corresponding to $PATH that is configured to be set + in the process environment, or the system default.""" if self.environment is not None: - path = self.environment.get('PATH') + path = self.environment.get("PATH") if path is not None: 
return path.split(os.pathsep) return self.options.get_path() @@ -1914,12 +2470,13 @@ def create_autochildlogs(self): sid = self.options.identifier name = self.name if self.stdout_logfile is Automatic: - self.stdout_logfile = get_autoname(name, sid, 'stdout') + self.stdout_logfile = get_autoname(name, sid, "stdout") if self.stderr_logfile is Automatic: - self.stderr_logfile = get_autoname(name, sid, 'stderr') + self.stderr_logfile = get_autoname(name, sid, "stderr") def make_process(self, group=None): from supervisor.process import Subprocess + process = Subprocess(self) process.group = group return process @@ -1927,63 +2484,68 @@ def make_process(self, group=None): def make_dispatchers(self, proc): use_stderr = not self.redirect_stderr p = self.options.make_pipes(use_stderr) - stdout_fd,stderr_fd,stdin_fd = p['stdout'],p['stderr'],p['stdin'] + stdout_fd, stderr_fd, stdin_fd = p["stdout"], p["stderr"], p["stdin"] dispatchers = {} from supervisor.dispatchers import POutputDispatcher from supervisor.dispatchers import PInputDispatcher from supervisor import events + if stdout_fd is not None: etype = events.ProcessCommunicationStdoutEvent dispatchers[stdout_fd] = POutputDispatcher(proc, etype, stdout_fd) if stderr_fd is not None: etype = events.ProcessCommunicationStderrEvent - dispatchers[stderr_fd] = POutputDispatcher(proc,etype, stderr_fd) + dispatchers[stderr_fd] = POutputDispatcher(proc, etype, stderr_fd) if stdin_fd is not None: - dispatchers[stdin_fd] = PInputDispatcher(proc, 'stdin', stdin_fd) + dispatchers[stdin_fd] = PInputDispatcher(proc, "stdin", stdin_fd) return dispatchers, p + class EventListenerConfig(ProcessConfig): def make_dispatchers(self, proc): # always use_stderr=True for eventlisteners because mixing stderr # messages into stdout would break the eventlistener protocol use_stderr = True p = self.options.make_pipes(use_stderr) - stdout_fd,stderr_fd,stdin_fd = p['stdout'],p['stderr'],p['stdin'] + stdout_fd, stderr_fd, stdin_fd = p["stdout"], 
p["stderr"], p["stdin"] dispatchers = {} from supervisor.dispatchers import PEventListenerDispatcher from supervisor.dispatchers import PInputDispatcher from supervisor.dispatchers import POutputDispatcher from supervisor import events + if stdout_fd is not None: - dispatchers[stdout_fd] = PEventListenerDispatcher(proc, 'stdout', - stdout_fd) + dispatchers[stdout_fd] = PEventListenerDispatcher(proc, "stdout", stdout_fd) if stderr_fd is not None: etype = events.ProcessCommunicationStderrEvent dispatchers[stderr_fd] = POutputDispatcher(proc, etype, stderr_fd) if stdin_fd is not None: - dispatchers[stdin_fd] = PInputDispatcher(proc, 'stdin', stdin_fd) + dispatchers[stdin_fd] = PInputDispatcher(proc, "stdin", stdin_fd) return dispatchers, p + class FastCGIProcessConfig(ProcessConfig): def make_process(self, group=None): if group is None: - raise NotImplementedError('FastCGI programs require a group') + raise NotImplementedError("FastCGI programs require a group") from supervisor.process import FastCGISubprocess + process = FastCGISubprocess(self) process.group = group return process def make_dispatchers(self, proc): dispatchers, p = ProcessConfig.make_dispatchers(self, proc) - #FastCGI child processes expect the FastCGI socket set to - #file descriptor 0, so supervisord cannot use stdin - #to communicate with the child process - stdin_fd = p['stdin'] + # FastCGI child processes expect the FastCGI socket set to + # file descriptor 0, so supervisord cannot use stdin + # to communicate with the child process + stdin_fd = p["stdin"] if stdin_fd is not None: dispatchers[stdin_fd].close() return dispatchers, p + class ProcessGroupConfig(Config): def __init__(self, options, name, priority, process_configs): self.options = options @@ -2010,11 +2572,21 @@ def after_setuid(self): def make_group(self): from supervisor.process import ProcessGroup + return ProcessGroup(self) + class EventListenerPoolConfig(Config): - def __init__(self, options, name, priority, process_configs, 
buffer_size, - pool_events, result_handler): + def __init__( + self, + options, + name, + priority, + process_configs, + buffer_size, + pool_events, + result_handler, + ): self.options = options self.name = name self.priority = priority @@ -2027,12 +2599,14 @@ def __eq__(self, other): if not isinstance(other, EventListenerPoolConfig): return False - if ((self.name == other.name) and - (self.priority == other.priority) and - (self.process_configs == other.process_configs) and - (self.buffer_size == other.buffer_size) and - (self.pool_events == other.pool_events) and - (self.result_handler == other.result_handler)): + if ( + (self.name == other.name) + and (self.priority == other.priority) + and (self.process_configs == other.process_configs) + and (self.buffer_size == other.buffer_size) + and (self.pool_events == other.pool_events) + and (self.result_handler == other.result_handler) + ): return True return False @@ -2043,8 +2617,10 @@ def after_setuid(self): def make_group(self): from supervisor.process import EventListenerPool + return EventListenerPool(self) + class FastCGIGroupConfig(ProcessGroupConfig): def __init__(self, options, name, priority, process_configs, socket_config): ProcessGroupConfig.__init__( @@ -2053,7 +2629,7 @@ def __init__(self, options, name, priority, process_configs, socket_config): name, priority, process_configs, - ) + ) self.socket_config = socket_config def __eq__(self, other): @@ -2067,21 +2643,23 @@ def __eq__(self, other): def make_group(self): from supervisor.process import FastCGIProcessGroup + return FastCGIProcessGroup(self) + def readFile(filename, offset, length): - """ Read length bytes from the file named by filename starting at - offset """ + """Read length bytes from the file named by filename starting at + offset""" absoffset = abs(offset) abslength = abs(length) try: - with open(filename, 'rb') as f: + with open(filename, "rb") as f: if absoffset != offset: # negative offset returns offset bytes from tail of the file if 
length: - raise ValueError('BAD_ARGUMENTS') + raise ValueError("BAD_ARGUMENTS") f.seek(0, 2) sz = f.tell() pos = int(sz - absoffset) @@ -2091,7 +2669,7 @@ def readFile(filename, offset, length): data = f.read(absoffset) else: if abslength != length: - raise ValueError('BAD_ARGUMENTS') + raise ValueError("BAD_ARGUMENTS") if length == 0: f.seek(offset) data = f.read() @@ -2099,10 +2677,11 @@ def readFile(filename, offset, length): f.seek(offset) data = f.read(length) except (OSError, IOError): - raise ValueError('FAILED') + raise ValueError("FAILED") return data + def tailFile(filename, offset, length): """ Read length bytes from the file named by filename starting at @@ -2112,7 +2691,7 @@ def tailFile(filename, offset, length): """ try: - with open(filename, 'rb') as f: + with open(filename, "rb") as f: overflow = False f.seek(0, 2) sz = f.tell() @@ -2132,7 +2711,7 @@ def tailFile(filename, offset, length): length = 0 if length == 0: - data = b'' + data = b"" else: f.seek(offset) data = f.read(length) @@ -2140,10 +2719,12 @@ def tailFile(filename, offset, length): offset = sz return [as_string(data), offset, overflow] except (OSError, IOError): - return ['', offset, False] + return ["", offset, False] + # Helpers for dealing with signals and exit status + def decode_wait_status(sts): """Decode the status returned by wait() or waitpid(). @@ -2153,7 +2734,7 @@ def decode_wait_status(sts): responsibility to display the message. """ if os.WIFEXITED(sts): - es = os.WEXITSTATUS(sts) & 0xffff + es = os.WEXITSTATUS(sts) & 0xFFFF msg = "exit status %s" % es return es, msg elif os.WIFSIGNALED(sts): @@ -2170,8 +2751,10 @@ def decode_wait_status(sts): msg = "unknown termination cause 0x%04x" % sts return -1, msg + _signames = None + def signame(sig): """Return a symbolic name for a signal. 
@@ -2183,6 +2766,7 @@ def signame(sig): _init_signames() return _signames.get(sig) or "signal %d" % sig + def _init_signames(): global _signames d = {} @@ -2194,6 +2778,7 @@ def _init_signames(): d[v] = k _signames = d + class SignalReceiver: def __init__(self): self._signals_recvd = [] @@ -2209,8 +2794,10 @@ def get_signal(self): sig = None return sig + # miscellaneous utility functions + def expand(s, expansions, name): try: return s % expansions @@ -2218,15 +2805,15 @@ def expand(s, expansions, name): available = list(expansions.keys()) available.sort() raise ValueError( - 'Format string %r for %r contains names (%s) which cannot be ' - 'expanded. Available names: %s' % - (s, name, str(ex), ", ".join(available))) + "Format string %r for %r contains names (%s) which cannot be " + "expanded. Available names: %s" % (s, name, str(ex), ", ".join(available)) + ) except Exception as ex: raise ValueError( - 'Format string %r for %r is badly formatted: %s' % - (s, name, str(ex)) + "Format string %r for %r is badly formatted: %s" % (s, name, str(ex)) ) + def make_namespec(group_name, process_name): # we want to refer to the process by its "short name" (a process named # process1 in the group process1 has a name "process1"). 
This is for @@ -2234,38 +2821,45 @@ def make_namespec(group_name, process_name): if group_name == process_name: name = process_name else: - name = '%s:%s' % (group_name, process_name) + name = "%s:%s" % (group_name, process_name) return name + def split_namespec(namespec): - names = namespec.split(':', 1) + names = namespec.split(":", 1) if len(names) == 2: # group and process name differ group_name, process_name = names - if not process_name or process_name == '*': + if not process_name or process_name == "*": process_name = None else: # group name is same as process name group_name, process_name = namespec, namespec return group_name, process_name + # exceptions + class ProcessException(Exception): - """ Specialized exceptions used when attempting to start a process """ + """Specialized exceptions used when attempting to start a process""" + class BadCommand(ProcessException): - """ Indicates the command could not be parsed properly. """ + """Indicates the command could not be parsed properly.""" + class NotExecutable(ProcessException): - """ Indicates that the filespec cannot be executed because its path - resolves to a file which is not executable, or which is a directory. """ + """Indicates that the filespec cannot be executed because its path + resolves to a file which is not executable, or which is a directory.""" + class NotFound(ProcessException): - """ Indicates that the filespec cannot be executed because it could not - be found """ + """Indicates that the filespec cannot be executed because it could not + be found""" + class NoPermission(ProcessException): - """ Indicates that the file cannot be executed because the supervisor + """Indicates that the file cannot be executed because the supervisor process does not possess the appropriate UNIX filesystem permission - to execute the file. 
""" + to execute the file.""" From 1df357428dd2b9da7c69f63cb755c8d45141e5a7 Mon Sep 17 00:00:00 2001 From: Alexey Nazarov Date: Fri, 19 Jul 2024 17:52:04 +0200 Subject: [PATCH 2/4] Replace f-strings with format() to ensure compatibility with Python 3.5 --- supervisor/options.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/supervisor/options.py b/supervisor/options.py index 4c0bba7f0..540b3f3fc 100644 --- a/supervisor/options.py +++ b/supervisor/options.py @@ -442,8 +442,8 @@ def process_config_file(self, do_usage=True): # if this is called from an RPC method, raise an error raise ValueError(msg) if hasattr(self, 'minfds') and hasattr(self, 'minprocs'): - print(f"Debug: minfds = {self.minfds}") - print(f"Debug: minprocs = {self.minprocs}") + print("Debug: minfds = {}".format(self.minfds)) + print("Debug: minprocs = {}".format(self.minprocs)) # Previous version # def process_config_file(self, do_usage): # # Process config file @@ -1930,8 +1930,9 @@ def set_rlimits_or_exit(self): usage() if any rlimits could not be set.""" limits = [] if hasattr(self, 'minfds') and hasattr(self, 'minprocs'): - print(f"Debug: minfds = {self.minfds}") - print(f"Debug: minprocs = {self.minprocs}") + print("Debug: minfds = {}".format(self.minfds)) + print("Debug: minprocs = {}".format(self.minprocs)) + if hasattr(resource, "RLIMIT_NOFILE"): if self.minfds is None: From 2d63ef73a11673f42ad2ba518b524292d51d21c5 Mon Sep 17 00:00:00 2001 From: Alexey Nazarov Date: Fri, 19 Jul 2024 17:59:16 +0200 Subject: [PATCH 3/4] Replace f-strings with format() to ensure compatibility with Python 2.7 --- supervisor/options.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/supervisor/options.py b/supervisor/options.py index 540b3f3fc..22f712850 100644 --- a/supervisor/options.py +++ b/supervisor/options.py @@ -839,7 +839,7 @@ def process_config(self, do_usage=True): if getattr(self, name) is None: self.usage(message) - print(f"Before setting defaults - 
minfds: {self.minfds}, minprocs: {self.minprocs}") + print("Before setting defaults - minfds: {}, minprocs: {}".format(self.minfds, self.minprocs)) if self.minfds is None: self.minfds = 1024 From ccc7f6985bb094875d7d4248b127c382070d8f5e Mon Sep 17 00:00:00 2001 From: Alexey Nazarov Date: Fri, 19 Jul 2024 18:08:10 +0200 Subject: [PATCH 4/4] Replace f-strings with format() to ensure compatibility with Python 2.7 --- supervisor/options.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/supervisor/options.py b/supervisor/options.py index 22f712850..a0190eaec 100644 --- a/supervisor/options.py +++ b/supervisor/options.py @@ -846,7 +846,7 @@ def process_config(self, do_usage=True): if self.minprocs is None: self.minprocs = 200 - print(f"After setting defaults - minfds: {self.minfds}, minprocs: {self.minprocs}") + print("After setting defaults - minfds: %s, minprocs: %s" % (self.minfds, self.minprocs))