Column schema (dtype and observed value range per field):

    id                 int32    0 .. 252k
    repo               string   length 7 .. 55
    path               string   length 4 .. 127
    func_name          string   length 1 .. 88
    original_string    string   length 75 .. 19.8k
    language           string   1 class
    code               string   length 75 .. 19.8k
    code_tokens        list
    docstring          string   length 3 .. 17.3k
    docstring_tokens   list
    sha                string   length 40
    url                string   length 87 .. 242
31,700
spotify/luigi
luigi/parameter.py
Parameter._get_value_from_config
python
def _get_value_from_config(self, section, name):
    """Loads the default from the config. Returns _no_value if it doesn't exist"""
    conf = configuration.get_config()

    try:
        value = conf.get(section, name)
    except (NoSectionError, NoOptionError, KeyError):
        return _no_value

    return self.parse(value)
[ "def", "_get_value_from_config", "(", "self", ",", "section", ",", "name", ")", ":", "conf", "=", "configuration", ".", "get_config", "(", ")", "try", ":", "value", "=", "conf", ".", "get", "(", "section", ",", "name", ")", "except", "(", "NoSectionError...
Loads the default from the config. Returns _no_value if it doesn't exist
[ "Loads", "the", "default", "from", "the", "config", ".", "Returns", "_no_value", "if", "it", "doesn", "t", "exist" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/parameter.py#L192-L202
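A minimal sketch of the sentinel pattern this row relies on; `_no_value` here is a stand-in object and `get_with_sentinel` a hypothetical helper, not luigi's actual code:

import configparser  # stdlib source of NoSectionError/NoOptionError in luigi

_no_value = object()  # unique sentinel distinguishing "unset" from None/False/0

def get_with_sentinel(cfg, section, name):
    try:
        return cfg[section][name]
    except KeyError:
        return _no_value  # callers check identity, not truthiness

cfg = {'my_task': {'retries': '3'}}
assert get_with_sentinel(cfg, 'my_task', 'retries') == '3'
assert get_with_sentinel(cfg, 'my_task', 'missing') is _no_value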
31,701
spotify/luigi
luigi/parameter.py
Parameter._value_iterator
python
def _value_iterator(self, task_name, param_name):
    """
    Yield the parameter values, with optional deprecation warning as second tuple value.

    The parameter value will be whatever non-_no_value that is yielded first.
    """
    cp_parser = CmdlineParser.get_instance()
    if cp_parser:
        dest = self._parser_global_dest(param_name, task_name)
        found = getattr(cp_parser.known_args, dest, None)
        yield (self._parse_or_no_value(found), None)
    yield (self._get_value_from_config(task_name, param_name), None)
    if self._config_path:
        yield (self._get_value_from_config(self._config_path['section'], self._config_path['name']),
               'The use of the configuration [{}] {} is deprecated. Please use [{}] {}'.format(
                   self._config_path['section'], self._config_path['name'], task_name, param_name))
    yield (self._default, None)
[ "def", "_value_iterator", "(", "self", ",", "task_name", ",", "param_name", ")", ":", "cp_parser", "=", "CmdlineParser", ".", "get_instance", "(", ")", "if", "cp_parser", ":", "dest", "=", "self", ".", "_parser_global_dest", "(", "param_name", ",", "task_name"...
Yield the parameter values, with optional deprecation warning as second tuple value. The parameter value will be whatever non-_no_value that is yielded first.
[ "Yield", "the", "parameter", "values", "with", "optional", "deprecation", "warning", "as", "second", "tuple", "value", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/parameter.py#L212-L228
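The generator yields candidates in precedence order (command line, task config section, deprecated config path, default), and the caller takes the first non-sentinel value. A consumer sketch under the same `_no_value` sentinel convention; the warning handling is illustrative, not luigi's exact code:

import warnings

_no_value = object()  # stands in for luigi's parameter sentinel

def resolve(candidates):
    # candidates: iterable of (value, deprecation_warning_or_None),
    # ordered from highest to lowest precedence
    for value, warning in candidates:
        if value is not _no_value:
            if warning:
                warnings.warn(warning, DeprecationWarning)
            return value
    raise ValueError("no value provided")

assert resolve([(_no_value, None), ('from-config', None), ('default', None)]) == 'from-config'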
31,702
spotify/luigi
luigi/parameter.py
Parameter._parse_list
python
def _parse_list(self, xs):
    """
    Parse a list of values from the scheduler.

    Only possible if this is_batchable() is True. This will combine the list into a single
    parameter value using batch method. This should never need to be overridden.

    :param xs: list of values to parse and combine
    :return: the combined parsed values
    """
    if not self._is_batchable():
        raise NotImplementedError('No batch method found')
    elif not xs:
        raise ValueError('Empty parameter list passed to parse_list')
    else:
        return self._batch_method(map(self.parse, xs))
[ "def", "_parse_list", "(", "self", ",", "xs", ")", ":", "if", "not", "self", ".", "_is_batchable", "(", ")", ":", "raise", "NotImplementedError", "(", "'No batch method found'", ")", "elif", "not", "xs", ":", "raise", "ValueError", "(", "'Empty parameter list ...
Parse a list of values from the scheduler. Only possible if this is_batchable() is True. This will combine the list into a single parameter value using batch method. This should never need to be overridden. :param xs: list of values to parse and combine :return: the combined parsed values
[ "Parse", "a", "list", "of", "values", "from", "the", "scheduler", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/parameter.py#L255-L270
31,703
spotify/luigi
luigi/parameter.py
_DateParameterBase.parse
python
def parse(self, s):
    """
    Parses a date string formatted like ``YYYY-MM-DD``.
    """
    return datetime.datetime.strptime(s, self.date_format).date()
[ "def", "parse", "(", "self", ",", "s", ")", ":", "return", "datetime", ".", "datetime", ".", "strptime", "(", "s", ",", "self", ".", "date_format", ")", ".", "date", "(", ")" ]
Parses a date string formatted like ``YYYY-MM-DD``.
[ "Parses", "a", "date", "string", "formatted", "like", "YYYY", "-", "MM", "-", "DD", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/parameter.py#L373-L377
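For a `date_format` of `'%Y-%m-%d'` (the format the docstring implies), the parse behaves like this:

import datetime

# strptime returns a full datetime; .date() drops the (zeroed) time component
d = datetime.datetime.strptime('2017-02-28', '%Y-%m-%d').date()
assert d == datetime.date(2017, 2, 28)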
31,704
spotify/luigi
luigi/parameter.py
MonthParameter._add_months
python
def _add_months(self, date, months):
    """
    Add ``months`` months to ``date``.

    Unfortunately we can't use timedeltas to add months because timedelta counts in days
    and there's no foolproof way to add N months in days without counting the number of
    days per month.
    """
    year = date.year + (date.month + months - 1) // 12
    month = (date.month + months - 1) % 12 + 1
    return datetime.date(year=year, month=month, day=1)
[ "def", "_add_months", "(", "self", ",", "date", ",", "months", ")", ":", "year", "=", "date", ".", "year", "+", "(", "date", ".", "month", "+", "months", "-", "1", ")", "//", "12", "month", "=", "(", "date", ".", "month", "+", "months", "-", "1...
Add ``months`` months to ``date``. Unfortunately we can't use timedeltas to add months because timedelta counts in days and there's no foolproof way to add N months in days without counting the number of days per month.
[ "Add", "months", "months", "to", "date", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/parameter.py#L447-L457
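The arithmetic shifts the month to a zero-based index so integer division carries into the year. Tracing November 2000 plus 3 months: `date.month + months - 1 = 13`, so `year = 2000 + 13 // 12 = 2001` and `month = 13 % 12 + 1 = 2`, i.e. February 2001. A standalone check of the same formula:

import datetime

def add_months(date, months):
    # same formula as above: zero-based month index, carry into the year
    year = date.year + (date.month + months - 1) // 12
    month = (date.month + months - 1) % 12 + 1
    return datetime.date(year=year, month=month, day=1)

assert add_months(datetime.date(2000, 11, 1), 3) == datetime.date(2001, 2, 1)
# Python's floor division makes negative offsets work too:
assert add_months(datetime.date(2000, 3, 1), -3) == datetime.date(1999, 12, 1)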
31,705
spotify/luigi
luigi/parameter.py
BoolParameter.parse
python
def parse(self, val):
    """
    Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case.
    """
    s = str(val).lower()
    if s == "true":
        return True
    elif s == "false":
        return False
    else:
        raise ValueError("cannot interpret '{}' as boolean".format(val))
[ "def", "parse", "(", "self", ",", "val", ")", ":", "s", "=", "str", "(", "val", ")", ".", "lower", "(", ")", "if", "s", "==", "\"true\"", ":", "return", "True", "elif", "s", "==", "\"false\"", ":", "return", "False", "else", ":", "raise", "ValueE...
Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case.
[ "Parses", "a", "bool", "from", "the", "string", "matching", "true", "or", "false", "ignoring", "case", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/parameter.py#L686-L696
31,706
spotify/luigi
luigi/parameter.py
TimeDeltaParameter.parse
python
def parse(self, input):
    """
    Parses a time delta from the input.

    See :py:class:`TimeDeltaParameter` for details on supported formats.
    """
    result = self._parseIso8601(input)
    if not result:
        result = self._parseSimple(input)
    if result is not None:
        return result
    else:
        raise ParameterException("Invalid time delta - could not parse %s" % input)
[ "def", "parse", "(", "self", ",", "input", ")", ":", "result", "=", "self", ".", "_parseIso8601", "(", "input", ")", "if", "not", "result", ":", "result", "=", "self", ".", "_parseSimple", "(", "input", ")", "if", "result", "is", "not", "None", ":", ...
Parses a time delta from the input. See :py:class:`TimeDeltaParameter` for details on supported formats.
[ "Parses", "a", "time", "delta", "from", "the", "input", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/parameter.py#L790-L802
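One subtlety worth noting: `if not result` treats a zero-length timedelta as missing, because `timedelta(0)` is falsy, so an ISO 8601 parse yielding zero would fall through to `_parseSimple`. A quick demonstration of the falsiness, independent of luigi:

import datetime

assert not datetime.timedelta(0)      # a zero timedelta is falsy
assert datetime.timedelta(seconds=1)  # any nonzero delta is truthy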
31,707
spotify/luigi
luigi/parameter.py
TimeDeltaParameter.serialize
python
def serialize(self, x):
    """
    Converts datetime.timedelta to a string

    :param x: the value to serialize.
    """
    weeks = x.days // 7
    days = x.days % 7
    hours = x.seconds // 3600
    minutes = (x.seconds % 3600) // 60
    seconds = (x.seconds % 3600) % 60

    result = "{} w {} d {} h {} m {} s".format(weeks, days, hours, minutes, seconds)
    return result
[ "def", "serialize", "(", "self", ",", "x", ")", ":", "weeks", "=", "x", ".", "days", "//", "7", "days", "=", "x", ".", "days", "%", "7", "hours", "=", "x", ".", "seconds", "//", "3600", "minutes", "=", "(", "x", ".", "seconds", "%", "3600", "...
Converts datetime.timedelta to a string :param x: the value to serialize.
[ "Converts", "datetime", ".", "timedelta", "to", "a", "string" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/parameter.py#L804-L816
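Tracing the decomposition for `timedelta(days=10, seconds=3661)`: `days=10` gives 1 week and 3 days, `seconds=3661` gives 1 hour, 1 minute, 1 second. A standalone version of the same arithmetic, written with `divmod` for compactness:

import datetime

def serialize_timedelta(x):
    # mirrors the field decomposition above; timedelta itself normalizes
    # everything into .days and .seconds (always < 86400)
    weeks, days = divmod(x.days, 7)
    hours, rem = divmod(x.seconds, 3600)
    minutes, seconds = divmod(rem, 60)
    return "{} w {} d {} h {} m {} s".format(weeks, days, hours, minutes, seconds)

assert serialize_timedelta(datetime.timedelta(days=10, seconds=3661)) == "1 w 3 d 1 h 1 m 1 s"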
31,708
spotify/luigi
luigi/parameter.py
TupleParameter.parse
python
def parse(self, x):
    """
    Parse an individual value from the input.

    :param str x: the value to parse.
    :return: the parsed value.
    """
    # Since the result of json.dumps(tuple) differs from a tuple string, we must handle either case.
    # A tuple string may come from a config file or from cli execution.

    # t = ((1, 2), (3, 4))
    # t_str = '((1,2),(3,4))'
    # t_json_str = json.dumps(t)
    # t_json_str == '[[1, 2], [3, 4]]'

    # json.loads(t_json_str) == t
    # json.loads(t_str) == ValueError: No JSON object could be decoded

    # Therefore, if json.loads(x) returns a ValueError, try ast.literal_eval(x).
    # ast.literal_eval(t_str) == t
    try:
        # loop required to parse tuple of tuples
        return tuple(tuple(x) for x in json.loads(x, object_pairs_hook=_FrozenOrderedDict))
    except (ValueError, TypeError):
        return tuple(literal_eval(x))
[ "def", "parse", "(", "self", ",", "x", ")", ":", "# Since the result of json.dumps(tuple) differs from a tuple string, we must handle either case.", "# A tuple string may come from a config file or from cli execution.", "# t = ((1, 2), (3, 4))", "# t_str = '((1,2),(3,4))'", "# t_json_str = j...
Parse an individual value from the input. :param str x: the value to parse. :return: the parsed value.
[ "Parse", "an", "individual", "value", "from", "the", "input", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/parameter.py#L1096-L1119
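The two parse paths in action, in a simplified sketch that uses plain `json.loads`/`ast.literal_eval` without the `_FrozenOrderedDict` object_pairs_hook luigi passes:

import json
from ast import literal_eval

def parse_tuple(x):
    # simplified version of the fallback logic above
    try:
        return tuple(tuple(inner) for inner in json.loads(x))
    except (ValueError, TypeError):
        return tuple(literal_eval(x))

assert parse_tuple('[[1, 2], [3, 4]]') == ((1, 2), (3, 4))  # JSON path
assert parse_tuple('((1,2),(3,4))') == ((1, 2), (3, 4))     # literal_eval fallback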
31,709
spotify/luigi
luigi/contrib/hadoop.py
create_packages_archive
python
def create_packages_archive(packages, filename):
    """
    Create a tar archive which will contain the files for the packages listed in packages.
    """
    import tarfile
    tar = tarfile.open(filename, "w")

    def add(src, dst):
        logger.debug('adding to tar: %s -> %s', src, dst)
        tar.add(src, dst)

    def add_files_for_package(sub_package_path, root_package_path, root_package_name):
        for root, dirs, files in os.walk(sub_package_path):
            if '.svn' in dirs:
                dirs.remove('.svn')
            for f in files:
                if not f.endswith(".pyc") and not f.startswith("."):
                    add(dereference(root + "/" + f),
                        root.replace(root_package_path, root_package_name) + "/" + f)

    for package in packages:
        # Put a submodule's entire package in the archive. This is the
        # magic that usually packages everything you need without
        # having to attach packages/modules explicitly
        if not getattr(package, "__path__", None) and '.' in package.__name__:
            package = __import__(package.__name__.rpartition('.')[0], None, None, 'non_empty')

        n = package.__name__.replace(".", "/")

        if getattr(package, "__path__", None):
            # TODO: (BUG) picking only the first path does not
            # properly deal with namespaced packages in different
            # directories
            p = package.__path__[0]

            if p.endswith('.egg') and os.path.isfile(p):
                # was `raise 'egg files not supported!!!'`; raising a bare
                # string is invalid in Python 3
                raise Exception('egg files not supported!!!')
                # Add the entire egg file
                # p = p[:p.find('.egg') + 4]
                # add(dereference(p), os.path.basename(p))
            else:
                # include __init__ files from parent projects
                root = []
                for parent in package.__name__.split('.')[0:-1]:
                    root.append(parent)
                    module_name = '.'.join(root)
                    directory = '/'.join(root)
                    add(dereference(__import__(module_name, None, None, 'non_empty').__path__[0] + "/__init__.py"),
                        directory + "/__init__.py")

                add_files_for_package(p, p, n)

                # include egg-info directories that are parallel:
                for egg_info_path in glob.glob(p + '*.egg-info'):
                    logger.debug(
                        'Adding package metadata to archive for "%s" found at "%s"',
                        package.__name__,
                        egg_info_path
                    )
                    add_files_for_package(egg_info_path, p, n)
        else:
            f = package.__file__
            if f.endswith("pyc"):
                f = f[:-3] + "py"
            if n.find(".") == -1:
                add(dereference(f), os.path.basename(f))
            else:
                add(dereference(f), n + ".py")
    tar.close()
[ "def", "create_packages_archive", "(", "packages", ",", "filename", ")", ":", "import", "tarfile", "tar", "=", "tarfile", ".", "open", "(", "filename", ",", "\"w\"", ")", "def", "add", "(", "src", ",", "dst", ")", ":", "logger", ".", "debug", "(", "'ad...
Create a tar archive which will contain the files for the packages listed in packages.
[ "Create", "a", "tar", "archive", "which", "will", "contain", "the", "files", "for", "the", "packages", "listed", "in", "packages", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hadoop.py#L124-L194
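The core trick is tarfile's two-argument `add(name, arcname)`, which stores a file under a path of your choosing so the archive unpacks as an importable package tree. A minimal self-contained illustration:

import os
import tarfile
import tempfile

src_dir = tempfile.mkdtemp()
src = os.path.join(src_dir, 'module.py')
with open(src, 'w') as f:
    f.write('VALUE = 42\n')

archive = os.path.join(src_dir, 'packages.tar')
with tarfile.open(archive, 'w') as tar:
    # stored as mypkg/module.py regardless of where src lives on disk
    tar.add(src, arcname='mypkg/module.py')

with tarfile.open(archive) as tar:
    assert tar.getnames() == ['mypkg/module.py']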
31,710
spotify/luigi
luigi/contrib/hadoop.py
flatten
python
def flatten(sequence):
    """
    A simple generator which flattens a sequence.

    Only one level is flattened.

    .. code-block:: python

        (1, (2, 3), 4) -> (1, 2, 3, 4)

    """
    for item in sequence:
        if hasattr(item, "__iter__") and not isinstance(item, str) and not isinstance(item, bytes):
            for i in item:
                yield i
        else:
            yield item
[ "def", "flatten", "(", "sequence", ")", ":", "for", "item", "in", "sequence", ":", "if", "hasattr", "(", "item", ",", "\"__iter__\"", ")", "and", "not", "isinstance", "(", "item", ",", "str", ")", "and", "not", "isinstance", "(", "item", ",", "bytes", ...
A simple generator which flattens a sequence. Only one level is flattened. .. code-block:: python (1, (2, 3), 4) -> (1, 2, 3, 4)
[ "A", "simple", "generator", "which", "flattens", "a", "sequence", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hadoop.py#L197-L213
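Usage, with the `flatten` above in scope, including the str/bytes guard that keeps strings from being exploded into characters:

assert list(flatten((1, (2, 3), 4))) == [1, 2, 3, 4]
assert list(flatten(["ab", [1, 2]])) == ["ab", 1, 2]  # strings pass through whole
assert list(flatten([(1, (2,))])) == [1, (2,)]        # only one level is flattened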
31,711
spotify/luigi
luigi/contrib/hadoop.py
fetch_task_failures
python
def fetch_task_failures(tracking_url):
    """
    Uses mechanize to fetch the actual task logs from the task tracker.

    This is highly opportunistic, and we might not succeed.
    So we set a low timeout and hope it works.

    If it does not, it's not the end of the world.

    TODO: Yarn has a REST API that we should probably use instead:
    http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html
    """
    import mechanize
    timeout = 3.0
    failures_url = tracking_url.replace('jobdetails.jsp', 'jobfailures.jsp') + '&cause=failed'
    logger.debug('Fetching data from %s', failures_url)
    b = mechanize.Browser()
    b.open(failures_url, timeout=timeout)
    links = list(b.links(text_regex='Last 4KB'))  # For some reason text_regex='All' doesn't work... no idea why
    links = random.sample(links, min(10, len(links)))  # Fetch a random subset of all failed tasks, so not to be biased towards the early fails
    error_text = []
    for link in links:
        task_url = link.url.replace('&start=-4097', '&start=-100000')  # Increase the offset
        logger.debug('Fetching data from %s', task_url)
        b2 = mechanize.Browser()
        try:
            r = b2.open(task_url, timeout=timeout)
            data = r.read()
        except Exception as e:
            logger.debug('Error fetching data from %s: %s', task_url, e)
            continue
        # Try to get the hex-encoded traceback back from the output
        for exc in re.findall(r'luigi-exc-hex=[0-9a-f]+', data):
            error_text.append('---------- %s:' % task_url)
            error_text.append(exc.split('=')[-1].decode('hex'))

    return '\n'.join(error_text)
[ "def", "fetch_task_failures", "(", "tracking_url", ")", ":", "import", "mechanize", "timeout", "=", "3.0", "failures_url", "=", "tracking_url", ".", "replace", "(", "'jobdetails.jsp'", ",", "'jobfailures.jsp'", ")", "+", "'&cause=failed'", "logger", ".", "debug", ...
Uses mechanize to fetch the actual task logs from the task tracker. This is highly opportunistic, and we might not succeed. So we set a low timeout and hope it works. If it does not, it's not the end of the world. TODO: Yarn has a REST API that we should probably use instead: http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html
[ "Uses", "mechanize", "to", "fetch", "the", "actual", "task", "logs", "from", "the", "task", "tracker", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hadoop.py#L356-L391
31,712
spotify/luigi
luigi/contrib/hadoop.py
JobTask.job_runner
python
def job_runner(self):
    # We recommend that you define a subclass, override this method and set up your own config
    """
    Get the MapReduce runner for this job.

    If all outputs are HdfsTargets, the DefaultHadoopJobRunner will be used.
    Otherwise, the LocalJobRunner which streams all data through the local machine
    will be used (great for testing).
    """
    outputs = luigi.task.flatten(self.output())
    for output in outputs:
        if not isinstance(output, luigi.contrib.hdfs.HdfsTarget):
            warnings.warn("Job is using one or more non-HdfsTarget outputs" +
                          " so it will be run in local mode")
            return LocalJobRunner()
    else:
        return DefaultHadoopJobRunner()
[ "def", "job_runner", "(", "self", ")", ":", "# We recommend that you define a subclass, override this method and set up your own config", "outputs", "=", "luigi", ".", "task", ".", "flatten", "(", "self", ".", "output", "(", ")", ")", "for", "output", "in", "outputs",...
Get the MapReduce runner for this job. If all outputs are HdfsTargets, the DefaultHadoopJobRunner will be used. Otherwise, the LocalJobRunner which streams all data through the local machine will be used (great for testing).
[ "Get", "the", "MapReduce", "runner", "for", "this", "job", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hadoop.py#L813-L829
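Note the `for ... else` construction: the `else` clause runs only when the loop finishes without breaking out, i.e. when every output passed the HdfsTarget check (or there were no outputs at all). A generic illustration of the idiom:

def pick_runner(outputs, is_hdfs):
    for output in outputs:
        if not is_hdfs(output):
            return 'local'   # early return; the else clause never executes
    else:
        return 'hadoop'      # reached only when the loop runs to completion

assert pick_runner(['hdfs://a', 'hdfs://b'], lambda o: o.startswith('hdfs://')) == 'hadoop'
assert pick_runner(['hdfs://a', '/tmp/b'], lambda o: o.startswith('hdfs://')) == 'local'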
31,713
spotify/luigi
luigi/contrib/hadoop.py
JobTask.writer
python
def writer(self, outputs, stdout, stderr=sys.stderr):
    """
    Writer format is a method which iterates over the output records
    from the reducer and formats them for output.

    The default implementation outputs tab separated items.
    """
    for output in outputs:
        try:
            output = flatten(output)
            if self.data_interchange_format == "json":
                # Only dump one json string, and skip another one, maybe key or value.
                output = filter(lambda x: x, output)
            else:
                # JSON is already serialized, so we put `self.serialize` in a else statement.
                output = map(self.serialize, output)
            print("\t".join(output), file=stdout)
        except BaseException:
            print(output, file=stderr)
            raise
[ "def", "writer", "(", "self", ",", "outputs", ",", "stdout", ",", "stderr", "=", "sys", ".", "stderr", ")", ":", "for", "output", "in", "outputs", ":", "try", ":", "output", "=", "flatten", "(", "output", ")", "if", "self", ".", "data_interchange_forma...
Writer format is a method which iterates over the output records from the reducer and formats them for output. The default implementation outputs tab separated items.
[ "Writer", "format", "is", "a", "method", "which", "iterates", "over", "the", "output", "records", "from", "the", "reducer", "and", "formats", "them", "for", "output", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hadoop.py#L839-L858
31,714
spotify/luigi
luigi/contrib/hadoop.py
JobTask._flush_batch_incr_counter
python
def _flush_batch_incr_counter(self):
    """
    Increments any unflushed counter values.
    """
    for key, count in six.iteritems(self._counter_dict):
        if count == 0:
            continue
        args = list(key) + [count]
        self._incr_counter(*args)
        self._counter_dict[key] = 0
[ "def", "_flush_batch_incr_counter", "(", "self", ")", ":", "for", "key", ",", "count", "in", "six", ".", "iteritems", "(", "self", ".", "_counter_dict", ")", ":", "if", "count", "==", "0", ":", "continue", "args", "=", "list", "(", "key", ")", "+", "...
Increments any unflushed counter values.
[ "Increments", "any", "unflushed", "counter", "values", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hadoop.py#L893-L902
31,715
spotify/luigi
luigi/contrib/hadoop.py
JobTask._map_input
python
def _map_input(self, input_stream):
    """
    Iterate over input and call the mapper for each item.
    If the job has a parser defined, the return values from the parser will
    be passed as arguments to the mapper.

    If the input is coded output from a previous run,
    the arguments will be split into key and value.
    """
    for record in self.reader(input_stream):
        for output in self.mapper(*record):
            yield output
    if self.final_mapper != NotImplemented:
        for output in self.final_mapper():
            yield output
    self._flush_batch_incr_counter()
[ "def", "_map_input", "(", "self", ",", "input_stream", ")", ":", "for", "record", "in", "self", ".", "reader", "(", "input_stream", ")", ":", "for", "output", "in", "self", ".", "mapper", "(", "*", "record", ")", ":", "yield", "output", "if", "self", ...
Iterate over input and call the mapper for each item. If the job has a parser defined, the return values from the parser will be passed as arguments to the mapper. If the input is coded output from a previous run, the arguments will be split into key and value.
[ "Iterate", "over", "input", "and", "call", "the", "mapper", "for", "each", "item", ".", "If", "the", "job", "has", "a", "parser", "defined", "the", "return", "values", "from", "the", "parser", "will", "be", "passed", "as", "arguments", "to", "the", "mapp...
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hadoop.py#L989-L1004
31,716
spotify/luigi
luigi/contrib/hadoop.py
JobTask._reduce_input
python
def _reduce_input(self, inputs, reducer, final=NotImplemented):
    """
    Iterate over input, collect values with the same key, and call the reducer for each unique key.
    """
    for key, values in groupby(inputs, key=lambda x: self.internal_serialize(x[0])):
        for output in reducer(self.deserialize(key), (v[1] for v in values)):
            yield output
    if final != NotImplemented:
        for output in final():
            yield output
    self._flush_batch_incr_counter()
[ "def", "_reduce_input", "(", "self", ",", "inputs", ",", "reducer", ",", "final", "=", "NotImplemented", ")", ":", "for", "key", ",", "values", "in", "groupby", "(", "inputs", ",", "key", "=", "lambda", "x", ":", "self", ".", "internal_serialize", "(", ...
Iterate over input, collect values with the same key, and call the reducer for each unique key.
[ "Iterate", "over", "input", "collect", "values", "with", "the", "same", "key", "and", "call", "the", "reducer", "for", "each", "unique", "key", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hadoop.py#L1006-L1016
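`groupby` only groups adjacent items, so this depends on the reducer's input already being sorted by key, which is exactly what the Hadoop shuffle guarantees. A standalone demonstration of the pattern, using a plain key function instead of luigi's `internal_serialize`:

from itertools import groupby

inputs = [('a', 1), ('a', 2), ('b', 3)]  # must be sorted by key, as the shuffle guarantees

def reducer(key, values):
    yield key, sum(values)

results = []
for key, group in groupby(inputs, key=lambda kv: kv[0]):
    results.extend(reducer(key, (v for _, v in group)))

assert results == [('a', 3), ('b', 3)]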
31,717
spotify/luigi
luigi/contrib/hadoop.py
JobTask.run_mapper
python
def run_mapper(self, stdin=sys.stdin, stdout=sys.stdout):
    """
    Run the mapper on the hadoop node.
    """
    self.init_hadoop()
    self.init_mapper()
    outputs = self._map_input((line[:-1] for line in stdin))
    if self.reducer == NotImplemented:
        self.writer(outputs, stdout)
    else:
        self.internal_writer(outputs, stdout)
[ "def", "run_mapper", "(", "self", ",", "stdin", "=", "sys", ".", "stdin", ",", "stdout", "=", "sys", ".", "stdout", ")", ":", "self", ".", "init_hadoop", "(", ")", "self", ".", "init_mapper", "(", ")", "outputs", "=", "self", ".", "_map_input", "(", ...
Run the mapper on the hadoop node.
[ "Run", "the", "mapper", "on", "the", "hadoop", "node", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hadoop.py#L1018-L1028
31,718
spotify/luigi
luigi/contrib/hadoop.py
JobTask.run_reducer
python
def run_reducer(self, stdin=sys.stdin, stdout=sys.stdout):
    """
    Run the reducer on the hadoop node.
    """
    self.init_hadoop()
    self.init_reducer()
    outputs = self._reduce_input(self.internal_reader((line[:-1] for line in stdin)),
                                 self.reducer, self.final_reducer)
    self.writer(outputs, stdout)
[ "def", "run_reducer", "(", "self", ",", "stdin", "=", "sys", ".", "stdin", ",", "stdout", "=", "sys", ".", "stdout", ")", ":", "self", ".", "init_hadoop", "(", ")", "self", ".", "init_reducer", "(", ")", "outputs", "=", "self", ".", "_reduce_input", ...
Run the reducer on the hadoop node.
[ "Run", "the", "reducer", "on", "the", "hadoop", "node", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hadoop.py#L1030-L1037
31,719
spotify/luigi
luigi/contrib/hadoop.py
JobTask.internal_reader
python
def internal_reader(self, input_stream):
    """
    Reader which uses python eval on each part of a tab separated string.
    Yields a tuple of python objects.
    """
    for input_line in input_stream:
        yield list(map(self.deserialize, input_line.split("\t")))
[ "def", "internal_reader", "(", "self", ",", "input_stream", ")", ":", "for", "input_line", "in", "input_stream", ":", "yield", "list", "(", "map", "(", "self", ".", "deserialize", ",", "input_line", ".", "split", "(", "\"\\t\"", ")", ")", ")" ]
Reader which uses python eval on each part of a tab separated string. Yields a tuple of python objects.
[ "Reader", "which", "uses", "python", "eval", "on", "each", "part", "of", "a", "tab", "separated", "string", ".", "Yields", "a", "tuple", "of", "python", "objects", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hadoop.py#L1045-L1051
31,720
spotify/luigi
luigi/contrib/hadoop.py
JobTask.internal_writer
python
def internal_writer(self, outputs, stdout):
    """
    Writer which outputs the python repr for each item.
    """
    for output in outputs:
        print("\t".join(map(self.internal_serialize, output)), file=stdout)
[ "def", "internal_writer", "(", "self", ",", "outputs", ",", "stdout", ")", ":", "for", "output", "in", "outputs", ":", "print", "(", "\"\\t\"", ".", "join", "(", "map", "(", "self", ".", "internal_serialize", ",", "output", ")", ")", ",", "file", "=", ...
Writer which outputs the python repr for each item.
[ "Writer", "which", "outputs", "the", "python", "repr", "for", "each", "item", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hadoop.py#L1053-L1058
31,721
spotify/luigi
luigi/contrib/postgres.py
PostgresTarget.connect
python
def connect(self):
    """
    Get a psycopg2 connection object to the database where the table is.
    """
    connection = psycopg2.connect(
        host=self.host,
        port=self.port,
        database=self.database,
        user=self.user,
        password=self.password)
    connection.set_client_encoding('utf-8')
    return connection
[ "def", "connect", "(", "self", ")", ":", "connection", "=", "psycopg2", ".", "connect", "(", "host", "=", "self", ".", "host", ",", "port", "=", "self", ".", "port", ",", "database", "=", "self", ".", "database", ",", "user", "=", "self", ".", "use...
Get a psycopg2 connection object to the database where the table is.
[ "Get", "a", "psycopg2", "connection", "object", "to", "the", "database", "where", "the", "table", "is", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/postgres.py#L187-L198
31,722
spotify/luigi
luigi/contrib/postgres.py
CopyToTable.map_column
python
def map_column(self, value):
    """
    Applied to each column of every row returned by `rows`.

    Default behaviour is to escape special characters and identify any self.null_values.
    """
    if value in self.null_values:
        return r'\\N'
    else:
        return default_escape(six.text_type(value))
[ "def", "map_column", "(", "self", ",", "value", ")", ":", "if", "value", "in", "self", ".", "null_values", ":", "return", "r'\\\\N'", "else", ":", "return", "default_escape", "(", "six", ".", "text_type", "(", "value", ")", ")" ]
Applied to each column of every row returned by `rows`. Default behaviour is to escape special characters and identify any self.null_values.
[ "Applied", "to", "each", "column", "of", "every", "row", "returned", "by", "rows", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/postgres.py#L255-L264
31,723
spotify/luigi
luigi/contrib/postgres.py
CopyToTable.output
python
def output(self):
    """
    Returns a PostgresTarget representing the inserted dataset.

    Normally you don't override this.
    """
    return PostgresTarget(
        host=self.host,
        database=self.database,
        user=self.user,
        password=self.password,
        table=self.table,
        update_id=self.update_id,
        port=self.port
    )
[ "def", "output", "(", "self", ")", ":", "return", "PostgresTarget", "(", "host", "=", "self", ".", "host", ",", "database", "=", "self", ".", "database", ",", "user", "=", "self", ".", "user", ",", "password", "=", "self", ".", "password", ",", "tabl...
Returns a PostgresTarget representing the inserted dataset. Normally you don't override this.
[ "Returns", "a", "PostgresTarget", "representing", "the", "inserted", "dataset", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/postgres.py#L268-L282
31,724
spotify/luigi
luigi/configuration/core.py
get_config
python
def get_config(parser=PARSER):
    """Get configs singleton for parser
    """
    parser_class = PARSERS[parser]
    _check_parser(parser_class, parser)
    return parser_class.instance()
[ "def", "get_config", "(", "parser", "=", "PARSER", ")", ":", "parser_class", "=", "PARSERS", "[", "parser", "]", "_check_parser", "(", "parser_class", ",", "parser", ")", "return", "parser_class", ".", "instance", "(", ")" ]
Get configs singleton for parser
[ "Get", "configs", "singleton", "for", "parser" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/configuration/core.py#L53-L58
31,725
spotify/luigi
luigi/configuration/core.py
add_config_path
python
def add_config_path(path):
    """Select config parser by file extension and add path into parser.
    """
    if not os.path.isfile(path):
        warnings.warn("Config file does not exist: {path}".format(path=path))
        return False

    # select parser by file extension
    _base, ext = os.path.splitext(path)
    if ext and ext[1:] in PARSERS:
        parser = ext[1:]
    else:
        parser = PARSER
    parser_class = PARSERS[parser]

    _check_parser(parser_class, parser)
    if parser != PARSER:
        msg = (
            "Config for {added} parser added, but used {used} parser. "
            "Set up right parser via env var: "
            "export LUIGI_CONFIG_PARSER={added}"
        )
        warnings.warn(msg.format(added=parser, used=PARSER))

    # add config path to parser
    parser_class.add_config_path(path)
    return True
[ "def", "add_config_path", "(", "path", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "warnings", ".", "warn", "(", "\"Config file does not exist: {path}\"", ".", "format", "(", "path", "=", "path", ")", ")", "return", "F...
Select config parser by file extension and add path into parser.
[ "Select", "config", "parser", "by", "file", "extension", "and", "add", "path", "into", "parser", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/configuration/core.py#L61-L87
31,726
spotify/luigi
luigi/contrib/spark.py
PySparkTask._setup_packages
python
def _setup_packages(self, sc):
    """
    This method compresses and uploads packages to the cluster
    """
    packages = self.py_packages
    if not packages:
        return
    for package in packages:
        mod = importlib.import_module(package)
        try:
            mod_path = mod.__path__[0]
        except AttributeError:
            mod_path = mod.__file__
        tar_path = os.path.join(self.run_path, package + '.tar.gz')
        tar = tarfile.open(tar_path, "w:gz")
        tar.add(mod_path, os.path.basename(mod_path))
        tar.close()
        sc.addPyFile(tar_path)
[ "def", "_setup_packages", "(", "self", ",", "sc", ")", ":", "packages", "=", "self", ".", "py_packages", "if", "not", "packages", ":", "return", "for", "package", "in", "packages", ":", "mod", "=", "importlib", ".", "import_module", "(", "package", ")", ...
This method compresses and uploads packages to the cluster
[ "This", "method", "compresses", "and", "uploads", "packages", "to", "the", "cluster" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/spark.py#L323-L341
31,727
spotify/luigi
luigi/contrib/mrrunner.py
main
python
def main(args=None, stdin=sys.stdin, stdout=sys.stdout, print_exception=print_exception):
    """
    Run either the mapper, combiner, or reducer from the class instance in the file "job-instance.pickle".

    Arguments:

    kind -- is either map, combiner, or reduce
    """
    try:
        # Set up logging.
        logging.basicConfig(level=logging.WARN)

        kind = args is not None and args[1] or sys.argv[1]
        Runner().run(kind, stdin=stdin, stdout=stdout)
    except Exception as exc:
        # Dump encoded data that we will try to fetch using mechanize
        print_exception(exc)
        raise
[ "def", "main", "(", "args", "=", "None", ",", "stdin", "=", "sys", ".", "stdin", ",", "stdout", "=", "sys", ".", "stdout", ",", "print_exception", "=", "print_exception", ")", ":", "try", ":", "# Set up logging.", "logging", ".", "basicConfig", "(", "lev...
Run either the mapper, combiner, or reducer from the class instance in the file "job-instance.pickle". Arguments: kind -- is either map, combiner, or reduce
[ "Run", "either", "the", "mapper", "combiner", "or", "reducer", "from", "the", "class", "instance", "in", "the", "file", "job", "-", "instance", ".", "pickle", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/mrrunner.py#L80-L97
31,728
spotify/luigi
luigi/retcodes.py
run_with_retcodes
python
def run_with_retcodes(argv):
    """
    Run luigi with command line parsing, but raise ``SystemExit`` with the configured exit code.

    Note: Usually you use the luigi binary directly and don't call this function yourself.

    :param argv: Should (conceptually) be ``sys.argv[1:]``
    """
    logger = logging.getLogger('luigi-interface')
    with luigi.cmdline_parser.CmdlineParser.global_instance(argv):
        retcodes = retcode()

    worker = None
    try:
        worker = luigi.interface._run(argv).worker
    except luigi.interface.PidLockAlreadyTakenExit:
        sys.exit(retcodes.already_running)
    except Exception:
        # Some errors occur before logging is set up, we set it up now
        env_params = luigi.interface.core()
        InterfaceLogging.setup(env_params)
        logger.exception("Uncaught exception in luigi")
        sys.exit(retcodes.unhandled_exception)

    with luigi.cmdline_parser.CmdlineParser.global_instance(argv):
        task_sets = luigi.execution_summary._summary_dict(worker)
        root_task = luigi.execution_summary._root_task(worker)
        non_empty_categories = {k: v for k, v in task_sets.items() if v}.keys()

    def has(status):
        assert status in luigi.execution_summary._ORDERED_STATUSES
        return status in non_empty_categories

    codes_and_conds = (
        (retcodes.missing_data, has('still_pending_ext')),
        (retcodes.task_failed, has('failed')),
        (retcodes.already_running, has('run_by_other_worker')),
        (retcodes.scheduling_error, has('scheduling_error')),
        (retcodes.not_run, has('not_run')),
    )
    expected_ret_code = max(code * (1 if cond else 0) for code, cond in codes_and_conds)

    if expected_ret_code == 0 and \
            root_task not in task_sets["completed"] and \
            root_task not in task_sets["already_done"]:
        sys.exit(retcodes.not_run)
    else:
        sys.exit(expected_ret_code)
[ "def", "run_with_retcodes", "(", "argv", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "'luigi-interface'", ")", "with", "luigi", ".", "cmdline_parser", ".", "CmdlineParser", ".", "global_instance", "(", "argv", ")", ":", "retcodes", "=", "retcode...
Run luigi with command line parsing, but raise ``SystemExit`` with the configured exit code. Note: Usually you use the luigi binary directly and don't call this function yourself. :param argv: Should (conceptually) be ``sys.argv[1:]``
[ "Run", "luigi", "with", "command", "line", "parsing", "but", "raise", "SystemExit", "with", "the", "configured", "exit", "code", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/retcodes.py#L61-L108
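The exit code is the maximum of the codes whose condition fired, so when several things go wrong the numerically highest configured code wins; multiplying by the boolean zeroes out inactive conditions. In miniature (the numeric codes here are illustrative, not luigi's defaults):

codes_and_conds = (
    (25, True),   # e.g. missing_data triggered
    (30, True),   # e.g. task_failed triggered
    (40, False),  # e.g. scheduling_error not triggered
)
expected = max(code * (1 if cond else 0) for code, cond in codes_and_conds)
assert expected == 30  # highest code among the triggered conditions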
31,729
spotify/luigi
luigi/tools/deps.py
find_deps_cli
python
def find_deps_cli():
    '''
    Finds all tasks on all paths from provided CLI task
    '''
    cmdline_args = sys.argv[1:]
    with CmdlineParser.global_instance(cmdline_args) as cp:
        return find_deps(cp.get_task_obj(), upstream().family)
[ "def", "find_deps_cli", "(", ")", ":", "cmdline_args", "=", "sys", ".", "argv", "[", "1", ":", "]", "with", "CmdlineParser", ".", "global_instance", "(", "cmdline_args", ")", "as", "cp", ":", "return", "find_deps", "(", "cp", ".", "get_task_obj", "(", ")...
Finds all tasks on all paths from provided CLI task
[ "Finds", "all", "tasks", "on", "all", "paths", "from", "provided", "CLI", "task" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/deps.py#L85-L91
31,730
spotify/luigi
luigi/tools/deps.py
get_task_output_description
python
def get_task_output_description(task_output):
    '''
    Returns a task's output as a string
    '''
    output_description = "n/a"

    if isinstance(task_output, RemoteTarget):
        output_description = "[SSH] {0}:{1}".format(task_output._fs.remote_context.host, task_output.path)
    elif isinstance(task_output, S3Target):
        output_description = "[S3] {0}".format(task_output.path)
    elif isinstance(task_output, FileSystemTarget):
        output_description = "[FileSystem] {0}".format(task_output.path)
    elif isinstance(task_output, PostgresTarget):
        output_description = "[DB] {0}:{1}".format(task_output.host, task_output.table)
    else:
        output_description = "to be determined"

    return output_description
[ "def", "get_task_output_description", "(", "task_output", ")", ":", "output_description", "=", "\"n/a\"", "if", "isinstance", "(", "task_output", ",", "RemoteTarget", ")", ":", "output_description", "=", "\"[SSH] {0}:{1}\"", ".", "format", "(", "task_output", ".", "...
Returns a task's output as a string
[ "Returns", "a", "task", "s", "output", "as", "a", "string" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/deps.py#L94-L111
31,731
spotify/luigi
luigi/tools/range.py
_constrain_glob
python
def _constrain_glob(glob, paths, limit=5):
    """
    Tweaks glob into a list of more specific globs that together still cover paths and not too much extra.

    Saves us minutes long listings for long dataset histories.

    Specifically, in this implementation the leftmost occurrences of "[0-9]"
    give rise to a few separate globs that each specialize the expression to
    digits that actually occur in paths.
    """
    def digit_set_wildcard(chars):
        """
        Makes a wildcard expression for the set, a bit readable, e.g. [1-5].
        """
        chars = sorted(chars)
        if len(chars) > 1 and ord(chars[-1]) - ord(chars[0]) == len(chars) - 1:
            return '[%s-%s]' % (chars[0], chars[-1])
        else:
            return '[%s]' % ''.join(chars)

    current = {glob: paths}
    while True:
        pos = list(current.keys())[0].find('[0-9]')
        if pos == -1:
            # no wildcard expressions left to specialize in the glob
            return list(current.keys())
        char_sets = {}
        for g, p in six.iteritems(current):
            char_sets[g] = sorted({path[pos] for path in p})
        if sum(len(s) for s in char_sets.values()) > limit:
            return [g.replace('[0-9]', digit_set_wildcard(char_sets[g]), 1) for g in current]
        for g, s in six.iteritems(char_sets):
            for c in s:
                new_glob = g.replace('[0-9]', c, 1)
                new_paths = list(filter(lambda p: p[pos] == c, current[g]))
                current[new_glob] = new_paths
            del current[g]
[ "def", "_constrain_glob", "(", "glob", ",", "paths", ",", "limit", "=", "5", ")", ":", "def", "digit_set_wildcard", "(", "chars", ")", ":", "\"\"\"\n Makes a wildcard expression for the set, a bit readable, e.g. [1-5].\n \"\"\"", "chars", "=", "sorted", "(",...
Tweaks glob into a list of more specific globs that together still cover paths and not too much extra. Saves us minutes long listings for long dataset histories. Specifically, in this implementation the leftmost occurrences of "[0-9]" give rise to a few separate globs that each specialize the expression to digits that actually occur in paths.
[ "Tweaks", "glob", "into", "a", "list", "of", "more", "specific", "globs", "that", "together", "still", "cover", "paths", "and", "not", "too", "much", "extra", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/range.py#L491-L528
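A small usage sketch of _constrain_glob; the glob and paths below are hypothetical, and the exact output depends on the limit, so it is only described in a comment:

from luigi.tools.range import _constrain_glob

paths = [
    '/data/2016-01-01/part-0',
    '/data/2016-01-02/part-0',
    '/data/2017-03-05/part-0',
]
glob = '/data/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]/part-0'
# Leftmost [0-9] groups get specialized to the digits that actually occur in
# paths (here 2, 0, 1, then {6, 7}), until the limit of 5 would be exceeded.
print(_constrain_glob(glob, paths, limit=5))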
31,732
spotify/luigi
luigi/tools/range.py
_get_per_location_glob
def _get_per_location_glob(tasks, outputs, regexes):
    """
    Builds a glob listing existing output paths.

    Esoteric reverse engineering, but worth it given that (compared to an
    equivalent contiguousness guarantee by naive complete() checks)
    requests to the filesystem are cut by orders of magnitude, and users
    don't even have to retrofit existing tasks anyhow.
    """
    paths = [o.path for o in outputs]
    # naive, because some matches could be confused by numbers earlier
    # in path, e.g. /foo/fifa2000k/bar/2000-12-31/00
    matches = [r.search(p) for r, p in zip(regexes, paths)]

    for m, p, t in zip(matches, paths, tasks):
        if m is None:
            raise NotImplementedError("Couldn't deduce datehour representation in output path %r of task %s" % (p, t))

    n_groups = len(matches[0].groups())
    # the most common position of every group is likely
    # to be conclusive hit or miss
    positions = [most_common((m.start(i), m.end(i)) for m in matches)[0] for i in range(1, n_groups + 1)]

    glob = list(paths[0])  # FIXME sanity check that it's the same for all paths
    for start, end in positions:
        glob = glob[:start] + ['[0-9]'] * (end - start) + glob[end:]
    # chop off the last path item
    # (wouldn't need to if `hadoop fs -ls -d` equivalent were available)
    return ''.join(glob).rsplit('/', 1)[0]
python
def _get_per_location_glob(tasks, outputs, regexes):
    """
    Builds a glob listing existing output paths.

    Esoteric reverse engineering, but worth it given that (compared to an
    equivalent contiguousness guarantee by naive complete() checks)
    requests to the filesystem are cut by orders of magnitude, and users
    don't even have to retrofit existing tasks anyhow.
    """
    paths = [o.path for o in outputs]
    # naive, because some matches could be confused by numbers earlier
    # in path, e.g. /foo/fifa2000k/bar/2000-12-31/00
    matches = [r.search(p) for r, p in zip(regexes, paths)]

    for m, p, t in zip(matches, paths, tasks):
        if m is None:
            raise NotImplementedError("Couldn't deduce datehour representation in output path %r of task %s" % (p, t))

    n_groups = len(matches[0].groups())
    # the most common position of every group is likely
    # to be conclusive hit or miss
    positions = [most_common((m.start(i), m.end(i)) for m in matches)[0] for i in range(1, n_groups + 1)]

    glob = list(paths[0])  # FIXME sanity check that it's the same for all paths
    for start, end in positions:
        glob = glob[:start] + ['[0-9]'] * (end - start) + glob[end:]
    # chop off the last path item
    # (wouldn't need to if `hadoop fs -ls -d` equivalent were available)
    return ''.join(glob).rsplit('/', 1)[0]
[ "def", "_get_per_location_glob", "(", "tasks", ",", "outputs", ",", "regexes", ")", ":", "paths", "=", "[", "o", ".", "path", "for", "o", "in", "outputs", "]", "# naive, because some matches could be confused by numbers earlier", "# in path, e.g. /foo/fifa2000k/bar/2000-1...
Builds a glob listing existing output paths. Esoteric reverse engineering, but worth it given that (compared to an equivalent contiguousness guarantee by naive complete() checks) requests to the filesystem are cut by orders of magnitude, and users don't even have to retrofit existing tasks anyhow.
[ "Builds", "a", "glob", "listing", "existing", "output", "paths", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/range.py#L542-L570
31,733
spotify/luigi
luigi/tools/range.py
_list_existing
def _list_existing(filesystem, glob, paths):
    """
    Get all the paths that do in fact exist. Returns a set of all existing paths.

    Takes a luigi.target.FileSystem object, a str which represents a glob and
    a list of strings representing paths.
    """
    globs = _constrain_glob(glob, paths)
    time_start = time.time()
    listing = []
    for g in sorted(globs):
        logger.debug('Listing %s', g)
        if filesystem.exists(g):
            listing.extend(filesystem.listdir(g))
    logger.debug('%d %s listings took %f s to return %d items',
                 len(globs), filesystem.__class__.__name__, time.time() - time_start, len(listing))
    return set(listing)
python
def _list_existing(filesystem, glob, paths):
    """
    Get all the paths that do in fact exist. Returns a set of all existing paths.

    Takes a luigi.target.FileSystem object, a str which represents a glob and
    a list of strings representing paths.
    """
    globs = _constrain_glob(glob, paths)
    time_start = time.time()
    listing = []
    for g in sorted(globs):
        logger.debug('Listing %s', g)
        if filesystem.exists(g):
            listing.extend(filesystem.listdir(g))
    logger.debug('%d %s listings took %f s to return %d items',
                 len(globs), filesystem.__class__.__name__, time.time() - time_start, len(listing))
    return set(listing)
[ "def", "_list_existing", "(", "filesystem", ",", "glob", ",", "paths", ")", ":", "globs", "=", "_constrain_glob", "(", "glob", ",", "paths", ")", "time_start", "=", "time", ".", "time", "(", ")", "listing", "=", "[", "]", "for", "g", "in", "sorted", ...
Get all the paths that do in fact exist. Returns a set of all existing paths. Takes a luigi.target.FileSystem object, a str which represents a glob and a list of strings representing paths.
[ "Get", "all", "the", "paths", "that", "do", "in", "fact", "exist", ".", "Returns", "a", "set", "of", "all", "existing", "paths", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/range.py#L603-L619
31,734
spotify/luigi
luigi/tools/range.py
infer_bulk_complete_from_fs
def infer_bulk_complete_from_fs(datetimes, datetime_to_task, datetime_to_re):
    """
    Efficiently determines missing datetimes by filesystem listing.

    The current implementation works for the common case of a task writing
    output to a ``FileSystemTarget`` whose path is built using strftime with
    format like '...%Y...%m...%d...%H...', without custom ``complete()`` or
    ``exists()``.

    (Eventually Luigi could have ranges of completion as first-class citizens.
    Then this listing business could be factored away/be provided for
    explicitly in target API or some kind of a history server.)
    """
    filesystems_and_globs_by_location = _get_filesystems_and_globs(datetime_to_task, datetime_to_re)
    paths_by_datetime = [[o.path for o in flatten_output(datetime_to_task(d))] for d in datetimes]
    listing = set()
    for (f, g), p in zip(filesystems_and_globs_by_location, zip(*paths_by_datetime)):  # transposed, so here we're iterating over logical outputs, not datetimes
        listing |= _list_existing(f, g, p)

    # quickly learn everything that's missing
    missing_datetimes = []
    for d, p in zip(datetimes, paths_by_datetime):
        if not set(p) <= listing:
            missing_datetimes.append(d)

    return missing_datetimes
python
def infer_bulk_complete_from_fs(datetimes, datetime_to_task, datetime_to_re):
    """
    Efficiently determines missing datetimes by filesystem listing.

    The current implementation works for the common case of a task writing
    output to a ``FileSystemTarget`` whose path is built using strftime with
    format like '...%Y...%m...%d...%H...', without custom ``complete()`` or
    ``exists()``.

    (Eventually Luigi could have ranges of completion as first-class citizens.
    Then this listing business could be factored away/be provided for
    explicitly in target API or some kind of a history server.)
    """
    filesystems_and_globs_by_location = _get_filesystems_and_globs(datetime_to_task, datetime_to_re)
    paths_by_datetime = [[o.path for o in flatten_output(datetime_to_task(d))] for d in datetimes]
    listing = set()
    for (f, g), p in zip(filesystems_and_globs_by_location, zip(*paths_by_datetime)):  # transposed, so here we're iterating over logical outputs, not datetimes
        listing |= _list_existing(f, g, p)

    # quickly learn everything that's missing
    missing_datetimes = []
    for d, p in zip(datetimes, paths_by_datetime):
        if not set(p) <= listing:
            missing_datetimes.append(d)

    return missing_datetimes
[ "def", "infer_bulk_complete_from_fs", "(", "datetimes", ",", "datetime_to_task", ",", "datetime_to_re", ")", ":", "filesystems_and_globs_by_location", "=", "_get_filesystems_and_globs", "(", "datetime_to_task", ",", "datetime_to_re", ")", "paths_by_datetime", "=", "[", "[",...
Efficiently determines missing datetimes by filesystem listing. The current implementation works for the common case of a task writing output to a ``FileSystemTarget`` whose path is built using strftime with format like '...%Y...%m...%d...%H...', without custom ``complete()`` or ``exists()``. (Eventually Luigi could have ranges of completion as first-class citizens. Then this listing business could be factored away/be provided for explicitly in target API or some kind of a history server.)
[ "Efficiently", "determines", "missing", "datetimes", "by", "filesystem", "listing", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/range.py#L622-L647
31,735
spotify/luigi
luigi/tools/range.py
RangeBase.of_cls
def of_cls(self):
    """
    DONT USE. Will be deleted soon. Use ``self.of``!
    """
    if isinstance(self.of, six.string_types):
        warnings.warn('When using Range programatically, dont pass "of" param as string!')
        return Register.get_task_cls(self.of)
    return self.of
python
def of_cls(self):
    """
    DONT USE. Will be deleted soon. Use ``self.of``!
    """
    if isinstance(self.of, six.string_types):
        warnings.warn('When using Range programatically, dont pass "of" param as string!')
        return Register.get_task_cls(self.of)
    return self.of
[ "def", "of_cls", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "of", ",", "six", ".", "string_types", ")", ":", "warnings", ".", "warn", "(", "'When using Range programatically, dont pass \"of\" param as string!'", ")", "return", "Register", ".", ...
DONT USE. Will be deleted soon. Use ``self.of``!
[ "DONT", "USE", ".", "Will", "be", "deleted", "soon", ".", "Use", "self", ".", "of", "!" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/range.py#L117-L124
31,736
spotify/luigi
luigi/tools/range.py
RangeBase.missing_datetimes
def missing_datetimes(self, finite_datetimes):
    """
    Override in subclasses to do bulk checks.

    Returns a sorted list.

    This is a conservative base implementation that brutally checks completeness,
    instance by instance.

    Inadvisable as it may be slow.
    """
    return [d for d in finite_datetimes if not self._instantiate_task_cls(self.datetime_to_parameter(d)).complete()]
python
def missing_datetimes(self, finite_datetimes):
    """
    Override in subclasses to do bulk checks.

    Returns a sorted list.

    This is a conservative base implementation that brutally checks completeness,
    instance by instance.

    Inadvisable as it may be slow.
    """
    return [d for d in finite_datetimes if not self._instantiate_task_cls(self.datetime_to_parameter(d)).complete()]
[ "def", "missing_datetimes", "(", "self", ",", "finite_datetimes", ")", ":", "return", "[", "d", "for", "d", "in", "finite_datetimes", "if", "not", "self", ".", "_instantiate_task_cls", "(", "self", ".", "datetime_to_parameter", "(", "d", ")", ")", ".", "comp...
Override in subclasses to do bulk checks. Returns a sorted list. This is a conservative base implementation that brutally checks completeness, instance by instance. Inadvisable as it may be slow.
[ "Override", "in", "subclasses", "to", "do", "bulk", "checks", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/range.py#L255-L265
31,737
spotify/luigi
luigi/tools/range.py
RangeDailyBase.parameters_to_datetime
def parameters_to_datetime(self, p):
    """
    Given a dictionary of parameters, will extract the ranged task parameter value
    """
    dt = p[self._param_name]
    return datetime(dt.year, dt.month, dt.day)
python
def parameters_to_datetime(self, p):
    """
    Given a dictionary of parameters, will extract the ranged task parameter value
    """
    dt = p[self._param_name]
    return datetime(dt.year, dt.month, dt.day)
[ "def", "parameters_to_datetime", "(", "self", ",", "p", ")", ":", "dt", "=", "p", "[", "self", ".", "_param_name", "]", "return", "datetime", "(", "dt", ".", "year", ",", "dt", ".", "month", ",", "dt", ".", "day", ")" ]
Given a dictionary of parameters, will extract the ranged task parameter value
[ "Given", "a", "dictionary", "of", "parameters", "will", "extract", "the", "ranged", "task", "parameter", "value" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/range.py#L316-L321
31,738
spotify/luigi
luigi/tools/range.py
RangeDailyBase.finite_datetimes
def finite_datetimes(self, finite_start, finite_stop):
    """
    Simply returns the points in time that correspond to turn of day.
    """
    date_start = datetime(finite_start.year, finite_start.month, finite_start.day)
    dates = []
    for i in itertools.count():
        t = date_start + timedelta(days=i)
        if t >= finite_stop:
            return dates
        if t >= finite_start:
            dates.append(t)
python
def finite_datetimes(self, finite_start, finite_stop):
    """
    Simply returns the points in time that correspond to turn of day.
    """
    date_start = datetime(finite_start.year, finite_start.month, finite_start.day)
    dates = []
    for i in itertools.count():
        t = date_start + timedelta(days=i)
        if t >= finite_stop:
            return dates
        if t >= finite_start:
            dates.append(t)
[ "def", "finite_datetimes", "(", "self", ",", "finite_start", ",", "finite_stop", ")", ":", "date_start", "=", "datetime", "(", "finite_start", ".", "year", ",", "finite_start", ".", "month", ",", "finite_start", ".", "day", ")", "dates", "=", "[", "]", "fo...
Simply returns the points in time that correspond to turn of day.
[ "Simply", "returns", "the", "points", "in", "time", "that", "correspond", "to", "turn", "of", "day", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/range.py#L329-L340
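The turn-of-day enumeration above can be checked in isolation; this sketch inlines the same logic with hypothetical bounds:

import itertools
from datetime import datetime, timedelta

finite_start = datetime(2016, 1, 1, 5, 30)  # starts mid-day
finite_stop = datetime(2016, 1, 4)

date_start = datetime(finite_start.year, finite_start.month, finite_start.day)
dates = []
for i in itertools.count():
    t = date_start + timedelta(days=i)
    if t >= finite_stop:
        break
    if t >= finite_start:
        dates.append(t)
print(dates)  # midnights of Jan 2 and Jan 3 only; Jan 1 00:00 precedes finite_start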
31,739
spotify/luigi
luigi/tools/range.py
RangeHourlyBase.finite_datetimes
def finite_datetimes(self, finite_start, finite_stop):
    """
    Simply returns the points in time that correspond to whole hours.
    """
    datehour_start = datetime(finite_start.year, finite_start.month, finite_start.day, finite_start.hour)
    datehours = []
    for i in itertools.count():
        t = datehour_start + timedelta(hours=i)
        if t >= finite_stop:
            return datehours
        if t >= finite_start:
            datehours.append(t)
python
def finite_datetimes(self, finite_start, finite_stop):
    """
    Simply returns the points in time that correspond to whole hours.
    """
    datehour_start = datetime(finite_start.year, finite_start.month, finite_start.day, finite_start.hour)
    datehours = []
    for i in itertools.count():
        t = datehour_start + timedelta(hours=i)
        if t >= finite_stop:
            return datehours
        if t >= finite_start:
            datehours.append(t)
[ "def", "finite_datetimes", "(", "self", ",", "finite_start", ",", "finite_stop", ")", ":", "datehour_start", "=", "datetime", "(", "finite_start", ".", "year", ",", "finite_start", ".", "month", ",", "finite_start", ".", "day", ",", "finite_start", ".", "hour"...
Simply returns the points in time that correspond to whole hours.
[ "Simply", "returns", "the", "points", "in", "time", "that", "correspond", "to", "whole", "hours", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/range.py#L391-L402
31,740
spotify/luigi
luigi/tools/range.py
RangeByMinutesBase.finite_datetimes
def finite_datetimes(self, finite_start, finite_stop):
    """
    Simply returns the points in time that correspond to a whole number of minutes intervals.
    """
    # Validate that the minutes_interval can divide 60 and it is greater than 0 and lesser than 60
    if not (0 < self.minutes_interval < 60):
        raise ParameterException('minutes-interval must be within 0..60')
    if (60 / self.minutes_interval) * self.minutes_interval != 60:
        raise ParameterException('minutes-interval does not evenly divide 60')
    # start of a complete interval, e.g. 20:13 and the interval is 5 -> 20:10
    start_minute = int(finite_start.minute/self.minutes_interval)*self.minutes_interval
    datehour_start = datetime(
        year=finite_start.year,
        month=finite_start.month,
        day=finite_start.day,
        hour=finite_start.hour,
        minute=start_minute)
    datehours = []
    for i in itertools.count():
        t = datehour_start + timedelta(minutes=i*self.minutes_interval)
        if t >= finite_stop:
            return datehours
        if t >= finite_start:
            datehours.append(t)
python
def finite_datetimes(self, finite_start, finite_stop):
    """
    Simply returns the points in time that correspond to a whole number of minutes intervals.
    """
    # Validate that the minutes_interval can divide 60 and it is greater than 0 and lesser than 60
    if not (0 < self.minutes_interval < 60):
        raise ParameterException('minutes-interval must be within 0..60')
    if (60 / self.minutes_interval) * self.minutes_interval != 60:
        raise ParameterException('minutes-interval does not evenly divide 60')
    # start of a complete interval, e.g. 20:13 and the interval is 5 -> 20:10
    start_minute = int(finite_start.minute/self.minutes_interval)*self.minutes_interval
    datehour_start = datetime(
        year=finite_start.year,
        month=finite_start.month,
        day=finite_start.day,
        hour=finite_start.hour,
        minute=start_minute)
    datehours = []
    for i in itertools.count():
        t = datehour_start + timedelta(minutes=i*self.minutes_interval)
        if t >= finite_stop:
            return datehours
        if t >= finite_start:
            datehours.append(t)
[ "def", "finite_datetimes", "(", "self", ",", "finite_start", ",", "finite_stop", ")", ":", "# Validate that the minutes_interval can divide 60 and it is greater than 0 and lesser than 60", "if", "not", "(", "0", "<", "self", ".", "minutes_interval", "<", "60", ")", ":", ...
Simply returns the points in time that correspond to a whole number of minutes intervals.
[ "Simply", "returns", "the", "points", "in", "time", "that", "correspond", "to", "a", "whole", "number", "of", "minutes", "intervals", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/range.py#L462-L485
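The interval alignment in RangeByMinutesBase can be seen in miniature (values hypothetical):

minutes_interval = 5   # must be in 0..60 and divide 60 evenly
start_minute = 13
aligned = int(start_minute / minutes_interval) * minutes_interval
print(aligned)  # 10 -> a 20:13 start is snapped down to the 20:10 interval boundary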
31,741
spotify/luigi
luigi/tools/range.py
RangeMonthly.finite_datetimes
def finite_datetimes(self, finite_start, finite_stop):
    """
    Simply returns the points in time that correspond to turn of month.
    """
    start_date = self._align(finite_start)
    aligned_stop = self._align(finite_stop)

    dates = []
    for m in itertools.count():
        t = start_date + relativedelta(months=m)
        if t >= aligned_stop:
            return dates
        if t >= finite_start:
            dates.append(t)
python
def finite_datetimes(self, finite_start, finite_stop):
    """
    Simply returns the points in time that correspond to turn of month.
    """
    start_date = self._align(finite_start)
    aligned_stop = self._align(finite_stop)

    dates = []
    for m in itertools.count():
        t = start_date + relativedelta(months=m)
        if t >= aligned_stop:
            return dates
        if t >= finite_start:
            dates.append(t)
[ "def", "finite_datetimes", "(", "self", ",", "finite_start", ",", "finite_stop", ")", ":", "start_date", "=", "self", ".", "_align", "(", "finite_start", ")", "aligned_stop", "=", "self", ".", "_align", "(", "finite_stop", ")", "dates", "=", "[", "]", "for...
Simply returns the points in time that correspond to turn of month.
[ "Simply", "returns", "the", "points", "in", "time", "that", "correspond", "to", "turn", "of", "month", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/range.py#L709-L721
31,742
spotify/luigi
luigi/contrib/mssqldb.py
MSSqlTarget.connect
def connect(self):
    """
    Create a SQL Server connection and return a connection object
    """
    connection = _mssql.connect(user=self.user,
                                password=self.password,
                                server=self.host,
                                port=self.port,
                                database=self.database)
    return connection
python
def connect(self):
    """
    Create a SQL Server connection and return a connection object
    """
    connection = _mssql.connect(user=self.user,
                                password=self.password,
                                server=self.host,
                                port=self.port,
                                database=self.database)
    return connection
[ "def", "connect", "(", "self", ")", ":", "connection", "=", "_mssql", ".", "connect", "(", "user", "=", "self", ".", "user", ",", "password", "=", "self", ".", "password", ",", "server", "=", "self", ".", "host", ",", "port", "=", "self", ".", "por...
Create a SQL Server connection and return a connection object
[ "Create", "a", "SQL", "Server", "connection", "and", "return", "a", "connection", "object" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/mssqldb.py#L119-L128
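A hedged usage sketch for MSSqlTarget.connect; the connection values are hypothetical and pymssql (which provides the _mssql module) must be installed:

from luigi.contrib.mssqldb import MSSqlTarget

target = MSSqlTarget(host='dbserver:1433', database='reports', user='luigi',
                     password='secret', table='daily_totals', update_id='2016-01-01')
conn = target.connect()  # returns a low-level _mssql connection object
conn.close()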
31,743
spotify/luigi
luigi/contrib/opener.py
OpenerRegistry.get_opener
def get_opener(self, name):
    """Retrieve an opener for the given protocol

    :param name: name of the opener to open
    :type name: string
    :raises NoOpenerError: if no opener has been registered of that name

    """
    if name not in self.registry:
        raise NoOpenerError("No opener for %s" % name)
    index = self.registry[name]
    return self.openers[index]
python
def get_opener(self, name):
    """Retrieve an opener for the given protocol

    :param name: name of the opener to open
    :type name: string
    :raises NoOpenerError: if no opener has been registered of that name

    """
    if name not in self.registry:
        raise NoOpenerError("No opener for %s" % name)
    index = self.registry[name]
    return self.openers[index]
[ "def", "get_opener", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "registry", ":", "raise", "NoOpenerError", "(", "\"No opener for %s\"", "%", "name", ")", "index", "=", "self", ".", "registry", "[", "name", "]", "return", ...
Retrieve an opener for the given protocol :param name: name of the opener to open :type name: string :raises NoOpenerError: if no opener has been registered of that name
[ "Retrieve", "an", "opener", "for", "the", "given", "protocol" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/opener.py#L89-L100
31,744
spotify/luigi
luigi/contrib/opener.py
OpenerRegistry.add
def add(self, opener):
    """Adds an opener to the registry

    :param opener: Opener object
    :type opener: Opener inherited object

    """
    index = len(self.openers)
    self.openers[index] = opener
    for name in opener.names:
        self.registry[name] = index
python
def add(self, opener):
    """Adds an opener to the registry

    :param opener: Opener object
    :type opener: Opener inherited object

    """
    index = len(self.openers)
    self.openers[index] = opener
    for name in opener.names:
        self.registry[name] = index
[ "def", "add", "(", "self", ",", "opener", ")", ":", "index", "=", "len", "(", "self", ".", "openers", ")", "self", ".", "openers", "[", "index", "]", "=", "opener", "for", "name", "in", "opener", ".", "names", ":", "self", ".", "registry", "[", "...
Adds an opener to the registry :param opener: Opener object :type opener: Opener inherited object
[ "Adds", "an", "opener", "to", "the", "registry" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/opener.py#L102-L113
31,745
spotify/luigi
luigi/contrib/opener.py
OpenerRegistry.open
def open(self, target_uri, **kwargs):
    """Open target uri.

    :param target_uri: Uri to open
    :type target_uri: string
    :returns: Target object

    """
    target = urlsplit(target_uri, scheme=self.default_opener)
    opener = self.get_opener(target.scheme)
    query = opener.conform_query(target.query)

    target = opener.get_target(
        target.scheme,
        target.path,
        target.fragment,
        target.username,
        target.password,
        target.hostname,
        target.port,
        query,
        **kwargs
    )
    target.opener_path = target_uri

    return target
python
def open(self, target_uri, **kwargs):
    """Open target uri.

    :param target_uri: Uri to open
    :type target_uri: string
    :returns: Target object

    """
    target = urlsplit(target_uri, scheme=self.default_opener)
    opener = self.get_opener(target.scheme)
    query = opener.conform_query(target.query)

    target = opener.get_target(
        target.scheme,
        target.path,
        target.fragment,
        target.username,
        target.password,
        target.hostname,
        target.port,
        query,
        **kwargs
    )
    target.opener_path = target_uri

    return target
[ "def", "open", "(", "self", ",", "target_uri", ",", "*", "*", "kwargs", ")", ":", "target", "=", "urlsplit", "(", "target_uri", ",", "scheme", "=", "self", ".", "default_opener", ")", "opener", "=", "self", ".", "get_opener", "(", "target", ".", "schem...
Open target uri. :param target_uri: Uri to open :type target_uri: string :returns: Target object
[ "Open", "target", "uri", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/opener.py#L115-L142
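A sketch of opening targets through a registry, assuming the MockOpener and LocalOpener classes that luigi.contrib.opener ships alongside OpenerRegistry:

from luigi.contrib.opener import OpenerRegistry, MockOpener, LocalOpener

registry = OpenerRegistry([MockOpener, LocalOpener])
t1 = registry.open('mock://some/path')         # 'mock' scheme selects MockOpener
t2 = registry.open('file:///tmp/example.txt')  # 'file' scheme selects LocalOpener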
31,746
spotify/luigi
luigi/contrib/opener.py
Opener.conform_query
def conform_query(cls, query):
    """Converts the query string from a target uri, uses
    cls.allowed_kwargs, and cls.filter_kwargs to drive logic.

    :param query: Unparsed query string
    :type query: urllib.parse.unsplit(uri).query
    :returns: Dictionary of parsed values, everything in cls.allowed_kwargs
              with values set to True will be parsed as json strings.

    """
    query = parse_qs(query, keep_blank_values=True)

    # Remove any unexpected keywords from the query string.
    if cls.filter_kwargs:
        query = {x: y for x, y in query.items() if x in cls.allowed_kwargs}

    for key, vals in query.items():
        # Multiple values of the same name could be passed use first
        # Also params without strings will be treated as true values
        if cls.allowed_kwargs.get(key, False):
            val = json.loads(vals[0] or 'true')
        else:
            val = vals[0] or 'true'

        query[key] = val
    return query
python
def conform_query(cls, query):
    """Converts the query string from a target uri, uses
    cls.allowed_kwargs, and cls.filter_kwargs to drive logic.

    :param query: Unparsed query string
    :type query: urllib.parse.unsplit(uri).query
    :returns: Dictionary of parsed values, everything in cls.allowed_kwargs
              with values set to True will be parsed as json strings.

    """
    query = parse_qs(query, keep_blank_values=True)

    # Remove any unexpected keywords from the query string.
    if cls.filter_kwargs:
        query = {x: y for x, y in query.items() if x in cls.allowed_kwargs}

    for key, vals in query.items():
        # Multiple values of the same name could be passed use first
        # Also params without strings will be treated as true values
        if cls.allowed_kwargs.get(key, False):
            val = json.loads(vals[0] or 'true')
        else:
            val = vals[0] or 'true'

        query[key] = val
    return query
[ "def", "conform_query", "(", "cls", ",", "query", ")", ":", "query", "=", "parse_qs", "(", "query", ",", "keep_blank_values", "=", "True", ")", "# Remove any unexpected keywords from the query string.", "if", "cls", ".", "filter_kwargs", ":", "query", "=", "{", ...
Converts the query string from a target uri, uses cls.allowed_kwargs, and cls.filter_kwargs to drive logic. :param query: Unparsed query string :type query: urllib.parse.unsplit(uri).query :returns: Dictionary of parsed values, everything in cls.allowed_kwargs with values set to True will be parsed as json strings.
[ "Converts", "the", "query", "string", "from", "a", "target", "uri", "uses", "cls", ".", "allowed_kwargs", "and", "cls", ".", "filter_kwargs", "to", "drive", "logic", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/opener.py#L157-L183
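The parse_qs/json behavior that conform_query relies on can be reproduced standalone:

from json import loads
from six.moves.urllib.parse import parse_qs

query = parse_qs('cache=true&format=json&flag', keep_blank_values=True)
# -> {'cache': ['true'], 'format': ['json'], 'flag': ['']}
# Keys flagged True in allowed_kwargs are json-decoded; blank values become 'true'.
print(loads(query['cache'][0] or 'true'))  # True
print(query['flag'][0] or 'true')          # 'true'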
31,747
spotify/luigi
luigi/interface.py
run
def run(*args, **kwargs):
    """
    Please dont use. Instead use `luigi` binary.

    Run from cmdline using argparse.

    :param use_dynamic_argparse: Deprecated and ignored
    """
    luigi_run_result = _run(*args, **kwargs)
    return luigi_run_result if kwargs.get('detailed_summary') else luigi_run_result.scheduling_succeeded
python
def run(*args, **kwargs):
    """
    Please dont use. Instead use `luigi` binary.

    Run from cmdline using argparse.

    :param use_dynamic_argparse: Deprecated and ignored
    """
    luigi_run_result = _run(*args, **kwargs)
    return luigi_run_result if kwargs.get('detailed_summary') else luigi_run_result.scheduling_succeeded
[ "def", "run", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "luigi_run_result", "=", "_run", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "luigi_run_result", "if", "kwargs", ".", "get", "(", "'detailed_summary'", ")", "else", "luigi...
Please dont use. Instead use `luigi` binary. Run from cmdline using argparse. :param use_dynamic_argparse: Deprecated and ignored
[ "Please", "dont", "use", ".", "Instead", "use", "luigi", "binary", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/interface.py#L186-L195
31,748
spotify/luigi
luigi/interface.py
build
def build(tasks, worker_scheduler_factory=None, detailed_summary=False, **env_params):
    """
    Run internally, bypassing the cmdline parsing.

    Useful if you have some luigi code that you want to run internally.
    Example:

    .. code-block:: python

        luigi.build([MyTask1(), MyTask2()], local_scheduler=True)

    One notable difference is that `build` defaults to not using
    the identical process lock. Otherwise, `build` would only be
    callable once from each process.

    :param tasks:
    :param worker_scheduler_factory:
    :param env_params:
    :return: True if there were no scheduling errors, even if tasks may fail.
    """
    if "no_lock" not in env_params:
        env_params["no_lock"] = True

    luigi_run_result = _schedule_and_run(tasks, worker_scheduler_factory, override_defaults=env_params)
    return luigi_run_result if detailed_summary else luigi_run_result.scheduling_succeeded
python
def build(tasks, worker_scheduler_factory=None, detailed_summary=False, **env_params):
    """
    Run internally, bypassing the cmdline parsing.

    Useful if you have some luigi code that you want to run internally.
    Example:

    .. code-block:: python

        luigi.build([MyTask1(), MyTask2()], local_scheduler=True)

    One notable difference is that `build` defaults to not using
    the identical process lock. Otherwise, `build` would only be
    callable once from each process.

    :param tasks:
    :param worker_scheduler_factory:
    :param env_params:
    :return: True if there were no scheduling errors, even if tasks may fail.
    """
    if "no_lock" not in env_params:
        env_params["no_lock"] = True

    luigi_run_result = _schedule_and_run(tasks, worker_scheduler_factory, override_defaults=env_params)
    return luigi_run_result if detailed_summary else luigi_run_result.scheduling_succeeded
[ "def", "build", "(", "tasks", ",", "worker_scheduler_factory", "=", "None", ",", "detailed_summary", "=", "False", ",", "*", "*", "env_params", ")", ":", "if", "\"no_lock\"", "not", "in", "env_params", ":", "env_params", "[", "\"no_lock\"", "]", "=", "True",...
Run internally, bypassing the cmdline parsing. Useful if you have some luigi code that you want to run internally. Example: .. code-block:: python luigi.build([MyTask1(), MyTask2()], local_scheduler=True) One notable difference is that `build` defaults to not using the identical process lock. Otherwise, `build` would only be callable once from each process. :param tasks: :param worker_scheduler_factory: :param env_params: :return: True if there were no scheduling errors, even if tasks may fail.
[ "Run", "internally", "bypassing", "the", "cmdline", "parsing", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/interface.py#L214-L238
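A runnable sketch of build with the detailed summary; the task is a trivial stand-in that reports itself already complete:

import luigi

class AlreadyDone(luigi.Task):
    def complete(self):
        return True  # nothing to do, so scheduling trivially succeeds

result = luigi.build([AlreadyDone()], local_scheduler=True, detailed_summary=True)
print(result.scheduling_succeeded)  # True: no scheduling errors occurred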
31,749
spotify/luigi
luigi/contrib/hadoop_jar.py
fix_paths
def fix_paths(job):
    """
    Coerce input arguments to use temporary files when used for output.

    Return a list of temporary file pairs (tmpfile, destination path) and
    a list of arguments.

    Converts each HdfsTarget to a string for the path.
    """
    tmp_files = []
    args = []
    for x in job.args():
        if isinstance(x, luigi.contrib.hdfs.HdfsTarget):  # input/output
            if x.exists() or not job.atomic_output():  # input
                args.append(x.path)
            else:  # output
                x_path_no_slash = x.path[:-1] if x.path[-1] == '/' else x.path
                y = luigi.contrib.hdfs.HdfsTarget(x_path_no_slash + '-luigi-tmp-%09d' % random.randrange(0, 1e10))
                tmp_files.append((y, x_path_no_slash))
                logger.info('Using temp path: %s for path %s', y.path, x.path)
                args.append(y.path)
        else:
            try:
                # hopefully the target has a path to use
                args.append(x.path)
            except AttributeError:
                # if there's no path then hope converting it to a string will work
                args.append(str(x))

    return (tmp_files, args)
python
def fix_paths(job):
    """
    Coerce input arguments to use temporary files when used for output.

    Return a list of temporary file pairs (tmpfile, destination path) and
    a list of arguments.

    Converts each HdfsTarget to a string for the path.
    """
    tmp_files = []
    args = []
    for x in job.args():
        if isinstance(x, luigi.contrib.hdfs.HdfsTarget):  # input/output
            if x.exists() or not job.atomic_output():  # input
                args.append(x.path)
            else:  # output
                x_path_no_slash = x.path[:-1] if x.path[-1] == '/' else x.path
                y = luigi.contrib.hdfs.HdfsTarget(x_path_no_slash + '-luigi-tmp-%09d' % random.randrange(0, 1e10))
                tmp_files.append((y, x_path_no_slash))
                logger.info('Using temp path: %s for path %s', y.path, x.path)
                args.append(y.path)
        else:
            try:
                # hopefully the target has a path to use
                args.append(x.path)
            except AttributeError:
                # if there's no path then hope converting it to a string will work
                args.append(str(x))

    return (tmp_files, args)
[ "def", "fix_paths", "(", "job", ")", ":", "tmp_files", "=", "[", "]", "args", "=", "[", "]", "for", "x", "in", "job", ".", "args", "(", ")", ":", "if", "isinstance", "(", "x", ",", "luigi", ".", "contrib", ".", "hdfs", ".", "HdfsTarget", ")", "...
Coerce input arguments to use temporary files when used for output. Return a list of temporary file pairs (tmpfile, destination path) and a list of arguments. Converts each HdfsTarget to a string for the path.
[ "Coerce", "input", "arguments", "to", "use", "temporary", "files", "when", "used", "for", "output", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hadoop_jar.py#L33-L62
31,750
spotify/luigi
luigi/contrib/batch.py
BatchClient.get_active_queue
def get_active_queue(self):
    """Get name of first active job queue"""

    # Get dict of active queues keyed by name
    queues = {q['jobQueueName']: q for q in self._client.describe_job_queues()['jobQueues']
              if q['state'] == 'ENABLED' and q['status'] == 'VALID'}
    if not queues:
        raise Exception('No job queues with state=ENABLED and status=VALID')

    # Pick the first queue as default
    return list(queues.keys())[0]
python
def get_active_queue(self):
    """Get name of first active job queue"""

    # Get dict of active queues keyed by name
    queues = {q['jobQueueName']: q for q in self._client.describe_job_queues()['jobQueues']
              if q['state'] == 'ENABLED' and q['status'] == 'VALID'}
    if not queues:
        raise Exception('No job queues with state=ENABLED and status=VALID')

    # Pick the first queue as default
    return list(queues.keys())[0]
[ "def", "get_active_queue", "(", "self", ")", ":", "# Get dict of active queues keyed by name", "queues", "=", "{", "q", "[", "'jobQueueName'", "]", ":", "q", "for", "q", "in", "self", ".", "_client", ".", "describe_job_queues", "(", ")", "[", "'jobQueues'", "]...
Get name of first active job queue
[ "Get", "name", "of", "first", "active", "job", "queue" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/batch.py#L96-L106
31,751
spotify/luigi
luigi/contrib/batch.py
BatchClient.get_job_id_from_name
def get_job_id_from_name(self, job_name):
    """Retrieve the first job ID matching the given name"""
    jobs = self._client.list_jobs(jobQueue=self._queue, jobStatus='RUNNING')['jobSummaryList']
    matching_jobs = [job for job in jobs if job['jobName'] == job_name]
    if matching_jobs:
        return matching_jobs[0]['jobId']
python
def get_job_id_from_name(self, job_name):
    """Retrieve the first job ID matching the given name"""
    jobs = self._client.list_jobs(jobQueue=self._queue, jobStatus='RUNNING')['jobSummaryList']
    matching_jobs = [job for job in jobs if job['jobName'] == job_name]
    if matching_jobs:
        return matching_jobs[0]['jobId']
[ "def", "get_job_id_from_name", "(", "self", ",", "job_name", ")", ":", "jobs", "=", "self", ".", "_client", ".", "list_jobs", "(", "jobQueue", "=", "self", ".", "_queue", ",", "jobStatus", "=", "'RUNNING'", ")", "[", "'jobSummaryList'", "]", "matching_jobs",...
Retrieve the first job ID matching the given name
[ "Retrieve", "the", "first", "job", "ID", "matching", "the", "given", "name" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/batch.py#L108-L113
31,752
spotify/luigi
luigi/contrib/batch.py
BatchClient.get_logs
def get_logs(self, log_stream_name, get_last=50):
    """Retrieve log stream from CloudWatch"""
    response = self._log_client.get_log_events(
        logGroupName='/aws/batch/job',
        logStreamName=log_stream_name,
        startFromHead=False)
    events = response['events']
    return '\n'.join(e['message'] for e in events[-get_last:])
python
def get_logs(self, log_stream_name, get_last=50):
    """Retrieve log stream from CloudWatch"""
    response = self._log_client.get_log_events(
        logGroupName='/aws/batch/job',
        logStreamName=log_stream_name,
        startFromHead=False)
    events = response['events']
    return '\n'.join(e['message'] for e in events[-get_last:])
[ "def", "get_logs", "(", "self", ",", "log_stream_name", ",", "get_last", "=", "50", ")", ":", "response", "=", "self", ".", "_log_client", ".", "get_log_events", "(", "logGroupName", "=", "'/aws/batch/job'", ",", "logStreamName", "=", "log_stream_name", ",", "...
Retrieve log stream from CloudWatch
[ "Retrieve", "log", "stream", "from", "CloudWatch" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/batch.py#L132-L139
31,753
spotify/luigi
luigi/contrib/batch.py
BatchClient.submit_job
def submit_job(self, job_definition, parameters, job_name=None, queue=None):
    """Wrap submit_job with useful defaults"""
    if job_name is None:
        job_name = _random_id()
    response = self._client.submit_job(
        jobName=job_name,
        jobQueue=queue or self.get_active_queue(),
        jobDefinition=job_definition,
        parameters=parameters
    )
    return response['jobId']
python
def submit_job(self, job_definition, parameters, job_name=None, queue=None):
    """Wrap submit_job with useful defaults"""
    if job_name is None:
        job_name = _random_id()
    response = self._client.submit_job(
        jobName=job_name,
        jobQueue=queue or self.get_active_queue(),
        jobDefinition=job_definition,
        parameters=parameters
    )
    return response['jobId']
[ "def", "submit_job", "(", "self", ",", "job_definition", ",", "parameters", ",", "job_name", "=", "None", ",", "queue", "=", "None", ")", ":", "if", "job_name", "is", "None", ":", "job_name", "=", "_random_id", "(", ")", "response", "=", "self", ".", "...
Wrap submit_job with useful defaults
[ "Wrap", "submit_job", "with", "useful", "defaults" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/batch.py#L141-L151
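A sketch of submitting a job; it assumes AWS credentials plus an existing job definition and queue (the names here are hypothetical):

from luigi.contrib.batch import BatchClient

client = BatchClient(poll_time=10)
job_id = client.submit_job(
    job_definition='my-job-def',             # hypothetical definition name
    parameters={'input': 's3://bucket/in'},
    job_name='nightly-run',                  # omitted -> a random ID is used
)
print(job_id)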
31,754
spotify/luigi
luigi/contrib/batch.py
BatchClient.register_job_definition
def register_job_definition(self, json_fpath):
    """Register a job definition with AWS Batch, using a JSON"""
    with open(json_fpath) as f:
        job_def = json.load(f)
    response = self._client.register_job_definition(**job_def)
    status_code = response['ResponseMetadata']['HTTPStatusCode']
    if status_code != 200:
        msg = 'Register job definition request received status code {0}:\n{1}'
        raise Exception(msg.format(status_code, response))
    return response
python
def register_job_definition(self, json_fpath):
    """Register a job definition with AWS Batch, using a JSON"""
    with open(json_fpath) as f:
        job_def = json.load(f)
    response = self._client.register_job_definition(**job_def)
    status_code = response['ResponseMetadata']['HTTPStatusCode']
    if status_code != 200:
        msg = 'Register job definition request received status code {0}:\n{1}'
        raise Exception(msg.format(status_code, response))
    return response
[ "def", "register_job_definition", "(", "self", ",", "json_fpath", ")", ":", "with", "open", "(", "json_fpath", ")", "as", "f", ":", "job_def", "=", "json", ".", "load", "(", "f", ")", "response", "=", "self", ".", "_client", ".", "register_job_definition",...
Register a job definition with AWS Batch, using a JSON
[ "Register", "a", "job", "definition", "with", "AWS", "Batch", "using", "a", "JSON" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/batch.py#L176-L185
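The JSON file is unpacked straight into boto3's register_job_definition, so it mirrors that API's keyword arguments; a minimal container definition written out as a sketch (all values hypothetical):

import json

job_def = {
    'jobDefinitionName': 'example-def',  # hypothetical name
    'type': 'container',
    'containerProperties': {
        'image': 'busybox',
        'vcpus': 1,
        'memory': 128,
        'command': ['echo', 'hello'],
    },
}
with open('example-def.json', 'w') as f:
    json.dump(job_def, f)
# client.register_job_definition('example-def.json') would then unpack this
# dict into boto3's batch.register_job_definition(**job_def).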
31,755
spotify/luigi
luigi/contrib/bigquery_avro.py
BigQueryLoadAvro._get_input_schema
def _get_input_schema(self):
    """Arbitrarily picks an object in input and reads the Avro schema from it."""
    assert avro, 'avro module required'

    input_target = flatten(self.input())[0]
    input_fs = input_target.fs if hasattr(input_target, 'fs') else GCSClient()
    input_uri = self.source_uris()[0]
    if '*' in input_uri:
        file_uris = list(input_fs.list_wildcard(input_uri))
        if file_uris:
            input_uri = file_uris[0]
        else:
            raise RuntimeError('No match for ' + input_uri)

    schema = []
    exception_reading_schema = []

    def read_schema(fp):
        # fp contains the file part downloaded thus far. We rely on that the DataFileReader
        # initializes itself fine as soon as the file header with schema is downloaded, without
        # requiring the remainder of the file...
        try:
            reader = avro.datafile.DataFileReader(fp, avro.io.DatumReader())
            schema[:] = [reader.datum_reader.writers_schema]
        except Exception as e:
            # Save but assume benign unless schema reading ultimately fails. The benign
            # exception in case of insufficiently big downloaded file part seems to be:
            # TypeError('ord() expected a character, but string of length 0 found',).
            exception_reading_schema[:] = [e]
            return False
        return True

    input_fs.download(input_uri, 64 * 1024, read_schema).close()
    if not schema:
        raise exception_reading_schema[0]

    return schema[0]
python
def _get_input_schema(self):
    """Arbitrarily picks an object in input and reads the Avro schema from it."""
    assert avro, 'avro module required'

    input_target = flatten(self.input())[0]
    input_fs = input_target.fs if hasattr(input_target, 'fs') else GCSClient()
    input_uri = self.source_uris()[0]
    if '*' in input_uri:
        file_uris = list(input_fs.list_wildcard(input_uri))
        if file_uris:
            input_uri = file_uris[0]
        else:
            raise RuntimeError('No match for ' + input_uri)

    schema = []
    exception_reading_schema = []

    def read_schema(fp):
        # fp contains the file part downloaded thus far. We rely on that the DataFileReader
        # initializes itself fine as soon as the file header with schema is downloaded, without
        # requiring the remainder of the file...
        try:
            reader = avro.datafile.DataFileReader(fp, avro.io.DatumReader())
            schema[:] = [reader.datum_reader.writers_schema]
        except Exception as e:
            # Save but assume benign unless schema reading ultimately fails. The benign
            # exception in case of insufficiently big downloaded file part seems to be:
            # TypeError('ord() expected a character, but string of length 0 found',).
            exception_reading_schema[:] = [e]
            return False
        return True

    input_fs.download(input_uri, 64 * 1024, read_schema).close()
    if not schema:
        raise exception_reading_schema[0]

    return schema[0]
[ "def", "_get_input_schema", "(", "self", ")", ":", "assert", "avro", ",", "'avro module required'", "input_target", "=", "flatten", "(", "self", ".", "input", "(", ")", ")", "[", "0", "]", "input_fs", "=", "input_target", ".", "fs", "if", "hasattr", "(", ...
Arbitrarily picks an object in input and reads the Avro schema from it.
[ "Arbitrarily", "picks", "an", "object", "in", "input", "and", "reads", "the", "Avro", "schema", "from", "it", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/bigquery_avro.py#L40-L75
31,756
spotify/luigi
luigi/contrib/bigquery.py
BigQueryClient.table_exists
def table_exists(self, table):
    """Returns whether the given table exists.

       :param table:
       :type table: BQTable
    """
    if not self.dataset_exists(table.dataset):
        return False

    try:
        self.client.tables().get(projectId=table.project_id,
                                 datasetId=table.dataset_id,
                                 tableId=table.table_id).execute()
    except http.HttpError as ex:
        if ex.resp.status == 404:
            return False
        raise

    return True
python
def table_exists(self, table):
    """Returns whether the given table exists.

       :param table:
       :type table: BQTable
    """
    if not self.dataset_exists(table.dataset):
        return False

    try:
        self.client.tables().get(projectId=table.project_id,
                                 datasetId=table.dataset_id,
                                 tableId=table.table_id).execute()
    except http.HttpError as ex:
        if ex.resp.status == 404:
            return False
        raise

    return True
[ "def", "table_exists", "(", "self", ",", "table", ")", ":", "if", "not", "self", ".", "dataset_exists", "(", "table", ".", "dataset", ")", ":", "return", "False", "try", ":", "self", ".", "client", ".", "tables", "(", ")", ".", "get", "(", "projectId...
Returns whether the given table exists. :param table: :type table: BQTable
[ "Returns", "whether", "the", "given", "table", "exists", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/bigquery.py#L157-L175
31,757
spotify/luigi
luigi/contrib/bigquery.py
BigQueryClient.make_dataset
def make_dataset(self, dataset, raise_if_exists=False, body=None):
    """Creates a new dataset with the default permissions.

       :param dataset:
       :type dataset: BQDataset
       :param raise_if_exists: whether to raise an exception if the dataset already exists.
       :raises luigi.target.FileAlreadyExists: if raise_if_exists=True and the dataset exists
    """
    if body is None:
        body = {}

    try:
        # Construct a message body in the format required by
        # https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.datasets.html#insert
        body['datasetReference'] = {
            'projectId': dataset.project_id,
            'datasetId': dataset.dataset_id
        }
        if dataset.location is not None:
            body['location'] = dataset.location
        self.client.datasets().insert(projectId=dataset.project_id, body=body).execute()
    except http.HttpError as ex:
        if ex.resp.status == 409:
            if raise_if_exists:
                raise luigi.target.FileAlreadyExists()
        else:
            raise
python
def make_dataset(self, dataset, raise_if_exists=False, body=None):
    """Creates a new dataset with the default permissions.

       :param dataset:
       :type dataset: BQDataset
       :param raise_if_exists: whether to raise an exception if the dataset already exists.
       :raises luigi.target.FileAlreadyExists: if raise_if_exists=True and the dataset exists
    """
    if body is None:
        body = {}

    try:
        # Construct a message body in the format required by
        # https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.datasets.html#insert
        body['datasetReference'] = {
            'projectId': dataset.project_id,
            'datasetId': dataset.dataset_id
        }
        if dataset.location is not None:
            body['location'] = dataset.location
        self.client.datasets().insert(projectId=dataset.project_id, body=body).execute()
    except http.HttpError as ex:
        if ex.resp.status == 409:
            if raise_if_exists:
                raise luigi.target.FileAlreadyExists()
        else:
            raise
[ "def", "make_dataset", "(", "self", ",", "dataset", ",", "raise_if_exists", "=", "False", ",", "body", "=", "None", ")", ":", "if", "body", "is", "None", ":", "body", "=", "{", "}", "try", ":", "# Construct a message body in the format required by", "# https:/...
Creates a new dataset with the default permissions. :param dataset: :type dataset: BQDataset :param raise_if_exists: whether to raise an exception if the dataset already exists. :raises luigi.target.FileAlreadyExists: if raise_if_exists=True and the dataset exists
[ "Creates", "a", "new", "dataset", "with", "the", "default", "permissions", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/bigquery.py#L177-L204
31,758
spotify/luigi
luigi/contrib/bigquery.py
BigQueryClient.delete_table
def delete_table(self, table):
    """Deletes a table, if it exists.

       :param table:
       :type table: BQTable
    """
    if not self.table_exists(table):
        return

    self.client.tables().delete(projectId=table.project_id,
                                datasetId=table.dataset_id,
                                tableId=table.table_id).execute()
python
def delete_table(self, table):
    """Deletes a table, if it exists.

       :param table:
       :type table: BQTable
    """
    if not self.table_exists(table):
        return

    self.client.tables().delete(projectId=table.project_id,
                                datasetId=table.dataset_id,
                                tableId=table.table_id).execute()
[ "def", "delete_table", "(", "self", ",", "table", ")", ":", "if", "not", "self", ".", "table_exists", "(", "table", ")", ":", "return", "self", ".", "client", ".", "tables", "(", ")", ".", "delete", "(", "projectId", "=", "table", ".", "project_id", ...
Deletes a table, if it exists. :param table: :type table: BQTable
[ "Deletes", "a", "table", "if", "it", "exists", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/bigquery.py#L221-L233
31,759
spotify/luigi
luigi/contrib/bigquery.py
BigQueryClient.list_datasets
def list_datasets(self, project_id):
    """Returns the list of datasets in a given project.

       :param project_id:
       :type project_id: str
    """
    request = self.client.datasets().list(projectId=project_id,
                                          maxResults=1000)
    response = request.execute()

    while response is not None:
        for ds in response.get('datasets', []):
            yield ds['datasetReference']['datasetId']

        request = self.client.datasets().list_next(request, response)
        if request is None:
            break

        response = request.execute()
python
def list_datasets(self, project_id):
    """Returns the list of datasets in a given project.

       :param project_id:
       :type project_id: str
    """
    request = self.client.datasets().list(projectId=project_id,
                                          maxResults=1000)
    response = request.execute()

    while response is not None:
        for ds in response.get('datasets', []):
            yield ds['datasetReference']['datasetId']

        request = self.client.datasets().list_next(request, response)
        if request is None:
            break

        response = request.execute()
[ "def", "list_datasets", "(", "self", ",", "project_id", ")", ":", "request", "=", "self", ".", "client", ".", "datasets", "(", ")", ".", "list", "(", "projectId", "=", "project_id", ",", "maxResults", "=", "1000", ")", "response", "=", "request", ".", ...
Returns the list of datasets in a given project. :param project_id: :type project_id: str
[ "Returns", "the", "list", "of", "datasets", "in", "a", "given", "project", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/bigquery.py#L235-L254
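Since list_datasets is a generator that follows list_next pagination, iterating it lazily walks every page; a sketch assuming application-default GCP credentials (the project name is hypothetical):

from luigi.contrib.bigquery import BigQueryClient

client = BigQueryClient()
for dataset_id in client.list_datasets('my-project'):
    print(dataset_id)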
31,760
spotify/luigi
luigi/contrib/bigquery.py
BigQueryClient.list_tables
def list_tables(self, dataset):
    """Returns the list of tables in a given dataset.

       :param dataset:
       :type dataset: BQDataset
    """
    request = self.client.tables().list(projectId=dataset.project_id,
                                        datasetId=dataset.dataset_id,
                                        maxResults=1000)
    response = request.execute()

    while response is not None:
        for t in response.get('tables', []):
            yield t['tableReference']['tableId']

        request = self.client.tables().list_next(request, response)
        if request is None:
            break

        response = request.execute()
python
def list_tables(self, dataset):
    """Returns the list of tables in a given dataset.

       :param dataset:
       :type dataset: BQDataset
    """
    request = self.client.tables().list(projectId=dataset.project_id,
                                        datasetId=dataset.dataset_id,
                                        maxResults=1000)
    response = request.execute()

    while response is not None:
        for t in response.get('tables', []):
            yield t['tableReference']['tableId']

        request = self.client.tables().list_next(request, response)
        if request is None:
            break

        response = request.execute()
[ "def", "list_tables", "(", "self", ",", "dataset", ")", ":", "request", "=", "self", ".", "client", ".", "tables", "(", ")", ".", "list", "(", "projectId", "=", "dataset", ".", "project_id", ",", "datasetId", "=", "dataset", ".", "dataset_id", ",", "ma...
Returns the list of tables in a given dataset. :param dataset: :type dataset: BQDataset
[ "Returns", "the", "list", "of", "tables", "in", "a", "given", "dataset", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/bigquery.py#L256-L276
31,761
spotify/luigi
luigi/contrib/bigquery.py
BigQueryClient.get_view
def get_view(self, table):
    """Returns the SQL query for a view, or None if it doesn't exist or is not a view.

       :param table: The table containing the view.
       :type table: BQTable
    """
    request = self.client.tables().get(projectId=table.project_id,
                                       datasetId=table.dataset_id,
                                       tableId=table.table_id)

    try:
        response = request.execute()
    except http.HttpError as ex:
        if ex.resp.status == 404:
            return None
        raise

    return response['view']['query'] if 'view' in response else None
python
def get_view(self, table):
    """Returns the SQL query for a view, or None if it doesn't exist or is not a view.

       :param table: The table containing the view.
       :type table: BQTable
    """
    request = self.client.tables().get(projectId=table.project_id,
                                       datasetId=table.dataset_id,
                                       tableId=table.table_id)

    try:
        response = request.execute()
    except http.HttpError as ex:
        if ex.resp.status == 404:
            return None
        raise

    return response['view']['query'] if 'view' in response else None
[ "def", "get_view", "(", "self", ",", "table", ")", ":", "request", "=", "self", ".", "client", ".", "tables", "(", ")", ".", "get", "(", "projectId", "=", "table", ".", "project_id", ",", "datasetId", "=", "table", ".", "dataset_id", ",", "tableId", ...
Returns the SQL query for a view, or None if it doesn't exist or is not a view. :param table: The table containing the view. :type table: BQTable
[ "Returns", "the", "SQL", "query", "for", "a", "view", "or", "None", "if", "it", "doesn", "t", "exist", "or", "is", "not", "a", "view", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/bigquery.py#L278-L296
31,762
spotify/luigi
luigi/contrib/bigquery.py
BigQueryClient.update_view
def update_view(self, table, view):
    """Updates the SQL query for a view.

       If the output table exists, it is replaced with the supplied view query.
       Otherwise a new table is created with this view.

       :param table: The table to contain the view.
       :type table: BQTable
       :param view: The SQL query for the view.
       :type view: str
    """
    body = {
        'tableReference': {
            'projectId': table.project_id,
            'datasetId': table.dataset_id,
            'tableId': table.table_id
        },
        'view': {
            'query': view
        }
    }

    if self.table_exists(table):
        self.client.tables().update(projectId=table.project_id,
                                    datasetId=table.dataset_id,
                                    tableId=table.table_id,
                                    body=body).execute()
    else:
        self.client.tables().insert(projectId=table.project_id,
                                    datasetId=table.dataset_id,
                                    body=body).execute()
python
def update_view(self, table, view):
    """Updates the SQL query for a view.

       If the output table exists, it is replaced with the supplied view query.
       Otherwise a new table is created with this view.

       :param table: The table to contain the view.
       :type table: BQTable
       :param view: The SQL query for the view.
       :type view: str
    """
    body = {
        'tableReference': {
            'projectId': table.project_id,
            'datasetId': table.dataset_id,
            'tableId': table.table_id
        },
        'view': {
            'query': view
        }
    }

    if self.table_exists(table):
        self.client.tables().update(projectId=table.project_id,
                                    datasetId=table.dataset_id,
                                    tableId=table.table_id,
                                    body=body).execute()
    else:
        self.client.tables().insert(projectId=table.project_id,
                                    datasetId=table.dataset_id,
                                    body=body).execute()
[ "def", "update_view", "(", "self", ",", "table", ",", "view", ")", ":", "body", "=", "{", "'tableReference'", ":", "{", "'projectId'", ":", "table", ".", "project_id", ",", "'datasetId'", ":", "table", ".", "dataset_id", ",", "'tableId'", ":", "table", "...
Updates the SQL query for a view. If the output table exists, it is replaced with the supplied view query. Otherwise a new table is created with this view. :param table: The table to contain the view. :type table: BQTable :param view: The SQL query for the view. :type view: str
[ "Updates", "the", "SQL", "query", "for", "a", "view", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/bigquery.py#L298-L329
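A hedged usage sketch for the two BigQuery view helpers above: refresh a view only when its SQL actually changed. The project/dataset/table ids are hypothetical, and the `BQTable` namedtuple is a local stand-in exposing just the three fields these records read, not the real luigi class.

```python
import collections

# Local stand-in for the table reference type -- only the three attributes
# that get_view()/update_view() access above (an assumption, not the real
# luigi.contrib.bigquery class).
BQTable = collections.namedtuple('BQTable', 'project_id dataset_id table_id')

my_view = BQTable('my-project', 'my_dataset', 'daily_rollup')  # hypothetical ids
query = 'SELECT day, COUNT(*) AS n FROM events GROUP BY day'

# With a real, authenticated client the update becomes idempotent:
# client = BigQueryClient(...)              # needs GCP credentials
# if client.get_view(my_view) != query:
#     client.update_view(my_view, query)
print(my_view.table_id, '->', query)
```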
31,763
spotify/luigi
luigi/contrib/bigquery.py
BigQueryClient.run_job
def run_job(self, project_id, body, dataset=None): """Runs a BigQuery "job". See the documentation for the format of body. .. note:: You probably don't need to use this directly. Use the tasks defined below. :param dataset: :type dataset: BQDataset """ if dataset and not self.dataset_exists(dataset): self.make_dataset(dataset) new_job = self.client.jobs().insert(projectId=project_id, body=body).execute() job_id = new_job['jobReference']['jobId'] logger.info('Started import job %s:%s', project_id, job_id) while True: status = self.client.jobs().get(projectId=project_id, jobId=job_id).execute(num_retries=10) if status['status']['state'] == 'DONE': if status['status'].get('errorResult'): raise Exception('BigQuery job failed: {}'.format(status['status']['errorResult'])) return logger.info('Waiting for job %s:%s to complete...', project_id, job_id) time.sleep(5)
python
def run_job(self, project_id, body, dataset=None): """Runs a BigQuery "job". See the documentation for the format of body. .. note:: You probably don't need to use this directly. Use the tasks defined below. :param dataset: :type dataset: BQDataset """ if dataset and not self.dataset_exists(dataset): self.make_dataset(dataset) new_job = self.client.jobs().insert(projectId=project_id, body=body).execute() job_id = new_job['jobReference']['jobId'] logger.info('Started import job %s:%s', project_id, job_id) while True: status = self.client.jobs().get(projectId=project_id, jobId=job_id).execute(num_retries=10) if status['status']['state'] == 'DONE': if status['status'].get('errorResult'): raise Exception('BigQuery job failed: {}'.format(status['status']['errorResult'])) return logger.info('Waiting for job %s:%s to complete...', project_id, job_id) time.sleep(5)
[ "def", "run_job", "(", "self", ",", "project_id", ",", "body", ",", "dataset", "=", "None", ")", ":", "if", "dataset", "and", "not", "self", ".", "dataset_exists", "(", "dataset", ")", ":", "self", ".", "make_dataset", "(", "dataset", ")", "new_job", "...
Runs a BigQuery "job". See the documentation for the format of body. .. note:: You probably don't need to use this directly. Use the tasks defined below. :param dataset: :type dataset: BQDataset
[ "Runs", "a", "BigQuery", "job", ".", "See", "the", "documentation", "for", "the", "format", "of", "body", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/bigquery.py#L331-L355
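`run_job()` only forwards `body` to `jobs.insert` and then polls, so the shape of `body` comes from the BigQuery jobs API rather than from luigi itself. A minimal sketch of one plausible query-job body, with hypothetical ids:

```python
# Keys follow the BigQuery jobs API (an external assumption); run_job()
# would submit this and sleep-poll until the job state is DONE.
body = {
    'configuration': {
        'query': {
            'query': 'SELECT 1 AS x',
            'destinationTable': {
                'projectId': 'my-project',
                'datasetId': 'my_dataset',
                'tableId': 'one_row',
            },
            'writeDisposition': 'WRITE_TRUNCATE',
        },
    },
}
print(sorted(body['configuration']['query']))
```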
31,764
spotify/luigi
luigi/contrib/bigquery.py
BigQueryLoadTask.source_uris
def source_uris(self): """The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name.""" return [x.path for x in luigi.task.flatten(self.input())]
python
def source_uris(self): """The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name.""" return [x.path for x in luigi.task.flatten(self.input())]
[ "def", "source_uris", "(", "self", ")", ":", "return", "[", "x", ".", "path", "for", "x", "in", "luigi", ".", "task", ".", "flatten", "(", "self", ".", "input", "(", ")", ")", "]" ]
The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name.
[ "The", "fully", "-", "qualified", "URIs", "that", "point", "to", "your", "data", "in", "Google", "Cloud", "Storage", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/bigquery.py#L487-L491
31,765
spotify/luigi
luigi/contrib/ssh.py
RemoteContext.Popen
def Popen(self, cmd, **kwargs): """ Remote Popen. """ prefixed_cmd = self._prepare_cmd(cmd) return subprocess.Popen(prefixed_cmd, **kwargs)
python
def Popen(self, cmd, **kwargs): """ Remote Popen. """ prefixed_cmd = self._prepare_cmd(cmd) return subprocess.Popen(prefixed_cmd, **kwargs)
[ "def", "Popen", "(", "self", ",", "cmd", ",", "*", "*", "kwargs", ")", ":", "prefixed_cmd", "=", "self", ".", "_prepare_cmd", "(", "cmd", ")", "return", "subprocess", ".", "Popen", "(", "prefixed_cmd", ",", "*", "*", "kwargs", ")" ]
Remote Popen.
[ "Remote", "Popen", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/ssh.py#L116-L121
31,766
spotify/luigi
luigi/contrib/ssh.py
RemoteContext.check_output
def check_output(self, cmd): """ Execute a shell command remotely and return the output. Simplified version of Popen when you only want the output as a string and detect any errors. """ p = self.Popen(cmd, stdout=subprocess.PIPE) output, _ = p.communicate() if p.returncode != 0: raise RemoteCalledProcessError(p.returncode, cmd, self.host, output=output) return output
python
def check_output(self, cmd): """ Execute a shell command remotely and return the output. Simplified version of Popen when you only want the output as a string and detect any errors. """ p = self.Popen(cmd, stdout=subprocess.PIPE) output, _ = p.communicate() if p.returncode != 0: raise RemoteCalledProcessError(p.returncode, cmd, self.host, output=output) return output
[ "def", "check_output", "(", "self", ",", "cmd", ")", ":", "p", "=", "self", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "output", ",", "_", "=", "p", ".", "communicate", "(", ")", "if", "p", ".", "returncode", "!...
Execute a shell command remotely and return the output. Simplified version of Popen when you only want the output as a string and detect any errors.
[ "Execute", "a", "shell", "command", "remotely", "and", "return", "the", "output", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/ssh.py#L123-L133
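A minimal sketch of the two `RemoteContext` methods above in use, assuming the host `example.com`, key-based SSH access, and that the constructor accepts a `username` keyword (all assumptions here, not confirmed by the records):

```python
from luigi.contrib.ssh import RemoteContext

ctx = RemoteContext('example.com', username='deploy')  # hypothetical host/user
try:
    # check_output() wraps Popen() and raises RemoteCalledProcessError on a
    # non-zero exit status.
    listing = ctx.check_output(['ls', '-1', '/tmp'])
    print(listing.decode('utf8'))
except Exception as err:
    print('remote command failed:', err)
```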
31,767
spotify/luigi
luigi/contrib/ssh.py
RemoteFileSystem.isdir
def isdir(self, path): """ Return `True` if directory at `path` exists, False otherwise. """ try: self.remote_context.check_output(["test", "-d", path]) except subprocess.CalledProcessError as e: if e.returncode == 1: return False else: raise return True
python
def isdir(self, path): """ Return `True` if directory at `path` exists, False otherwise. """ try: self.remote_context.check_output(["test", "-d", path]) except subprocess.CalledProcessError as e: if e.returncode == 1: return False else: raise return True
[ "def", "isdir", "(", "self", ",", "path", ")", ":", "try", ":", "self", ".", "remote_context", ".", "check_output", "(", "[", "\"test\"", ",", "\"-d\"", ",", "path", "]", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "if", "e"...
Return `True` if directory at `path` exists, False otherwise.
[ "Return", "True", "if", "directory", "at", "path", "exists", "False", "otherwise", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/ssh.py#L184-L195
31,768
spotify/luigi
luigi/lock.py
getpcmd
def getpcmd(pid): """ Returns command of process. :param pid: """ if os.name == "nt": # Use wmic command instead of ps on Windows. cmd = 'wmic path win32_process where ProcessID=%s get Commandline 2> nul' % (pid, ) with os.popen(cmd, 'r') as p: lines = [line for line in p.readlines() if line.strip("\r\n ") != ""] if lines: _, val = lines return val elif sys.platform == "darwin": # Use pgrep instead of /proc on macOS. pidfile = ".%d.pid" % (pid, ) with open(pidfile, 'w') as f: f.write(str(pid)) try: p = Popen(['pgrep', '-lf', '-F', pidfile], stdout=PIPE) stdout, _ = p.communicate() line = stdout.decode('utf8').strip() if line: _, scmd = line.split(' ', 1) return scmd finally: os.unlink(pidfile) else: # Use the /proc filesystem # At least on android there have been some issues with not all # process infos being readable. In these cases using the `ps` command # worked. See the pull request at # https://github.com/spotify/luigi/pull/1876 try: with open('/proc/{0}/cmdline'.format(pid), 'r') as fh: if six.PY3: return fh.read().replace('\0', ' ').rstrip() else: return fh.read().replace('\0', ' ').decode('utf8').rstrip() except IOError: # the system may not allow reading the command line # of a process owned by another user pass # Fallback instead of None, for e.g. Cygwin where -o is an "unknown option" for the ps command: return '[PROCESS_WITH_PID={}]'.format(pid)
python
def getpcmd(pid): """ Returns command of process. :param pid: """ if os.name == "nt": # Use wmic command instead of ps on Windows. cmd = 'wmic path win32_process where ProcessID=%s get Commandline 2> nul' % (pid, ) with os.popen(cmd, 'r') as p: lines = [line for line in p.readlines() if line.strip("\r\n ") != ""] if lines: _, val = lines return val elif sys.platform == "darwin": # Use pgrep instead of /proc on macOS. pidfile = ".%d.pid" % (pid, ) with open(pidfile, 'w') as f: f.write(str(pid)) try: p = Popen(['pgrep', '-lf', '-F', pidfile], stdout=PIPE) stdout, _ = p.communicate() line = stdout.decode('utf8').strip() if line: _, scmd = line.split(' ', 1) return scmd finally: os.unlink(pidfile) else: # Use the /proc filesystem # At least on android there have been some issues with not all # process infos being readable. In these cases using the `ps` command # worked. See the pull request at # https://github.com/spotify/luigi/pull/1876 try: with open('/proc/{0}/cmdline'.format(pid), 'r') as fh: if six.PY3: return fh.read().replace('\0', ' ').rstrip() else: return fh.read().replace('\0', ' ').decode('utf8').rstrip() except IOError: # the system may not allow reading the command line # of a process owned by another user pass # Fallback instead of None, for e.g. Cygwin where -o is an "unknown option" for the ps command: return '[PROCESS_WITH_PID={}]'.format(pid)
[ "def", "getpcmd", "(", "pid", ")", ":", "if", "os", ".", "name", "==", "\"nt\"", ":", "# Use wmic command instead of ps on Windows.", "cmd", "=", "'wmic path win32_process where ProcessID=%s get Commandline 2> nul'", "%", "(", "pid", ",", ")", "with", "os", ".", "po...
Returns command of process. :param pid:
[ "Returns", "command", "of", "process", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/lock.py#L33-L79
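The `/proc` branch of `getpcmd()` can be exercised directly on Linux against the current process; this is just that branch replayed, with the same placeholder fallback:

```python
import os

pid = os.getpid()
try:
    # /proc/<pid>/cmdline stores argv NUL-separated (Linux only).
    with open('/proc/{0}/cmdline'.format(pid), 'r') as fh:
        print(fh.read().replace('\0', ' ').rstrip())
except IOError:
    # Same fallback string the function above returns.
    print('[PROCESS_WITH_PID={}]'.format(pid))
```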
31,769
spotify/luigi
luigi/lock.py
acquire_for
def acquire_for(pid_dir, num_available=1, kill_signal=None): """ Makes sure the process is only run once at the same time with the same name. Notice that since we check the process name, different parameters to the same command can spawn multiple processes at the same time, i.e. running "/usr/bin/my_process" does not prevent anyone from launching "/usr/bin/my_process --foo bar". """ my_pid, my_cmd, pid_file = get_info(pid_dir) # Create a pid file if it does not exist try: os.mkdir(pid_dir) os.chmod(pid_dir, 0o777) except OSError as exc: if exc.errno != errno.EEXIST: raise pass # Let variable "pids" be all pids that exist in the .pid-file and are still # running the same command. pids = {pid for pid in _read_pids_file(pid_file) if getpcmd(pid) == my_cmd} if kill_signal is not None: for pid in pids: os.kill(pid, kill_signal) print('Sent kill signal to Pids: {}'.format(pids)) # We allow for the killer to progress, yet we don't want these to stack # up! So we only allow it once. num_available += 1 if len(pids) >= num_available: # We are already running under a different pid print('Pid(s) {} already running'.format(pids)) if kill_signal is not None: print('Note: There has (probably) been 1 other "--take-lock"' ' process which continued to run! Probably no need to run' ' this one as well.') return False _write_pids_file(pid_file, pids | {my_pid}) return True
python
def acquire_for(pid_dir, num_available=1, kill_signal=None): """ Makes sure the process is only run once at the same time with the same name. Notice that since we check the process name, different parameters to the same command can spawn multiple processes at the same time, i.e. running "/usr/bin/my_process" does not prevent anyone from launching "/usr/bin/my_process --foo bar". """ my_pid, my_cmd, pid_file = get_info(pid_dir) # Create a pid file if it does not exist try: os.mkdir(pid_dir) os.chmod(pid_dir, 0o777) except OSError as exc: if exc.errno != errno.EEXIST: raise pass # Let variable "pids" be all pids that exist in the .pid-file and are still # running the same command. pids = {pid for pid in _read_pids_file(pid_file) if getpcmd(pid) == my_cmd} if kill_signal is not None: for pid in pids: os.kill(pid, kill_signal) print('Sent kill signal to Pids: {}'.format(pids)) # We allow for the killer to progress, yet we don't want these to stack # up! So we only allow it once. num_available += 1 if len(pids) >= num_available: # We are already running under a different pid print('Pid(s) {} already running'.format(pids)) if kill_signal is not None: print('Note: There has (probably) been 1 other "--take-lock"' ' process which continued to run! Probably no need to run' ' this one as well.') return False _write_pids_file(pid_file, pids | {my_pid}) return True
[ "def", "acquire_for", "(", "pid_dir", ",", "num_available", "=", "1", ",", "kill_signal", "=", "None", ")", ":", "my_pid", ",", "my_cmd", ",", "pid_file", "=", "get_info", "(", "pid_dir", ")", "# Create a pid file if it does not exist", "try", ":", "os", ".", ...
Makes sure the process is only run once at the same time with the same name. Notice that since we check the process name, different parameters to the same command can spawn multiple processes at the same time, i.e. running "/usr/bin/my_process" does not prevent anyone from launching "/usr/bin/my_process --foo bar".
[ "Makes", "sure", "the", "process", "is", "only", "run", "once", "at", "the", "same", "time", "with", "the", "same", "name", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/lock.py#L94-L138
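A short, hedged sketch of taking the lock directly; `/tmp/luigi-locks` is a hypothetical `pid_dir`, and in normal use luigi calls this itself when a process starts rather than user code:

```python
from luigi import lock

# Two shells running the same command line race on this directory; only
# num_available of them get to proceed.
if lock.acquire_for('/tmp/luigi-locks', num_available=1):
    print('lock acquired, safe to run')
else:
    print('an identical command is already running')
```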
31,770
spotify/luigi
luigi/scheduler.py
Failures.add_failure
def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time)
python
def add_failure(self): """ Add a failure event with the current timestamp. """ failure_time = time.time() if not self.first_failure_time: self.first_failure_time = failure_time self.failures.append(failure_time)
[ "def", "add_failure", "(", "self", ")", ":", "failure_time", "=", "time", ".", "time", "(", ")", "if", "not", "self", ".", "first_failure_time", ":", "self", ".", "first_failure_time", "=", "failure_time", "self", ".", "failures", ".", "append", "(", "fail...
Add a failure event with the current timestamp.
[ "Add", "a", "failure", "event", "with", "the", "current", "timestamp", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/scheduler.py#L179-L188
31,771
spotify/luigi
luigi/scheduler.py
Failures.num_failures
def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and self.failures[0] < min_time: self.failures.popleft() return len(self.failures)
python
def num_failures(self): """ Return the number of failures in the window. """ min_time = time.time() - self.window while self.failures and self.failures[0] < min_time: self.failures.popleft() return len(self.failures)
[ "def", "num_failures", "(", "self", ")", ":", "min_time", "=", "time", ".", "time", "(", ")", "-", "self", ".", "window", "while", "self", ".", "failures", "and", "self", ".", "failures", "[", "0", "]", "<", "min_time", ":", "self", ".", "failures", ...
Return the number of failures in the window.
[ "Return", "the", "number", "of", "failures", "in", "the", "window", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/scheduler.py#L190-L199
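The two `Failures` methods above are small enough to check standalone -- append on failure, prune on read. A runnable re-statement of the same logic (the 0.5 s window is arbitrary):

```python
import collections
import time

class Failures(object):
    def __init__(self, window):
        self.window = window                      # seconds
        self.failures = collections.deque()
        self.first_failure_time = None

    def add_failure(self):
        failure_time = time.time()
        if not self.first_failure_time:
            self.first_failure_time = failure_time
        self.failures.append(failure_time)

    def num_failures(self):
        # Drop timestamps older than the window before counting.
        min_time = time.time() - self.window
        while self.failures and self.failures[0] < min_time:
            self.failures.popleft()
        return len(self.failures)

f = Failures(window=0.5)
f.add_failure()
f.add_failure()
assert f.num_failures() == 2
time.sleep(0.6)                                   # both failures age out
assert f.num_failures() == 0
```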
31,772
spotify/luigi
luigi/scheduler.py
Worker.is_trivial_worker
def is_trivial_worker(self, state): """ True if this worker is not an assistant and only has tasks without resource requirements. We have to pass the state parameter for optimization reasons. """ if self.assistant: return False return all(not task.resources for task in self.get_tasks(state, PENDING))
python
def is_trivial_worker(self, state): """ True if this worker is not an assistant and only has tasks without resource requirements. We have to pass the state parameter for optimization reasons. """ if self.assistant: return False return all(not task.resources for task in self.get_tasks(state, PENDING))
[ "def", "is_trivial_worker", "(", "self", ",", "state", ")", ":", "if", "self", ".", "assistant", ":", "return", "False", "return", "all", "(", "not", "task", ".", "resources", "for", "task", "in", "self", ".", "get_tasks", "(", "state", ",", "PENDING", ...
True if this worker is not an assistant and only has tasks without resource requirements. We have to pass the state parameter for optimization reasons.
[ "True", "if", "this", "worker", "is", "not", "an", "assistant", "and", "only", "has", "tasks", "without", "resource", "requirements", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/scheduler.py#L401-L410
31,773
spotify/luigi
luigi/scheduler.py
Scheduler._update_priority
def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker)
python
def _update_priority(self, task, prio, worker): """ Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled. """ task.priority = prio = max(prio, task.priority) for dep in task.deps or []: t = self._state.get_task(dep) if t is not None and prio > t.priority: self._update_priority(t, prio, worker)
[ "def", "_update_priority", "(", "self", ",", "task", ",", "prio", ",", "worker", ")", ":", "task", ".", "priority", "=", "prio", "=", "max", "(", "prio", ",", "task", ".", "priority", ")", "for", "dep", "in", "task", ".", "deps", "or", "[", "]", ...
Update priority of the given task. Priority can only be increased. If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
[ "Update", "priority", "of", "the", "given", "task", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/scheduler.py#L771-L782
31,774
spotify/luigi
luigi/scheduler.py
Scheduler._traverse_graph
def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True): """ Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node """ if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: def dep_func(t): return t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.debug('Missing task for id [%s]', task_id) # NOTE : If a dependency is missing from self._state there is no way to deduce the # task family and parameters. family_match = TASK_FAMILY_RE.match(task_id) family = family_match.group(1) if family_match else UNKNOWN params = {'task_id': task_id} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'display_name': task_id, 'priority': 0, } else: deps = dep_func(task) if not include_done: deps = list(self._filter_done(deps)) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if task_id != root_task_id: del serialized[task_id]['display_name'] if len(serialized) >= self._config.max_graph_nodes: break return serialized
python
def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True): """ Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node """ if seen is None: seen = set() elif root_task_id in seen: return {} if dep_func is None: def dep_func(t): return t.deps seen.add(root_task_id) serialized = {} queue = collections.deque([root_task_id]) while queue: task_id = queue.popleft() task = self._state.get_task(task_id) if task is None or not task.family: logger.debug('Missing task for id [%s]', task_id) # NOTE : If a dependency is missing from self._state there is no way to deduce the # task family and parameters. family_match = TASK_FAMILY_RE.match(task_id) family = family_match.group(1) if family_match else UNKNOWN params = {'task_id': task_id} serialized[task_id] = { 'deps': [], 'status': UNKNOWN, 'workers': [], 'start_time': UNKNOWN, 'params': params, 'name': family, 'display_name': task_id, 'priority': 0, } else: deps = dep_func(task) if not include_done: deps = list(self._filter_done(deps)) serialized[task_id] = self._serialize_task(task_id, deps=deps) for dep in sorted(deps): if dep not in seen: seen.add(dep) queue.append(dep) if task_id != root_task_id: del serialized[task_id]['display_name'] if len(serialized) >= self._config.max_graph_nodes: break return serialized
[ "def", "_traverse_graph", "(", "self", ",", "root_task_id", ",", "seen", "=", "None", ",", "dep_func", "=", "None", ",", "include_done", "=", "True", ")", ":", "if", "seen", "is", "None", ":", "seen", "=", "set", "(", ")", "elif", "root_task_id", "in",...
Returns the dependency graph rooted at task_id This does a breadth-first traversal to find the nodes closest to the root before hitting the scheduler.max_graph_nodes limit. :param root_task_id: the id of the graph's root :return: A map of task id to serialized node
[ "Returns", "the", "dependency", "graph", "rooted", "at", "task_id" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/scheduler.py#L1343-L1402
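Stripped of scheduler state and serialization, `_traverse_graph()` is a breadth-first walk with a node cap, which is exactly what keeps the nodes closest to the root when the limit bites. A standalone sketch over a hypothetical dict-of-lists graph:

```python
import collections

def traverse(graph, root, max_nodes):
    seen = {root}
    visited_order = []
    queue = collections.deque([root])
    while queue:
        node = queue.popleft()
        visited_order.append(node)
        for dep in sorted(graph.get(node, [])):
            if dep not in seen:
                seen.add(dep)
                queue.append(dep)
        if len(visited_order) >= max_nodes:
            break                     # mirrors scheduler.max_graph_nodes
    return visited_order

graph = {'root': ['a', 'b'], 'a': ['c'], 'b': ['c'], 'c': []}
print(traverse(graph, 'root', max_nodes=3))   # ['root', 'a', 'b']
```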
31,775
spotify/luigi
luigi/scheduler.py
Scheduler.task_list
def task_list(self, status='', upstream_status='', limit=True, search=None, max_shown_tasks=None, **kwargs): """ Query for a subset of tasks by status. """ if not search: count_limit = max_shown_tasks or self._config.max_shown_tasks pre_count = self._state.get_active_task_count_for_status(status) if limit and pre_count > count_limit: return {'num_tasks': -1 if upstream_status else pre_count} self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: def filter_func(_): return True else: terms = search.split() def filter_func(t): return all(term in t.pretty_id for term in terms) tasks = self._state.get_active_tasks_by_status(status) if status else self._state.get_active_tasks() for task in filter(filter_func, tasks): if task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table): serialized = self._serialize_task(task.id, include_deps=False) result[task.id] = serialized if limit and len(result) > (max_shown_tasks or self._config.max_shown_tasks): return {'num_tasks': len(result)} return result
python
def task_list(self, status='', upstream_status='', limit=True, search=None, max_shown_tasks=None, **kwargs): """ Query for a subset of tasks by status. """ if not search: count_limit = max_shown_tasks or self._config.max_shown_tasks pre_count = self._state.get_active_task_count_for_status(status) if limit and pre_count > count_limit: return {'num_tasks': -1 if upstream_status else pre_count} self.prune() result = {} upstream_status_table = {} # used to memoize upstream status if search is None: def filter_func(_): return True else: terms = search.split() def filter_func(t): return all(term in t.pretty_id for term in terms) tasks = self._state.get_active_tasks_by_status(status) if status else self._state.get_active_tasks() for task in filter(filter_func, tasks): if task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table): serialized = self._serialize_task(task.id, include_deps=False) result[task.id] = serialized if limit and len(result) > (max_shown_tasks or self._config.max_shown_tasks): return {'num_tasks': len(result)} return result
[ "def", "task_list", "(", "self", ",", "status", "=", "''", ",", "upstream_status", "=", "''", ",", "limit", "=", "True", ",", "search", "=", "None", ",", "max_shown_tasks", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "search", ":", ...
Query for a subset of tasks by status.
[ "Query", "for", "a", "subset", "of", "tasks", "by", "status", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/scheduler.py#L1424-L1454
31,776
spotify/luigi
luigi/scheduler.py
Scheduler.resources
def resources(self): ''' get total resources and available ones ''' used_resources = self._used_resources() ret = collections.defaultdict(dict) for resource, total in six.iteritems(self._resources): ret[resource]['total'] = total if resource in used_resources: ret[resource]['used'] = used_resources[resource] else: ret[resource]['used'] = 0 return ret
python
def resources(self): ''' get total resources and available ones ''' used_resources = self._used_resources() ret = collections.defaultdict(dict) for resource, total in six.iteritems(self._resources): ret[resource]['total'] = total if resource in used_resources: ret[resource]['used'] = used_resources[resource] else: ret[resource]['used'] = 0 return ret
[ "def", "resources", "(", "self", ")", ":", "used_resources", "=", "self", ".", "_used_resources", "(", ")", "ret", "=", "collections", ".", "defaultdict", "(", "dict", ")", "for", "resource", ",", "total", "in", "six", ".", "iteritems", "(", "self", ".",...
get total resources and available ones
[ "get", "total", "resources", "and", "available", "ones" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/scheduler.py#L1523-L1533
31,777
spotify/luigi
luigi/scheduler.py
Scheduler.task_search
def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. :param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, include_deps=False) result[task.status][task.id] = serialized return result
python
def task_search(self, task_str, **kwargs): """ Query for a subset of tasks by task_id. :param task_str: :return: """ self.prune() result = collections.defaultdict(dict) for task in self._state.get_active_tasks(): if task.id.find(task_str) != -1: serialized = self._serialize_task(task.id, include_deps=False) result[task.status][task.id] = serialized return result
[ "def", "task_search", "(", "self", ",", "task_str", ",", "*", "*", "kwargs", ")", ":", "self", ".", "prune", "(", ")", "result", "=", "collections", ".", "defaultdict", "(", "dict", ")", "for", "task", "in", "self", ".", "_state", ".", "get_active_task...
Query for a subset of tasks by task_id. :param task_str: :return:
[ "Query", "for", "a", "subset", "of", "tasks", "by", "task_id", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/scheduler.py#L1536-L1549
31,778
spotify/luigi
luigi/target.py
FileSystemTarget.exists
def exists(self): """ Returns ``True`` if the path for this FileSystemTarget exists; ``False`` otherwise. This method is implemented by using :py:attr:`fs`. """ path = self.path if '*' in path or '?' in path or '[' in path or '{' in path: logger.warning("Using wildcards in path %s might lead to processing of an incomplete dataset; " "override exists() to suppress the warning.", path) return self.fs.exists(path)
python
def exists(self): """ Returns ``True`` if the path for this FileSystemTarget exists; ``False`` otherwise. This method is implemented by using :py:attr:`fs`. """ path = self.path if '*' in path or '?' in path or '[' in path or '{' in path: logger.warning("Using wildcards in path %s might lead to processing of an incomplete dataset; " "override exists() to suppress the warning.", path) return self.fs.exists(path)
[ "def", "exists", "(", "self", ")", ":", "path", "=", "self", ".", "path", "if", "'*'", "in", "path", "or", "'?'", "in", "path", "or", "'['", "in", "path", "or", "'{'", "in", "path", ":", "logger", ".", "warning", "(", "\"Using wildcards in path %s migh...
Returns ``True`` if the path for this FileSystemTarget exists; ``False`` otherwise. This method is implemented by using :py:attr:`fs`.
[ "Returns", "True", "if", "the", "path", "for", "this", "FileSystemTarget", "exists", ";", "False", "otherwise", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/target.py#L242-L252
31,779
spotify/luigi
luigi/contrib/esindex.py
ElasticsearchTarget.marker_index_document_id
def marker_index_document_id(self): """ Generate an id for the indicator document. """ params = '%s:%s:%s' % (self.index, self.doc_type, self.update_id) return hashlib.sha1(params.encode('utf-8')).hexdigest()
python
def marker_index_document_id(self): """ Generate an id for the indicator document. """ params = '%s:%s:%s' % (self.index, self.doc_type, self.update_id) return hashlib.sha1(params.encode('utf-8')).hexdigest()
[ "def", "marker_index_document_id", "(", "self", ")", ":", "params", "=", "'%s:%s:%s'", "%", "(", "self", ".", "index", ",", "self", ".", "doc_type", ",", "self", ".", "update_id", ")", "return", "hashlib", ".", "sha1", "(", "params", ".", "encode", "(", ...
Generate an id for the indicator document.
[ "Generate", "an", "id", "for", "the", "indicator", "document", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/esindex.py#L161-L166
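The marker id above is just a stable SHA-1 over `index:doc_type:update_id`, so the same task parameters always map to the same indicator document. A runnable check with hypothetical values:

```python
import hashlib

params = '%s:%s:%s' % ('update_log', 'entry', 'task_abc123')  # hypothetical
doc_id = hashlib.sha1(params.encode('utf-8')).hexdigest()
print(doc_id)                     # 40 hex chars, stable across runs
assert len(doc_id) == 40
```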
31,780
spotify/luigi
luigi/contrib/esindex.py
ElasticsearchTarget.exists
def exists(self): """ Test if this task has been run. """ try: self.es.get(index=self.marker_index, doc_type=self.marker_doc_type, id=self.marker_index_document_id()) return True except elasticsearch.NotFoundError: logger.debug('Marker document not found.') except elasticsearch.ElasticsearchException as err: logger.warn(err) return False
python
def exists(self): """ Test if this task has been run. """ try: self.es.get(index=self.marker_index, doc_type=self.marker_doc_type, id=self.marker_index_document_id()) return True except elasticsearch.NotFoundError: logger.debug('Marker document not found.') except elasticsearch.ElasticsearchException as err: logger.warn(err) return False
[ "def", "exists", "(", "self", ")", ":", "try", ":", "self", ".", "es", ".", "get", "(", "index", "=", "self", ".", "marker_index", ",", "doc_type", "=", "self", ".", "marker_doc_type", ",", "id", "=", "self", ".", "marker_index_document_id", "(", ")", ...
Test if this task has been run.
[ "Test", "if", "this", "task", "has", "been", "run", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/esindex.py#L186-L197
31,781
spotify/luigi
luigi/contrib/esindex.py
ElasticsearchTarget.create_marker_index
def create_marker_index(self): """ Create the index that will keep track of the tasks if necessary. """ if not self.es.indices.exists(index=self.marker_index): self.es.indices.create(index=self.marker_index)
python
def create_marker_index(self): """ Create the index that will keep track of the tasks if necessary. """ if not self.es.indices.exists(index=self.marker_index): self.es.indices.create(index=self.marker_index)
[ "def", "create_marker_index", "(", "self", ")", ":", "if", "not", "self", ".", "es", ".", "indices", ".", "exists", "(", "index", "=", "self", ".", "marker_index", ")", ":", "self", ".", "es", ".", "indices", ".", "create", "(", "index", "=", "self",...
Create the index that will keep track of the tasks if necessary.
[ "Create", "the", "index", "that", "will", "keep", "track", "of", "the", "tasks", "if", "necessary", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/esindex.py#L199-L204
31,782
spotify/luigi
luigi/contrib/esindex.py
CopyToIndex._docs
def _docs(self): """ Since `self.docs` may yield documents that do not explicitly contain `_index` or `_type`, add those attributes here, if necessary. """ iterdocs = iter(self.docs()) first = next(iterdocs) needs_parsing = False if isinstance(first, six.string_types): needs_parsing = True elif isinstance(first, dict): pass else: raise RuntimeError('Documents must be either JSON strings or dicts.') for doc in itertools.chain([first], iterdocs): if needs_parsing: doc = json.loads(doc) if '_index' not in doc: doc['_index'] = self.index if '_type' not in doc: doc['_type'] = self.doc_type yield doc
python
def _docs(self): """ Since `self.docs` may yield documents that do not explicitly contain `_index` or `_type`, add those attributes here, if necessary. """ iterdocs = iter(self.docs()) first = next(iterdocs) needs_parsing = False if isinstance(first, six.string_types): needs_parsing = True elif isinstance(first, dict): pass else: raise RuntimeError('Documents must be either JSON strings or dicts.') for doc in itertools.chain([first], iterdocs): if needs_parsing: doc = json.loads(doc) if '_index' not in doc: doc['_index'] = self.index if '_type' not in doc: doc['_type'] = self.doc_type yield doc
[ "def", "_docs", "(", "self", ")", ":", "iterdocs", "=", "iter", "(", "self", ".", "docs", "(", ")", ")", "first", "=", "next", "(", "iterdocs", ")", "needs_parsing", "=", "False", "if", "isinstance", "(", "first", ",", "six", ".", "string_types", ")"...
Since `self.docs` may yield documents that do not explicitly contain `_index` or `_type`, add those attributes here, if necessary.
[ "Since", "self", ".", "docs", "may", "yield", "documents", "that", "do", "not", "explicitly", "contain", "_index", "or", "_type", "add", "those", "attributes", "here", "if", "necessary", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/esindex.py#L361-L382
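The normalisation in `_docs()` can be replayed without Elasticsearch: strings get parsed as JSON, and missing `_index`/`_type` fall back to defaults. A Python 3 sketch (the record itself uses `six.string_types` for 2/3 compatibility); the default index/type names are hypothetical:

```python
import itertools
import json

def normalize(docs, index='docs', doc_type='doc'):
    iterdocs = iter(docs)
    first = next(iterdocs)
    needs_parsing = isinstance(first, str)     # six.string_types in the record
    for doc in itertools.chain([first], iterdocs):
        if needs_parsing:
            doc = json.loads(doc)
        doc.setdefault('_index', index)        # same effect as the if-checks
        doc.setdefault('_type', doc_type)
        yield doc

print(list(normalize(['{"title": "a"}', '{"title": "b", "_index": "other"}'])))
```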
31,783
spotify/luigi
luigi/contrib/esindex.py
CopyToIndex.create_index
def create_index(self): """ Override to provide code for creating the target index. By default it will be created without any special settings or mappings. """ es = self._init_connection() if not es.indices.exists(index=self.index): es.indices.create(index=self.index, body=self.settings)
python
def create_index(self): """ Override to provide code for creating the target index. By default it will be created without any special settings or mappings. """ es = self._init_connection() if not es.indices.exists(index=self.index): es.indices.create(index=self.index, body=self.settings)
[ "def", "create_index", "(", "self", ")", ":", "es", "=", "self", ".", "_init_connection", "(", ")", "if", "not", "es", ".", "indices", ".", "exists", "(", "index", "=", "self", ".", "index", ")", ":", "es", ".", "indices", ".", "create", "(", "inde...
Override to provide code for creating the target index. By default it will be created without any special settings or mappings.
[ "Override", "to", "provide", "code", "for", "creating", "the", "target", "index", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/esindex.py#L394-L402
31,784
spotify/luigi
luigi/contrib/esindex.py
CopyToIndex.delete_index
def delete_index(self): """ Delete the index, if it exists. """ es = self._init_connection() if es.indices.exists(index=self.index): es.indices.delete(index=self.index)
python
def delete_index(self): """ Delete the index, if it exists. """ es = self._init_connection() if es.indices.exists(index=self.index): es.indices.delete(index=self.index)
[ "def", "delete_index", "(", "self", ")", ":", "es", "=", "self", ".", "_init_connection", "(", ")", "if", "es", ".", "indices", ".", "exists", "(", "index", "=", "self", ".", "index", ")", ":", "es", ".", "indices", ".", "delete", "(", "index", "="...
Delete the index, if it exists.
[ "Delete", "the", "index", "if", "it", "exists", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/esindex.py#L404-L410
31,785
spotify/luigi
luigi/contrib/esindex.py
CopyToIndex.output
def output(self): """ Returns an ElasticsearchTarget representing the inserted dataset. Normally you don't override this. """ return ElasticsearchTarget( host=self.host, port=self.port, http_auth=self.http_auth, index=self.index, doc_type=self.doc_type, update_id=self.update_id(), marker_index_hist_size=self.marker_index_hist_size, timeout=self.timeout, extra_elasticsearch_args=self.extra_elasticsearch_args )
python
def output(self): """ Returns an ElasticsearchTarget representing the inserted dataset. Normally you don't override this. """ return ElasticsearchTarget( host=self.host, port=self.port, http_auth=self.http_auth, index=self.index, doc_type=self.doc_type, update_id=self.update_id(), marker_index_hist_size=self.marker_index_hist_size, timeout=self.timeout, extra_elasticsearch_args=self.extra_elasticsearch_args )
[ "def", "output", "(", "self", ")", ":", "return", "ElasticsearchTarget", "(", "host", "=", "self", ".", "host", ",", "port", "=", "self", ".", "port", ",", "http_auth", "=", "self", ".", "http_auth", ",", "index", "=", "self", ".", "index", ",", "doc...
Returns an ElasticsearchTarget representing the inserted dataset. Normally you don't override this.
[ "Returns", "an", "ElasticsearchTarget", "representing", "the", "inserted", "dataset", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/esindex.py#L418-L434
31,786
spotify/luigi
luigi/contrib/kubernetes.py
KubernetesJobTask.__track_job
def __track_job(self): """Poll job status while active""" while not self.__verify_job_has_started(): time.sleep(self.__POLL_TIME) self.__logger.debug("Waiting for Kubernetes job " + self.uu_name + " to start") self.__print_kubectl_hints() status = self.__get_job_status() while status == "RUNNING": self.__logger.debug("Kubernetes job " + self.uu_name + " is running") time.sleep(self.__POLL_TIME) status = self.__get_job_status() assert status != "FAILED", "Kubernetes job " + self.uu_name + " failed" # status == "SUCCEEDED" self.__logger.info("Kubernetes job " + self.uu_name + " succeeded") self.signal_complete()
python
def __track_job(self): """Poll job status while active""" while not self.__verify_job_has_started(): time.sleep(self.__POLL_TIME) self.__logger.debug("Waiting for Kubernetes job " + self.uu_name + " to start") self.__print_kubectl_hints() status = self.__get_job_status() while status == "RUNNING": self.__logger.debug("Kubernetes job " + self.uu_name + " is running") time.sleep(self.__POLL_TIME) status = self.__get_job_status() assert status != "FAILED", "Kubernetes job " + self.uu_name + " failed" # status == "SUCCEEDED" self.__logger.info("Kubernetes job " + self.uu_name + " succeeded") self.signal_complete()
[ "def", "__track_job", "(", "self", ")", ":", "while", "not", "self", ".", "__verify_job_has_started", "(", ")", ":", "time", ".", "sleep", "(", "self", ".", "__POLL_TIME", ")", "self", ".", "__logger", ".", "debug", "(", "\"Waiting for Kubernetes job \"", "+...
Poll job status while active
[ "Poll", "job", "status", "while", "active" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/kubernetes.py#L211-L228
31,787
spotify/luigi
luigi/contrib/kubernetes.py
KubernetesJobTask.__verify_job_has_started
def __verify_job_has_started(self): """Asserts that the job has successfully started""" # Verify that the job started self.__get_job() # Verify that the pod started pods = self.__get_pods() assert len(pods) > 0, "No pod scheduled by " + self.uu_name for pod in pods: status = pod.obj['status'] for cont_stats in status.get('containerStatuses', []): if 'terminated' in cont_stats['state']: t = cont_stats['state']['terminated'] err_msg = "Pod %s %s (exit code %d). Logs: `kubectl logs pod/%s`" % ( pod.name, t['reason'], t['exitCode'], pod.name) assert t['exitCode'] == 0, err_msg if 'waiting' in cont_stats['state']: wr = cont_stats['state']['waiting']['reason'] assert wr == 'ContainerCreating', "Pod %s %s. Logs: `kubectl logs pod/%s`" % ( pod.name, wr, pod.name) for cond in status.get('conditions', []): if 'message' in cond: if cond['reason'] == 'ContainersNotReady': return False assert cond['status'] != 'False', \ "[ERROR] %s - %s" % (cond['reason'], cond['message']) return True
python
def __verify_job_has_started(self): """Asserts that the job has successfully started""" # Verify that the job started self.__get_job() # Verify that the pod started pods = self.__get_pods() assert len(pods) > 0, "No pod scheduled by " + self.uu_name for pod in pods: status = pod.obj['status'] for cont_stats in status.get('containerStatuses', []): if 'terminated' in cont_stats['state']: t = cont_stats['state']['terminated'] err_msg = "Pod %s %s (exit code %d). Logs: `kubectl logs pod/%s`" % ( pod.name, t['reason'], t['exitCode'], pod.name) assert t['exitCode'] == 0, err_msg if 'waiting' in cont_stats['state']: wr = cont_stats['state']['waiting']['reason'] assert wr == 'ContainerCreating', "Pod %s %s. Logs: `kubectl logs pod/%s`" % ( pod.name, wr, pod.name) for cond in status.get('conditions', []): if 'message' in cond: if cond['reason'] == 'ContainersNotReady': return False assert cond['status'] != 'False', \ "[ERROR] %s - %s" % (cond['reason'], cond['message']) return True
[ "def", "__verify_job_has_started", "(", "self", ")", ":", "# Verify that the job started", "self", ".", "__get_job", "(", ")", "# Verify that the pod started", "pods", "=", "self", ".", "__get_pods", "(", ")", "assert", "len", "(", "pods", ")", ">", "0", ",", ...
Asserts that the job has successfully started
[ "Asserts", "that", "the", "job", "has", "successfully", "started" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/kubernetes.py#L267-L296
31,788
spotify/luigi
luigi/contrib/kubernetes.py
KubernetesJobTask.__get_job_status
def __get_job_status(self): """Return the Kubernetes job status""" # Figure out status and return it job = self.__get_job() if "succeeded" in job.obj["status"] and job.obj["status"]["succeeded"] > 0: job.scale(replicas=0) if self.print_pod_logs_on_exit: self.__print_pod_logs() if self.delete_on_success: self.__delete_job_cascade(job) return "SUCCEEDED" if "failed" in job.obj["status"]: failed_cnt = job.obj["status"]["failed"] self.__logger.debug("Kubernetes job " + self.uu_name + " status.failed: " + str(failed_cnt)) if self.print_pod_logs_on_exit: self.__print_pod_logs() if failed_cnt > self.max_retrials: job.scale(replicas=0) # avoid more retrials return "FAILED" return "RUNNING"
python
def __get_job_status(self): """Return the Kubernetes job status""" # Figure out status and return it job = self.__get_job() if "succeeded" in job.obj["status"] and job.obj["status"]["succeeded"] > 0: job.scale(replicas=0) if self.print_pod_logs_on_exit: self.__print_pod_logs() if self.delete_on_success: self.__delete_job_cascade(job) return "SUCCEEDED" if "failed" in job.obj["status"]: failed_cnt = job.obj["status"]["failed"] self.__logger.debug("Kubernetes job " + self.uu_name + " status.failed: " + str(failed_cnt)) if self.print_pod_logs_on_exit: self.__print_pod_logs() if failed_cnt > self.max_retrials: job.scale(replicas=0) # avoid more retrials return "FAILED" return "RUNNING"
[ "def", "__get_job_status", "(", "self", ")", ":", "# Figure out status and return it", "job", "=", "self", ".", "__get_job", "(", ")", "if", "\"succeeded\"", "in", "job", ".", "obj", "[", "\"status\"", "]", "and", "job", ".", "obj", "[", "\"status\"", "]", ...
Return the Kubernetes job status
[ "Return", "the", "Kubernetes", "job", "status" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/kubernetes.py#L298-L320
31,789
spotify/luigi
luigi/contrib/sqla.py
SQLAlchemyTarget.engine
def engine(self): """ Return an engine instance, creating it if it doesn't exist. Recreate the engine connection if it wasn't originally created by the current process. """ pid = os.getpid() conn = SQLAlchemyTarget._engine_dict.get(self.connection_string) if not conn or conn.pid != pid: # create and reset connection engine = sqlalchemy.create_engine( self.connection_string, connect_args=self.connect_args, echo=self.echo ) SQLAlchemyTarget._engine_dict[self.connection_string] = self.Connection(engine, pid) return SQLAlchemyTarget._engine_dict[self.connection_string].engine
python
def engine(self): """ Return an engine instance, creating it if it doesn't exist. Recreate the engine connection if it wasn't originally created by the current process. """ pid = os.getpid() conn = SQLAlchemyTarget._engine_dict.get(self.connection_string) if not conn or conn.pid != pid: # create and reset connection engine = sqlalchemy.create_engine( self.connection_string, connect_args=self.connect_args, echo=self.echo ) SQLAlchemyTarget._engine_dict[self.connection_string] = self.Connection(engine, pid) return SQLAlchemyTarget._engine_dict[self.connection_string].engine
[ "def", "engine", "(", "self", ")", ":", "pid", "=", "os", ".", "getpid", "(", ")", "conn", "=", "SQLAlchemyTarget", ".", "_engine_dict", ".", "get", "(", "self", ".", "connection_string", ")", "if", "not", "conn", "or", "conn", ".", "pid", "!=", "pid...
Return an engine instance, creating it if it doesn't exist. Recreate the engine connection if it wasn't originally created by the current process.
[ "Return", "an", "engine", "instance", "creating", "it", "if", "it", "doesn", "t", "exist", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/sqla.py#L193-L210
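The point of the `engine` property above is one engine per connection string per process: a forked child must not reuse the parent's pooled connections. A dummy-factory sketch of the same caching rule (no database needed):

```python
import os

_engine_dict = {}

def make_engine(connection_string):
    # Stand-in for sqlalchemy.create_engine(...); returns a fresh sentinel.
    return object()

def engine(connection_string):
    pid = os.getpid()
    cached = _engine_dict.get(connection_string)
    if not cached or cached[1] != pid:
        # (Re)create when missing or when inherited across a fork.
        _engine_dict[connection_string] = (make_engine(connection_string), pid)
    return _engine_dict[connection_string][0]

e1 = engine('sqlite://')
assert engine('sqlite://') is e1    # same process -> same cached engine
```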
31,790
spotify/luigi
luigi/contrib/hdfs/target.py
HdfsTarget.rename
def rename(self, path, raise_if_exists=False): """ Does not change self.path. Unlike ``move_dir()``, ``rename()`` might cause nested directories. See spotify/luigi#522 """ if isinstance(path, HdfsTarget): path = path.path if raise_if_exists and self.fs.exists(path): raise RuntimeError('Destination exists: %s' % path) self.fs.rename(self.path, path)
python
def rename(self, path, raise_if_exists=False): """ Does not change self.path. Unlike ``move_dir()``, ``rename()`` might cause nested directories. See spotify/luigi#522 """ if isinstance(path, HdfsTarget): path = path.path if raise_if_exists and self.fs.exists(path): raise RuntimeError('Destination exists: %s' % path) self.fs.rename(self.path, path)
[ "def", "rename", "(", "self", ",", "path", ",", "raise_if_exists", "=", "False", ")", ":", "if", "isinstance", "(", "path", ",", "HdfsTarget", ")", ":", "path", "=", "path", ".", "path", "if", "raise_if_exists", "and", "self", ".", "fs", ".", "exists",...
Does not change self.path. Unlike ``move_dir()``, ``rename()`` might cause nested directories. See spotify/luigi#522
[ "Does", "not", "change", "self", ".", "path", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hdfs/target.py#L121-L132
31,791
spotify/luigi
luigi/contrib/hdfs/target.py
HdfsTarget.is_writable
def is_writable(self): """ Currently only works with hadoopcli """ if "/" in self.path: # example path: /log/ap/2013-01-17/00 parts = self.path.split("/") # start with the full path and then up the tree until we can check length = len(parts) for part in range(length): path = "/".join(parts[0:length - part]) + "/" if self.fs.exists(path): # if the path exists and we can write there, great! if self._is_writable(path): return True # if it exists and we can't =( sad panda else: return False # We went through all parts of the path and we still couldn't find # one that exists. return False
python
def is_writable(self): """ Currently only works with hadoopcli """ if "/" in self.path: # example path: /log/ap/2013-01-17/00 parts = self.path.split("/") # start with the full path and then up the tree until we can check length = len(parts) for part in range(length): path = "/".join(parts[0:length - part]) + "/" if self.fs.exists(path): # if the path exists and we can write there, great! if self._is_writable(path): return True # if it exists and we can't =( sad panda else: return False # We went through all parts of the path and we still couldn't find # one that exists. return False
[ "def", "is_writable", "(", "self", ")", ":", "if", "\"/\"", "in", "self", ".", "path", ":", "# example path: /log/ap/2013-01-17/00", "parts", "=", "self", ".", "path", ".", "split", "(", "\"/\"", ")", "# start with the full path and then up the tree until we can check...
Currently only works with hadoopcli
[ "Currently", "only", "works", "with", "hadoopcli" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hdfs/target.py#L158-L178
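`is_writable()` walks up the path until some prefix exists, then answers for that prefix alone. The same walk with a local-filesystem predicate instead of HDFS (POSIX paths assumed):

```python
import os

def first_existing_prefix(path):
    parts = path.split('/')
    length = len(parts)
    for cut in range(length):
        # Full path first, then strip one trailing component per iteration.
        prefix = '/'.join(parts[0:length - cut]) + '/'
        if os.path.exists(prefix):
            return prefix
    return None

# On most systems only '/tmp/' exists out of this chain.
print(first_existing_prefix('/tmp/some/deep/new/dir'))
```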
31,792
spotify/luigi
luigi/execution_summary.py
_partition_tasks
def _partition_tasks(worker): """ Takes a worker and sorts out tasks based on their status. Still_pending_not_ext is only used to get upstream_failure, upstream_missing_dependency and run_by_other_worker """ task_history = worker._add_task_history pending_tasks = {task for(task, status, ext) in task_history if status == 'PENDING'} set_tasks = {} set_tasks["completed"] = {task for (task, status, ext) in task_history if status == 'DONE' and task in pending_tasks} set_tasks["already_done"] = {task for (task, status, ext) in task_history if status == 'DONE' and task not in pending_tasks and task not in set_tasks["completed"]} set_tasks["ever_failed"] = {task for (task, status, ext) in task_history if status == 'FAILED'} set_tasks["failed"] = set_tasks["ever_failed"] - set_tasks["completed"] set_tasks["scheduling_error"] = {task for(task, status, ext) in task_history if status == 'UNKNOWN'} set_tasks["still_pending_ext"] = {task for (task, status, ext) in task_history if status == 'PENDING' and task not in set_tasks["ever_failed"] and task not in set_tasks["completed"] and not ext} set_tasks["still_pending_not_ext"] = {task for (task, status, ext) in task_history if status == 'PENDING' and task not in set_tasks["ever_failed"] and task not in set_tasks["completed"] and ext} set_tasks["run_by_other_worker"] = set() set_tasks["upstream_failure"] = set() set_tasks["upstream_missing_dependency"] = set() set_tasks["upstream_run_by_other_worker"] = set() set_tasks["upstream_scheduling_error"] = set() set_tasks["not_run"] = set() return set_tasks
python
def _partition_tasks(worker): """ Takes a worker and sorts out tasks based on their status. Still_pending_not_ext is only used to get upstream_failure, upstream_missing_dependency and run_by_other_worker """ task_history = worker._add_task_history pending_tasks = {task for(task, status, ext) in task_history if status == 'PENDING'} set_tasks = {} set_tasks["completed"] = {task for (task, status, ext) in task_history if status == 'DONE' and task in pending_tasks} set_tasks["already_done"] = {task for (task, status, ext) in task_history if status == 'DONE' and task not in pending_tasks and task not in set_tasks["completed"]} set_tasks["ever_failed"] = {task for (task, status, ext) in task_history if status == 'FAILED'} set_tasks["failed"] = set_tasks["ever_failed"] - set_tasks["completed"] set_tasks["scheduling_error"] = {task for(task, status, ext) in task_history if status == 'UNKNOWN'} set_tasks["still_pending_ext"] = {task for (task, status, ext) in task_history if status == 'PENDING' and task not in set_tasks["ever_failed"] and task not in set_tasks["completed"] and not ext} set_tasks["still_pending_not_ext"] = {task for (task, status, ext) in task_history if status == 'PENDING' and task not in set_tasks["ever_failed"] and task not in set_tasks["completed"] and ext} set_tasks["run_by_other_worker"] = set() set_tasks["upstream_failure"] = set() set_tasks["upstream_missing_dependency"] = set() set_tasks["upstream_run_by_other_worker"] = set() set_tasks["upstream_scheduling_error"] = set() set_tasks["not_run"] = set() return set_tasks
[ "def", "_partition_tasks", "(", "worker", ")", ":", "task_history", "=", "worker", ".", "_add_task_history", "pending_tasks", "=", "{", "task", "for", "(", "task", ",", "status", ",", "ext", ")", "in", "task_history", "if", "status", "==", "'PENDING'", "}", ...
Takes a worker and sorts out tasks based on their status. Still_pending_not_ext is only used to get upstream_failure, upstream_missing_dependency and run_by_other_worker
[ "Takes", "a", "worker", "and", "sorts", "out", "tasks", "based", "on", "their", "status", ".", "Still_pending_not_ext", "is", "only", "used", "to", "get", "upstream_failure", "upstream_missing_dependency", "and", "run_by_other_worker" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/execution_summary.py#L91-L115
31,793
spotify/luigi
luigi/execution_summary.py
_depth_first_search
def _depth_first_search(set_tasks, current_task, visited): """ This dfs checks why tasks are still pending. """ visited.add(current_task) if current_task in set_tasks["still_pending_not_ext"]: upstream_failure = False upstream_missing_dependency = False upstream_run_by_other_worker = False upstream_scheduling_error = False for task in current_task._requires(): if task not in visited: _depth_first_search(set_tasks, task, visited) if task in set_tasks["ever_failed"] or task in set_tasks["upstream_failure"]: set_tasks["upstream_failure"].add(current_task) upstream_failure = True if task in set_tasks["still_pending_ext"] or task in set_tasks["upstream_missing_dependency"]: set_tasks["upstream_missing_dependency"].add(current_task) upstream_missing_dependency = True if task in set_tasks["run_by_other_worker"] or task in set_tasks["upstream_run_by_other_worker"]: set_tasks["upstream_run_by_other_worker"].add(current_task) upstream_run_by_other_worker = True if task in set_tasks["scheduling_error"]: set_tasks["upstream_scheduling_error"].add(current_task) upstream_scheduling_error = True if not upstream_failure and not upstream_missing_dependency and \ not upstream_run_by_other_worker and not upstream_scheduling_error and \ current_task not in set_tasks["run_by_other_worker"]: set_tasks["not_run"].add(current_task)
python
def _depth_first_search(set_tasks, current_task, visited): """ This dfs checks why tasks are still pending. """ visited.add(current_task) if current_task in set_tasks["still_pending_not_ext"]: upstream_failure = False upstream_missing_dependency = False upstream_run_by_other_worker = False upstream_scheduling_error = False for task in current_task._requires(): if task not in visited: _depth_first_search(set_tasks, task, visited) if task in set_tasks["ever_failed"] or task in set_tasks["upstream_failure"]: set_tasks["upstream_failure"].add(current_task) upstream_failure = True if task in set_tasks["still_pending_ext"] or task in set_tasks["upstream_missing_dependency"]: set_tasks["upstream_missing_dependency"].add(current_task) upstream_missing_dependency = True if task in set_tasks["run_by_other_worker"] or task in set_tasks["upstream_run_by_other_worker"]: set_tasks["upstream_run_by_other_worker"].add(current_task) upstream_run_by_other_worker = True if task in set_tasks["scheduling_error"]: set_tasks["upstream_scheduling_error"].add(current_task) upstream_scheduling_error = True if not upstream_failure and not upstream_missing_dependency and \ not upstream_run_by_other_worker and not upstream_scheduling_error and \ current_task not in set_tasks["run_by_other_worker"]: set_tasks["not_run"].add(current_task)
[ "def", "_depth_first_search", "(", "set_tasks", ",", "current_task", ",", "visited", ")", ":", "visited", ".", "add", "(", "current_task", ")", "if", "current_task", "in", "set_tasks", "[", "\"still_pending_not_ext\"", "]", ":", "upstream_failure", "=", "False", ...
This dfs checks why tasks are still pending.
[ "This", "dfs", "checks", "why", "tasks", "are", "still", "pending", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/execution_summary.py#L134-L162
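A small runnable sketch of the propagation above, assuming the _depth_first_search definition is in scope. The toy Task class below exists only to supply the _requires() hook the function reads; the task names and graph are made up:

class Task:
    # Minimal stand-in; real luigi tasks carry far more state.
    def __init__(self, name, requires=()):
        self.name = name
        self._deps = list(requires)

    def _requires(self):
        return self._deps

    def __repr__(self):
        return self.name

ext = Task("ExternalInput")      # still pending, external
mid = Task("Transform", [ext])   # pending because its input is missing
top = Task("Report", [mid])

set_tasks = {key: set() for key in (
    "ever_failed", "upstream_failure", "upstream_missing_dependency",
    "run_by_other_worker", "upstream_run_by_other_worker",
    "scheduling_error", "upstream_scheduling_error", "not_run")}
set_tasks["still_pending_not_ext"] = {mid, top}
set_tasks["still_pending_ext"] = {ext}

_depth_first_search(set_tasks, top, set())
print(set_tasks["upstream_missing_dependency"])  # {Transform, Report} (order may vary)
print(set_tasks["not_run"])                      # set()

The missing external dependency is blamed transitively: both Transform and Report land in upstream_missing_dependency, so neither is reported as not_run.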
31,794
spotify/luigi
luigi/execution_summary.py
_ranging_attributes
def _ranging_attributes(attributes, param_class):
    """
    Checks if there is a continuous range
    """
    next_attributes = {param_class.next_in_enumeration(attribute) for attribute in attributes}
    in_first = attributes.difference(next_attributes)
    in_second = next_attributes.difference(attributes)
    if len(in_first) == 1 and len(in_second) == 1:
        for x in attributes:
            if {param_class.next_in_enumeration(x)} == in_second:
                return next(iter(in_first)), x
    return None, None
python
def _ranging_attributes(attributes, param_class):
    """
    Checks if there is a continuous range
    """
    next_attributes = {param_class.next_in_enumeration(attribute) for attribute in attributes}
    in_first = attributes.difference(next_attributes)
    in_second = next_attributes.difference(attributes)
    if len(in_first) == 1 and len(in_second) == 1:
        for x in attributes:
            if {param_class.next_in_enumeration(x)} == in_second:
                return next(iter(in_first)), x
    return None, None
[ "def", "_ranging_attributes", "(", "attributes", ",", "param_class", ")", ":", "next_attributes", "=", "{", "param_class", ".", "next_in_enumeration", "(", "attribute", ")", "for", "attribute", "in", "attributes", "}", "in_first", "=", "attributes", ".", "differen...
Checks if there is a continuous range
[ "Checks", "if", "there", "is", "a", "continuous", "range" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/execution_summary.py#L257-L268
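To see the set-difference trick at work, here is a quick check with plain integers, assuming the _ranging_attributes definition above is in scope. IntParam is a made-up stand-in for a parameter class exposing next_in_enumeration:

class IntParam:
    # Hypothetical parameter class: the successor of an int is n + 1.
    @staticmethod
    def next_in_enumeration(value):
        return value + 1

# {3, 4, 5, 6} shifted by one is {4, 5, 6, 7}; exactly one element (3) is
# only in the original set and one (7) only in the shifted set, so the
# values form the contiguous range 3..6.
print(_ranging_attributes({3, 4, 5, 6}, IntParam))  # (3, 6)
print(_ranging_attributes({3, 5, 6}, IntParam))     # (None, None) -- gap at 4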
31,795
spotify/luigi
luigi/execution_summary.py
_get_comments
def _get_comments(group_tasks):
    """
    Get the human readable comments and quantities for the task types.
    """
    comments = {}
    for status, human in _COMMENTS:
        num_tasks = _get_number_of_tasks_for(status, group_tasks)
        if num_tasks:
            space = "    " if status in _PENDING_SUB_STATUSES else ""
            comments[status] = '{space}* {num_tasks} {human}:\n'.format(
                space=space,
                num_tasks=num_tasks,
                human=human)
    return comments
python
def _get_comments(group_tasks):
    """
    Get the human readable comments and quantities for the task types.
    """
    comments = {}
    for status, human in _COMMENTS:
        num_tasks = _get_number_of_tasks_for(status, group_tasks)
        if num_tasks:
            space = "    " if status in _PENDING_SUB_STATUSES else ""
            comments[status] = '{space}* {num_tasks} {human}:\n'.format(
                space=space,
                num_tasks=num_tasks,
                human=human)
    return comments
[ "def", "_get_comments", "(", "group_tasks", ")", ":", "comments", "=", "{", "}", "for", "status", ",", "human", "in", "_COMMENTS", ":", "num_tasks", "=", "_get_number_of_tasks_for", "(", "status", ",", "group_tasks", ")", "if", "num_tasks", ":", "space", "="...
Get the human readable comments and quantities for the task types.
[ "Get", "the", "human", "readable", "comments", "and", "quantities", "for", "the", "task", "types", "." ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/execution_summary.py#L301-L314
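A self-contained sketch of the same pattern. The statuses, wording, and counts below are illustrative; luigi's real _COMMENTS table and _get_number_of_tasks_for helper live elsewhere in execution_summary.py:

_COMMENTS = [
    ("completed", "ran successfully"),
    ("failed", "failed"),
    ("upstream_failure", "were left pending, as their dependencies failed"),
]
_PENDING_SUB_STATUSES = {"upstream_failure"}

# Tasks grouped by status, then by task family name.
group_tasks = {
    "completed": {"Extract": ["e1", "e2"]},
    "failed": {"Transform": ["t1"]},
    "upstream_failure": {"Load": ["l1"]},
}

def _get_number_of_tasks_for(status, group_tasks):
    # Count tasks across all task families in one status bucket.
    return sum(len(tasks) for tasks in group_tasks.get(status, {}).values())

comments = {}
for status, human in _COMMENTS:
    num_tasks = _get_number_of_tasks_for(status, group_tasks)
    if num_tasks:
        # Pending sub-statuses are indented one level under their parent.
        space = "    " if status in _PENDING_SUB_STATUSES else ""
        comments[status] = '{space}* {num_tasks} {human}:\n'.format(
            space=space, num_tasks=num_tasks, human=human)

print(comments["completed"], end="")         # * 2 ran successfully:
print(comments["upstream_failure"], end="")  #     * 1 were left pending, ...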
31,796
spotify/luigi
luigi/execution_summary.py
_get_run_by_other_worker
def _get_run_by_other_worker(worker):
    """
    This returns a set of the tasks that are being run by other workers
    """
    task_sets = _get_external_workers(worker).values()
    return functools.reduce(lambda a, b: a | b, task_sets, set())
python
def _get_run_by_other_worker(worker):
    """
    This returns a set of the tasks that are being run by other workers
    """
    task_sets = _get_external_workers(worker).values()
    return functools.reduce(lambda a, b: a | b, task_sets, set())
[ "def", "_get_run_by_other_worker", "(", "worker", ")", ":", "task_sets", "=", "_get_external_workers", "(", "worker", ")", ".", "values", "(", ")", "return", "functools", ".", "reduce", "(", "lambda", "a", ",", "b", ":", "a", "|", "b", ",", "task_sets", ...
This returns a set of the tasks that are being run by other workers
[ "This", "returns", "a", "set", "of", "the", "tasks", "that", "are", "being", "run", "by", "other", "worker" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/execution_summary.py#L350-L355
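The reduce-with-| idiom above is simply a union over all per-worker task sets. A quick illustration with made-up worker ids and task names:

import functools

task_sets = {"worker-1": {"A", "B"}, "worker-2": {"B", "C"}}.values()
all_tasks = functools.reduce(lambda a, b: a | b, task_sets, set())
print(all_tasks)  # {'A', 'B', 'C'} (set order may vary)

An equivalent spelling is set().union(*task_sets); the explicit reduce makes the set() fallback for an empty history obvious.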
31,797
spotify/luigi
luigi/execution_summary.py
_get_external_workers
def _get_external_workers(worker):
    """
    This returns a dict with a set of tasks for all of the other workers
    """
    worker_that_blocked_task = collections.defaultdict(set)
    get_work_response_history = worker._get_work_response_history
    for get_work_response in get_work_response_history:
        if get_work_response['task_id'] is None:
            for running_task in get_work_response['running_tasks']:
                other_worker_id = running_task['worker']
                other_task_id = running_task['task_id']
                other_task = worker._scheduled_tasks.get(other_task_id)
                if other_worker_id == worker._id or not other_task:
                    continue
                worker_that_blocked_task[other_worker_id].add(other_task)
    return worker_that_blocked_task
python
def _get_external_workers(worker):
    """
    This returns a dict with a set of tasks for all of the other workers
    """
    worker_that_blocked_task = collections.defaultdict(set)
    get_work_response_history = worker._get_work_response_history
    for get_work_response in get_work_response_history:
        if get_work_response['task_id'] is None:
            for running_task in get_work_response['running_tasks']:
                other_worker_id = running_task['worker']
                other_task_id = running_task['task_id']
                other_task = worker._scheduled_tasks.get(other_task_id)
                if other_worker_id == worker._id or not other_task:
                    continue
                worker_that_blocked_task[other_worker_id].add(other_task)
    return worker_that_blocked_task
[ "def", "_get_external_workers", "(", "worker", ")", ":", "worker_that_blocked_task", "=", "collections", ".", "defaultdict", "(", "set", ")", "get_work_response_history", "=", "worker", ".", "_get_work_response_history", "for", "get_work_response", "in", "get_work_respons...
This returns a dict with a set of tasks for all of the other workers
[ "This", "returns", "a", "dict", "with", "a", "set", "of", "tasks", "for", "all", "of", "the", "other", "workers" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/execution_summary.py#L358-L373
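A runnable sketch, assuming the _get_external_workers definition above is in scope. FakeWorker is a made-up object exposing only the three attributes the function reads; the ids, task names, and response shape are illustrative:

import collections  # needed by _get_external_workers itself

class FakeWorker:
    _id = "this-worker"
    _scheduled_tasks = {"t1": "TaskOne", "t2": "TaskTwo"}
    # task_id is None when the scheduler had no work for this worker but
    # reported tasks currently running elsewhere.
    _get_work_response_history = [
        {"task_id": None,
         "running_tasks": [
             {"worker": "other-worker", "task_id": "t1"},
             {"worker": "this-worker", "task_id": "t2"},    # our own work: skipped
             {"worker": "other-worker", "task_id": "t99"},  # unknown task: skipped
         ]},
    ]

print(dict(_get_external_workers(FakeWorker())))  # {'other-worker': {'TaskOne'}}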
31,798
spotify/luigi
luigi/execution_summary.py
_group_tasks_by_name_and_status
def _group_tasks_by_name_and_status(task_dict):
    """
    Takes a dictionary with sets of tasks grouped by their status
    and returns a dictionary with dictionaries with an array of tasks grouped by their status and task name
    """
    group_status = {}
    for task in task_dict:
        if task.task_family not in group_status:
            group_status[task.task_family] = []
        group_status[task.task_family].append(task)
    return group_status
python
def _group_tasks_by_name_and_status(task_dict):
    """
    Takes a dictionary with sets of tasks grouped by their status
    and returns a dictionary with dictionaries with an array of tasks grouped by their status and task name
    """
    group_status = {}
    for task in task_dict:
        if task.task_family not in group_status:
            group_status[task.task_family] = []
        group_status[task.task_family].append(task)
    return group_status
[ "def", "_group_tasks_by_name_and_status", "(", "task_dict", ")", ":", "group_status", "=", "{", "}", "for", "task", "in", "task_dict", ":", "if", "task", ".", "task_family", "not", "in", "group_status", ":", "group_status", "[", "task", ".", "task_family", "]"...
Takes a dictionary with sets of tasks grouped by their status and returns a dictionary with dictionaries with an array of tasks grouped by their status and task name
[ "Takes", "a", "dictionary", "with", "sets", "of", "tasks", "grouped", "by", "their", "status", "and", "returns", "a", "dictionary", "with", "dictionaries", "with", "an", "array", "of", "tasks", "grouped", "by", "their", "status", "and", "task", "name" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/execution_summary.py#L376-L387
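A quick illustration, assuming the definition above is in scope; T is a throwaway stand-in carrying only the task_family attribute the function reads:

class T:
    def __init__(self, family):
        self.task_family = family

    def __repr__(self):
        return self.task_family

tasks = [T("Clean"), T("Clean"), T("Load")]
print(_group_tasks_by_name_and_status(tasks))
# {'Clean': [Clean, Clean], 'Load': [Load]}

The same grouping could be written with collections.defaultdict(list); the explicit membership check keeps the result a plain dict.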
31,799
spotify/luigi
luigi/execution_summary.py
_tasks_status
def _tasks_status(set_tasks):
    """
    Given a grouped set of tasks, returns a LuigiStatusCode
    """
    if set_tasks["ever_failed"]:
        if not set_tasks["failed"]:
            return LuigiStatusCode.SUCCESS_WITH_RETRY
        else:
            if set_tasks["scheduling_error"]:
                return LuigiStatusCode.FAILED_AND_SCHEDULING_FAILED
            return LuigiStatusCode.FAILED
    elif set_tasks["scheduling_error"]:
        return LuigiStatusCode.SCHEDULING_FAILED
    elif set_tasks["not_run"]:
        return LuigiStatusCode.NOT_RUN
    elif set_tasks["still_pending_ext"]:
        return LuigiStatusCode.MISSING_EXT
    else:
        return LuigiStatusCode.SUCCESS
python
def _tasks_status(set_tasks):
    """
    Given a grouped set of tasks, returns a LuigiStatusCode
    """
    if set_tasks["ever_failed"]:
        if not set_tasks["failed"]:
            return LuigiStatusCode.SUCCESS_WITH_RETRY
        else:
            if set_tasks["scheduling_error"]:
                return LuigiStatusCode.FAILED_AND_SCHEDULING_FAILED
            return LuigiStatusCode.FAILED
    elif set_tasks["scheduling_error"]:
        return LuigiStatusCode.SCHEDULING_FAILED
    elif set_tasks["not_run"]:
        return LuigiStatusCode.NOT_RUN
    elif set_tasks["still_pending_ext"]:
        return LuigiStatusCode.MISSING_EXT
    else:
        return LuigiStatusCode.SUCCESS
[ "def", "_tasks_status", "(", "set_tasks", ")", ":", "if", "set_tasks", "[", "\"ever_failed\"", "]", ":", "if", "not", "set_tasks", "[", "\"failed\"", "]", ":", "return", "LuigiStatusCode", ".", "SUCCESS_WITH_RETRY", "else", ":", "if", "set_tasks", "[", "\"sche...
Given a grouped set of tasks, returns a LuigiStatusCode
[ "Given", "a", "grouped", "set", "of", "tasks", "returns", "a", "LuigiStatusCode" ]
c5eca1c3c3ee2a7eb612486192a0da146710a1e9
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/execution_summary.py#L450-L468
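A sketch of the precedence above, assuming _tasks_status and the stand-in enum below share a namespace (the real LuigiStatusCode enum in luigi/execution_summary.py carries additional metadata on each member):

import enum

class LuigiStatusCode(enum.Enum):
    SUCCESS = 1
    SUCCESS_WITH_RETRY = 2
    FAILED = 3
    FAILED_AND_SCHEDULING_FAILED = 4
    SCHEDULING_FAILED = 5
    NOT_RUN = 6
    MISSING_EXT = 7

empty = {k: set() for k in (
    "ever_failed", "failed", "scheduling_error", "not_run", "still_pending_ext")}

print(_tasks_status(empty))                            # LuigiStatusCode.SUCCESS
print(_tasks_status({**empty, "ever_failed": {"t"}}))  # SUCCESS_WITH_RETRY: failed once, then succeeded
print(_tasks_status({**empty, "ever_failed": {"t"}, "failed": {"t"}}))
# LuigiStatusCode.FAILED -- still failing at the end of the run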