_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q33900
_copy_binder_notebooks
train
def _copy_binder_notebooks(app):
    """Copy Jupyter notebooks to the binder notebooks directory.

    Mirrors each output gallery directory structure under the Binder
    notebooks directory, keeping only the Jupyter notebook files.
    """
    gallery_conf = app.config.sphinx_gallery_conf
    gallery_dirs = gallery_conf.get('gallery_dirs')
    binder_conf = gallery_conf.get('binder')
    notebooks_dir = os.path.join(app.outdir,
                                 binder_conf.get('notebooks_dir'))
    # Start from a clean slate so stale notebooks are not left behind.
    shutil.rmtree(notebooks_dir, ignore_errors=True)
    os.makedirs(notebooks_dir)
    if not isinstance(gallery_dirs, (list, tuple)):
        gallery_dirs = [gallery_dirs]
    folder_iter = sphinx_compatibility.status_iterator(
        gallery_dirs, 'copying binder notebooks...',
        length=len(gallery_dirs))
    for folder in folder_iter:
        # Copy the whole tree, filtering out everything but .ipynb files.
        shutil.copytree(os.path.join(app.srcdir, folder),
                        os.path.join(notebooks_dir, folder),
                        ignore=_remove_ipynb_files)
python
{ "resource": "" }
q33901
check_binder_conf
train
def check_binder_conf(binder_conf):
    """Check that the Binder configuration is correct.

    Parameters
    ----------
    binder_conf : dict or None
        The Binder configuration; ``None`` means "not configured".

    Returns
    -------
    dict
        The validated (and normalized) configuration.  ``dependencies``
        is normalized to a list and ``notebooks_dir`` is defaulted to
        ``'notebooks'``.

    Raises
    ------
    ValueError
        If the configuration is not a dict, misses required keys,
        contains unknown keys, has a non-http(s) URL, or does not list a
        recognized dependency file.
    """
    # Grab the configuration and return an empty dict if not configured
    binder_conf = {} if binder_conf is None else binder_conf
    if not isinstance(binder_conf, dict):
        raise ValueError('`binder_conf` must be a dictionary or None.')
    if len(binder_conf) == 0:
        return binder_conf

    if binder_conf.get('url') and not binder_conf.get('binderhub_url'):
        logger.warning(
            'Found old BinderHub URL keyword ("url"). Please update your '
            'configuration to use the new keyword ("binderhub_url"). "url" '
            'will be deprecated in sphinx-gallery v0.4')
        # BUG FIX: read the old value from ``binder_conf`` --
        # ``binderhub_conf`` was an undefined name (NameError).  The old
        # key is popped so it does not trip the unknown-key check below.
        binder_conf['binderhub_url'] = binder_conf.pop('url')

    # Ensure all required fields are populated
    req_values = ['binderhub_url', 'org', 'repo', 'branch', 'dependencies']
    optional_values = ['filepath_prefix', 'notebooks_dir', 'use_jupyter_lab']
    missing_values = [val for val in req_values
                      if binder_conf.get(val) is None]
    if missing_values:
        raise ValueError('binder_conf is missing values for: {}'.format(
            missing_values))

    for key in binder_conf:
        if key not in (req_values + optional_values):
            raise ValueError("Unknown Binder config key: {}".format(key))

    # Ensure we have http(s) in the URL
    if not binder_conf['binderhub_url'].startswith(('http://', 'https://')):
        raise ValueError('did not supply a valid url, '
                         'gave binderhub_url: {}'
                         .format(binder_conf['binderhub_url']))

    # Ensure we have at least one of these three dependency files
    required_reqs_files = ['requirements.txt', 'environment.yml',
                           'Dockerfile']
    path_reqs = binder_conf['dependencies']
    # BUG FIX: ``basestring`` only exists on Python 2; fall back to str.
    try:
        string_types = basestring  # noqa  (Python 2)
    except NameError:
        string_types = str  # Python 3
    if isinstance(path_reqs, string_types):
        path_reqs = [path_reqs]
        binder_conf['dependencies'] = path_reqs
    elif not isinstance(path_reqs, (list, tuple)):
        raise ValueError("`dependencies` value should be a list of strings. "
                         "Got type {}.".format(type(path_reqs)))

    binder_conf['notebooks_dir'] = binder_conf.get('notebooks_dir',
                                                   'notebooks')
    path_reqs_filenames = [os.path.basename(ii) for ii in path_reqs]
    if not any(ii in path_reqs_filenames for ii in required_reqs_files):
        raise ValueError(
            'Did not find one of `requirements.txt`, `environment.yml` '
            'or `Dockerfile` in the "dependencies" section of the binder '
            'configuration for sphinx-gallery. A path to at least one of '
            'these files must exist in your Binder dependencies.')
    return binder_conf
python
{ "resource": "" }
q33902
parse_source_file
train
def parse_source_file(filename):
    """Parse source file into AST node.

    Parameters
    ----------
    filename : str
        File path

    Returns
    -------
    node : AST node, or None if the file does not parse
    content : utf-8 decoded string of the file
    """
    # Read as bytes: ast in Python 2.7 cannot parse unicode strings that
    # carry an encoding declaration ("SyntaxError: encoding declaration in
    # Unicode string"), and gallery sources declare their encoding in the
    # header.  Minimal failing example: ast.parse(u'# -*- coding: utf-8 -*-')
    with open(filename, 'rb') as fid:
        raw = fid.read()
    # Normalize Windows line endings to UNIX for uniformity
    raw = raw.replace(b'\r\n', b'\n')
    decoded = raw.decode('utf-8')
    try:
        return ast.parse(raw), decoded
    except SyntaxError:
        return None, decoded
python
{ "resource": "" }
q33903
get_docstring_and_rest
train
def get_docstring_and_rest(filename):
    """Separate ``filename`` content between docstring and the rest.

    Strongly inspired from ast.get_docstring.

    Returns
    -------
    docstring : str
        docstring of ``filename``
    rest : str
        ``filename`` content without the docstring
    lineno : int
        line number on which ``rest`` starts (1-based)
    """
    node, content = parse_source_file(filename)

    # Files with a syntax error get a placeholder docstring and keep their
    # full content as "rest".
    if node is None:
        return SYNTAX_ERROR_DOCSTRING, content, 1

    if not isinstance(node, ast.Module):
        raise TypeError("This function only supports modules. "
                        "You provided {0}".format(node.__class__.__name__))
    if not (node.body and isinstance(node.body[0], ast.Expr) and
            isinstance(node.body[0].value, ast.Str)):
        raise ValueError(('Could not find docstring in file "{0}". '
                          'A docstring is required by sphinx-gallery '
                          'unless the file is ignored by "ignore_pattern"')
                         .format(filename))

    if LooseVersion(sys.version) >= LooseVersion('3.7'):
        docstring = ast.get_docstring(node)
        assert docstring is not None  # should be guaranteed above
        # This is just for backward compat: ast.get_docstring on 3.7+
        # strips a leading newline, the older branch below did not.
        if len(node.body[0].value.s) and node.body[0].value.s[0] == '\n':
            # just for strict backward compat here
            docstring = '\n' + docstring
        ts = tokenize.tokenize(BytesIO(content.encode()).readline)
        # find the first string according to the tokenizer and get its end
        # row; 3 == tokenize.STRING
        for tk in ts:
            if tk.exact_type == 3:
                lineno, _ = tk.end
                break
        else:
            lineno = 0
    else:
        # this block can be removed when python 3.6 support is dropped
        docstring_node = node.body[0]
        docstring = docstring_node.value.s
        # python2.7: Code was read in bytes needs decoding to utf-8
        # unless future unicode_literals is imported in source which
        # make ast output unicode strings
        if hasattr(docstring, 'decode') and not isinstance(docstring, unicode):
            docstring = docstring.decode('utf-8')
        lineno = docstring_node.lineno  # The last line of the string.

    # This get the content of the file after the docstring last line
    # Note: 'maxsplit' argument is not a keyword argument in python2
    rest = '\n'.join(content.split('\n')[lineno:])
    lineno += 1
    return docstring, rest, lineno
python
{ "resource": "" }
q33904
extract_file_config
train
def extract_file_config(content):
    """Pull out the file-specific config specified in the docstring.

    Options are comment lines of the form
    ``# sphinx_gallery_<name> = <value>`` anywhere in ``content``.
    """
    prop_pat = re.compile(
        r"^\s*#\s*sphinx_gallery_([A-Za-z0-9_]+)\s*=\s*(.+)\s*$",
        re.MULTILINE)
    file_conf = {}
    for match in prop_pat.finditer(content):
        name, raw_value = match.groups()
        try:
            parsed = ast.literal_eval(raw_value)
        except (SyntaxError, ValueError):
            # Keep going: a single bad option should not abort the build.
            logger.warning(
                'Sphinx-gallery option %s was passed invalid value %s',
                name, raw_value)
        else:
            file_conf[name] = parsed
    return file_conf
python
{ "resource": "" }
q33905
split_code_and_text_blocks
train
def split_code_and_text_blocks(source_file):
    """Return list with source file separated into code and text blocks.

    Returns
    -------
    file_conf : dict
        File-specific settings given in source file comments as:
        ``# sphinx_gallery_<name> = <value>``
    blocks : list (label, content, line_number)
        List where each element is a tuple with the label ('text' or 'code'),
        the corresponding content string of block and the leading line number
    """
    docstring, rest_of_content, lineno = get_docstring_and_rest(source_file)
    # The module docstring is always the first text block, at line 1.
    blocks = [('text', docstring, 1)]

    file_conf = extract_file_config(rest_of_content)

    # A text block is a "header line" of 20+ '#' characters followed by any
    # number of consecutive comment lines.
    pattern = re.compile(
        r'(?P<header_line>^#{20,}.*)\s(?P<text_content>(?:^#.*\s)*)',
        flags=re.M)
    sub_pat = re.compile('^#', flags=re.M)

    pos_so_far = 0
    for match in re.finditer(pattern, rest_of_content):
        # Everything between the previous match and this header is code.
        code_block_content = rest_of_content[pos_so_far:match.start()]
        if code_block_content.strip():
            blocks.append(('code', code_block_content, lineno))
        lineno += code_block_content.count('\n')
        lineno += 1  # Ignored header line of hashes.
        text_content = match.group('text_content')
        # Strip the leading '#' from every comment line and dedent.
        text_block_content = dedent(re.sub(sub_pat, '', text_content)).lstrip()
        if text_block_content.strip():
            blocks.append(('text', text_block_content, lineno))
        lineno += text_content.count('\n')
        pos_so_far = match.end()

    # Trailing code after the last text block.
    remaining_content = rest_of_content[pos_so_far:]
    if remaining_content.strip():
        blocks.append(('code', remaining_content, lineno))

    return file_conf, blocks
python
{ "resource": "" }
q33906
parse_config
train
def parse_config(app):
    """Process the Sphinx Gallery configuration."""
    # ``plot_gallery`` may arrive as a string (e.g. set via ``-D`` on the
    # command line), in which case it is evaluated; a non-string value
    # raises TypeError and is simply coerced to bool.
    try:
        plot_gallery = eval(app.builder.config.plot_gallery)
    except TypeError:
        plot_gallery = bool(app.builder.config.plot_gallery)
    builder_config = app.builder.config
    gallery_conf = _complete_gallery_conf(
        app.config.sphinx_gallery_conf, app.builder.srcdir, plot_gallery,
        builder_config.abort_on_example_error,
        builder_config.highlight_language, app.builder.name)
    # Store the completed config back so other event handlers can use it.
    app.config.sphinx_gallery_conf = gallery_conf
    app.config.html_static_path.append(glr_path_static())
    return gallery_conf
python
{ "resource": "" }
q33907
get_subsections
train
def get_subsections(srcdir, examples_dir, sortkey):
    """Return the list of subsections of a gallery.

    Parameters
    ----------
    srcdir : str
        absolute path to directory containing conf.py
    examples_dir : str
        path to the examples directory relative to conf.py
    sortkey : callable
        The sort key to use.

    Returns
    -------
    out : list
        sorted list of gallery subsection folder names
    """
    # A subsection is any child folder that carries a README.txt.
    subfolders = [
        name for name in os.listdir(examples_dir)
        if os.path.exists(os.path.join(examples_dir, name, 'README.txt'))]
    base = os.path.relpath(examples_dir, srcdir)
    with_path = [os.path.join(base, name) for name in subfolders]
    # Sort the relative paths with the caller's key, then map each sorted
    # path back to its bare folder name.
    return [subfolders[with_path.index(item)]
            for item in sorted(with_path, key=sortkey)]
python
{ "resource": "" }
q33908
_prepare_sphx_glr_dirs
train
def _prepare_sphx_glr_dirs(gallery_conf, srcdir): """Creates necessary folders for sphinx_gallery files """ examples_dirs = gallery_conf['examples_dirs'] gallery_dirs = gallery_conf['gallery_dirs'] if not isinstance(examples_dirs, list): examples_dirs = [examples_dirs] if not isinstance(gallery_dirs, list): gallery_dirs = [gallery_dirs] if bool(gallery_conf['backreferences_dir']): backreferences_dir = os.path.join( srcdir, gallery_conf['backreferences_dir']) if not os.path.exists(backreferences_dir): os.makedirs(backreferences_dir) return list(zip(examples_dirs, gallery_dirs))
python
{ "resource": "" }
q33909
generate_gallery_rst
train
def generate_gallery_rst(app):
    """Generate the Main examples gallery reStructuredText

    Start the sphinx-gallery configuration and recursively scan the examples
    directories in order to populate the examples gallery
    """
    logger.info('generating gallery...', color='white')
    gallery_conf = parse_config(app)

    seen_backrefs = set()
    computation_times = []

    workdirs = _prepare_sphx_glr_dirs(gallery_conf, app.builder.srcdir)

    # Check for duplicate filenames to make sure linking works as expected
    examples_dirs = [ex_dir for ex_dir, _ in workdirs]
    files = collect_gallery_files(examples_dirs)
    check_duplicate_filenames(files)

    for examples_dir, gallery_dir in workdirs:
        examples_dir = os.path.join(app.builder.srcdir, examples_dir)
        gallery_dir = os.path.join(app.builder.srcdir, gallery_dir)

        if not os.path.exists(os.path.join(examples_dir, 'README.txt')):
            raise FileNotFoundError("Main example directory {0} does not "
                                    "have a README.txt file. Please write "
                                    "one to introduce your gallery."
                                    .format(examples_dir))

        # Here we don't use an os.walk, but we recurse only twice: flat is
        # better than nested.
        this_fhindex, this_computation_times = generate_dir_rst(
            examples_dir, gallery_dir, gallery_conf, seen_backrefs)
        computation_times += this_computation_times
        write_computation_times(gallery_conf, gallery_dir,
                                this_computation_times)

        # we create an index.rst with all examples
        index_rst_new = os.path.join(gallery_dir, 'index.rst.new')
        with codecs.open(index_rst_new, 'w', encoding='utf-8') as fhindex:
            # :orphan: to suppress "not included in TOCTREE" sphinx warnings
            fhindex.write(":orphan:\n\n" + this_fhindex)
            for subsection in get_subsections(
                    app.builder.srcdir, examples_dir,
                    gallery_conf['subsection_order']):
                src_dir = os.path.join(examples_dir, subsection)
                target_dir = os.path.join(gallery_dir, subsection)
                this_fhindex, this_computation_times = \
                    generate_dir_rst(src_dir, target_dir, gallery_conf,
                                     seen_backrefs)
                fhindex.write(this_fhindex)
                computation_times += this_computation_times
                write_computation_times(gallery_conf, target_dir,
                                        this_computation_times)

            if gallery_conf['download_all_examples']:
                download_fhindex = generate_zipfiles(gallery_dir)
                fhindex.write(download_fhindex)

            fhindex.write(SPHX_GLR_SIG)
        # Only overwrite index.rst when its content actually changed.
        _replace_md5(index_rst_new)

    finalize_backreferences(seen_backrefs, gallery_conf)

    if gallery_conf['plot_gallery']:
        logger.info("computation time summary:", color='white')
        for time_elapsed, fname in sorted(computation_times, reverse=True):
            fname = os.path.relpath(fname,
                                    os.path.normpath(gallery_conf['src_dir']))
            if time_elapsed is not None:
                # Only report scripts that took a noticeable time to run.
                if time_elapsed >= gallery_conf['min_reported_time']:
                    logger.info(" - %s: %.2g sec", fname, time_elapsed)
            else:
                logger.info(" - %s: not run", fname)
    # Also create a junit.xml file, useful e.g. on CircleCI
    # NOTE(review): nesting of this call was inferred from a flattened
    # source dump -- confirm it runs unconditionally after the summary.
    write_junit_xml(gallery_conf, app.builder.outdir, computation_times)
python
{ "resource": "" }
q33910
_sec_to_readable
train
def _sec_to_readable(t): """Convert a number of seconds to a more readable representation.""" # This will only work for < 1 day execution time # And we reserve 2 digits for minutes because presumably # there aren't many > 99 minute scripts, but occasionally some # > 9 minute ones t = datetime(1, 1, 1) + timedelta(seconds=t) t = '{0:02d}:{1:02d}.{2:03d}'.format( t.hour * 60 + t.minute, t.second, int(round(t.microsecond / 1000.))) return t
python
{ "resource": "" }
q33911
touch_empty_backreferences
train
def touch_empty_backreferences(app, what, name, obj, options, lines):
    """Generate empty back-reference example files.

    This avoids inclusion errors/warnings if there are no gallery
    examples for a class / module that is being parsed by autodoc.
    """
    backrefs_dir = app.config.sphinx_gallery_conf['backreferences_dir']
    if not bool(backrefs_dir):
        return

    examples_path = os.path.join(app.srcdir, backrefs_dir,
                                 "%s.examples" % name)
    if not os.path.exists(examples_path):
        # touch file
        open(examples_path, 'w').close()
python
{ "resource": "" }
q33912
_parse_failures
train
def _parse_failures(gallery_conf): """Split the failures.""" failing_examples = set(gallery_conf['failing_examples'].keys()) expected_failing_examples = set( os.path.normpath(os.path.join(gallery_conf['src_dir'], path)) for path in gallery_conf['expected_failing_examples']) failing_as_expected = failing_examples.intersection( expected_failing_examples) failing_unexpectedly = failing_examples.difference( expected_failing_examples) passing_unexpectedly = expected_failing_examples.difference( failing_examples) # filter from examples actually run passing_unexpectedly = [ src_file for src_file in passing_unexpectedly if re.search(gallery_conf.get('filename_pattern'), src_file)] return failing_as_expected, failing_unexpectedly, passing_unexpectedly
python
{ "resource": "" }
q33913
summarize_failing_examples
train
def summarize_failing_examples(app, exception):
    """Collect the list of failing examples and print their tracebacks.

    Raises ValueError if there were failing examples.
    """
    if exception is not None:
        return

    # Under no-plot Examples are not run so nothing to summarize
    if not app.config.sphinx_gallery_conf['plot_gallery']:
        logger.info('Sphinx-gallery gallery_conf["plot_gallery"] was '
                    'False, so no examples were executed.', color='brown')
        return

    gallery_conf = app.config.sphinx_gallery_conf
    failing_as_expected, failing_unexpectedly, passing_unexpectedly = \
        _parse_failures(gallery_conf)

    if failing_as_expected:
        logger.info("Examples failing as expected:", color='brown')
        for fail_example in failing_as_expected:
            logger.info('%s failed leaving traceback:', fail_example,
                        color='brown')
            logger.info(gallery_conf['failing_examples'][fail_example],
                        color='brown')

    fail_msgs = []
    if failing_unexpectedly:
        fail_msgs.append(red("Unexpected failing examples:"))
        for fail_example in failing_unexpectedly:
            fail_msgs.append(fail_example + ' failed leaving traceback:\n' +
                             gallery_conf['failing_examples'][fail_example] +
                             '\n')

    if passing_unexpectedly:
        # BUG FIX: a '+' was missing before the final join, which turned
        # the whole message into the *separator* of str.join and produced
        # garbled output.
        fail_msgs.append(red("Examples expected to fail, but not failing:\n") +
                         "Please remove these examples from\n" +
                         "sphinx_gallery_conf['expected_failing_examples']\n" +
                         "in your conf.py file\n" +
                         "\n".join(passing_unexpectedly))

    # standard message
    n_good = len(gallery_conf['passing_examples'])
    n_tot = len(gallery_conf['failing_examples']) + n_good
    n_stale = len(gallery_conf['stale_examples'])
    logger.info('\nSphinx-gallery successfully executed %d out of %d '
                'file%s subselected by:\n\n'
                ' gallery_conf["filename_pattern"] = %r\n'
                ' gallery_conf["ignore_pattern"] = %r\n'
                '\nafter excluding %d file%s that had previously been run '
                '(based on MD5).\n'
                % (n_good, n_tot, 's' if n_tot != 1 else '',
                   gallery_conf['filename_pattern'],
                   gallery_conf['ignore_pattern'],
                   n_stale, 's' if n_stale != 1 else '',
                   ), color='brown')

    if fail_msgs:
        raise ValueError("Here is a summary of the problems encountered "
                         "when running the examples\n\n" +
                         "\n".join(fail_msgs) + "\n" + "-" * 79)
python
{ "resource": "" }
q33914
collect_gallery_files
train
def collect_gallery_files(examples_dirs):
    """Collect python files from the gallery example directories."""
    collected = []
    for example_dir in examples_dirs:
        for root, _dirnames, filenames in os.walk(example_dir):
            collected.extend(os.path.join(root, fname)
                             for fname in filenames
                             if fname.endswith('.py'))
    return collected
python
{ "resource": "" }
q33915
check_duplicate_filenames
train
def check_duplicate_filenames(files):
    """Check for duplicate filenames across gallery directories."""
    # Collect every file whose basename has already been seen.
    used_names = set()
    dup_names = []
    for this_file in files:
        this_fname = os.path.basename(this_file)
        if this_fname in used_names:
            dup_names.append(this_file)
        else:
            used_names.add(this_fname)

    if dup_names:
        logger.warning(
            'Duplicate file name(s) found. Having duplicate file names will '
            'break some links. List of files: {}'.format(sorted(dup_names),))
python
{ "resource": "" }
q33916
setup
train
def setup(app):
    """Setup sphinx-gallery sphinx extension"""
    sphinx_compatibility._app = app

    app.add_config_value('sphinx_gallery_conf', DEFAULT_GALLERY_CONF, 'html')
    for key in ('plot_gallery', 'abort_on_example_error'):
        app.add_config_value(key, get_default_config_value(key), 'html')

    try:
        app.add_css_file('gallery.css')
    except AttributeError:
        # Sphinx < 1.8
        app.add_stylesheet('gallery.css')

    # Sphinx < 1.6 calls it `_extensions`, >= 1.6 is `extensions`.
    attr = '_extensions' if hasattr(app, '_extensions') else 'extensions'
    if 'sphinx.ext.autodoc' in getattr(app, attr):
        app.connect('autodoc-process-docstring', touch_empty_backreferences)

    app.connect('builder-inited', generate_gallery_rst)
    app.connect('build-finished', copy_binder_files)
    app.connect('build-finished', summarize_failing_examples)
    app.connect('build-finished', embed_code_links)
    return {'parallel_read_safe': True,
            'parallel_write_safe': False,
            'version': _sg_version}
python
{ "resource": "" }
q33917
replace_py_ipynb
train
def replace_py_ipynb(fname):
    """Replace .py extension in filename by .ipynb"""
    prefix, extension = os.path.splitext(fname)
    allowed_extension = '.py'
    if extension != allowed_extension:
        raise ValueError(
            "Unrecognized file extension, expected %s, got %s"
            % (allowed_extension, extension))
    return prefix + '.ipynb'
python
{ "resource": "" }
q33918
get_md5sum
train
def get_md5sum(src_file):
    """Returns md5sum of file"""
    with open(src_file, 'rb') as fh:
        return hashlib.md5(fh.read()).hexdigest()
python
{ "resource": "" }
q33919
ZabbixResponse.parse
train
def parse(self, response):
    """Parse zabbix response.

    Extracts the processed/failed/total/seconds counters from the
    server's ``info`` string and accumulates them on this instance.
    """
    info = response.get('info')
    match = self._regex.search(info)
    self._processed += int(match.group(1))
    self._failed += int(match.group(2))
    self._total += int(match.group(3))
    self._time += Decimal(match.group(4))
    self._chunk += 1
python
{ "resource": "" }
q33920
ZabbixSender._load_from_config
train
def _load_from_config(self, config_file):
    """Load zabbix server IP address and port from zabbix agent config file.

    If ServerActive variable is not found in the file, it will
    use the default: 127.0.0.1:10051

    :type config_file: str
    :param use_config: Path to zabbix_agentd.conf file to load settings
        from. If value is `True` then default config path will used:
        /etc/zabbix/zabbix_agentd.conf
    """
    # ``True`` (a bool) means "use the default agent config path".
    if config_file and isinstance(config_file, bool):
        config_file = '/etc/zabbix/zabbix_agentd.conf'

    logger.debug("Used config: %s", config_file)

    # This is a workaround for config files without sections: prepend a
    # synthetic [root] section so configparser accepts the file.
    with open(config_file, 'r') as f:
        config_file_data = "[root]\n" + f.read()

    params = {}
    try:
        # python2
        # NOTE(review): inspect.getargspec was removed in Python 3.11,
        # where this raises AttributeError (not caught here) -- confirm
        # the supported Python versions.
        args = inspect.getargspec(
            configparser.RawConfigParser.__init__).args
    except ValueError:
        # python3 (getargspec raises ValueError on keyword-only args)
        args = inspect.getfullargspec(
            configparser.RawConfigParser.__init__).kwonlyargs

    # Only pass strict=True when this configparser supports it.
    if 'strict' in args:
        params['strict'] = True

    config_file_fp = StringIO(config_file_data)
    config = configparser.RawConfigParser(**params)
    # NOTE(review): readfp() is deprecated since Python 3.2 and removed
    # in 3.12; read_file() is the modern spelling -- confirm targets.
    config.readfp(config_file_fp)

    # Prefer ServerActive, then try Server and fallback to defaults
    if config.has_option('root', 'ServerActive'):
        zabbix_serveractives = config.get('root', 'ServerActive')
    elif config.has_option('root', 'Server'):
        zabbix_serveractives = config.get('root', 'Server')
    else:
        zabbix_serveractives = '127.0.0.1:10051'

    result = []
    # ServerActive may be a comma-separated list of host[:port] entries;
    # entries without a port get the default 10051.
    for serverport in zabbix_serveractives.split(','):
        if ':' not in serverport:
            serverport = "%s:%s" % (serverport.strip(), 10051)
        server, port = serverport.split(':')
        serverport = (server, int(port))
        result.append(serverport)

    logger.debug("Loaded params: %s", result)
    return result
python
{ "resource": "" }
q33921
ZabbixSender._receive
train
def _receive(self, sock, count): """Reads socket to receive data from zabbix server. :type socket: :class:`socket._socketobject` :param socket: Socket to read. :type count: int :param count: Number of bytes to read from socket. """ buf = b'' while len(buf) < count: chunk = sock.recv(count - len(buf)) if not chunk: break buf += chunk return buf
python
{ "resource": "" }
q33922
ZabbixSender._create_messages
train
def _create_messages(self, metrics):
    """Create a list of zabbix messages from a list of ZabbixMetrics.

    :type metrics_array: list
    :param metrics_array: List of :class:`zabbix.sender.ZabbixMetric`.

    :rtype: list
    :return: List of zabbix messages.
    """
    # Each metric serializes itself via __str__.
    messages = [str(metric) for metric in metrics]
    logger.debug('Messages: %s', messages)
    return messages
python
{ "resource": "" }
q33923
ZabbixSender._create_request
train
def _create_request(self, messages):
    """Create a formatted request to zabbix from a list of messages.

    :type messages: list
    :param messages: List of zabbix messages

    :rtype: list
    :return: Formatted zabbix request (utf-8 encoded bytes)
    """
    joined = ','.join(messages)
    request = '{{"request":"sender data","data":[{msg}]}}'.format(msg=joined)
    request = request.encode("utf-8")
    logger.debug('Request: %s', request)
    return request
python
{ "resource": "" }
q33924
ZabbixSender._create_packet
train
def _create_packet(self, request):
    """Create a formatted packet from a request.

    Prepends the Zabbix protocol header (``ZBXD\\x01``) and the
    little-endian 64-bit payload length.

    :type request: str
    :param request: Formatted zabbix request

    :rtype: str
    :return: Data packet for zabbix
    """
    data_len = struct.pack('<Q', len(request))
    packet = b'ZBXD\x01' + data_len + request

    def _as_int(ch):
        # Iterating bytes yields str on Python 2 and int on Python 3.
        return ch if isinstance(ch, int) else ord(ch)

    logger.debug('Packet [str]: %s', packet)
    logger.debug('Packet [hex]: %s',
                 ':'.join(hex(_as_int(ch))[2:] for ch in packet))
    return packet
python
{ "resource": "" }
q33925
ZabbixSender._get_response
train
def _get_response(self, connection):
    """Get response from zabbix server, reads from self.socket.

    :type connection: :class:`socket._socketobject`
    :param connection: Socket to read.

    :rtype: dict
    :return: Response from zabbix server or False in case of error.
    """
    response_header = self._receive(connection, 13)
    logger.debug('Response header: %s', response_header)

    if (not response_header.startswith(b'ZBXD\x01')
            or len(response_header) != 13):
        logger.debug('Zabbix return not valid response.')
        result = False
    else:
        response_len = struct.unpack('<Q', response_header[5:])[0]
        # BUG FIX: a single recv() may return fewer bytes than requested;
        # use _receive() to loop until the whole body has arrived.
        response_body = self._receive(connection, response_len)
        result = json.loads(response_body.decode("utf-8"))
        logger.debug('Data received: %s', result)

    try:
        connection.close()
    except Exception:
        # Best-effort close: the peer may already have closed the socket.
        pass

    return result
python
{ "resource": "" }
q33926
ZabbixSender._chunk_send
train
def _chunk_send(self, metrics):
    """Send one chunk of metrics to the zabbix server.

    :type metrics: list
    :param metrics: List of :class:`zabbix.sender.ZabbixMetric` to send
        to Zabbix

    :rtype: str
    :return: Response from Zabbix Server
    """
    messages = self._create_messages(metrics)
    request = self._create_request(messages)
    packet = self._create_packet(request)

    for host_addr in self.zabbix_uri:
        logger.debug('Sending data to %s', host_addr)

        # create socket object
        connection_ = socket.socket()
        if self.socket_wrapper:
            connection = self.socket_wrapper(connection_)
        else:
            connection = connection_

        connection.settimeout(self.timeout)

        try:
            # server and port must be tuple
            connection.connect(host_addr)
            connection.sendall(packet)
        except socket.timeout:
            # BUG FIX: the message lacked a space between "after" and the
            # number of seconds.
            logger.error('Sending failed: Connection to %s timed out '
                         'after %d seconds', host_addr, self.timeout)
            connection.close()
            raise socket.timeout
        except Exception as err:
            # In case of error we should close connection, otherwise
            # we will close it after data will be received.
            # BUG FIX: logger.warn is deprecated; use logger.warning.
            logger.warning('Sending failed: %s',
                           getattr(err, 'msg', str(err)))
            connection.close()
            raise Exception(err)

        response = self._get_response(connection)
        logger.debug('%s response: %s', host_addr, response)

        if response and response.get('response') != 'success':
            # BUG FIX: stray '}' removed from the log format string.
            logger.debug('Response error: %s', response)
            raise Exception(response)
    return response
python
{ "resource": "" }
q33927
ZabbixSender.send
train
def send(self, metrics):
    """Send the metrics to zabbix server.

    :type metrics: list
    :param metrics: List of :class:`zabbix.sender.ZabbixMetric` to send
        to Zabbix

    :rtype: :class:`pyzabbix.sender.ZabbixResponse`
    :return: Parsed response from Zabbix Server
    """
    result = ZabbixResponse()
    chunk = self.chunk_size
    # Send metrics in chunks and accumulate the parsed server replies.
    for start in range(0, len(metrics), chunk):
        result.parse(self._chunk_send(metrics[start:start + chunk]))
    return result
python
{ "resource": "" }
q33928
ZabbixAPI._login
train
def _login(self, user='', password=''):
    """Do login to zabbix server.

    :type user: str
    :param user: Zabbix user

    :type password: str
    :param password: Zabbix user password
    """
    # SECURITY FIX: never log credentials in plaintext; only the user
    # name is logged, the password is masked.
    logger.debug("ZabbixAPI.login(%s,***)", user)
    self.auth = None

    if self.use_authenticate:
        self.auth = self.user.authenticate(user=user, password=password)
    else:
        self.auth = self.user.login(user=user, password=password)
python
{ "resource": "" }
q33929
ZabbixAPI._logout
train
def _logout(self):
    """Do logout from zabbix server."""
    # Nothing to do when there is no active session.
    if not self.auth:
        return
    logger.debug("ZabbixAPI.logout()")
    if self.user.logout():
        self.auth = None
python
{ "resource": "" }
q33930
ZabbixAPI.do_request
train
def do_request(self, method, params=None):
    """Make request to Zabbix API.

    :type method: str
    :param method: ZabbixAPI method, like: `apiinfo.version`.

    :type params: str
    :param params: ZabbixAPI method arguments.

    :raises ZabbixAPIException: if the response is not valid JSON or the
        server returned an error object.

    >>> from pyzabbix import ZabbixAPI
    >>> z = ZabbixAPI()
    >>> apiinfo = z.do_request('apiinfo.version')
    """
    request_json = {
        'jsonrpc': '2.0',
        'method': method,
        'params': params or {},
        'id': '1',
    }

    # apiinfo.version and user.login doesn't require auth token
    if self.auth and (method not in ('apiinfo.version', 'user.login')):
        request_json['auth'] = self.auth

    logger.debug(
        'urllib2.Request({0}, {1})'.format(
            self.url, json.dumps(request_json)))

    data = json.dumps(request_json)
    if not isinstance(data, bytes):
        data = data.encode("utf-8")

    req = urllib2.Request(self.url, data)
    req.get_method = lambda: 'POST'
    req.add_header('Content-Type', 'application/json-rpc')

    try:
        res = urlopen(req)
        res_str = res.read().decode('utf-8')
        res_json = json.loads(res_str)
    except ValueError as e:
        # BUG FIX: Exception.message does not exist on Python 3
        # (AttributeError inside the handler); use str() instead.
        raise ZabbixAPIException("Unable to parse json: %s" % str(e))

    res_str = json.dumps(res_json, indent=4, separators=(',', ': '))
    logger.debug("Response Body: %s", res_str)

    if 'error' in res_json:
        err = res_json['error'].copy()
        err.update({'json': str(request_json)})
        raise ZabbixAPIException(err)

    return res_json
python
{ "resource": "" }
q33931
ZabbixAPI.get_id
train
def get_id(self, item_type, item=None, with_id=False, hostid=None, **args):
    """Return id or ids of zabbix objects.

    :type item_type: str
    :param item_type: Type of zabbix object. (eg host, item etc.)

    :type item: str
    :param item: Name of zabbix object. If it is `None`, return list of
        all objects in the scope.

    :type with_id: bool
    :param with_id: Returned values will be in zabbix json `id` format.
        Example: `{'itemid: 128}`

    :type name: bool
    :param name: Return name instead of id.

    :type hostid: int
    :param hostid: Filter objects by specific hostid.

    :type templateids: int
    :param tempateids: Filter objects which only belong to specific
        templates by template id.

    :type app_name: str
    :param app_name: Filter object which only belong to specific
        application.

    :rtype: int or list
    :return: Return single `id`, `name` or list of values.
    """
    result = None
    name = args.get('name', False)

    type_ = '{item_type}.get'.format(item_type=item_type)

    # Object types whose "name" lives under a different filter key.
    item_filter_name = {
        'mediatype': 'description',
        'trigger': 'description',
        'triggerprototype': 'description',
        'user': 'alias',
        'usermacro': 'macro',
    }

    # Object types whose id field is not simply '<item_type>id'.
    item_id_name = {
        'discoveryrule': 'item',
        'graphprototype': 'graph',
        'hostgroup': 'group',
        'itemprototype': 'item',
        'map': 'selement',
        'triggerprototype': 'trigger',
        'usergroup': 'usrgrp',
        'usermacro': 'hostmacro',
    }

    filter_ = {
        'filter': {
            item_filter_name.get(item_type, 'name'): item,
        },
        'output': 'extend'}

    if hostid:
        filter_['filter'].update({'hostid': hostid})

    if args.get('templateids'):
        # usermacro lookups use 'hostids' even for templates.
        if item_type == 'usermacro':
            filter_['hostids'] = args['templateids']
        else:
            filter_['templateids'] = args['templateids']

    if args.get('app_name'):
        filter_['application'] = args['app_name']

    logger.debug(
        'do_request( "{type}", {filter} )'.format(
            type=type_, filter=filter_))
    response = self.do_request(type_, filter_)['result']

    if response:
        item_id_str = item_id_name.get(item_type, item_type)
        item_id = '{item}id'.format(item=item_id_str)
        result = []
        for obj in response:
            # Check if object not belong current template
            if args.get('templateids'):
                if (not obj.get('templateid') in ("0", None) or
                        not len(obj.get('templateids', [])) == 0):
                    continue
            if name:
                o = obj.get(item_filter_name.get(item_type, 'name'))
                result.append(o)
            elif with_id:
                result.append({item_id: int(obj.get(item_id))})
            else:
                result.append(int(obj.get(item_id)))

        # A scalar ``item`` query returns a single value, not a list.
        list_types = (list, type(None))
        if not isinstance(item, list_types):
            result = result[0]

    return result
python
{ "resource": "" }
q33932
generate_optimized_y_move_down_x_SOL
train
def generate_optimized_y_move_down_x_SOL(y_dist): """ move down y_dist, set x=0 """ # Optimization to move N lines and go to SOL in one command. Note that some terminals # may not support this so we might have to remove this optimization or make it optional # if that winds up mattering for terminals we care about. If we had to remove we'd # want to rework things such that we used "\x1b[{0}B" but also we would want to change # our interface to this function so we didn't guarantee x=0 since caller might ultimate # want it in a different place and we don't want to output two x moves. Could pass in # desired x, or return current x from here. string = "\x1b[{0}E".format(y_dist) # ANSI code to move down N lines and move x to SOL # Would a sequence of 1 or more \n chars be cheaper? If so we'll output that instead if y_dist < len(string): string = '\n' * y_dist return string
python
{ "resource": "" }
q33933
GitRepository.get_head
train
def get_head(self) -> Commit: """ Get the head commit. :return: Commit of the head commit """ head_commit = self.repo.head.commit return Commit(head_commit, self.path, self.main_branch)
python
{ "resource": "" }
q33934
GitRepository.get_list_commits
train
def get_list_commits(self, branch: str = None, reverse_order: bool = True) \ -> Generator[Commit, None, None]: """ Return a generator of commits of all the commits in the repo. :return: Generator[Commit], the generator of all the commits in the repo """ for commit in self.repo.iter_commits(branch, reverse=reverse_order): yield self.get_commit_from_gitpython(commit)
python
{ "resource": "" }
q33935
GitRepository.get_commit
train
def get_commit(self, commit_id: str) -> Commit: """ Get the specified commit. :param str commit_id: hash of the commit to analyze :return: Commit """ return Commit(self.repo.commit(commit_id), self.path, self.main_branch)
python
{ "resource": "" }
q33936
GitRepository.get_commit_from_gitpython
train
def get_commit_from_gitpython(self, commit: GitCommit) -> Commit: """ Build a PyDriller commit object from a GitPython commit object. This is internal of PyDriller, I don't think users generally will need it. :param GitCommit commit: GitPython commit :return: Commit commit: PyDriller commit """ return Commit(commit, self.path, self.main_branch)
python
{ "resource": "" }
q33937
GitRepository.get_commit_from_tag
train
def get_commit_from_tag(self, tag: str) -> Commit: """ Obtain the tagged commit. :param str tag: the tag :return: Commit commit: the commit the tag referred to """ try: selected_tag = self.repo.tags[tag] return self.get_commit(selected_tag.commit.hexsha) except (IndexError, AttributeError): logger.debug('Tag %s not found', tag) raise
python
{ "resource": "" }
q33938
Modification.added
train
def added(self) -> int: """ Return the total number of added lines in the file. :return: int lines_added """ added = 0 for line in self.diff.replace('\r', '').split("\n"): if line.startswith('+') and not line.startswith('+++'): added += 1 return added
python
{ "resource": "" }
q33939
Modification.removed
train
def removed(self): """ Return the total number of deleted lines in the file. :return: int lines_deleted """ removed = 0 for line in self.diff.replace('\r', '').split("\n"): if line.startswith('-') and not line.startswith('---'): removed += 1 return removed
python
{ "resource": "" }
q33940
Commit.author
train
def author(self) -> Developer: """ Return the author of the commit as a Developer object. :return: author """ return Developer(self._c_object.author.name, self._c_object.author.email)
python
{ "resource": "" }
q33941
Commit.committer
train
def committer(self) -> Developer: """ Return the committer of the commit as a Developer object. :return: committer """ return Developer(self._c_object.committer.name, self._c_object.committer.email)
python
{ "resource": "" }
q33942
Commit.parents
train
def parents(self) -> List[str]: """ Return the list of parents SHAs. :return: List[str] parents """ parents = [] for p in self._c_object.parents: parents.append(p.hexsha) return parents
python
{ "resource": "" }
q33943
Commit.modifications
train
def modifications(self) -> List[Modification]: """ Return a list of modified files. :return: List[Modification] modifications """ if self._modifications is None: self._modifications = self._get_modifications() return self._modifications
python
{ "resource": "" }
q33944
Commit.branches
train
def branches(self) -> Set[str]: """ Return the set of branches that contain the commit. :return: set(str) branches """ if self._branches is None: self._branches = self._get_branches() return self._branches
python
{ "resource": "" }
q33945
_lex_file_object
train
def _lex_file_object(file_obj):
    """
    Generates token tuples from an nginx config file object

    Yields 3-tuples like (token, lineno, quoted)
    """
    token = ''  # the token buffer
    token_line = 0  # the line the token starts on
    next_token_is_directive = True

    it = itertools.chain.from_iterable(file_obj)
    it = _iterescape(it)  # treat escaped characters differently
    it = _iterlinecount(it)  # count the number of newline characters

    for char, line in it:
        # whitespace terminates the current token (if any)
        if char.isspace():
            if token:
                yield (token, token_line, False)
                # a directive with a custom lexer consumes its own arguments
                if next_token_is_directive and token in EXTERNAL_LEXERS:
                    for custom_lexer_token in EXTERNAL_LEXERS[token](it, token):
                        yield custom_lexer_token
                    next_token_is_directive = True
                else:
                    next_token_is_directive = False
                token = ''

            # skip over the rest of the whitespace run
            while char.isspace():
                char, line = next(it)

        # a '#' outside of a token starts a comment that runs to end of line
        if not token and char == '#':
            while not char.endswith('\n'):
                token = token + char
                char, _ = next(it)
            yield (token, line, False)
            token = ''
            continue

        if not token:
            token_line = line

        # handle parameter expansion syntax (ex: "${var[@]}")
        if token and token[-1] == '$' and char == '{':
            next_token_is_directive = False
            while token[-1] != '}' and not char.isspace():
                token += char
                char, line = next(it)

        # an opening quote starts a quoted token; consume through the close quote
        if char in ('"', "'"):
            # a quote inside a token is treated like any other character
            if token:
                token += char
                continue

            quote = char
            char, line = next(it)
            while char != quote:
                # collapse an escaped quote down to the bare quote character
                token += quote if char == '\\' + quote else char
                char, line = next(it)

            yield (token, token_line, True)  # True because this is in quotes

            # handle quoted external directives
            if next_token_is_directive and token in EXTERNAL_LEXERS:
                for custom_lexer_token in EXTERNAL_LEXERS[token](it, token):
                    yield custom_lexer_token
                next_token_is_directive = True
            else:
                next_token_is_directive = False

            token = ''
            continue

        # braces and semicolons are single-character tokens in their own right
        if char in ('{', '}', ';'):
            # flush any token accumulated so far
            if token:
                yield (token, token_line, False)
                token = ''

            yield (char, line, False)
            next_token_is_directive = True
            continue

        # ordinary character: append to the token buffer
        token += char
python
{ "resource": "" }
q33946
_balance_braces
train
def _balance_braces(tokens, filename=None): """Raises syntax errors if braces aren't balanced""" depth = 0 for token, line, quoted in tokens: if token == '}' and not quoted: depth -= 1 elif token == '{' and not quoted: depth += 1 # raise error if we ever have more right braces than left if depth < 0: reason = 'unexpected "}"' raise NgxParserSyntaxError(reason, filename, line) else: yield (token, line, quoted) # raise error if we have less right braces than left at EOF if depth > 0: reason = 'unexpected end of file, expecting "}"' raise NgxParserSyntaxError(reason, filename, line)
python
{ "resource": "" }
q33947
lex
train
def lex(filename): """Generates tokens from an nginx config file""" with io.open(filename, mode='r', encoding='utf-8') as f: it = _lex_file_object(f) it = _balance_braces(it, filename) for token, line, quoted in it: yield (token, line, quoted)
python
{ "resource": "" }
q33948
_prepare_if_args
train
def _prepare_if_args(stmt): """Removes parentheses from an "if" directive's arguments""" args = stmt['args'] if args and args[0].startswith('(') and args[-1].endswith(')'): args[0] = args[0][1:].lstrip() args[-1] = args[-1][:-1].rstrip() start = int(not args[0]) end = len(args) - int(not args[-1]) args[:] = args[start:end]
python
{ "resource": "" }
q33949
_combine_parsed_configs
train
def _combine_parsed_configs(old_payload): """ Combines config files into one by using include directives. :param old_payload: payload that's normally returned by parse() :return: the new combined payload """ old_configs = old_payload['config'] def _perform_includes(block): for stmt in block: if 'block' in stmt: stmt['block'] = list(_perform_includes(stmt['block'])) if 'includes' in stmt: for index in stmt['includes']: config = old_configs[index]['parsed'] for stmt in _perform_includes(config): yield stmt else: yield stmt # do not yield include stmt itself combined_config = { 'file': old_configs[0]['file'], 'status': 'ok', 'errors': [], 'parsed': [] } for config in old_configs: combined_config['errors'] += config.get('errors', []) if config.get('status', 'ok') == 'failed': combined_config['status'] = 'failed' first_config = old_configs[0]['parsed'] combined_config['parsed'] += _perform_includes(first_config) combined_payload = { 'status': old_payload.get('status', 'ok'), 'errors': old_payload.get('errors', []), 'config': [combined_config] } return combined_payload
python
{ "resource": "" }
q33950
rmrf
train
def rmrf(items, verbose=True): "Silently remove a list of directories or files" if isinstance(items, str): items = [items] for item in items: if verbose: print("Removing {}".format(item)) shutil.rmtree(item, ignore_errors=True) # rmtree doesn't remove bare files try: os.remove(item) except FileNotFoundError: pass
python
{ "resource": "" }
q33951
docs
train
def docs(context, builder='html'): "Build documentation using sphinx" cmdline = 'python -msphinx -M {} {} {} {}'.format(builder, DOCS_SRCDIR, DOCS_BUILDDIR, SPHINX_OPTS) context.run(cmdline)
python
{ "resource": "" }
q33952
eggs_clean
train
def eggs_clean(context): "Remove egg directories" #pylint: disable=unused-argument dirs = set() dirs.add('.eggs') for name in os.listdir(os.curdir): if name.endswith('.egg-info'): dirs.add(name) if name.endswith('.egg'): dirs.add(name) rmrf(dirs)
python
{ "resource": "" }
q33953
tag
train
def tag(context, name, message=''): "Add a Git tag and push it to origin" # If a tag was provided on the command-line, then add a Git tag and push it to origin if name: context.run('git tag -a {} -m {!r}'.format(name, message)) context.run('git push origin {}'.format(name))
python
{ "resource": "" }
q33954
validatetag
train
def validatetag(context): "Check to make sure that a tag exists for the current HEAD and it looks like a valid version number" # Validate that a Git tag exists for the current commit HEAD result = context.run("git describe --exact-match --tags $(git log -n1 --pretty='%h')") tag = result.stdout.rstrip() # Validate that the Git tag appears to be a valid version number ver_regex = re.compile('(\d+)\.(\d+)\.(\d+)') match = ver_regex.fullmatch(tag) if match is None: print('Tag {!r} does not appear to be a valid version number'.format(tag)) sys.exit(-1) else: print('Tag {!r} appears to be a valid version number'.format(tag))
python
{ "resource": "" }
q33955
AlerterApp._preloop_hook
train
def _preloop_hook(self) -> None: """ Start the alerter thread """ # This runs after cmdloop() acquires self.terminal_lock, which will be locked until the prompt appears. # Therefore this is the best place to start the alerter thread since there is no risk of it alerting # before the prompt is displayed. You can also start it via a command if its not something that should # be running during the entire application. See do_start_alerts(). self._stop_thread = False self._alerter_thread = threading.Thread(name='alerter', target=self._alerter_thread_func) self._alerter_thread.start()
python
{ "resource": "" }
q33956
AlerterApp.do_start_alerts
train
def do_start_alerts(self, _): """ Starts the alerter thread """ if self._alerter_thread.is_alive(): print("The alert thread is already started") else: self._stop_thread = False self._alerter_thread = threading.Thread(name='alerter', target=self._alerter_thread_func) self._alerter_thread.start()
python
{ "resource": "" }
q33957
AlerterApp._alerter_thread_func
train
def _alerter_thread_func(self) -> None: """ Prints alerts and updates the prompt any time the prompt is showing """ self._alert_count = 0 self._next_alert_time = 0 while not self._stop_thread: # Always acquire terminal_lock before printing alerts or updating the prompt # To keep the app responsive, do not block on this call if self.terminal_lock.acquire(blocking=False): # Get any alerts that need to be printed alert_str = self._generate_alert_str() # Generate a new prompt new_prompt = self._generate_colored_prompt() # Check if we have alerts to print if alert_str: # new_prompt is an optional parameter to async_alert() self.async_alert(alert_str, new_prompt) new_title = "Alerts Printed: {}".format(self._alert_count) self.set_window_title(new_title) # No alerts needed to be printed, check if the prompt changed elif new_prompt != self.prompt: self.async_update_prompt(new_prompt) # Don't forget to release the lock self.terminal_lock.release() time.sleep(0.5)
python
{ "resource": "" }
q33958
quote_string_if_needed
train
def quote_string_if_needed(arg: str) -> str: """ Quotes a string if it contains spaces and isn't already quoted """ if is_quoted(arg) or ' ' not in arg: return arg if '"' in arg: quote = "'" else: quote = '"' return quote + arg + quote
python
{ "resource": "" }
q33959
namedtuple_with_defaults
train
def namedtuple_with_defaults(typename: str, field_names: Union[str, List[str]], default_values: collections.Iterable = ()): """ Convenience function for defining a namedtuple with default values From: https://stackoverflow.com/questions/11351032/namedtuple-and-default-values-for-optional-keyword-arguments Examples: >>> Node = namedtuple_with_defaults('Node', 'val left right') >>> Node() Node(val=None, left=None, right=None) >>> Node = namedtuple_with_defaults('Node', 'val left right', [1, 2, 3]) >>> Node() Node(val=1, left=2, right=3) >>> Node = namedtuple_with_defaults('Node', 'val left right', {'right':7}) >>> Node() Node(val=None, left=None, right=7) >>> Node(4) Node(val=4, left=None, right=7) """ T = collections.namedtuple(typename, field_names) # noinspection PyProtectedMember,PyUnresolvedReferences T.__new__.__defaults__ = (None,) * len(T._fields) if isinstance(default_values, collections.Mapping): prototype = T(**default_values) else: prototype = T(*default_values) T.__new__.__defaults__ = tuple(prototype) return T
python
{ "resource": "" }
q33960
cast
train
def cast(current: Any, new: str) -> Any: """Tries to force a new value into the same type as the current when trying to set the value for a parameter. :param current: current value for the parameter, type varies :param new: new value :return: new value with same type as current, or the current value if there was an error casting """ typ = type(current) orig_new = new if typ == bool: try: return bool(int(new)) except (ValueError, TypeError): pass try: new = new.lower() if (new == 'on') or (new[0] in ('y', 't')): return True if (new == 'off') or (new[0] in ('n', 'f')): return False except AttributeError: pass else: try: return typ(new) except (ValueError, TypeError): pass print("Problem setting parameter (now {}) to {}; incorrect type?".format(current, orig_new)) return current
python
{ "resource": "" }
q33961
which
train
def which(editor: str) -> Optional[str]: """Find the full path of a given editor. Return the full path of the given editor, or None if the editor can not be found. :param editor: filename of the editor to check, ie 'notepad.exe' or 'vi' :return: a full path or None """ try: editor_path = subprocess.check_output(['which', editor], stderr=subprocess.STDOUT).strip() editor_path = editor_path.decode() except subprocess.CalledProcessError: editor_path = None return editor_path
python
{ "resource": "" }
q33962
is_text_file
train
def is_text_file(file_path: str) -> bool: """Returns if a file contains only ASCII or UTF-8 encoded text. :param file_path: path to the file being checked :return: True if the file is a text file, False if it is binary. """ import codecs expanded_path = os.path.abspath(os.path.expanduser(file_path.strip())) valid_text_file = False # Check if the file is ASCII try: with codecs.open(expanded_path, encoding='ascii', errors='strict') as f: # Make sure the file has at least one line of text # noinspection PyUnusedLocal if sum(1 for line in f) > 0: valid_text_file = True except OSError: # pragma: no cover pass except UnicodeDecodeError: # The file is not ASCII. Check if it is UTF-8. try: with codecs.open(expanded_path, encoding='utf-8', errors='strict') as f: # Make sure the file has at least one line of text # noinspection PyUnusedLocal if sum(1 for line in f) > 0: valid_text_file = True except OSError: # pragma: no cover pass except UnicodeDecodeError: # Not UTF-8 pass return valid_text_file
python
{ "resource": "" }
q33963
remove_duplicates
train
def remove_duplicates(list_to_prune: List) -> List: """Removes duplicates from a list while preserving order of the items. :param list_to_prune: the list being pruned of duplicates :return: The pruned list """ temp_dict = collections.OrderedDict() for item in list_to_prune: temp_dict[item] = None return list(temp_dict.keys())
python
{ "resource": "" }
q33964
alphabetical_sort
train
def alphabetical_sort(list_to_sort: Iterable[str]) -> List[str]: """Sorts a list of strings alphabetically. For example: ['a1', 'A11', 'A2', 'a22', 'a3'] To sort a list in place, don't call this method, which makes a copy. Instead, do this: my_list.sort(key=norm_fold) :param list_to_sort: the list being sorted :return: the sorted list """ return sorted(list_to_sort, key=norm_fold)
python
{ "resource": "" }
q33965
natural_sort
train
def natural_sort(list_to_sort: Iterable[str]) -> List[str]: """ Sorts a list of strings case insensitively as well as numerically. For example: ['a1', 'A2', 'a3', 'A11', 'a22'] To sort a list in place, don't call this method, which makes a copy. Instead, do this: my_list.sort(key=natural_keys) :param list_to_sort: the list being sorted :return: the list sorted naturally """ return sorted(list_to_sort, key=natural_keys)
python
{ "resource": "" }
q33966
find_editor
train
def find_editor() -> str: """Find a reasonable editor to use by default for the system that the cmd2 application is running on.""" editor = os.environ.get('EDITOR') if not editor: if sys.platform[:3] == 'win': editor = 'notepad' else: # Favor command-line editors first so we don't leave the terminal to edit for editor in ['vim', 'vi', 'emacs', 'nano', 'pico', 'gedit', 'kate', 'subl', 'geany', 'atom']: if which(editor): break return editor
python
{ "resource": "" }
q33967
StdSim.write
train
def write(self, s: str) -> None: """Add str to internal bytes buffer and if echo is True, echo contents to inner stream""" if not isinstance(s, str): raise TypeError('write() argument must be str, not {}'.format(type(s))) if not self.pause_storage: self.buffer.byte_buf += s.encode(encoding=self.encoding, errors=self.errors) if self.echo: self.inner_stream.write(s)
python
{ "resource": "" }
q33968
StdSim.getvalue
train
def getvalue(self) -> str: """Get the internal contents as a str""" return self.buffer.byte_buf.decode(encoding=self.encoding, errors=self.errors)
python
{ "resource": "" }
q33969
ByteBuf.write
train
def write(self, b: bytes) -> None: """Add bytes to internal bytes buffer and if echo is True, echo contents to inner stream.""" if not isinstance(b, bytes): raise TypeError('a bytes-like object is required, not {}'.format(type(b))) if not self.std_sim_instance.pause_storage: self.byte_buf += b if self.std_sim_instance.echo: self.std_sim_instance.inner_stream.buffer.write(b) # Since StdSim wraps TextIO streams, we will flush the stream if line buffering is on # and the bytes being written contain a new line character. This is helpful when StdSim # is being used to capture output of a shell command because it causes the output to print # to the screen more often than if we waited for the stream to flush its buffer. if self.std_sim_instance.line_buffering: if any(newline in b for newline in ByteBuf.NEWLINES): self.std_sim_instance.flush()
python
{ "resource": "" }
q33970
CmdLineApp.add_whitespace_hook
train
def add_whitespace_hook(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData: """A hook to split alphabetic command names immediately followed by a number. l24 -> l 24 list24 -> list 24 list 24 -> list 24 """ command = data.statement.command # regular expression with looks for: # ^ - the beginning of the string # ([^\s\d]+) - one or more non-whitespace non-digit characters, set as capture group 1 # (\d+) - one or more digit characters, set as capture group 2 command_pattern = re.compile(r'^([^\s\d]+)(\d+)') match = command_pattern.search(command) if match: data.statement = self.statement_parser.parse("{} {} {}".format( match.group(1), match.group(2), '' if data.statement.args is None else data.statement.args )) return data
python
{ "resource": "" }
q33971
CmdLineApp.downcase_hook
train
def downcase_hook(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData: """A hook to make uppercase commands lowercase.""" command = data.statement.command.lower() data.statement = self.statement_parser.parse("{} {}".format( command, '' if data.statement.args is None else data.statement.args )) return data
python
{ "resource": "" }
q33972
CmdLineApp.abbrev_hook
train
def abbrev_hook(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData: """Accept unique abbreviated commands""" func = self.cmd_func(data.statement.command) if func is None: # check if the entered command might be an abbreviation possible_cmds = [cmd for cmd in self.get_all_commands() if cmd.startswith(data.statement.command)] if len(possible_cmds) == 1: raw = data.statement.raw.replace(data.statement.command, possible_cmds[0], 1) data.statement = self.statement_parser.parse(raw) return data
python
{ "resource": "" }
q33973
CmdLineApp.do_list
train
def do_list(self, arglist: List[str]) -> None: """Generate a list of 10 numbers.""" if arglist: first = arglist[0] try: first = int(first) except ValueError: first = 1 else: first = 1 last = first + 10 for x in range(first, last): self.poutput(str(x))
python
{ "resource": "" }
q33974
SubcommandsExample.do_base
train
def do_base(self, args): """Base command help""" func = getattr(args, 'func', None) if func is not None: # Call whatever sub-command function was selected func(self, args) else: # No sub-command was provided, so call help self.do_help('base')
python
{ "resource": "" }
q33975
SubcommandsExample.do_alternate
train
def do_alternate(self, args): """Alternate command help""" func = getattr(args, 'func', None) if func is not None: # Call whatever sub-command function was selected func(self, args) else: # No sub-command was provided, so call help self.do_help('alternate')
python
{ "resource": "" }
q33976
main
train
def main(argv=None): """Run when invoked from the operating system shell""" parser = argparse.ArgumentParser( description='Commands as arguments' ) command_help = 'optional command to run, if no command given, enter an interactive shell' parser.add_argument('command', nargs='?', help=command_help) arg_help = 'optional arguments for command' parser.add_argument('command_args', nargs=argparse.REMAINDER, help=arg_help) args = parser.parse_args(argv) c = CmdLineApp() if args.command: # we have a command, run it and then exit c.onecmd_plus_hooks('{} {}'.format(args.command, ' '.join(args.command_args))) else: # we have no command, drop into interactive mode c.cmdloop()
python
{ "resource": "" }
q33977
TabCompleteExample.do_add_item
train
def do_add_item(self, args): """Add item command help""" if args.food: add_item = args.food elif args.sport: add_item = args.sport elif args.other: add_item = args.other else: add_item = 'no items' self.poutput("You added {}".format(add_item))
python
{ "resource": "" }
q33978
CmdLineApp.do_tag
train
def do_tag(self, args: argparse.Namespace): """create an html tag""" # The Namespace always includes the Statement object created when parsing the command line statement = args.__statement__ self.poutput("The command line you ran was: {}".format(statement.command_and_args)) self.poutput("It generated this tag:") self.poutput('<{0}>{1}</{0}>'.format(args.tag, ' '.join(args.content)))
python
{ "resource": "" }
q33979
CmdLineApp.do_tagg
train
def do_tagg(self, arglist: List[str]): """version of creating an html tag using arglist instead of argparser""" if len(arglist) >= 2: tag = arglist[0] content = arglist[1:] self.poutput('<{0}>{1}</{0}>'.format(tag, ' '.join(content))) else: self.perror("tagg requires at least 2 arguments")
python
{ "resource": "" }
q33980
ReplWithExitCode.do_exit
train
def do_exit(self, arg_list: List[str]) -> bool: """Exit the application with an optional exit code. Usage: exit [exit_code] Where: * exit_code - integer exit code to return to the shell """ # If an argument was provided if arg_list: try: self.exit_code = int(arg_list[0]) except ValueError: self.perror("{} isn't a valid integer exit code".format(arg_list[0])) self.exit_code = -1 self._should_quit = True return self._STOP_AND_EXIT
python
{ "resource": "" }
q33981
categorize
train
def categorize(func: Union[Callable, Iterable], category: str) -> None: """Categorize a function. The help command output will group this function under the specified category heading :param func: function to categorize :param category: category to put it in """ if isinstance(func, Iterable): for item in func: setattr(item, HELP_CATEGORY, category) else: setattr(func, HELP_CATEGORY, category)
python
{ "resource": "" }
q33982
with_category
train
def with_category(category: str) -> Callable: """A decorator to apply a category to a command function.""" def cat_decorator(func): categorize(func, category) return func return cat_decorator
python
{ "resource": "" }
q33983
with_argparser_and_unknown_args
train
def with_argparser_and_unknown_args(argparser: argparse.ArgumentParser, preserve_quotes: bool = False) -> \ Callable[[argparse.Namespace, List], Optional[bool]]: """A decorator to alter a cmd2 method to populate its ``args`` argument by parsing arguments with the given instance of argparse.ArgumentParser, but also returning unknown args as a list. :param argparser: unique instance of ArgumentParser :param preserve_quotes: if True, then arguments passed to argparse maintain their quotes :return: function that gets passed argparse-parsed args in a Namespace and a list of unknown argument strings A member called __statement__ is added to the Namespace to provide command functions access to the Statement object. This can be useful if the command function needs to know the command line. """ import functools # noinspection PyProtectedMember def arg_decorator(func: Callable): @functools.wraps(func) def cmd_wrapper(cmd2_instance, statement: Union[Statement, str]): statement, parsed_arglist = cmd2_instance.statement_parser.get_command_arg_list(command_name, statement, preserve_quotes) try: args, unknown = argparser.parse_known_args(parsed_arglist) except SystemExit: return else: setattr(args, '__statement__', statement) return func(cmd2_instance, args, unknown) # argparser defaults the program name to sys.argv[0] # we want it to be the name of our command command_name = func.__name__[len(COMMAND_FUNC_PREFIX):] argparser.prog = command_name # If the description has not been set, then use the method docstring if one exists if argparser.description is None and func.__doc__: argparser.description = func.__doc__ # Set the command's help text as argparser.description (which can be None) cmd_wrapper.__doc__ = argparser.description # Mark this function as having an argparse ArgumentParser setattr(cmd_wrapper, 'argparser', argparser) return cmd_wrapper return arg_decorator
python
{ "resource": "" }
q33984
Cmd.decolorized_write
train
def decolorized_write(self, fileobj: IO, msg: str) -> None: """Write a string to a fileobject, stripping ANSI escape sequences if necessary Honor the current colors setting, which requires us to check whether the fileobject is a tty. """ if self.colors.lower() == constants.COLORS_NEVER.lower() or \ (self.colors.lower() == constants.COLORS_TERMINAL.lower() and not fileobj.isatty()): msg = utils.strip_ansi(msg) fileobj.write(msg)
python
{ "resource": "" }
q33985
Cmd.perror
train
def perror(self, err: Union[str, Exception], traceback_war: bool = True, err_color: str = Fore.LIGHTRED_EX, war_color: str = Fore.LIGHTYELLOW_EX) -> None: """ Print error message to sys.stderr and if debug is true, print an exception Traceback if one exists. :param err: an Exception or error message to print out :param traceback_war: (optional) if True, print a message to let user know they can enable debug :param err_color: (optional) color escape to output error with :param war_color: (optional) color escape to output warning with """ if self.debug and sys.exc_info() != (None, None, None): import traceback traceback.print_exc() if isinstance(err, Exception): err_msg = "EXCEPTION of type '{}' occurred with message: '{}'\n".format(type(err).__name__, err) else: err_msg = "{}\n".format(err) err_msg = err_color + err_msg + Fore.RESET self.decolorized_write(sys.stderr, err_msg) if traceback_war and not self.debug: war = "To enable full traceback, run the following command: 'set debug true'\n" war = war_color + war + Fore.RESET self.decolorized_write(sys.stderr, war)
python
{ "resource": "" }
q33986
Cmd.pfeedback
train
def pfeedback(self, msg: str) -> None: """For printing nonessential feedback. Can be silenced with `quiet`. Inclusion in redirected output is controlled by `feedback_to_output`.""" if not self.quiet: if self.feedback_to_output: self.poutput(msg) else: self.decolorized_write(sys.stderr, "{}\n".format(msg))
python
{ "resource": "" }
q33987
Cmd.ppaged
train
def ppaged(self, msg: str, end: str = '\n', chop: bool = False) -> None:
    """Print output using a pager if it would go off screen and stdout isn't currently being redirected.

    Never uses a pager inside of a script (Python or text) or when output is being
    redirected or piped or when stdout or stdin are not a fully functional terminal.

    :param msg: message to print to current stdout (anything convertible to a str with
                '{}'.format() is OK)
    :param end: string appended after the end of the message if not already present,
                default a newline
    :param chop: True -> causes lines longer than the screen width to be chopped
                         (truncated) rather than wrapped
                         - truncated text is still accessible by scrolling with the
                           right & left arrow keys
                         - chopping is ideal for displaying wide tabular data as is done
                           in utilities like pgcli
                 False -> causes lines longer than the screen width to wrap to the next line
                          - wrapping is ideal when you want to keep users from having to
                            use horizontal scrolling
                 WARNING: On Windows, the text always wraps regardless of what the chop
                 argument is set to
    """
    import subprocess
    # Nothing to do for None or the empty string
    if msg is not None and msg != '':
        try:
            msg_str = '{}'.format(msg)
            if not msg_str.endswith(end):
                msg_str += end

            # Attempt to detect if we are not running within a fully functional terminal.
            # Don't try to use the pager when being run by a continuous integration system like Jenkins + pexpect.
            functional_terminal = False

            if self.stdin.isatty() and self.stdout.isatty():
                # On Windows TERM is usually unset even in a real console, so accept the
                # platform alone; elsewhere require TERM to be set.
                if sys.platform.startswith('win') or os.environ.get('TERM') is not None:
                    functional_terminal = True

            # Don't attempt to use a pager that can block if redirecting or running a script (either text or Python)
            # Also only attempt to use a pager if actually running in a real fully functional terminal
            if functional_terminal and not self.redirecting and not self._in_py and not self._script_dir:
                # Strip ANSI escapes when colors are fully disabled
                if self.colors.lower() == constants.COLORS_NEVER.lower():
                    msg_str = utils.strip_ansi(msg_str)

                pager = self.pager
                if chop:
                    pager = self.pager_chop

                # Prevent KeyboardInterrupts while in the pager. The pager application will
                # still receive the SIGINT since it is in the same process group as us.
                with self.sigint_protection:
                    # The pager is a shell command line (e.g. "less -RXF"), hence shell=True
                    pipe_proc = subprocess.Popen(pager, shell=True, stdin=subprocess.PIPE)
                    pipe_proc.communicate(msg_str.encode('utf-8', 'replace'))
            else:
                self.decolorized_write(self.stdout, msg_str)
        except BrokenPipeError:
            # This occurs if a command's output is being piped to another process and that
            # process closes before the command is finished. If you would like your
            # application to print a warning message, then set the broken_pipe_warning
            # attribute to the message you want printed.
            if self.broken_pipe_warning:
                sys.stderr.write(self.broken_pipe_warning)
python
{ "resource": "" }
q33988
Cmd.reset_completion_defaults
train
def reset_completion_defaults(self) -> None:
    """Reset tab-completion state.

    Needs to be called each time readline runs tab completion.
    """
    # Restore the per-completion flags and buffers to their defaults
    self.allow_appended_space = self.allow_closing_quote = True
    self.matches_delimited = self.matches_sorted = False
    self.completion_header = ''
    self.display_matches = []

    # Re-install the appropriate display hook for the active readline flavor
    if rl_type == RlType.GNU:
        readline.set_completion_display_matches_hook(self._display_matches_gnu_readline)
    elif rl_type == RlType.PYREADLINE:
        # noinspection PyUnresolvedReferences
        readline.rl.mode._display_completions = self._display_matches_pyreadline
python
{ "resource": "" }
q33989
Cmd.basic_complete
train
def basic_complete(text: str, line: str, begidx: int, endidx: int, match_against: Iterable) -> List[str]:
    """Perform tab completion against a list of candidate strings.

    :param text: the string prefix we are attempting to match (all returned matches must begin with it)
    :param line: the current input line with leading whitespace removed
    :param begidx: the beginning index of the prefix text
    :param endidx: the ending index of the prefix text
    :param match_against: the list being matched against
    :return: a list of possible tab completions
    """
    matches = []
    for candidate in match_against:
        if candidate.startswith(text):
            matches.append(candidate)
    return matches
python
{ "resource": "" }
q33990
Cmd.delimiter_complete
train
def delimiter_complete(self, text: str, line: str, begidx: int, endidx: int,
                       match_against: Iterable, delimiter: str) -> List[str]:
    """Tab completion against a list where only the delimited portion being completed is displayed.

    Useful for hierarchical strings with a common delimiter, e.g. filesystem paths
    ('/home/user/file.txt' shows just 'file.txt') or 'company::department::name'
    strings. The full strings are still returned as the matches; only the
    suggestions shown to the user are shortened.

    :param text: the string prefix we are attempting to match (all returned matches must begin with it)
    :param line: the current input line with leading whitespace removed
    :param begidx: the beginning index of the prefix text
    :param endidx: the ending index of the prefix text
    :param match_against: the list being matched against
    :param delimiter: what delimits each portion of the matches (ex: paths are delimited by a slash)
    :return: a list of possible tab completions
    """
    completions = self.basic_complete(text, line, begidx, endidx, match_against)
    if not completions:
        return completions

    # Set this to True for proper quoting of matches with spaces
    self.matches_delimited = True

    # The segment currently being completed is the last one in the common prefix
    shared_prefix = os.path.commonprefix(completions)
    segment_index = max(len(shared_prefix.split(delimiter)) - 1, 0)

    # Show only that segment of each match as the completion suggestion
    for completion in completions:
        segment = completion.split(delimiter)[segment_index]
        self.display_matches.append(segment if segment else delimiter)

    return completions
python
{ "resource": "" }
q33991
Cmd.get_exes_in_path
train
def get_exes_in_path(starts_with: str) -> List[str]:
    """Return the names of executables in the user's PATH.

    :param starts_with: what the exe names should start with; leave blank for all exes in path
    :return: a list of matching exe base names (unordered, without duplicates)
    """
    # Purposely don't match any executable containing wildcards
    if any(wildcard in starts_with for wildcard in ('*', '?')):
        return []

    # Get a list of every directory in the PATH environment variable and ignore symbolic links.
    # os.getenv('PATH') can return None if PATH is unset; default to '' so no paths are searched
    # instead of raising AttributeError.
    paths = [p for p in os.getenv('PATH', '').split(os.path.pathsep) if not os.path.islink(p)]

    # Use a set to store exe names since the same name can appear in multiple directories
    exes_set = set()

    # Find every executable file in the user's path that matches the pattern
    for path in paths:
        full_path = os.path.join(path, starts_with)
        # glob.escape keeps characters like '[' in the prefix from being treated
        # as glob syntax; only our trailing '*' remains a wildcard
        matches = [f for f in glob.glob(glob.escape(full_path) + '*')
                   if os.path.isfile(f) and os.access(f, os.X_OK)]

        for match in matches:
            exes_set.add(os.path.basename(match))

    return list(exes_set)
python
{ "resource": "" }
q33992
Cmd._autocomplete_default
train
def _autocomplete_default(self, text: str, line: str, begidx: int, endidx: int,
                          argparser: argparse.ArgumentParser) -> List[str]:
    """Default tab-completion function for argparse-based commands.

    Builds an AutoCompleter for the command's parser and delegates completion to it.
    """
    arg_completer = AutoCompleter(argparser, self)
    tokens, _ = self.tokens_for_completion(line, begidx, endidx)
    if tokens:
        return arg_completer.complete_command(tokens, text, line, begidx, endidx)
    return []
python
{ "resource": "" }
q33993
Cmd.get_all_commands
train
def get_all_commands(self) -> List[str]:
    """Return a list of all commands (attributes named with COMMAND_FUNC_PREFIX that are callable)."""
    commands = []
    for attr_name in self.get_names():
        if attr_name.startswith(COMMAND_FUNC_PREFIX) and callable(getattr(self, attr_name)):
            commands.append(attr_name[len(COMMAND_FUNC_PREFIX):])
    return commands
python
{ "resource": "" }
q33994
Cmd.get_visible_commands
train
def get_visible_commands(self) -> List[str]:
    """Return a list of commands that have not been hidden or disabled."""
    # Names appear at most once in get_all_commands(), so filtering with a set
    # of exclusions is equivalent to removing each hidden/disabled name.
    excluded = set(self.hidden_commands) | set(self.disabled_commands)
    return [name for name in self.get_all_commands() if name not in excluded]
python
{ "resource": "" }
q33995
Cmd.get_commands_aliases_and_macros_for_completion
train
def get_commands_aliases_and_macros_for_completion(self) -> List[str]:
    """Return a list of visible commands, aliases, and macros for tab completion."""
    names = set(self.get_visible_commands())
    names.update(self.get_alias_names())
    names.update(self.get_macro_names())
    return list(names)
python
{ "resource": "" }
q33996
Cmd.get_help_topics
train
def get_help_topics(self) -> List[str]:
    """Return a list of help topics (callable attributes named with HELP_FUNC_PREFIX)."""
    topics = []
    for attr_name in self.get_names():
        if attr_name.startswith(HELP_FUNC_PREFIX) and callable(getattr(self, attr_name)):
            topics.append(attr_name[len(HELP_FUNC_PREFIX):])
    return topics
python
{ "resource": "" }
q33997
Cmd.sigint_handler
train
def sigint_handler(self, signum: int, frame) -> None:
    """Signal handler for SIGINTs, which typically come from Ctrl-C events.

    If you need custom SIGINT behavior, then override this function.

    :param signum: signal number
    :param frame: the current stack frame (unused)
    """
    reader = self.cur_pipe_proc_reader
    if reader is not None:
        # Pass the SIGINT to the current pipe process
        reader.send_sigint()

    # Re-raise as a KeyboardInterrupt unless we are currently protected from it
    if self.sigint_protection:
        return
    raise KeyboardInterrupt("Got a keyboard interrupt")
python
{ "resource": "" }
q33998
Cmd.parseline
train
def parseline(self, line: str) -> Tuple[str, str, str]:
    """Parse the line into a command name and a string containing the arguments.

    NOTE: This is an override of a parent class method. It is only used by other
    parent class methods. Different from the parent class method, this ignores
    self.identchars.

    :param line: line read by readline
    :return: tuple containing (command, args, line)
    """
    parsed = self.statement_parser.parse_command_only(line)
    return (parsed.command, parsed.args, parsed.command_and_args)
python
{ "resource": "" }
q33999
Cmd._run_cmdfinalization_hooks
train
def _run_cmdfinalization_hooks(self, stop: bool, statement: Optional[Statement]) -> bool:
    """Run the command finalization hooks.

    :param stop: the stop flag produced by the command that just ran
    :param statement: the parsed Statement that was executed, or None if unavailable
    :return: the final value of stop after all hooks have run
             NOTE(review): if a hook raises, the exception is reported via perror
             and this method implicitly returns None, which a boolean caller will
             read as "don't stop" — confirm that is the intended behavior.
    """
    # Block SIGINT while resetting the terminal so the cleanup can't be interrupted
    with self.sigint_protection:
        if not sys.platform.startswith('win') and self.stdout.isatty():
            # Before the next command runs, fix any terminal problems like those
            # caused by certain binary characters having been printed to it.
            import subprocess
            proc = subprocess.Popen(['stty', 'sane'])
            proc.communicate()

    try:
        data = plugin.CommandFinalizationData(stop, statement)
        # Each hook receives the data produced by the previous one
        for func in self._cmdfinalization_hooks:
            data = func(data)
        # retrieve the final value of stop, ignoring any
        # modifications to the statement
        return data.stop
    except Exception as ex:
        self.perror(ex)
python
{ "resource": "" }