_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q43000
pipupdate
train
def pipupdate():
    """Upgrade every currently installed pip package.

    Collects the project names of all distributions visible to
    ``pkg_resources`` and runs ``pip install --upgrade`` over them.
    """
    # BUG FIX: join project names; the original joined Distribution
    # objects, which raises TypeError inside str.join().
    packages = [d.project_name for d in pkg_resources.working_set]
    # Pass an argument list (no shell) so the call works on POSIX and
    # package names are not subject to shell interpretation.
    subprocess.call(['pip', 'install', '--upgrade'] + packages)
python
{ "resource": "" }
q43001
loglevel
train
def loglevel(leveltype=None, isequal=False):
    """ Set or get the logging level of Quilt

    :type leveltype: string or integer
    :param leveltype: Choose the logging level. Possible choices are
        none (0), debug (10), info (20), warning (30), error (40)
        and critical (50).
    :type isequal: boolean
    :param isequal: Check if level is equal to leveltype.
    :return: The current level, or whether it is equal to leveltype.
    :rtype: integer or boolean
    :raises ValueError: If leveltype is neither a known name nor value.

    >>> loglevel()
    30
    """
    log = logging.getLogger(__name__)
    loglevels = {
        "none": 0,
        "debug": 10,
        "info": 20,
        "warning": 30,
        "error": 40,
        "critical": 50
    }
    if leveltype is None and isequal is False:
        return log.getEffectiveLevel()
    if leveltype is not None and isequal is True:
        if leveltype in loglevels.values():
            return leveltype == log.getEffectiveLevel()
        elif leveltype in loglevels:
            return loglevels[leveltype] == log.getEffectiveLevel()
        raise ValueError(
            "Incorrect input provided. It should be none, debug, info, warning, error or critical."
        )
    if leveltype in loglevels.values():
        # BUG FIX: Logger objects have no basicConfig(); use the
        # module-level logging.basicConfig instead (was log.basicConfig,
        # which raised AttributeError).
        logging.basicConfig(level=leveltype)
    elif leveltype in loglevels:
        logging.basicConfig(level=loglevels[leveltype])
    else:
        raise ValueError(
            "Incorrect input provided. It should be none, debug, info, warning, error or critical."
        )
python
{ "resource": "" }
q43002
text
train
def text(path, operation, content):
    """ Perform changes on text files

    :type path: string
    :param path: The path to perform the action on
    :type operation: string
    :param operation: The operation to use on the file ('write' or 'append')
    :type content: string
    :param content: The content to use with the operation
    :raises ValueError: If the operation is neither 'write' nor 'append'.
    """
    op = operation.lower()
    # If the operation is "write"
    if op == 'write':
        # Truncate the file and write the new content
        with open(path, 'w') as fh:
            fh.write(content)
    # If the operation is "append"
    elif op == 'append':
        # Keep the existing content and add to the end
        with open(path, 'a') as fh:
            fh.write(content)
    else:
        # BUG FIX: raise only for unknown operations; previously the
        # ValueError was raised unconditionally, even after a
        # successful append.
        raise ValueError("Invalid operation provided")
python
{ "resource": "" }
q43003
mailto
train
def mailto(to, cc=None, bcc=None, subject=None, body=None):
    """ Generate a mailto URL.

    :type to: string
    :param to: The recipient email address.
    :type cc: string
    :param cc: The recipient to copy to.
    :type bcc: string
    :param bcc: The recipient to blind copy to.
    :type subject: string
    :param subject: The subject to use.
    :type body: string
    :param body: The body content to use.
    :return: The assembled mailto URL.
    :rtype: string
    """
    mailurl = 'mailto:' + str(to)
    params = []
    if cc is not None:
        params.append('cc=' + str(cc))
    if bcc is not None:
        # BUG FIX: previously this appended the cc value instead of bcc.
        params.append('bcc=' + str(bcc))
    if subject is not None:
        params.append('subject=' + str(subject))
    if body is not None:
        params.append('body=' + str(body))
    if not params:
        return mailurl
    # BUG FIX: the original reset its separator flag to False right after
    # adding cc, so the '&' between cc and the next parameter was dropped.
    return mailurl + '?' + '&'.join(params)
python
{ "resource": "" }
q43004
Virsh.execute_virsh_command
train
def execute_virsh_command(self, **kwargs):
    '''
    common virsh execution function

    Runs an arbitrary virsh command (kwargs['cmd']) on every host in
    host_list through ansible's "command" module and parses the per-host
    stdout into a structured result.

    Keyword Args:
        host_list / remote_user / remote_pass / sudo / sudo_user / sudo_pass:
            connection parameters, validated via get_validated_params().
        cmd (str): the command line to execute (required).
        delimiter (str): separator for "LRVALUE" parsing (default ":").
        output_type (str): "LRVALUE" (default) or "TABLE".
        fields (list): column names; required when output_type == "TABLE".

    Returns:
        dict: {host: {'result': parsed-output}} for each contacted host,
        or None when no host was contacted or a required argument is
        missing.
    '''
    # NOTE: Python 2 module (print statements below).
    host_list = kwargs.get('host_list', None)
    remote_user = kwargs.get('remote_user', None)
    remote_pass = kwargs.get('remote_pass', None)
    sudo = kwargs.get('sudo', False)
    sudo_user = kwargs.get('sudo_user', None)
    sudo_pass = kwargs.get('sudo_pass', None)
    # Fill in defaults / validate the connection parameters as a group.
    host_list, remote_user, remote_pass, \
        sudo, sudo_user, sudo_pass = self.get_validated_params(
            host_list, remote_user, remote_pass, sudo, sudo_user, sudo_pass)
    if 'cmd' not in kwargs.keys():
        print "Require a command to execute"
        return None
    cmd = kwargs['cmd']
    if 'delimiter' not in kwargs.keys():
        delimiter = ":"
    else:
        delimiter = kwargs['delimiter']
    if 'output_type' not in kwargs.keys():
        output_type = "LRVALUE"
    else:
        output_type = kwargs['output_type']
    if output_type == "TABLE":
        if 'fields' not in kwargs.keys():
            print "Require to pass fields"
            return None
        fields = kwargs['fields']
    result, failed_hosts = self.runner.ansible_perform_operation(
        host_list=host_list,
        remote_user=remote_user,
        remote_pass=remote_pass,
        module="command",
        module_args=cmd,
        sudo=sudo,
        sudo_user=sudo_user,
        sudo_pass=sudo_pass)
    virsh_result = None
    if result['contacted'].keys():
        virsh_result = {}
        for node in result['contacted'].keys():
            nodeobj = result['contacted'][node]
            # Parse stdout according to the requested output shape.
            # NOTE(review): an output_type other than LRVALUE/TABLE leaves
            # jsonoutput unset (NameError) — confirm callers only pass
            # these two values.
            if output_type == "LRVALUE":
                jsonoutput = rex.parse_lrvalue_string(nodeobj['stdout'], delimiter)
            elif output_type == "TABLE":
                jsonoutput = rex.parse_tabular_string(nodeobj['stdout'], fields)
            else:
                pass
            virsh_result[node] = {}
            virsh_result[node]['result'] = jsonoutput
    return virsh_result
python
{ "resource": "" }
q43005
Virsh.virsh_version
train
def virsh_version(self, host_list=None, remote_user=None, remote_pass=None,
                  sudo=False, sudo_user=None, sudo_pass=None):
    ''' Get the virsh version

    Runs `virsh version` on each host via ansible's "command" module.

    Returns:
        dict: {host: {'result': parsed "key: value" output}} for each
        contacted host, or None when no host was contacted.
    '''
    # Fill in defaults / validate the connection parameters as a group.
    host_list, remote_user, remote_pass, \
        sudo, sudo_user, sudo_pass = self.get_validated_params(
            host_list, remote_user, remote_pass, sudo, sudo_user, sudo_pass)
    result, failed_hosts = self.runner.ansible_perform_operation(
        host_list=host_list,
        remote_user=remote_user,
        remote_pass=remote_pass,
        module="command",
        module_args="virsh version",
        sudo=sudo,
        sudo_user=sudo_user,
        sudo_pass=sudo_pass)
    virsh_result = None
    if result['contacted'].keys():
        virsh_result = {}
        for node in result['contacted'].keys():
            nodeobj = result['contacted'][node]
            # stdout lines look like "Key: value"; parse them into a dict.
            jsonoutput = rex.parse_lrvalue_string(nodeobj['stdout'], ":")
            virsh_result[node] = {}
            virsh_result[node]['result'] = jsonoutput
    return virsh_result
python
{ "resource": "" }
q43006
Virsh.virsh_per_domain_info
train
def virsh_per_domain_info(self, **kwargs):
    ''' Get per domain stats from each hosts passed as hostlist.

    Keyword Args:
        host_list / remote_user / remote_pass / sudo / sudo_user /
        sudo_pass: connection parameters, defaulting to the values
        stored on the instance.

    Returns:
        dict: {host: {'result': parsed `virsh list` table}} for each
        contacted host, or None when no host was contacted.
    '''
    # NOTE: Python 2 module (print statement below).
    host_list = kwargs.get('host_list', self.host_list)
    remote_user = kwargs.get('remote_user', self.remote_user)
    remote_pass = kwargs.get('remote_pass', self.remote_pass)
    sudo = kwargs.get('sudo', self.sudo)
    sudo_user = kwargs.get('sudo_user', self.sudo_user)
    sudo_pass = kwargs.get('sudo_pass', self.sudo_pass)
    result, failed_hosts = self.runner.ansible_perform_operation(
        host_list=host_list,
        remote_user=remote_user,
        remote_pass=remote_pass,
        module="command",
        module_args="virsh list",
        sudo=sudo,
        sudo_user=sudo_user,
        sudo_pass=sudo_pass)
    virsh_result = None
    # Column names of the `virsh list` table output.
    fields = ["Id", "Name", "state"]
    if result['contacted'].keys():
        virsh_result = {}
        for node in result['contacted'].keys():
            nodeobj = result['contacted'][node]
            jsonoutput = rex.parse_tabular_string(nodeobj['stdout'], fields)
            virsh_result[node] = {}
            virsh_result[node]['result'] = jsonoutput
    print virsh_result
    return virsh_result
python
{ "resource": "" }
q43007
getUserAgent
train
def getUserAgent():
    '''
    Generate a randomized user agent by permuting a large set of
    possible values. The returned user agent should look like a valid,
    in-use browser, with a specified preferred language of english.

    Return value is a list of tuples, where each tuple is one of the
    user-agent headers. Currently can provide approximately
    147 * 17 * 5 * 5 * 2 * 3 * 2 values, or ~749K possible unique
    user-agents.
    '''
    # BUG FIX: copy the chosen list before shuffling. random.shuffle()
    # works in place, so shuffling random.choice(ENCODINGS) directly
    # permanently reordered the shared entry inside the ENCODINGS global.
    coding = list(random.choice(ENCODINGS))
    random.shuffle(coding)
    coding = random.choice((", ", ",")).join(coding)
    # Copy so appending the postfix cannot mutate the shared ACCEPT entry.
    accept_list = list(random.choice(ACCEPT))
    accept_list.append(random.choice(ACCEPT_POSTFIX))
    accept_str = random.choice((", ", ",")).join(accept_list)
    assert accept_str.count("*.*") <= 1
    user_agent = [
        ('User-Agent',      random.choice(USER_AGENTS)),
        ('Accept-Language', random.choice(ACCEPT_LANGUAGE)),
        ('Accept',          accept_str),
        ('Accept-Encoding', coding),
    ]
    return user_agent
python
{ "resource": "" }
q43008
CatTransformer.get_generator
train
def get_generator(self):
    """Return the faker generator used to anonymize data.

    Returns:
        callable: The ``faker`` provider matching ``self.category``.

    Raises:
        ValueError: If ``self.category`` is not a valid faker provider.
    """
    faker = Faker()
    try:
        return getattr(faker, self.category)
    except AttributeError:
        # BUG FIX: the '{}' placeholder was never filled in, so the error
        # message always showed the literal braces instead of the category.
        raise ValueError(
            'Category {} couldn\'t be found on faker'.format(self.category))
python
{ "resource": "" }
q43009
CatTransformer.anonymize_column
train
def anonymize_column(self, col):
    """Map the values of column to new ones of the same type.

    It replaces the values from others generated using `faker`. It will,
    however, keep the original distribution. That means that the generated
    `probability_map` for both will have the same values, but different
    keys.

    Args:
        col (pandas.DataFrame): Dataframe containing the column to
            anonymize.

    Returns:
        pd.DataFrame: DataFrame with its values mapped to new ones,
        keeping the original distribution.

    Raises:
        ValueError: A `ValueError` is raised if faker is not able to
            provide enough different values.
    """
    column = col[self.col_name]
    generator = self.get_generator()
    # Only non-null values get a replacement; NaN maps through .get() to None.
    original_values = column[~pd.isnull(column)].unique()
    new_values = [generator() for x in range(len(original_values))]
    # Each original value needs a distinct replacement, otherwise two
    # categories would collapse into one and distort the distribution.
    if len(new_values) != len(set(new_values)):
        # BUG FIX: added the missing space between the two string
        # fragments (previously rendered as "providerfor").
        raise ValueError(
            'There are not enought different values on faker provider '
            'for category {}'.format(self.category)
        )
    value_map = dict(zip(original_values, new_values))
    column = column.apply(value_map.get)
    return column.to_frame()
python
{ "resource": "" }
q43010
CatTransformer._fit
train
def _fit(self, col): """Create a map of the empirical probability for each category. Args: col(pandas.DataFrame): Data to transform. """ column = col[self.col_name].replace({np.nan: np.inf}) frequencies = column.groupby(column).count().rename({np.inf: None}).to_dict() # next set probability ranges on interval [0,1] start = 0 end = 0 num_vals = len(col) for val in frequencies: prob = frequencies[val] / num_vals end = start + prob interval = (start, end) mean = np.mean(interval) std = prob / 6 self.probability_map[val] = (interval, mean, std) start = end
python
{ "resource": "" }
q43011
CatTransformer.fit_transform
train
def fit_transform(self, col):
    """Fit the transformer on *col* and return the processed data.

    Args:
        col(pandas.DataFrame): Data to transform.

    Returns:
        pandas.DataFrame
    """
    # Optionally replace real values with fake ones before fitting.
    data = self.anonymize_column(col) if self.anonymize else col
    self._fit(data)
    return self.transform(data)
python
{ "resource": "" }
q43012
CatTransformer.get_val
train
def get_val(self, x): """Convert cat value into num between 0 and 1.""" interval, mean, std = self.probability_map[x] new_val = norm.rvs(mean, std) return new_val
python
{ "resource": "" }
q43013
CatTransformer.get_category
train
def get_category(self, column):
    """Map numeric values back onto their categories.

    Args:
        column(pandas.Series): Values to transform into categories

    Returns:
        pandas.Series
    """
    out = pd.Series(index=column.index)
    # Assign each value the category whose probability interval
    # (strictly) contains it.
    for category, stats in self.probability_map.items():
        low, high = stats[0]
        out[(low < column) & (column < high)] = category
    return out
python
{ "resource": "" }
q43014
PositiveNumberTransformer.transform
train
def transform(self, column):
    """Turn values into positive numbers by applying an exponential.

    Args:
        column (pandas.DataFrame): Data to transform.

    Returns:
        pd.DataFrame
    """
    self.check_data_type()
    exponentiated = np.exp(column[self.col_name])
    return pd.DataFrame({self.col_name: exponentiated})
python
{ "resource": "" }
q43015
PositiveNumberTransformer.reverse_transform
train
def reverse_transform(self, column):
    """Turn positive values back into real-ranged ones via the natural log.

    Args:
        column (pandas.DataFrame): Data to transform.

    Returns:
        pd.DataFrame
    """
    self.check_data_type()
    logged = np.log(column[self.col_name])
    return pd.DataFrame({self.col_name: logged})
python
{ "resource": "" }
q43016
YQL._payload_builder
train
def _payload_builder(self, query, format=None):
    '''Build the payload

    Assembles the final YQL query string (community-table prefix, `use`
    statement, post-query function filters, limit and offset) and the
    request parameter dict sent to the YQL endpoint.

    Args:
        query: the base YQL query string.
        format: optional response format overriding self.format.

    Returns:
        dict: the request parameters; also cached on self._payload.
    '''
    if self.community :
        query = self.COMMUNITY_DATA + query # access to community data tables
    if vars(self).get('yql_table_url') : # Attribute only defined when MYQL.use has been called before
        query = "use '{0}' as {1}; ".format(self.yql_table_url, self.yql_table_name) + query
    if vars(self).get('_func'): # if post query function filters
        query = '| '.join((query, self._func))
    self._query = query
    # Append the LIMIT / OFFSET clauses configured on the instance.
    self._query = self._add_limit()
    self._query = self._add_offset()
    logger.info("QUERY = %s" %(self._query,))
    payload = {
        'q': self._query,
        'callback': '',  # This is not javascript
        'diagnostics': self.diagnostics,
        'format': format if format else self.format,
        'debug': self.debug,
        'jsonCompact': 'new' if self.jsonCompact else ''
    }
    if vars(self).get('_vars'):
        # Bind query variables into the request parameters.
        payload.update(self._vars)
    if self.crossProduct:
        payload['crossProduct'] = 'optimized'
    self._payload = payload
    logger.info("PAYLOAD = %s " %(payload, ))
    return payload
python
{ "resource": "" }
q43017
YQL.execute_query
train
def execute_query(self, payload):
    '''Execute the query and return the HTTP response.

    Uses the OAuth-authenticated session against the private endpoint
    when OAuth credentials are attached (refreshing the token first if
    it expired); otherwise issues an anonymous GET against the public
    endpoint. The response is also cached on self._response.
    '''
    if vars(self).get('oauth'):
        if not self.oauth.token_is_valid():
            # Refresh token if token has expired
            self.oauth.refresh_token()
        response = self.oauth.session.get(
            self.PRIVATE_URL, params=payload, header_auth=True)
    else:
        response = requests.get(self.PUBLIC_URL, params=payload)
    self._response = response  # Saving last response object.
    return response
python
{ "resource": "" }
q43018
YQL.response_builder
train
def response_builder(self, response):
    '''Try to return a pretty formatted response object.

    Returns a {'num_result', 'result'} dict built from the JSON payload;
    on any parsing failure the error is printed and the raw response
    body is returned instead.
    '''
    try:
        payload = response.json()
        query = payload['query']
        summary = {'num_result': query['count'], 'result': query['results']}
    except (Exception,) as err:
        # Parsing failed: report and hand back the raw body.
        print(err)
        return response.content
    return summary
python
{ "resource": "" }
q43019
YQL._func_filters
train
def _func_filters(self, filters): '''Build post query filters ''' if not isinstance(filters, (list,tuple)): raise TypeError('func_filters must be a <type list> or <type tuple>') for i, func in enumerate(filters) : if isinstance(func, str) and func == 'reverse': filters[i] = 'reverse()' elif isinstance(func, tuple) and func[0] in YQL.FUNC_FILTERS: filters[i] = '{:s}(count={:d})'.format(*func) elif isinstance(func, dict) : func_stmt = '' func_name = list(func.keys())[0] # Because of Py3 values = [ "{0}='{1}'".format(v[0], v[1]) for v in func[func_name] ] func_stmt = ','.join(values) func_stmt = '{0}({1})'.format(func_name, func_stmt) filters[i] = func_stmt else: raise TypeError('{0} is neither a <str>, a <tuple> or a <dict>'.format(func)) return '| '.join(filters)
python
{ "resource": "" }
q43020
MYQL.show_tables
train
def show_tables(self, format='json'):
    '''Return the list of all available tables.

    Issues the YQL 'SHOW TABLES' statement and returns the raw response.
    '''
    payload = self._payload_builder('SHOW TABLES', format)
    return self.execute_query(payload)
python
{ "resource": "" }
q43021
event
train
def event(from_states=None, to_state=None):
    """
    a decorator for transitioning from certain states to a target state.
    must be used on bound methods of a class instance, only.

    Args:
        from_states: a single State, or an iterable of States, the
            transition may start from (at least one is required).
        to_state: the State committed when the wrapped method returns
            without raising.

    Raises:
        ValueError: when from_states is empty.
        TypeError: when from_states contains non-State objects or
            to_state is not a State.
    """
    # Normalize to a tuple whether a single State or an iterable was given.
    from_states_tuple = (from_states, ) if isinstance(from_states, State) else tuple(from_states or [])
    if not len(from_states_tuple) >= 1:
        raise ValueError()
    if not all(isinstance(state, State) for state in from_states_tuple):
        raise TypeError()
    if not isinstance(to_state, State):
        raise TypeError()

    def wrapper(wrapped):
        @functools.wraps(wrapped)
        def transition(instance, *a, **kw):
            # Guard: the transition is only legal from the declared states.
            if instance.current_state not in from_states_tuple:
                raise InvalidStateTransition()
            try:
                result = wrapped(instance, *a, **kw)
            except Exception as error:
                # Delegate failures to any registered handlers; re-raise
                # only when no handler is installed on the instance.
                error_handlers = getattr(instance, '___pystatemachine_transition_failure_handlers', [])
                for error_handler in error_handlers:
                    error_handler(instance, wrapped, instance.current_state, to_state, error)
                if not error_handlers:
                    raise error
            else:
                # Success: commit the state change, then pass the result on.
                StateInfo.set_current_state(instance, to_state)
                return result
        return transition
    return wrapper
python
{ "resource": "" }
q43022
Weather.get_weather_in
train
def get_weather_in(self, place, unit=None, items=None):
    """Return weather info for *place*.

    Args:
        place: location name, resolved to a woeid via geo.places.
        unit: temperature unit; falls back to self.unit when not given.
        items: optional projection forwarded to select().
    """
    chosen_unit = unit or self.unit
    woeid_subquery = 'SELECT woeid FROM geo.places WHERE text="{0}"'.format(place)
    unit_clause = ['u', '=', chosen_unit] if chosen_unit else []
    query = self.select('weather.forecast', items=items)
    return query.where(['woeid', 'IN', (woeid_subquery,)], unit_clause)
python
{ "resource": "" }
q43023
Weather.get_weather_forecast
train
def get_weather_forecast(self, place, unit=None):
    """Return only the forecast part of the weather for *place*.

    Delegates to get_weather_in(), restricting the selected items to
    'item.forecast'; unit falls back to self.unit when not given.
    """
    chosen_unit = unit or self.unit
    return self.get_weather_in(place, items=['item.forecast'], unit=chosen_unit)
python
{ "resource": "" }
q43024
WebGetCrMixin.stepThroughJsWaf_bare_chromium
train
def stepThroughJsWaf_bare_chromium(self, url, titleContains='', titleNotContains='', extra_tid=None):
    '''
    Use Chromium to access a resource behind WAF protection.

    Params:
        ``url`` - The URL to access that is protected by WAF
        ``titleContains`` - A string that is in the title of the protected
            page, and NOT the WAF intermediate page. The presence of this
            string in the page title is used to determine whether the WAF
            protection has been successfully penetrated.
        ``titleNotContains`` - A string that is in the title of the WAF
            intermediate page and NOT in the target page. Its absence from
            the page title is used to determine whether the WAF protection
            has been successfully penetrated.

    Exactly one of ``titleContains`` / ``titleNotContains`` must be given.

    The current WebGetRobust headers are installed into the selenium
    browser, which is then used to access the protected resource. Once the
    protected page has properly loaded, the WAF access cookie is then
    extracted from the selenium browser, and installed back into the
    WebGetRobust instance, so it can continue to use the WAF auth in
    normal requests.

    Returns True on success, False when the timeout elapsed without the
    title condition being met.
    '''
    if (not titleContains) and (not titleNotContains):
        raise ValueError("You must pass either a string the title should contain, or a string the title shouldn't contain!")
    if titleContains and titleNotContains:
        raise ValueError("You can only pass a single conditional statement!")
    self.log.info("Attempting to access page through WAF browser verification.")
    current_title = None
    if extra_tid is True:
        # Key the chromium tab on the calling thread's identity.
        extra_tid = threading.get_ident()
    with self._chrome_context(url, extra_tid=extra_tid) as cr:
        self._syncIntoChromium(cr)
        cr.blocking_navigate(url)
        # Poll the page title once per second until it signals success
        # or the configured timeout elapses.
        for _ in range(self.wrapper_step_through_timeout):
            time.sleep(1)
            current_title, _ = cr.get_page_url_title()
            if titleContains and titleContains in current_title:
                self._syncOutOfChromium(cr)
                return True
            if titleNotContains and current_title and titleNotContains not in current_title:
                self._syncOutOfChromium(cr)
                return True
        self._syncOutOfChromium(cr)
    self.log.error("Failed to step through. \nCurrent title: '%s'", current_title)
    return False
python
{ "resource": "" }
q43025
WebGetCrMixin.chromiumContext
train
def chromiumContext(self, url, extra_tid=None):
    '''
    Return an active chromium context, useable for manual operations
    directly against chromium.

    The WebRequest user agent and other context is synchronized into the
    chromium instance at startup, and changes are flushed back to the
    webrequest instance from chromium at completion.
    '''
    assert url is not None, "You need to pass a URL to the contextmanager, so it can dispatch to the correct tab!"
    # True means "key the tab on the calling thread".
    tid = threading.get_ident() if extra_tid is True else extra_tid
    return self._chrome_context(url, extra_tid=tid)
python
{ "resource": "" }
q43026
load_data_table
train
def load_data_table(table_name, meta_file, meta):
    """Return the contents and metadata of a given table.

    Args:
        table_name(str): Name of the table.
        meta_file(str): Path to the meta.json file.
        meta(dict): Contents of meta.json.

    Returns:
        tuple(pandas.DataFrame, dict): the loaded CSV plus the matching
        table metadata, or None when no table is named table_name.
    """
    # Table paths in the metadata are relative to the meta.json location.
    base_dir = os.path.dirname(meta_file)
    for table_meta in meta['tables']:
        if table_meta['name'] != table_name:
            continue
        csv_path = os.path.join(base_dir, meta['path'], table_meta['path'])
        return pd.read_csv(csv_path), table_meta
python
{ "resource": "" }
q43027
get_col_info
train
def get_col_info(table_name, col_name, meta_file):
    """Return the content and metadata of a given column.

    Args:
        table_name(str): Name of the table.
        col_name(str): Name of the column.
        meta_file(str): Path to the meta.json file.

    Returns:
        tuple(pandas.Series, dict)

    Raises:
        ValueError: If col_name is not described in the table metadata.
    """
    with open(meta_file, 'r') as f:
        meta = json.load(f)
    data_table, table = load_data_table(table_name, meta_file, meta)
    for field in table['fields']:
        if field['name'] == col_name:
            col_meta = field
            break
    else:
        # BUG FIX: an unknown column previously fell through and raised an
        # opaque NameError because col_meta was never assigned.
        raise ValueError(
            'Column {} not found in table {}'.format(col_name, table_name))
    col = data_table[col_name]
    return (col, col_meta)
python
{ "resource": "" }
q43028
task_coverage
train
def task_coverage():
    """show coverage for all modules including tests

    doit task generator: yields one task covering package + test suite,
    and one restricted to the package sources only.
    """
    cov = Coverage(
        [PythonPackage('import_deps', 'tests')],
        config={'branch': True, },  # also measure branch coverage
    )
    yield cov.all()  # create task `coverage`
    yield cov.src()
python
{ "resource": "" }
q43029
register_workflow
train
def register_workflow(connection, domain, workflow):
    """Register a workflow type.

    Return False if this workflow already registered (and True otherwise).
    Any other client error is re-raised.
    """
    params = get_workflow_registration_parameter(workflow)
    try:
        connection.register_workflow_type(domain=domain, **params)
    except ClientError as err:
        # Already registered is expected and not an error.
        if err.response['Error']['Code'] == 'TypeAlreadyExistsFault':
            return False
        raise
    return True
python
{ "resource": "" }
q43030
pretty_json
train
def pretty_json(data):
    """Return a pretty formatted json.

    *data* is UTF-8 encoded JSON bytes; the result is re-serialized with
    4-space indentation and sorted keys.
    """
    parsed = json.loads(data.decode('utf-8'))
    return json.dumps(parsed, indent=4, sort_keys=True)
python
{ "resource": "" }
q43031
pretty_xml
train
def pretty_xml(data):
    """Return a pretty formatted xml.

    *data* is UTF-8 encoded XML bytes; the result is tab-indented,
    UTF-8 encoded bytes.
    """
    document = minidom.parseString(data.decode('utf-8'))
    return document.toprettyxml(indent='\t', encoding='utf-8')
python
{ "resource": "" }
q43032
prettyfy
train
def prettyfy(response, format='json'):
    """A wrapper for pretty_json and pretty_xml.

    Dispatches on *format*: 'json' goes to pretty_json, anything else to
    pretty_xml.
    """
    formatter = pretty_json if format == 'json' else pretty_xml
    return formatter(response.content)
python
{ "resource": "" }
q43033
parse_journal
train
def parse_journal(journal):
    """Parses the USN Journal content removing duplicates and corrupted
    records.

    Events are grouped by (file reference number, file name, timestamp)
    and each group is merged into one event via journal_event().

    NOTE(review): `journal` is iterated twice (the filter and the
    len(list(journal)) check), so this assumes a re-iterable sequence,
    not a one-shot generator — confirm callers pass a list.
    """
    events = [e for e in journal if not isinstance(e, CorruptedUsnRecord)]
    # Identity of a change: same file (reference number + name) at the
    # same timestamp; consecutive duplicates collapse into one group.
    keyfunc = lambda e: str(e.file_reference_number) + e.file_name + e.timestamp
    event_groups = (tuple(g) for k, g in groupby(events, key=keyfunc))
    if len(events) < len(list(journal)):
        LOGGER.debug(
            "Corrupted records in UsnJrnl, some events might be missing.")
    return [journal_event(g) for g in event_groups]
python
{ "resource": "" }
q43034
journal_event
train
def journal_event(events):
    """Group multiple events into a single one.

    The first event supplies the identifying fields; reasons and file
    attributes are the union over the whole group.
    """
    first = events[0]
    merged_reasons = set(chain.from_iterable(e.reasons for e in events))
    merged_attrs = set(chain.from_iterable(e.file_attributes for e in events))
    return JrnlEvent(first.file_reference_number,
                     first.parent_file_reference_number,
                     first.file_name,
                     first.timestamp,
                     list(merged_reasons),
                     list(merged_attrs))
python
{ "resource": "" }
q43035
generate_timeline
train
def generate_timeline(usnjrnl, filesystem_content):
    """Aggregates the data collected from the USN journal
    and the filesystem content.

    Yields a UsnJrnlEvent for each journal event whose file can be
    resolved against the filesystem content; unresolvable events are
    logged at debug level and skipped.
    """
    # Index the journal events by inode so deleted-folder lookups can
    # walk the journal itself.
    journal_content = defaultdict(list)
    for event in usnjrnl:
        journal_content[event.inode].append(event)
    for event in usnjrnl:
        try:
            dirent = lookup_dirent(event, filesystem_content, journal_content)
            yield UsnJrnlEvent(
                dirent.inode, dirent.path, dirent.size, dirent.allocated,
                event.timestamp, event.changes, event.attributes)
        except LookupError as error:
            # File could not be located: keep a trace but continue.
            LOGGER.debug(error)
python
{ "resource": "" }
q43036
lookup_dirent
train
def lookup_dirent(event, filesystem_content, journal_content):
    """Lookup the dirent given a journal event.

    Resolution order: an existing dirent whose path ends with the
    event's file name, then a still-allocated parent folder, then a
    deleted parent folder reconstructed from the journal. The two
    fallbacks synthesize a placeholder Dirent (size -1, unallocated).

    Raises:
        LookupError: when the file cannot be located at all.
    """
    for dirent in filesystem_content[event.inode]:
        if dirent.path.endswith(event.name):
            return dirent
    # Not on disk anymore: rebuild the path from the parent folder.
    path = lookup_folder(event, filesystem_content)
    if path is not None:
        return Dirent(event.inode, path, -1, None, False, 0, 0, 0, 0)
    path = lookup_deleted_folder(event, filesystem_content, journal_content)
    if path is not None:
        return Dirent(event.inode, path, -1, None, False, 0, 0, 0, 0)
    raise LookupError("File %s not found" % event.name)
python
{ "resource": "" }
q43037
lookup_folder
train
def lookup_folder(event, filesystem):
    """Lookup the parent folder in the filesystem content.

    Returns the event's full path joined onto the first allocated
    directory dirent of the parent inode, or None when no such folder
    exists.
    """
    for entry in filesystem[event.parent_inode]:
        if entry.type == 'd' and entry.allocated:
            return ntpath.join(entry.path, event.name)
python
{ "resource": "" }
q43038
lookup_deleted_folder
train
def lookup_deleted_folder(event, filesystem, journal):
    """Lookup the parent folder in the journal content.

    Recursively follows directory-deletion events to rebuild the path of
    a parent folder that no longer exists on disk; when no deletion
    event is recorded, falls back to the live filesystem lookup.

    NOTE(review): the loop returns on its first iteration, so only the
    first matching deletion event for the parent inode is considered.
    """
    # Deletion events for the parent directory recorded in the journal.
    folder_events = (e for e in journal[event.parent_inode]
                     if 'DIRECTORY' in e.attributes and 'FILE_DELETE' in e.changes)
    for folder_event in folder_events:
        # Recurse upwards to rebuild the deleted ancestor's path.
        path = lookup_deleted_folder(folder_event, filesystem, journal)
        return ntpath.join(path, event.name)
    return lookup_folder(event, filesystem)
python
{ "resource": "" }
q43039
FSTimeline._visit_filesystem
train
def _visit_filesystem(self):
    """Walks through the filesystem content.

    Yields a Dirent for the root folder (which filesystem_walk omits)
    followed by one Dirent per entry reported by the TSK walk of the
    first root partition.
    """
    self.logger.debug("Parsing File System content.")
    root_partition = self._filesystem.inspect_get_roots()[0]
    yield from self._root_dirent()
    for entry in self._filesystem.filesystem_walk(root_partition):
        yield Dirent(
            entry['tsk_inode'],
            self._filesystem.path('/' + entry['tsk_name']),
            entry['tsk_size'],
            entry['tsk_type'],
            # The allocated flag is a bit in the TSK flags bitmask.
            True if entry['tsk_flags'] & TSK_ALLOC else False,
            timestamp(entry['tsk_atime_sec'], entry['tsk_atime_nsec']),
            timestamp(entry['tsk_mtime_sec'], entry['tsk_mtime_nsec']),
            timestamp(entry['tsk_ctime_sec'], entry['tsk_ctime_nsec']),
            timestamp(entry['tsk_crtime_sec'], entry['tsk_crtime_nsec']))
python
{ "resource": "" }
q43040
FSTimeline._root_dirent
train
def _root_dirent(self):
    """Returns the root folder dirent as filesystem_walk API doesn't."""
    fstat = self._filesystem.stat('/')
    # The trailing 0 stands in for the creation time, which stat('/')
    # does not report.
    yield Dirent(fstat['ino'], self._filesystem.path('/'), fstat['size'],
                 'd', True,
                 timestamp(fstat['atime'], 0),
                 timestamp(fstat['mtime'], 0),
                 timestamp(fstat['ctime'], 0),
                 0)
python
{ "resource": "" }
q43041
NTFSTimeline.usnjrnl_timeline
train
def usnjrnl_timeline(self):
    """Iterates over the changes occurred within the filesystem.

    Yields UsnJrnlEvent namedtuples containing:

        file_reference_number: known in Unix FS as inode.
        path: full path of the file.
        size: size of the file in bytes if recoverable.
        allocated: whether the file exists or it has been deleted.
        timestamp: timestamp of the change.
        changes: list of changes applied to the file.
        attributes: list of file attributes.
    """
    # Index the on-disk dirents by inode so journal events can be
    # resolved against the filesystem content.
    filesystem_content = defaultdict(list)
    self.logger.debug("Extracting Update Sequence Number journal.")
    journal = self._read_journal()
    for dirent in self._visit_filesystem():
        filesystem_content[dirent.inode].append(dirent)
    self.logger.debug("Generating timeline.")
    yield from generate_timeline(journal, filesystem_content)
python
{ "resource": "" }
q43042
NTFSTimeline._read_journal
train
def _read_journal(self):
    """Extracts the USN journal from the disk and parses its content."""
    root = self._filesystem.inspect_get_roots()[0]
    inode = self._filesystem.stat('C:\\$Extend\\$UsnJrnl')['ino']
    # Download the journal into a temporary file so the parser can read
    # it from a local path; buffering=0 keeps writes unbuffered.
    with NamedTemporaryFile(buffering=0) as tempfile:
        self._filesystem.download_inode(root, inode, tempfile.name)
        journal = usn_journal(tempfile.name)
        return parse_journal(journal)
python
{ "resource": "" }
q43043
ProcessList.put
train
def put(self, stream, cmd):
    """
    Spawn a new background process

    Args:
        stream: stream descriptor dict; stream['id'] keys the queue.
        cmd: command used to start the player process via self.call().

    Raises:
        QueueDuplicate: when a process for this stream already exists.
        QueueFull: when max_size processes are already tracked.
    """
    if len(self.q) < self.max_size:
        if stream['id'] in self.q:
            raise QueueDuplicate
        p = self.call(stream, cmd)
        self.q[stream['id']] = p
    else:
        raise QueueFull
python
{ "resource": "" }
q43044
ProcessList.get_finished
train
def get_finished(self):
    """
    Clean up terminated processes and return the list of their ids.

    Polls every tracked process; those that have exited are removed
    from the queue and their ids returned.
    """
    # Collect first, then pop, so the dict is not mutated mid-iteration.
    # Also fixed the `!= None` comparison to the idiomatic `is not None`.
    finished = [idf for idf, proc in self.q.items() if proc.poll() is not None]
    for idf in finished:
        self.q.pop(idf)
    return finished
python
{ "resource": "" }
q43045
ProcessList.get_stdouts
train
def get_stdouts(self):
    """
    Get the list of stdout handles of every tracked process.
    """
    return [proc.stdout for proc in self.q.values()]
python
{ "resource": "" }
q43046
ProcessList.terminate_process
train
def terminate_process(self, idf):
    """
    Terminate a process by id.

    Returns the process object, or None when the id is unknown or the
    process could not be terminated.
    """
    try:
        proc = self.q.pop(idf)
        proc.terminate()
        return proc
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        return None
python
{ "resource": "" }
q43047
ProcessList.terminate
train
def terminate(self):
    """
    Terminate all processes and reset the queue.
    """
    for proc in self.q.values():
        try:
            proc.terminate()
        except Exception:
            # Best effort: the process may already have exited.
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            pass
    self.q = {}
python
{ "resource": "" }
q43048
StreamList.init
train
def init(self, s): """ Initialize the text interface """ # Hide cursor curses.curs_set(0) self.s = s self.s.keypad(1) self.set_screen_size() self.pads = {} self.offsets = {} self.init_help() self.init_streams_pad() self.current_pad = 'streams' self.set_title(TITLE_STRING) self.got_g = False signal.signal(28, self.resize) if self.config.CHECK_ONLINE_ON_START: self.check_online_streams() self.set_status('Ready')
python
{ "resource": "" }
q43049
StreamList.resize
train
def resize(self, signum, obj):
    """ handler for SIGWINCH

    Clears and rebuilds every pad for the new terminal size, restoring
    the previously highlighted stream line.
    """
    self.s.clear()
    # Remember the cursor row so the selection survives the redraw.
    stream_cursor = self.pads['streams'].getyx()[0]
    for pad in self.pads.values():
        pad.clear()
    self.s.refresh()
    self.set_screen_size()
    self.set_title(TITLE_STRING)
    self.init_help()
    self.init_streams_pad()
    # Restore the selection without refreshing yet; show() redraws all.
    self.move(stream_cursor, absolute=True, pad_name='streams', refresh=False)
    self.s.refresh()
    self.show()
python
{ "resource": "" }
q43050
StreamList.set_screen_size
train
def set_screen_size(self):
    """
    Setup screen size and padding

    We have need 2 free lines at the top and 2 free lines at the bottom
    """
    height, width = self.getheightwidth()
    curses.resizeterm(height, width)
    self.pad_x = 0
    self.max_y, self.max_x = (height - 1, width - 1)
    # Pads leave room for the header and footer lines.
    self.pad_h = height - 3
    self.pad_w = width - 2 * self.pad_x
python
{ "resource": "" }
q43051
StreamList.set_title
train
def set_title(self, msg):
    """ Set first header line text

    Drawn highlighted (reverse video) on the very first screen row.
    """
    self.s.move(0, 0)
    self.overwrite_line(msg, curses.A_REVERSE)
python
{ "resource": "" }
q43052
StreamList.set_header
train
def set_header(self, msg):
    """ Set second head line text

    Drawn in normal video on the second screen row.
    """
    self.s.move(1, 0)
    self.overwrite_line(msg, attr=curses.A_NORMAL)
python
{ "resource": "" }
q43053
StreamList.set_footer
train
def set_footer(self, msg, reverse=True):
    """ Set first footer line text

    Args:
        msg: text to display on the line above the last screen row.
        reverse: when True, draw highlighted (reverse video).
    """
    self.s.move(self.max_y - 1, 0)
    if reverse:
        self.overwrite_line(msg, attr=curses.A_REVERSE)
    else:
        self.overwrite_line(msg, attr=curses.A_NORMAL)
python
{ "resource": "" }
q43054
StreamList.show_help
train
def show_help(self):
    """
    Redraw Help screen and wait for any input to leave
    """
    # Wipe everything below the title, then draw the help chrome.
    self.s.move(1, 0)
    self.s.clrtobot()
    self.set_header('Help'.center(self.pad_w))
    self.set_footer(' ESC or \'q\' to return to main menu')
    self.s.refresh()
    # Switch the active pad so key handling scrolls the help text.
    self.current_pad = 'help'
    self.refresh_current_pad()
python
{ "resource": "" }
q43055
StreamList.init_streams_pad
train
def init_streams_pad(self, start_row=0):
    """ Create a curses pad and populate it with a line by stream

    Args:
        start_row: row to leave the pad cursor on (the highlighted line).
    """
    y = 0
    # At least one row, even when there is no stream to display.
    pad = curses.newpad(max(1, len(self.filtered_streams)), self.pad_w)
    pad.keypad(1)
    for s in self.filtered_streams:
        pad.addstr(y, 0, self.format_stream_line(s))
        y += 1
    self.offsets['streams'] = 0
    pad.move(start_row, 0)
    if not self.no_stream_shown:
        # Highlight the currently selected line.
        pad.chgat(curses.A_REVERSE)
    self.pads['streams'] = pad
python
{ "resource": "" }
q43056
StreamList.move
train
def move(self, direction, absolute=False, pad_name=None, refresh=True): """ Scroll the current pad direction : (int) move by one in the given direction -1 is up, 1 is down. If absolute is True, go to position direction. Behaviour is affected by cursor_line and scroll_only below absolute : (bool) """ # pad in this lists have the current line highlighted cursor_line = [ 'streams' ] # pads in this list will be moved screen-wise as opposed to line-wise # if absolute is set, will go all the way top or all the way down depending # on direction scroll_only = [ 'help' ] if not pad_name: pad_name = self.current_pad pad = self.pads[pad_name] if pad_name == 'streams' and self.no_streams: return (row, col) = pad.getyx() new_row = row offset = self.offsets[pad_name] new_offset = offset if pad_name in scroll_only: if absolute: if direction > 0: new_offset = pad.getmaxyx()[0] - self.pad_h + 1 else: new_offset = 0 else: if direction > 0: new_offset = min(pad.getmaxyx()[0] - self.pad_h + 1, offset + self.pad_h) elif offset > 0: new_offset = max(0, offset - self.pad_h) else: if absolute and direction >= 0 and direction < pad.getmaxyx()[0]: if direction < offset: new_offset = direction elif direction > offset + self.pad_h - 2: new_offset = direction - self.pad_h + 2 new_row = direction else: if direction == -1 and row > 0: if row == offset: new_offset -= 1 new_row = row-1 elif direction == 1 and row < len(self.filtered_streams)-1: if row == offset + self.pad_h - 2: new_offset += 1 new_row = row+1 if pad_name in cursor_line: pad.move(row, 0) pad.chgat(curses.A_NORMAL) self.offsets[pad_name] = new_offset pad.move(new_row, 0) if pad_name in cursor_line: pad.chgat(curses.A_REVERSE) if pad_name == 'streams': self.redraw_stream_footer() if refresh: self.refresh_current_pad()
python
{ "resource": "" }
q43057
StreamList.redraw_current_line
train
def redraw_current_line(self):
    """ Redraw the highlighted line """
    if self.no_streams:
        return
    # Rebuild the selected stream's text and restore the reverse-video highlight.
    row = self.pads[self.current_pad].getyx()[0]
    s = self.filtered_streams[row]
    pad = self.pads['streams']
    pad.move(row, 0)
    pad.clrtoeol()
    pad.addstr(row, 0, self.format_stream_line(s), curses.A_REVERSE)
    pad.chgat(curses.A_REVERSE)
    pad.move(row, 0)
    self.refresh_current_pad()
python
{ "resource": "" }
q43058
WebGetSeleniumChromiumMixin.stepThroughJsWaf_selenium_chromium
train
def stepThroughJsWaf_selenium_chromium(self, url, titleContains='', titleNotContains=''):
    '''
    Use Selenium+SeleniumChromium to access a resource behind cloudflare protection.

    Params:
        ``url`` - The URL to access that is protected by cloudflare
        ``titleContains`` - A string that is in the title of the protected page,
            and NOT the cloudflare intermediate page. The presence of this string
            in the page title is used to determine whether the cloudflare
            protection has been successfully penetrated.
        ``titleNotContains`` - Inverse of ``titleContains``: a string that is in
            the title of the intermediate page only. Exactly one of
            ``titleContains``/``titleNotContains`` must be given.

    The current WebGetRobust headers are installed into the selenium browser,
    which is then used to access the protected resource. Once the protected
    page has properly loaded, the cloudflare access cookie is then extracted
    from the selenium browser, and installed back into the WebGetRobust
    instance, so it can continue to use the cloudflare auth in normal requests.

    Returns True if the protected page was reached within 45 seconds,
    False otherwise.
    '''
    # Exactly one of the two title conditions must be provided.
    if (not titleContains) and (not titleNotContains):
        raise ValueError("You must pass either a string the title should contain, or a string the title shouldn't contain!")
    if titleContains and titleNotContains:
        raise ValueError("You can only pass a single conditional statement!")
    self.log.info("Attempting to access page through cloudflare browser verification.")
    # Lazily create the driver and push our current headers/cookies into it.
    if not self.selenium_chromium_driver:
        self._initSeleniumChromiumWebDriver()
    self._syncIntoSeleniumChromiumWebDriver()
    self.selenium_chromium_driver.get(url)
    if titleContains:
        condition = EC.title_contains(titleContains)
    elif titleNotContains:
        condition = SeleniumCommon.title_not_contains(titleNotContains)
    else:
        # Unreachable: guarded by the two ValueError checks above.
        raise ValueError("Wat?")
    try:
        # Wait up to 45 seconds for the title condition to be satisfied.
        WebDriverWait(self.selenium_chromium_driver, 45).until(condition)
        success = True
        self.log.info("Successfully accessed main page!")
    except TimeoutException:
        self.log.error("Could not pass through cloudflare blocking!")
        success = False
    # Add cookies to cookiejar
    self._syncOutOfSeleniumChromiumWebDriver()
    self._syncCookiesFromFile()
    return success
python
{ "resource": "" }
q43059
Loader.scope
train
def scope(self, key, *tags, default=None):
    """Only apply tags and default for top-level key, effectively scoping the tags.

    :param key: top-level config key the tags/default apply to
    :param tags: tag names; normalized to start with "!"
    :param default: default tag for plain values under *key*; normalized to
        start with "!" when given
    """
    # Each scope is stored as a two-element list: [tags, default].
    # NOTE(review): assumes self._scopes yields a fresh empty list for new
    # keys (e.g. a defaultdict(list)) — confirm against the class definition.
    scope = self._scopes[key]
    tags = self._ensure_exclamation(tags)
    default = default if not default or default.startswith("!") else "!" + default
    if scope:
        # Scope already exists: extend tags; only replace default if one given.
        scope[0] = scope[0] + tags
        scope[1] = default if default else scope[1]
    else:
        scope.append(tags)
        scope.append(default)
python
{ "resource": "" }
q43060
Loader.load
train
def load(self, content): """Parse yaml content.""" # Try parsing the YAML with global tags try: config = yaml.load(content, Loader=self._loader(self._global_tags)) except yaml.YAMLError: raise InvalidConfigError(_("Config is not valid yaml.")) # Try extracting just the tool portion try: config = config[self.tool] except (TypeError, KeyError): return None # If no scopes, just apply global default if not isinstance(config, dict): config = self._apply_default(config, self._global_default) else: # Figure out what scopes exist scoped_keys = set(key for key in self._scopes) # For every scope for key in config: # If scope has custom tags, apply if key in scoped_keys: # local tags, and local default tags, default = self._scopes[key] # Inherit global default if no local default if not default: default = self._global_default config[key] = self._apply_default(config[key], default) self._apply_scope(config[key], tags) # Otherwise just apply global default else: config[key] = self._apply_default(config[key], self._global_default) self._validate(config) return config
python
{ "resource": "" }
q43061
Loader._loader
train
def _loader(self, tags):
    """Create a yaml Loader class whose unknown tags become TaggedValues.

    :param tags: tag names passed through to every constructed TaggedValue
    """
    class ConfigLoader(SafeLoader):
        pass

    def _construct_tagged(loader, prefix, node):
        # Any tag (empty prefix matches all) is wrapped in a TaggedValue.
        return TaggedValue(node.value, node.tag, *tags)

    ConfigLoader.add_multi_constructor("", _construct_tagged)
    return ConfigLoader
python
{ "resource": "" }
q43062
Loader._validate
train
def _validate(self, config):
    """Check whether every TaggedValue has a valid tag, otherwise raise InvalidConfigError

    :param config: parsed config tree (dicts/lists/TaggedValues/plain values)
    :raises InvalidConfigError: when a TaggedValue carries an unknown tag
    """
    if isinstance(config, dict):
        # Recursively validate each item in the config
        for val in config.values():
            self._validate(val)
    elif isinstance(config, list):
        # Recursively validate each item in the config
        for item in config:
            self._validate(item)
    elif isinstance(config, TaggedValue):
        tagged_value = config
        # if tagged_value is invalid, error
        if tagged_value.tag not in tagged_value.tags:
            raise InvalidConfigError(_("{} is not a valid tag for {}".format(tagged_value.tag, self.tool)))
python
{ "resource": "" }
q43063
Loader._apply_default
train
def _apply_default(self, config, default): """ Apply default value to every str in config. Also ensure every TaggedValue has default in .tags """ # No default, nothing to be done here if not default: return config # If the entire config is just a string, return default TaggedValue if isinstance(config, str): return TaggedValue(config, default, default, *self._global_tags) if isinstance(config, dict): # Recursively apply defaults for each item in the config for key, val in config.items(): config[key] = self._apply_default(val, default) elif isinstance(config, list): # Recursively apply defaults for each item in the config for i, val in enumerate(config): config[i] = self._apply_default(val, default) elif isinstance(config, TaggedValue): # Make sure each TaggedValue knows about the default tag config.tags.add(default) return config
python
{ "resource": "" }
q43064
Loader._apply_scope
train
def _apply_scope(self, config, tags):
    """Add locally scoped tags to config

    :param config: parsed config subtree
    :param tags: tag names added to every TaggedValue in the subtree
    """
    if isinstance(config, dict):
        # Recursively _apply_scope for each item in the config
        for val in config.values():
            self._apply_scope(val, tags)
    elif isinstance(config, list):
        # Recursively _apply_scope for each item in the config
        for item in config:
            self._apply_scope(item, tags)
    elif isinstance(config, TaggedValue):
        tagged_value = config
        # add all local tags
        tagged_value.tags |= set(tags)
        # Expose each tag as a (False) attribute so lookups never AttributeError.
        for tag in tags:
            if not hasattr(tagged_value, tag):
                setattr(tagged_value, tag, False)
python
{ "resource": "" }
q43065
Overload.has_args
train
def has_args():
    ''' returns true if the decorator invocation had arguments passed
        to it before being sent a function to decorate

    Works by scanning the current traceback's source lines for the last
    occurrence of "@overload(" (with args) vs "@overload" (without).
    '''
    no_args_syntax = '@overload'
    args_syntax = no_args_syntax + '('
    # Seed with (-1, -1) so max() is well-defined when nothing is found.
    args, no_args = [(-1,-1)], [(-1,-1)]
    for i, line in enumerate(Overload.traceback_lines()):
        if args_syntax in line:
            args.append((i, line.find(args_syntax)))
        if no_args_syntax in line:
            no_args.append((i, line.find(no_args_syntax)))
    # Keep the latest (deepest) occurrence of each form.
    args, no_args = max(args), max(no_args)
    if sum(args)+sum(no_args) == -4:
        # couldnt find invocation
        return False
    # The form found later (or further right) in the traceback wins.
    return args >= no_args
python
{ "resource": "" }
q43066
Overload.identify
train
def identify(fn):
    ''' returns a tuple that is used to match functions to their neighbors
        in their resident namespaces '''
    return (
        fn.__globals__['__name__'],  # module namespace
        getattr(fn, '__qualname__', getattr(fn, '__name__', ''))  # class and function namespace
    )

def __init__(self, fn):
    """Wrap *fn*; pick a backup plan from the overload cache when possible."""
    self.validate_function(fn)
    self.configured = False
    self.has_backup_plan = False
    if self.has_args():
        # Invoked as @overload(...): fn itself is the fallback.
        self.backup_plan = fn
    else:
        # Invoked as bare @overload: look up a previously registered
        # function with the same namespace identity as the fallback.
        self.id = self.identify(fn)
        self.backup_plan = big.overload._cache.get(self.id, None)
        #if self.id in overload._cache:
        #    self.backup_plan = self.configure_with(fn)
    #wraps(fn)(self)

def __call__(self, *args, **kwargs):
    """Run the primary function; fall back to the backup plan on failure."""
    #print(locals())
    try:
        # try running like normal
        return self.fn(*args, **kwargs)
    except Exception as ex:
        if self.has_backup_plan:
            return self.backup_plan(*args, **kwargs)  # run backup plan
        elif self.configured:
            raise ex  # no backup plan, abort
        else:
            # complete unconfigured setup
            self.configure_with(*args, **kwargs)
            return self
python
{ "resource": "" }
q43067
Overload.overload
train
def overload(fn, function_to_overload=None):
    ''' This function decorator allows you to overload already defined
    functions.

    The execution of overloaded functions is done by trying the original
    version first and if it fails, the variables are handed off to the
    overloading function. While this does seem like a sloppy way to go about
    choosing the execution of functions, this gives you far more control in
    terms of how you want each function to be selected and allows you to
    program for the "ideal situation" first. With this approach, you can
    simply require very specific conditions that would apply to a majority
    of the use cases of the function and allow the code to mitigate edge
    case scenarios only when the edge cases show up vs checking for edge
    cases on every single usage of the function. This approach rewards
    functions that are designed with proper input validation, which you
    should be adding anyways.

    Example usage:

        def my_print(arg):
            print('running original my_print')
            print(arg)

        @overload
        def my_print(arg):
            assert type(arg) == list
            print('running list my_print')
            print(', '.join(str(i) for i in arg))

        my_print(list(range(3)))   # running list my_print -> 0, 1, 2
        my_print(tuple(range(3)))  # running original my_print -> (0, 1, 2)
    '''
    # Bare @overload: dispatch on whether the invocation carried arguments.
    if function_to_overload is None:
        if Overload.has_args():
            return Overload.configured_decorator(fn)
        else:
            return Overload.default_decorator(fn)
    else:
        # overload(fn, target): explicitly overload *target* with *fn*.
        return Overload.configured_decorator(function_to_overload)(fn)
python
{ "resource": "" }
q43068
package_to_requirement
train
def package_to_requirement(package_name):
    """Translate a distribution name like ``Foo-1.2`` into ``Foo==1.2``.

    A trailing ``-dev`` marker is treated as the version. Names without a
    recognizable version suffix are returned unchanged.
    """
    match = re.search(r'^(.*?)-(dev|\d.*)', package_name)
    if match:
        # Both groups are guaranteed non-empty when the pattern matches.
        return '%s==%s' % match.groups()
    return package_name
python
{ "resource": "" }
q43069
string_range
train
def string_range(last):
    """Yield "a", "b", ... up to and including *last*.

    Works for single letters ("a" to "z") and for multi-letter endpoints
    ("a", ..., "z", "aa", ..., last) by growing the combination width.
    """
    for width in range(1, len(last) + 1):
        for combo in product(string.ascii_lowercase, repeat=width):
            candidate = ''.join(combo)
            yield candidate
            if candidate == last:
                return
python
{ "resource": "" }
q43070
PackageFinder._get_mirror_urls
train
def _get_mirror_urls(self, mirrors=None, main_mirror_url=None):
    """Retrieves a list of URLs from the main mirror DNS entry
    unless a list of mirror URLs are passed.

    :param mirrors: optional iterable of mirror hosts/URLs
    :param main_mirror_url: DNS entry used by get_mirrors() when no
        explicit mirrors are given
    :return: deduplicated list of normalized ".../simple/" mirror URLs
    """
    if not mirrors:
        mirrors = get_mirrors(main_mirror_url)
    # Should this be made "less random"? E.g. netselect like?
    random.shuffle(mirrors)
    mirror_urls = set()
    for mirror_url in mirrors:
        # Make sure we have a valid URL.
        # BUG FIX: the original tested `("http://" or "https://" or
        # "file://") in mirror_url`, which short-circuits to just
        # "http://" and therefore prefixed https:// and file:// URLs
        # with a bogus "http://". Check each scheme individually.
        if not any(scheme in mirror_url
                   for scheme in ("http://", "https://", "file://")):
            mirror_url = "http://%s" % mirror_url
        # BUG FIX: URLs already ending in "/simple/" failed the
        # endswith("/simple") test and were suffixed a second time;
        # strip trailing slashes before checking.
        if not mirror_url.rstrip("/").endswith("/simple"):
            mirror_url = "%s/simple/" % mirror_url
        mirror_urls.add(mirror_url)
    return list(mirror_urls)
python
{ "resource": "" }
q43071
HTMLPage._get_content_type
train
def _get_content_type(url):
    """Get the Content-Type of the given url, using a HEAD request

    Returns '' for unsupported schemes or non-200 HTTP responses.
    """
    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
    if not scheme in ('http', 'https', 'ftp', 'ftps'):
        ## FIXME: some warning or something?
        ## assertion error?
        return ''
    req = Urllib2HeadRequest(url, headers={'Host': netloc})
    resp = urlopen(req)
    try:
        # FTP responses have no meaningful status code, hence the scheme check.
        if hasattr(resp, 'code') and resp.code != 200 and scheme not in ('ftp', 'ftps'):
            ## FIXME: doesn't handle redirects
            return ''
        return resp.info().get('content-type', '')
    finally:
        # Always release the connection, even on early return.
        resp.close()
python
{ "resource": "" }
q43072
HTMLPage.explicit_rel_links
train
def explicit_rel_links(self, rels=('homepage', 'download')):
    """Yields all links with the given relations

    :param rels: rel attribute values to match (any one suffices)
    """
    for match in self._rel_re.finditer(self.content):
        found_rels = match.group(1).lower().split()
        # for/else: fall through to `continue` only when no rel matched.
        for rel in rels:
            if rel in found_rels:
                break
        else:
            continue
        match = self._href_re.search(match.group(0))
        if not match:
            continue
        # href may be quoted with ", ', or unquoted — one group per form.
        url = match.group(1) or match.group(2) or match.group(3)
        url = self.clean_link(urlparse.urljoin(self.base_url, url))
        yield Link(url, self)
python
{ "resource": "" }
q43073
BaseBuilder._metahash
train
def _metahash(self): """Checksum hash of all the inputs to this rule. Output is invalid until collect_srcs and collect_deps have been run. In theory, if this hash doesn't change, the outputs won't change either, which makes it useful for caching. """ # BE CAREFUL when overriding/extending this method. You want to copy # the if(cached)/return(cached) part, then call this method, then at # the end update the cached metahash. Just like this code, basically, # only you call the method from the base class in the middle of it. If # you get this wrong it could result in butcher not noticing changed # inputs between runs, which could cause really nasty problems. # TODO(ben): the above warning seems avoidable with better memoization if self._cached_metahash: return self._cached_metahash # If you are extending this function in a subclass, # here is where you do: # BaseBuilder._metahash(self) log.debug('[%s]: Metahash input: %s', self.address, unicode(self.address)) mhash = util.hash_str(unicode(self.address)) log.debug('[%s]: Metahash input: %s', self.address, self.rule.params) mhash = util.hash_str(str(self.rule.params), hasher=mhash) for src in self.rule.source_files or []: log.debug('[%s]: Metahash input: %s', self.address, src) mhash = util.hash_str(src, hasher=mhash) mhash = util.hash_file(self.srcs_map[src], hasher=mhash) for dep in self.rule.composed_deps() or []: dep_rule = self.rule.subgraph.node[dep]['target_obj'] for item in dep_rule.output_files: log.debug('[%s]: Metahash input: %s', self.address, item) item_path = os.path.join(self.buildroot, item) mhash = util.hash_str(item, hasher=mhash) mhash = util.hash_file(item_path, hasher=mhash) self._cached_metahash = mhash return mhash
python
{ "resource": "" }
q43074
BaseBuilder.collect_outs
train
def collect_outs(self): """Collect and store the outputs from this rule.""" # TODO: this should probably live in CacheManager. for outfile in self.rule.output_files or []: outfile_built = os.path.join(self.buildroot, outfile) if not os.path.exists(outfile_built): raise error.TargetBuildFailed( self.address, 'Output file is missing: %s' % outfile) #git_sha = gitrepo.RepoState().GetRepo(self.address.repo).repo.commit() # git_sha is insufficient, and is actually not all that useful. # More factors to include in hash: # - commit/state of source repo of all dependencies # (or all input files?) # - Actually I like that idea: hash all the input files! # - versions of build tools used (?) metahash = self._metahash() log.debug('[%s]: Metahash: %s', self.address, metahash.hexdigest()) # TODO: record git repo state and buildoptions in cachemgr # TODO: move cachemgr to outer controller(?) self.cachemgr.putfile(outfile_built, self.buildroot, metahash)
python
{ "resource": "" }
q43075
BaseBuilder.is_cached
train
def is_cached(self): """Returns true if this rule is already cached.""" # TODO: cache by target+hash, not per file. try: for item in self.rule.output_files: log.info(item) self.cachemgr.in_cache(item, self._metahash()) except cache.CacheMiss: log.info('[%s]: Not cached.', self.address) return False else: log.info('[%s]: found in cache.', self.address) return True
python
{ "resource": "" }
q43076
BaseBuilder.get_from_cache
train
def get_from_cache(self):
    """See if this rule has already been built and cached.

    Materializes each cached output file into the buildroot by
    hardlink-or-copy.
    """
    for item in self.rule.output_files:
        dstpath = os.path.join(self.buildroot, item)
        self.linkorcopy(
            self.cachemgr.path_in_cache(item, self._metahash()),
            dstpath)
python
{ "resource": "" }
q43077
BaseBuilder.linkorcopy
train
def linkorcopy(self, src, dst):
    """hardlink src file to dst if possible, otherwise copy.

    :param src: source file path
    :param dst: destination file path (parent dirs created on demand)
    """
    if os.path.isdir(dst):
        # NOTE(review): when dst is an existing directory we only warn and
        # fall through — os.link/copy2 below would then target the directory
        # itself. Confirm callers never pass a directory here.
        log.warn('linkorcopy given a directory as destination. '
                 'Use caution.')
        log.debug('src: %s dst: %s', src, dst)
    elif os.path.exists(dst):
        # Replace any stale file so os.link does not fail with EEXIST.
        os.unlink(dst)
    elif not os.path.exists(os.path.dirname(dst)):
        os.makedirs(os.path.dirname(dst))
    if self.linkfiles:
        log.debug('Linking: %s -> %s', src, dst)
        os.link(src, dst)
    else:
        log.debug('Copying: %s -> %s', src, dst)
        shutil.copy2(src, dst)
python
{ "resource": "" }
q43078
BaseBuilder.rulefor
train
def rulefor(self, addr):
    """Return the rule object stored in our deps graph for *addr*.

    :param addr: label or address; normalized via makeaddress first
    """
    graph_key = self.rule.makeaddress(addr)
    return self.rule.subgraph.node[graph_key]['target_obj']
python
{ "resource": "" }
q43079
BaseTarget.composed_deps
train
def composed_deps(self):
    """Dependencies of this build target, as resolved addresses.

    Returns None when the rule declares no 'deps' parameter at all;
    an empty list when 'deps' is present but empty/None.
    """
    if 'deps' not in self.params:
        return None
    declared = self.params['deps'] or []
    return [self.makeaddress(dep) for dep in declared]
python
{ "resource": "" }
q43080
BaseTarget.source_files
train
def source_files(self):
    """This rule's source files, flattened; None when no srcs were declared."""
    srcs = self.params.get('srcs')
    if srcs is not None:
        return util.flatten(srcs)
python
{ "resource": "" }
q43081
BaseTarget.makeaddress
train
def makeaddress(self, label):
    """Turn a label into an Address with current context.

    A label that only carries a :target part inherits the repo and path
    of this target's own address.
    """
    addr = address.new(label)
    # Fall back to our own context for any missing (falsy) component.
    addr.repo = addr.repo or self.address.repo
    addr.path = addr.path or self.address.path
    return addr
python
{ "resource": "" }
q43082
SignedViewSetMixin.get_queryset
train
def get_queryset(self):
    """Return the allowed queryset for this sign or the default one.

    When a 'sign' query parameter is present, it is unsigned into a list
    of filter/action dicts scoped to this viewset's model; the first
    filter set that applies cleanly wins. Any bad signature or
    inapplicable filters fall back to the superclass queryset.
    """
    if 'sign' in self.request.query_params:
        try:
            # The signature is bound to "app_label.model_name" so a sign
            # minted for one model cannot be replayed against another.
            filter_and_actions = unsign_filters_and_actions(
                self.request.query_params['sign'],
                '{}.{}'.format(
                    self.queryset.model._meta.app_label,
                    self.queryset.model._meta.model_name,
                )
            )
        except signing.BadSignature:
            return super(SignedViewSetMixin, self).get_queryset()
        else:
            for filtered_action in filter_and_actions:
                try:
                    qs = self.queryset.filter(**filtered_action['filters'])
                except FieldError:
                    # Filters referencing unknown fields are skipped.
                    continue
                return qs
    return super(SignedViewSetMixin, self).get_queryset()
python
{ "resource": "" }
q43083
slugify
train
def slugify(value, allow_unicode=False):
    """
    Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
    Remove characters that aren't alphanumerics, underscores, or hyphens.
    Convert to lowercase. Also strip leading and trailing whitespace.

    :param value: input to slugify; coerced to str first
    :param allow_unicode: keep unicode word characters instead of forcing ASCII
    :return: the slugified string
    """
    # BUG FIX: the original had a bare `value` expression statement (a no-op);
    # coerce to str so non-string inputs (ints, lazy strings) are handled.
    value = str(value)
    if allow_unicode:
        value = unicodedata.normalize('NFKC', value)
    else:
        # Decompose accents, then drop anything outside ASCII.
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    # Collapse runs of whitespace/hyphens into a single hyphen.
    return re.sub(r'[-\s]+', '-', value)
python
{ "resource": "" }
q43084
Configuration.items
train
def items(self):
    """Settings as key-value pairs.

    :return: list of (section_name, {option: raw_value}) tuples, one per
        configured section. raw=True keeps %-interpolation placeholders
        untouched.
    """
    # The original wrapped sections() in a redundant nested list
    # comprehension; iterate it directly.
    return [(section, dict(self.conf.items(section, raw=True)))
            for section in self.conf.sections()]
python
{ "resource": "" }
q43085
jarsign
train
def jarsign(storepass, keypass, keystore, source, alias, path=None):
    """
    Uses Jarsign to sign an apk target file using the provided keystore
    information.

    :param storepass(str) - keystore storepass
    :param keypass(str) - keystore keypass
    :param keystore(str) - keystore file path
    :param source(str) - apk path
    :param alias(str) - keystore alias
    :param path(str) - basedir to run the command
    """
    # Build the jarsigner invocation as an argv list (no shell involved).
    cmd = [
        'jarsigner',
        '-verbose',
        '-storepass', storepass,
        '-keypass', keypass,
        '-keystore', keystore,
        source,
        alias
    ]
    # Output is captured into jarsign.log; cwd is the provided basedir.
    common.run_cmd(cmd, log='jarsign.log', cwd=path)
python
{ "resource": "" }
q43086
get_default_keystore
train
def get_default_keystore(prefix='AG_'):
    """
    Gets the default keystore information based on environment variables
    and a prefix, falling back to the values in config.keystore.

    Environment variables consulted (for the default prefix 'AG_' these
    resolve to AG__KEYSTORE_PATH etc. — note the double underscore, since
    the prefix itself ends in '_'):
        $PREFIX_KEYSTORE_PATH, $PREFIX_KEYSTORE_STOREPASS,
        $PREFIX_KEYSTORE_KEYPASS, $PREFIX_KEYSTORE_ALIAS

    :param prefix(str) - prefix for the environment variables, default AG_.

    Returns:
        A tuple (path, storepass, keypass, alias).
    """
    defaults = config.keystore
    lookups = (
        ('PATH', defaults.path),
        ('STOREPASS', defaults.storepass),
        ('KEYPASS', defaults.keypass),
        ('ALIAS', defaults.alias),
    )
    return tuple(
        os.environ.get('%s_KEYSTORE_%s' % (prefix, suffix), fallback)
        for suffix, fallback in lookups
    )
python
{ "resource": "" }
q43087
get_highest_build_tool
train
def get_highest_build_tool(sdk_version=None):
    """
    Gets the highest build tool version based on major sdk version.

    :param sdk_version(int) - sdk version used as the major build tool
        version context; defaults to config.sdk_version.

    Returns:
        A string containing the build tool version
        (config.build_tool_version if none is installed).
    """
    if sdk_version is None:
        sdk_version = config.sdk_version
    android_home = os.environ.get('AG_MOBILE_SDK', os.environ.get('ANDROID_HOME'))
    build_tool_folder = '%s/build-tools' % android_home
    folder_list = os.listdir(build_tool_folder)
    versions = [folder for folder in folder_list
                if folder.startswith('%s.' % sdk_version)]
    if not versions:
        return config.build_tool_version
    # BUG FIX: os.listdir() returns entries in arbitrary order, so taking
    # the last element was not guaranteed to be the highest version. Sort
    # first (lexicographic order matches numeric order for same-width
    # version components like 23.0.2 / 23.0.3).
    return sorted(versions)[-1]
python
{ "resource": "" }
q43088
Command.rename_file
train
def rename_file(self, instance, field_name):
    """
    Renames a file and updates the model field to point to the new file.
    Returns True if a change has been made; otherwise False

    :param instance: model instance holding the file field
    :param field_name: name of the file field on the instance
    """
    # NOTE: `file` shadows the (py2) builtin of the same name.
    file = getattr(instance, field_name)
    if file:
        new_name = get_hashed_filename(file.name, file)
        if new_name != file.name:
            print(' Renaming "%s" to "%s"' % (file.name, new_name))
            # save=False: update the field/storage without saving the model.
            file.save(os.path.basename(new_name), file, save=False)
            return True
    return False
python
{ "resource": "" }
q43089
PeekPlatformServerHttpHookABC.addServerResource
train
def addServerResource(self, pluginSubPath: bytes, resource: BasicResource) -> None:
    """ Add Server Resource

    Add a custom implementation of a served http resource.

    :param pluginSubPath: The resource path where you want to serve this resource.
    :param resource: The resource to serve.
    :return: None
    """
    # Normalize the path: putChild expects a segment without surrounding slashes.
    pluginSubPath = pluginSubPath.strip(b'/')
    self.__rootServerResource.putChild(pluginSubPath, resource)
python
{ "resource": "" }
q43090
BaseHttpStreamWriter.write
train
def write(self, data: bytes) -> None:
    """
    Write the data.

    :param data: bytes to append to the stream; empty input is a no-op
    :raises WriteAfterFinishedError: when the stream is already finished
    :raises BaseWriteException: re-raised from the delegate; the stream is
        marked finished and the exception is remembered for later calls
    """
    if self.finished():
        # Surface the original failure (if any) rather than a generic error.
        if self._exc:
            raise self._exc
        raise WriteAfterFinishedError
    if not data:
        return
    try:
        self._delegate.write_data(data, finished=False)
    except BaseWriteException as e:
        # A failed write permanently finishes the stream.
        self._finished.set()
        if self._exc is None:
            self._exc = e
        raise
python
{ "resource": "" }
q43091
BaseHttpStreamWriter.flush
train
async def flush(self) -> None:
    """
    Give the writer a chance to flush the pending data
    out of the internal buffer.

    :raises BaseWriteException: re-raised from the delegate; the stream is
        marked finished and the exception is remembered for later calls
    """
    # Serialize concurrent flushes.
    async with self._flush_lock:
        if self.finished():
            if self._exc:
                raise self._exc
            # Finished cleanly: nothing left to flush.
            return
        try:
            await self._delegate.flush_buf()
        except asyncio.CancelledError:  # pragma: no cover
            # Never swallow task cancellation.
            raise
        except BaseWriteException as e:
            self._finished.set()
            if self._exc is None:
                self._exc = e
            raise
python
{ "resource": "" }
q43092
BaseHttpStreamWriter.finish
train
def finish(self, data: bytes=b"") -> None:
    """
    Finish the stream.

    :param data: optional final bytes written together with the terminator
    :raises WriteAfterFinishedError: when already finished and *data* given
    :raises BaseWriteException: re-raised from the delegate
    """
    if self.finished():
        if self._exc:
            raise self._exc
        # Idempotent when called again without data.
        if data:
            raise WriteAfterFinishedError
        return
    try:
        self._delegate.write_data(data, finished=True)
    except BaseWriteException as e:
        if self._exc is None:
            self._exc = e
        raise
    finally:
        # Mark finished whether or not the final write succeeded.
        self._finished.set()
python
{ "resource": "" }
q43093
register
train
def register(**kwargs):
    """Registers a notification_cls.

    Decorator factory: the wrapped class must subclass Notification and is
    registered with site_notifications, then returned unchanged.

    Note: **kwargs is accepted but currently unused.

    :raises RegisterNotificationError: when the wrapped class is not a
        Notification subclass
    """
    def _wrapper(notification_cls):
        if not issubclass(notification_cls, (Notification,)):
            raise RegisterNotificationError(
                f"Wrapped class must be a 'Notification' class. "
                f"Got '{notification_cls.__name__}'"
            )
        site_notifications.register(notification_cls=notification_cls)
        # Return the class unchanged so the decorator is transparent.
        return notification_cls
    return _wrapper
python
{ "resource": "" }
q43094
Address.__parse_target
train
def __parse_target(targetstr, current_repo=None):
    """Parse a build target string.

    General form: //repo[gitref]/dir/path:target.

    These are all valid:
      //repo
      //repo[a038fi31d9e8bc11582ef1b1b1982d8fc]
      //repo[a039aa30853298]:foo
      //repo/dir
      //repo[a037928734]/dir
      //repo/dir/path
      //repo/dir/path:foo
      :foo
      dir/path
      dir/path:foo
      dir:foo

    :param targetstr: target string in one of the forms above
    :param current_repo: repo used when the string names none
    :raises error.ButcherError: when the string does not parse
    :return: {'repo': ..., 'git_ref': ..., 'path': ..., 'target': ...}
    """
    # 'blah' -> ':blah'
    if not (':' in targetstr or '/' in targetstr):
        targetstr = ':%s' % targetstr
    match = re.match(
        r'^(?://(?P<repo>[\w-]+)(?:\[(?P<git_ref>.*)\])?)?'
        r'(?:$|/?(?P<path>[\w/-]+)?(?::?(?P<target>[\w-]+)?))',
        targetstr)
    try:
        groups = match.groupdict()
        # Fill in context/defaults for any missing component.
        if not groups['repo']:
            groups['repo'] = current_repo
        if not groups['git_ref']:
            groups['git_ref'] = 'develop'
        if not groups['target']:
            groups['target'] = 'all'
        if not groups['path']:
            groups['path'] = ''
    except AttributeError:
        # BUG FIX: the original error message had an unfilled %s
        # placeholder; interpolate the offending string.
        raise error.ButcherError(
            '"%s" is not a valid build target.' % targetstr)
    #log.debug('parse_target: %s -> %s', targetstr, groups)
    return groups
python
{ "resource": "" }
q43095
authenticated_session
train
def authenticated_session(username, password):
    """
    Given username and password, return an authenticated Yahoo `requests`
    session that can be used for further scraping requests. Throw an
    exception if authentication fails.

    :param username: Yahoo account username
    :param password: Yahoo account password
    """
    session = requests.Session()
    session.headers.update(headers())
    # Fetch the login page to obtain the form action and hidden fields.
    response = session.get(url())
    login_path = path(response.text)
    login_url = urljoin(response.url, login_path)
    login_post_data = post_data(response.text, username, password)
    response = session.post(login_url, data=login_post_data)
    # NOTE(review): failure is detected via the Connection: close response
    # header — a heuristic; confirm it still holds for the current site.
    if response.headers['connection'] == 'close':
        raise Exception('Authencation failed')
    return session
python
{ "resource": "" }
q43096
post_data
train
def post_data(page, username, password):
    """
    Given username and password, return the post data necessary for login,
    or None when the login form cannot be located in *page*.

    :param page: HTML text of the login page
    :param username: account username, injected into the form data
    :param password: account password, injected into the form data
    """
    soup = BeautifulSoup(page)
    try:
        inputs = soup.find(id='hiddens').findAll('input')
        # Collect the hidden form fields. (Renamed from `input`, which
        # shadowed the builtin.)
        data = {field['name']: field['value'] for field in inputs}
        data['username'] = username
        data['passwd'] = password
        return data
    except (AttributeError, TypeError, KeyError):
        # BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; narrow to the failures a missing
        # or malformed form can actually raise.
        return None
python
{ "resource": "" }
q43097
Certification.from_signed_raw
train
def from_signed_raw(cls: Type[CertificationType], signed_raw: str) -> CertificationType:
    """
    Return Certification instance from signed raw document

    The document is parsed line by line in its fixed field order:
    Version, Type, Currency, Issuer, IdtyIssuer, IdtyUniqueID,
    IdtyTimestamp, IdtySignature, CertTimestamp, Signature.

    :param signed_raw: Signed raw document
    :return: a Certification carrying an embedded Identity
    """
    n = 0
    # keepends=True: parse_field operates on full lines.
    lines = signed_raw.splitlines(True)
    version = int(Identity.parse_field("Version", lines[n]))
    n += 1
    # Type line is validated but its value is not kept.
    Certification.parse_field("Type", lines[n])
    n += 1
    currency = Certification.parse_field("Currency", lines[n])
    n += 1
    pubkey_from = Certification.parse_field("Issuer", lines[n])
    n += 1
    identity_pubkey = Certification.parse_field("IdtyIssuer", lines[n])
    n += 1
    identity_uid = Certification.parse_field("IdtyUniqueID", lines[n])
    n += 1
    identity_timestamp = BlockUID.from_str(Certification.parse_field("IdtyTimestamp", lines[n]))
    n += 1
    identity_signature = Certification.parse_field("IdtySignature", lines[n])
    n += 1
    timestamp = BlockUID.from_str(Certification.parse_field("CertTimestamp", lines[n]))
    n += 1
    signature = Certification.parse_field("Signature", lines[n])
    # Rebuild the certified Identity from its parsed fields.
    identity = Identity(version, currency, identity_pubkey, identity_uid,
                        identity_timestamp, identity_signature)
    return cls(version, currency, pubkey_from, identity, timestamp, signature)
python
{ "resource": "" }
q43098
Certification.from_inline
train
def from_inline(cls: Type[CertificationType], version: int, currency: str,
                blockhash: Optional[str], inline: str) -> CertificationType:
    """
    Return Certification instance from inline document

    Only self.pubkey_to is populated.
    You must populate self.identity with an Identity instance
    to use raw/sign/signed_raw methods

    :param version: Version of document
    :param currency: Name of the currency
    :param blockhash: Hash of the block
    :param inline: Inline document
    :raises MalformedDocumentError: when *inline* does not match the format
    :return:
    """
    cert_data = Certification.re_inline.match(inline)
    if cert_data is None:
        raise MalformedDocumentError("Certification ({0})".format(inline))
    pubkey_from = cert_data.group(1)
    pubkey_to = cert_data.group(2)
    blockid = int(cert_data.group(3))
    # Block 0 (or no hash available) maps to the special empty BlockUID.
    if blockid == 0 or blockhash is None:
        timestamp = BlockUID.empty()
    else:
        timestamp = BlockUID(blockid, blockhash)
    signature = cert_data.group(4)
    return cls(version, currency, pubkey_from, pubkey_to, timestamp, signature)
python
{ "resource": "" }
q43099
Certification.inline
train
def inline(self) -> str:
    """
    Return inline document string: "from:to:block_number:signature".

    :return:
    """
    fields = (self.pubkey_from, self.pubkey_to,
              self.timestamp.number, self.signatures[0])
    return "{0}:{1}:{2}:{3}".format(*fields)
python
{ "resource": "" }