_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q44400
reconfigArg
train
def reconfigArg(ArgConfig):
    r"""Reconfigures an argument based on its configuration.

    Fills in the ``type_str`` and ``desc`` keys of *ArgConfig* when they are
    missing, and gives custom types (those providing an ``__ec_config__``
    hook) a chance to adjust the config themselves.

    :param ArgConfig: dict describing the argument; must contain ``name``.
    :return: the same dict, updated in place.
    """
    _type = ArgConfig.get('type')

    if _type and hasattr(_type, '__ec_config__'):
        # pass the ArgConfig to the CustomType:
        _type.__ec_config__(ArgConfig)

    if 'type_str' not in ArgConfig:  # idiom fix: was ``not 'type_str' in``
        # Default to 'str' when no type was configured at all; non-class
        # type objects are reported as 'unspecified type'.
        ArgConfig['type_str'] = (
            _type.__name__ if isinstance(_type, type) else 'unspecified type'
        ) if _type else 'str'

    if 'desc' not in ArgConfig:
        ArgConfig['desc'] = ArgConfig['name']

    return ArgConfig
python
{ "resource": "" }
q44401
getTaskHelp
train
def getTaskHelp(_Task):
    r"""Build a help string for the given task member.

    Joins the task's name / description with a listing of its arguments;
    each argument shows its description, falling back to its type string.
    """
    lines = [
        '%s: %s' % (key, _Task.Config.get(key))
        for key in ('name', 'desc')
        if _Task.Config.get(key) is not None
    ]

    Args = _Task.Args
    if Args:
        lines.append('\nArgs:')
        lines.extend(
            ' %s: %s' % (argName, Arg.get('desc', Arg['type_str']))
            for argName, Arg in Args.items()
        )
        lines.append('')

    return '\n'.join(lines).rstrip()
python
{ "resource": "" }
q44402
restart_in_venv
train
def restart_in_venv(venv, base, site_packages, args):
    """
    Restart this script using the interpreter in the given virtual environment

    :param venv: path (possibly relative or ~-prefixed) of the virtualenv
    :param base: base directory that relative ``venv`` paths are joined to
    :param site_packages: passed through to virtualenv when creating a new env
    :param args: extra command-line arguments forwarded to the re-run script

    Creates the virtualenv on the fly (if the ``virtualenv`` package is
    importable) when it does not exist yet, then re-executes ``runner.py``
    under the venv's interpreter and exits with its return code.
    """
    if base and not os.path.isabs(venv) and not venv.startswith('~'):
        base = os.path.expanduser(base)
        # ensure we have an abs basepath at this point:
        # a relative one makes no sense (or does it?)
        if os.path.isabs(base):
            venv = os.path.join(base, venv)

    if venv.startswith('~'):
        venv = os.path.expanduser(venv)

    if not os.path.exists(venv):
        try:
            import virtualenv
        except ImportError:
            # Cannot create the missing environment without virtualenv.
            print('The virtual environment does not exist: %s' % venv)
            print('and virtualenv is not installed, so a new environment cannot be created')
            sys.exit(3)
        print('Creating new virtualenv environment in %s' % venv)
        virtualenv.logger = logger
        logger.indent += 2
        virtualenv.create_environment(venv, site_packages=site_packages)

    if sys.platform == 'win32':
        python = os.path.join(venv, 'Scripts', 'python.exe')
        # check for bin directory which is used in buildouts
        if not os.path.exists(python):
            python = os.path.join(venv, 'bin', 'python.exe')
    else:
        python = os.path.join(venv, 'bin', 'python')

    if not os.path.exists(python):
        python = venv
    if not os.path.exists(python):
        raise BadCommand('Cannot find virtual environment interpreter at %s' % python)

    base = os.path.dirname(os.path.dirname(python))
    file = os.path.join(os.path.dirname(__file__), 'runner.py')
    if file.endswith('.pyc'):
        # Run the .py source, not the compiled file.
        file = file[:-1]
    # Re-run under the venv interpreter; the sentinel argument lets the
    # restarted process know it is already inside the venv.
    proc = subprocess.Popen(
        [python, file] + args + [base, '___VENV_RESTART___'])
    proc.wait()
    sys.exit(proc.returncode)
python
{ "resource": "" }
q44403
history
train
async def history(client: Client, pubkey: str) -> dict:
    """
    Get transactions history of public key

    :param client: Client to connect to the api
    :param pubkey: Public key
    :return: parsed response validated against HISTORY_SCHEMA
    """
    path = MODULE + '/history/%s' % pubkey
    return await client.get(path, schema=HISTORY_SCHEMA)
python
{ "resource": "" }
q44404
process
train
async def process(client: Client, transaction_signed_raw: str) -> ClientResponse:
    """
    POST a transaction raw document

    :param client: Client to connect to the api
    :param transaction_signed_raw: Transaction signed raw document
    :return: the raw aiohttp response
    """
    payload = {'transaction': transaction_signed_raw}
    return await client.post(MODULE + '/process', payload, rtype=RESPONSE_AIOHTTP)
python
{ "resource": "" }
q44405
sources
train
async def sources(client: Client, pubkey: str) -> dict:
    """
    GET transaction sources

    :param client: Client to connect to the api
    :param pubkey: Public key
    :return: parsed response validated against SOURCES_SCHEMA
    """
    path = MODULE + '/sources/%s' % pubkey
    return await client.get(path, schema=SOURCES_SCHEMA)
python
{ "resource": "" }
q44406
blocks
train
async def blocks(client: Client, pubkey: str, start: int, end: int) -> dict:
    """
    GET public key transactions history between start and end block number

    :param client: Client to connect to the api
    :param pubkey: Public key
    :param start: Start from block number
    :param end: End to block number
    :return: parsed response validated against HISTORY_SCHEMA
    """
    path = MODULE + '/history/%s/blocks/%s/%s' % (pubkey, start, end)
    return await client.get(path, schema=HISTORY_SCHEMA)
python
{ "resource": "" }
q44407
ReportCommand._ndays
train
def _ndays(self, start_date, ndays):
    """
    Compute an end date given a start date and a number of days.

    Requires a start-date to be supplied (via the command line or the
    config file), since ndays is meaningless without one.
    """
    from_args = getattr(self.args, 'start-date')
    from_config = self.config.get('start-date', None)
    if not from_args and not from_config:
        raise Exception('start-date must be provided when ndays is used.')

    year, month, day = map(int, start_date.split('-'))
    end = date(year, month, day) + timedelta(days=ndays)
    return end.strftime('%Y-%m-%d')
python
{ "resource": "" }
q44408
ReportCommand.report
train
def report(self):
    """
    Query analytics and stash data in a format suitable for serializing.

    Returns an OrderedDict holding the global arguments, report title,
    run date, and one entry per configured query with metric values cast
    to their declared data types.
    """
    output = OrderedDict()

    for arg in GLOBAL_ARGUMENTS:
        output[arg] = getattr(self.args, arg) or self.config.get(arg, None)

    output['title'] = getattr(self.args, 'title') or self.config.get('title', 'Untitled Report')
    output['run_date'] = datetime.now().strftime('%Y-%m-%d')

    output['queries'] = []

    for analytic in self.config.get('queries', []):
        # print() as a function so this also runs on Python 3.
        print('Querying "%s"' % analytic['name'])

        results = self.query(
            metrics=analytic['metrics'],
            dimensions=analytic.get('dimensions', []),
            filters=analytic.get('filter', None),
            segment=analytic.get('segment', None),
            sort=analytic.get('sort', []),
            start_index=analytic.get('start-index', 1),
            max_results=analytic.get('max-results', 10)
        )

        dimensions_len = len(analytic.get('dimensions', []))

        data = OrderedDict([
            ('config', analytic),
            ('sampled', results.get('containsSampledData', False)),
            ('sampleSize', int(results.get('sampleSize', 0))),
            ('sampleSpace', int(results.get('sampleSpace', 0))),
            ('data_types', OrderedDict()),
            ('data', OrderedDict())
        ])

        # Columns after the dimension columns are metric columns.
        for column in results['columnHeaders'][dimensions_len:]:
            data['data_types'][column['name']] = column['dataType']

        def cast_data_type(d, dt):
            # BUG FIX: the original referenced the enclosing ``data_type``
            # variable instead of the ``dt`` parameter, which only worked
            # by accident while the two happened to coincide.
            if dt == 'INTEGER':
                return int(d)
            elif dt in ['TIME', 'FLOAT', 'CURRENCY', 'PERCENT']:
                return float(d)
            else:
                raise Exception('Unknown metric data type: %s' % dt)

        for i, metric in enumerate(analytic['metrics']):
            data['data'][metric] = OrderedDict()
            data_type = data['data_types'][metric]

            if dimensions_len:
                for row in results.get('rows', []):
                    column = i + dimensions_len
                    # Dimension values form the row label.
                    label = ','.join(row[:dimensions_len])
                    value = cast_data_type(row[column], data_type)
                    data['data'][metric][label] = value

            data['data'][metric]['total'] = cast_data_type(
                results['totalsForAllResults'][metric], data_type)

        # Prevent rate-limiting
        sleep(1)

        output['queries'].append(data)

    return output
python
{ "resource": "" }
q44409
ReportCommand.html
train
def html(self, report, f):
    """
    Write report data to an HTML file.
    """
    loader = PackageLoader('clan', 'templates')
    template = Environment(loader=loader).get_template('report.html')

    rendered = template.render(
        report=report,
        GLOBAL_ARGUMENTS=GLOBAL_ARGUMENTS,
        field_definitions=self.field_definitions,
        format_comma=format_comma,
        format_duration=format_duration,
        format_percent=format_percent,
    )
    f.write(rendered.encode('utf-8'))
python
{ "resource": "" }
q44410
ensure_exe
train
def ensure_exe(exe_name: str, *paths: str):  # pragma: no cover
    """
    Makes sure that an executable can be found on the system path.

    Will exit the program if the executable cannot be found

    Args:
        exe_name: name of the executable
        paths: optional path(s) to be searched; if not specified, search the whole system
    """
    found = elib_run.find_executable(exe_name, *paths)
    if found:
        return
    LOGGER.error('could not find "%s.exe" on this system', exe_name)
    sys.exit(-1)
python
{ "resource": "" }
q44411
APISession.handle_captcha
train
def handle_captcha(self, query_params: dict, html: str, login_data: dict) -> requests.Response:
    """ Handling CAPTCHA request """
    check_url = get_base_url(html)

    sid = query_params['sid']
    captcha_url = '{}?s={}&sid={}'.format(self.CAPTCHA_URI, query_params['s'], sid)

    # The user must read the captcha image at captcha_url and type the key.
    login_data['captcha_sid'] = sid
    login_data['captcha_key'] = input(self.CAPTCHA_INPUT_PROMPT.format(captcha_url))

    return self.post(check_url, login_data)
python
{ "resource": "" }
q44412
APISession.handle_two_factor_check
train
def handle_two_factor_check(self, html: str) -> requests.Response:
    """ Handling two factor authorization request """
    action_url = get_base_url(html)
    post_url = '/'.join((self.LOGIN_URL, action_url))

    payload = {
        'code': input(self.TWO_FACTOR_PROMPT).strip(),
        '_ajax': '1',
        'remember': '1',
    }
    return self.post(post_url, payload)
python
{ "resource": "" }
q44413
APISession.handle_phone_number_check
train
def handle_phone_number_check(self, html: str) -> requests.Response:
    """ Handling phone number request """
    action_url = get_base_url(html)
    post_url = '/'.join((self.LOGIN_URL, action_url))

    payload = {
        'code': input(self.PHONE_PROMPT),
        'act': 'security_check',
        'hash': get_url_params(action_url)['hash'],
    }
    return self.post(post_url, payload)
python
{ "resource": "" }
q44414
APISession.check_for_additional_actions
train
def check_for_additional_actions(self, url_params: dict, html: str, login_data: dict) -> None:
    """ Checks the url for a request for additional actions, if so, calls the event handler """
    if 'sid' in url_params:
        action_response = self.handle_captcha(url_params, html, login_data)
    elif 'authcheck' in url_params:
        action_response = self.handle_two_factor_check(html)
    elif 'security_check' in url_params:
        action_response = self.handle_phone_number_check(html)
    else:
        # No additional action was requested.
        action_response = ''

    if action_response:
        check_page_for_warnings(action_response.text)
python
{ "resource": "" }
q44415
APISession.login
train
def login(self) -> bool:
    """ Authorizes a user and returns a bool value of the result """
    response = self.get(self.LOGIN_URL)

    login_url = get_base_url(response.text)
    login_data = {'email': self._login, 'pass': self._password}
    login_response = self.post(login_url, login_data)

    url_params = get_url_params(login_response.url)
    # Captcha / two-factor / phone checks may be required before the
    # session cookie appears.
    self.check_for_additional_actions(url_params, login_response.text, login_data)

    # BUG FIX: the original returned None (implicitly) on failure despite
    # the ``-> bool`` annotation; a remixsid cookie signals success.
    return 'remixsid' in self.cookies or 'remixsid6' in self.cookies
python
{ "resource": "" }
q44416
APISession.auth_oauth2
train
def auth_oauth2(self) -> dict:
    """ Authorizes a user by OAuth2 to get access token

    :return: dict of URL fragment params containing ``access_token`` on
        success.
    :raises VVKAuthException: when the OAuth endpoint reports an error.
    """
    oauth_data = {
        'client_id': self._app_id,
        'display': 'mobile',
        'response_type': 'token',
        'scope': '+66560',
        'v': self.API_VERSION
    }
    response = self.post(self.OAUTH_URL, oauth_data)

    url_params = get_url_params(response.url, fragment=True)
    if 'access_token' in url_params:
        return url_params

    # An intermediate confirmation page: follow it and re-read the params.
    action_url = get_base_url(response.text)
    if action_url:
        response = self.get(action_url)
        return get_url_params(response.url)

    response_json = response.json()
    # BUG FIX: the original tested ``'error' in response_json['error']``,
    # which raises KeyError when there is no error and checks the wrong
    # object otherwise; test the response dict itself.
    if 'error' in response_json:
        exception_msg = '{}: {}'.format(response_json['error'],
                                        response_json['error_description'])
        raise VVKAuthException(exception_msg)
python
{ "resource": "" }
q44417
APISession.get_access_token
train
def get_access_token(self) -> str:
    """ Returns the access token in case of successful authorization """
    # A pre-supplied service token short-circuits the login flow.
    if self._service_token:
        return self._service_token

    if not (self._app_id and self._login and self._password):
        return None

    try:
        if self.login():
            url_params = self.auth_oauth2()
            if 'access_token' in url_params:
                return url_params['access_token']
    finally:
        # Always close the session, even when authorization fails.
        self.close()
python
{ "resource": "" }
q44418
APISession.send_method_request
train
def send_method_request(self, method: str, method_params: dict) -> dict:
    """ Sends user-defined method and method params """
    # The API version (and token, when present) ride along as params.
    method_params['v'] = self.API_VERSION
    if self._access_token:
        method_params['access_token'] = self._access_token

    url = '/'.join((self.METHOD_URL, method))
    response = self.post(url, method_params, timeout=10)
    response.raise_for_status()
    return json.loads(response.text)
python
{ "resource": "" }
q44419
PhraseClassificationTrainer.train
train
def train(self, net_sizes, epochs, batchsize):
    """ Initialize the base trainer, run learning, and return its evaluation. """
    trainer = ClassificationTrainer(self.data, self.targets, net_sizes)
    self.trainer = trainer
    trainer.learn(epochs, batchsize)
    return trainer.evaluate(batchsize)
python
{ "resource": "" }
q44420
PhraseClassifier.classify
train
def classify(self, phrase, cut_to_len=True):
    """ Classify a phrase based on the loaded model. If cut_to_len is True, cut to desired length."""
    if len(phrase) > self.max_phrase_len:
        if not cut_to_len:
            raise Exception("Phrase too long.")
        # Truncate rather than fail.
        phrase = phrase[:self.max_phrase_len]

    vector = stringToVector(phrase, self.vocab, self.max_vector_len)
    numbers = self.classifier.classify(vector)
    return zip(self.targets, numbers)
python
{ "resource": "" }
q44421
popen_wrapper
train
def popen_wrapper(args):
    """
    Friendly wrapper around Popen.

    Returns stdout output, stderr output and OS status code.
    """
    # Windows (os.name == 'nt') does not support close_fds with pipes.
    close_fds = os.name != 'nt'
    try:
        p = Popen(args, shell=False, stdout=PIPE, stderr=PIPE,
                  close_fds=close_fds, universal_newlines=True)
    except OSError as e:
        raise OSError(
            "Error executing '{:}': '{:}'".format(args[0], e.strerror))
    output, errors = p.communicate()
    return (output, text_type(errors), p.returncode)
python
{ "resource": "" }
q44422
getPackages
train
def getPackages(plist):
    """
    Cleans up input from the command line tool and returns a list of
    package names.

    Skips separator lines containing '===', blank lines, and lines whose
    first word is a known warning prefix emitted by the tool.
    """
    skip_words = {'Warning:', 'Could', 'Some', 'You'}
    pkgs = []
    for line in plist.split('\n'):
        # BUG FIX: the original used ``find('===') > 0`` which missed a
        # separator at column 0.
        if '===' in line:
            continue
        fields = line.split()
        # BUG FIX: a blank line used to raise IndexError on split()[0];
        # the old ``elif not pkg`` check was unreachable.
        if not fields:
            continue
        pkg = fields[0]
        if pkg in skip_words:
            continue
        pkgs.append(pkg)
    print(' >> Found', len(pkgs), 'packages')
    return pkgs
python
{ "resource": "" }
q44423
pip
train
def pip(usr_pswd=None):
    """
    This updates one package at a time.

    Could do all at once:
    pip list --outdated | cut -d' ' -f1 | xargs pip install --upgrade
    """
    # see if pip is installed
    try:
        cmd('which pip')
    except Exception:
        # pip not found; nothing to do. (Narrowed from a bare except.)
        return
    print('-[pip]----------')
    p = cmd('pip list --outdated')
    if not p:
        return
    pkgs = getPackages(p)
    # BUG FIX: the original popped from ``pkgs`` while iterating it with
    # enumerate(), which skips the element after each removed one.
    # Partition instead: update pip and setuptools first, then the rest.
    tooling = [p for p in pkgs if p in ('pip', 'setuptools')]
    rest = [p for p in pkgs if p not in ('pip', 'setuptools')]
    for p in tooling + rest:
        cmd('pip install -U ' + p, usr_pwd=usr_pswd, run=global_run)
python
{ "resource": "" }
q44424
brew
train
def brew(clean=False):
    """ Handle homebrew on macOS """
    # see if homebrew is installed
    try:
        cmd('which brew')
    except:
        return
    print('-[brew]----------')
    cmd('brew update')
    outdated = cmd('brew outdated')
    if not outdated:
        return
    for pkg in getPackages(outdated):
        cmd('brew upgrade {}'.format(pkg), run=global_run)
    if not clean:
        return
    print(' > brew prune old sym links and cleanup')
    cmd('brew prune')
    cmd('brew cleanup')
python
{ "resource": "" }
q44425
kernel
train
def kernel():
    """ Handle linux kernel update """
    banner = '================================'
    print(banner)
    print(' WARNING: upgrading the kernel')
    print(banner)
    # Give the user a moment to abort before touching the kernel.
    time.sleep(5)
    print('-[kernel]----------')
    cmd('rpi-update', True)
    print(' >> You MUST reboot to load the new kernel <<')
python
{ "resource": "" }
q44426
npm
train
def npm(usr_pwd=None, clean=False):
    """ Handle npm for Node.js """
    # see if node is installed
    try:
        cmd('which npm')
    except:
        return
    print('-[npm]----------')
    # awk, ignore 1st line and grab 1st word
    outdated = cmd("npm outdated -g | awk 'NR>1 {print $1}'")
    if not outdated:
        return
    for pkg in getPackages(outdated):
        cmd('{} {}'.format('npm update -g ', pkg), usr_pwd=usr_pwd, run=global_run)
python
{ "resource": "" }
q44427
Interval.split
train
def split(self):
    """Immediately stop the current interval and start a new interval that
    has a start_instant equivalent to the stop_interval of self"""
    self.stop()
    # The successor interval begins exactly where this one ended.
    successor = Interval()
    successor._start_instant = self.stop_instant
    return successor
python
{ "resource": "" }
q44428
Interval.stop
train
def stop(self):
    """Mark the stop of the interval. Calling stop on an already stopped
    interval has no effect. An interval can only be stopped once.

    :returns: the duration if the interval is truely stopped otherwise ``False``.
    """
    if self._start_instant is None:
        raise IntervalException("Attempt to stop an interval that has not started.")
    if self._stop_instant is not None:
        # Already stopped; stopping again is a no-op.
        return False
    self._stop_instant = instant()
    self._duration = int((self._stop_instant - self._start_instant) * 1000)
    return self._duration
python
{ "resource": "" }
q44429
Interval.duration_so_far
train
def duration_so_far(self):
    """Return how the duration so far.

    :returns: the duration from the time the Interval was started if the
        interval is running, otherwise ``False``.
    """
    running = self._start_instant is not None and self._stop_instant is None
    if not running:
        return False
    return int((instant() - self._start_instant) * 1000)
python
{ "resource": "" }
q44430
Interval.duration
train
def duration(self):
    """Returns the integer value of the interval, the value is in milliseconds.
    If the interval has not had stop called yet, it will report the number of
    milliseconds in the interval up to the current point in time.
    """
    if self._stop_instant is None:
        # Still running: measure up to now without stopping the interval.
        return int((instant() - self._start_instant) * 1000)
    if self._duration is None:
        # Stopped but not yet computed: compute and cache.
        self._duration = int((self._stop_instant - self._start_instant) * 1000)
    return self._duration
python
{ "resource": "" }
q44431
Bridge.import_module
train
def import_module(self, name):
    """Import a module into the bridge."""
    try:
        return self._objects[name]
    except KeyError:
        # First request for this module: import it and index it both by
        # name and by object identity.
        module = _import_module(name)
        self._objects[name] = module
        self._object_references[id(module)] = name
        return module
python
{ "resource": "" }
q44432
_component_of
train
def _component_of(name):
    """Get the root package or module of the passed module. """
    registered = settings.get('COMPONENTS', [])
    segments = name.split('.')
    # Try progressively shorter dotted prefixes against the registry.
    while segments:
        candidate = '.'.join(segments)
        if candidate in registered:
            # This is the component we are in.
            return candidate
        segments.pop()
    if '.models' in name:
        # No package was found to be registered; attempt to guess the
        # right package name; strip all occurrances of '.models' from the
        # pacakge name.
        return _component_of(name.replace('.models', ''))
python
{ "resource": "" }
q44433
Model.save
train
def save(self, commit=False):
    """Save the changes to the model.

    If the model has not been persisted then it adds the model to the
    declared session. Then it flushes the object session and optionally
    commits it.
    """
    if not has_identity(self):
        # Object has not been persisted to the database yet.
        session.add(self)
    # commit() implies a flush; otherwise only flush without committing.
    (session.commit if commit else session.flush)()
python
{ "resource": "" }
q44434
team
train
def team(page):
    """ Return the team name, or None when it cannot be parsed.

    The page title is expected to look like "league - team | ...".
    """
    soup = BeautifulSoup(page)
    try:
        return soup.find('title').text.split(' | ')[0].split(' - ')[1]
    except (AttributeError, IndexError):
        # No <title> tag (find() -> None) or title not in expected shape.
        # Narrowed from a bare except, which also swallowed SystemExit
        # and KeyboardInterrupt.
        return None
python
{ "resource": "" }
q44435
league
train
def league(page):
    """ Return the league name, or None when it cannot be parsed.

    The page title is expected to look like "league - team | ...".
    """
    soup = BeautifulSoup(page)
    try:
        return soup.find('title').text.split(' | ')[0].split(' - ')[0]
    except (AttributeError, IndexError):
        # No <title> tag (find() -> None) or title not in expected shape.
        # Narrowed from a bare except.
        return None
python
{ "resource": "" }
q44436
date
train
def date(page):
    """ Return the date, nicely-formatted, or None when it cannot be parsed. """
    soup = BeautifulSoup(page)
    try:
        page_date = soup.find('input', attrs={'name': 'date'})['value']
        parsed_date = datetime.strptime(page_date, '%Y-%m-%d')
        return parsed_date.strftime('%a, %b %d, %Y')
    except (AttributeError, TypeError, KeyError, ValueError):
        # Missing input element (find() -> None), missing 'value'
        # attribute, or a malformed date string.
        # Narrowed from a bare except.
        return None
python
{ "resource": "" }
q44437
start_active_players_path
train
def start_active_players_path(page):
    """ Return the path in the "Start Active Players" button, or None. """
    soup = BeautifulSoup(page)
    try:
        return soup.find('a', href=True, text='Start Active Players')['href']
    except (TypeError, KeyError):
        # find() returned None, or the anchor has no href attribute.
        # Narrowed from a bare except.
        return None
python
{ "resource": "" }
q44438
PluginServerStorageEntryHookABC._migrateStorageSchema
train
def _migrateStorageSchema(self, metadata: MetaData) -> None:
    """ Initialise the DB

    This method is called by the platform between the load() and start() calls.
    There should be no need for a plugin to call this method it's self.

    :param metadata: the SQLAlchemy metadata for this plugins schema
    """
    # Resolve the plugin's alembic migration directory relative to its root.
    relDir = self._packageCfg.config.storage.alembicDir(require_string)
    alembicDir = os.path.join(self.rootDir, relDir)
    if not os.path.isdir(alembicDir):
        raise NotADirectoryError(alembicDir)

    # enableCreateAll=False: schema changes come from alembic migrations,
    # not from SQLAlchemy metadata.create_all().
    self._dbConn = DbConnection(
        dbConnectString=self.platform.dbConnectString,
        metadata=metadata,
        alembicDir=alembicDir,
        enableCreateAll=False
    )

    self._dbConn.migrate()
python
{ "resource": "" }
q44439
PluginServerStorageEntryHookABC.prefetchDeclarativeIds
train
def prefetchDeclarativeIds(self, Declarative, count) -> Deferred:
    """ Get PG Sequence Generator

    A PostGreSQL sequence generator returns a chunk of IDs for the given
    declarative.

    :return: A generator that will provide the IDs
    :rtype: an iterator, yielding the numbers to assign
    """
    # Delegate straight to the database connection helper.
    return self._dbConn.prefetchDeclarativeIds(
        Declarative=Declarative, count=count)
python
{ "resource": "" }
q44440
run_once
train
def run_once(func):
    """ Simple decorator to ensure a function is ran only once

    The first call's result is cached in ``CTX.run_once`` keyed by function
    name; later calls return the cached result without re-running.
    """
    from functools import wraps

    @wraps(func)  # FIX: preserve the wrapped function's name/docstring
    def _inner(*args, **kwargs):
        if func.__name__ in CTX.run_once:
            LOGGER.info('skipping %s', func.__name__)
            return CTX.run_once[func.__name__]
        LOGGER.info('running: %s', func.__name__)
        result = func(*args, **kwargs)
        CTX.run_once[func.__name__] = result
        return result

    return _inner
python
{ "resource": "" }
q44441
score_x_of_a_kind_yahtzee
train
def score_x_of_a_kind_yahtzee(dice: List[int], min_same_faces: int) -> int:
    """Return sum of dice if there are a minimum of equal min_same_faces dice,
    otherwise return zero. Only works for 3 or more min_same_faces.
    """
    counts = Counter(dice)
    if counts and max(counts.values()) >= min_same_faces:
        # Yahtzee x-of-a-kind scores the whole roll, not just the matches.
        return sum(dice)
    return 0
python
{ "resource": "" }
q44442
score_x_of_a_kind_yatzy
train
def score_x_of_a_kind_yatzy(dice: List[int], min_same_faces: int) -> int:
    """Similar to yahtzee, but only return the sum of the dice that satisfy
    min_same_faces
    """
    most_common = Counter(dice).most_common(1)
    if most_common:
        face, occurrences = most_common[0]
        if occurrences >= min_same_faces:
            # Yatzy only scores the matching dice.
            return face * min_same_faces
    return 0
python
{ "resource": "" }
q44443
score_small_straight_yahztee
train
def score_small_straight_yahztee(dice: List[int]) -> int:
    """ Small straight scoring according to regular yahtzee rules

    Returns the fixed small-straight score when the dice faces form one of
    the runs 1-4, 2-5 or 3-6; otherwise 0.

    NOTE(review): the helper's name suggests an exact set comparison, in
    which case a 5-die roll such as [1, 2, 3, 4, 6] would NOT count even
    though it contains a small straight -- confirm the semantics of
    _are_two_sets_equal.
    """
    global CONSTANT_SCORES_YAHTZEE
    dice_set = set(dice)
    if _are_two_sets_equal({1, 2, 3, 4}, dice_set) or \
            _are_two_sets_equal({2, 3, 4, 5}, dice_set) or \
            _are_two_sets_equal({3, 4, 5, 6}, dice_set):
        return CONSTANT_SCORES_YAHTZEE[Category.SMALL_STRAIGHT]
    return 0
python
{ "resource": "" }
q44444
score_small_straight_yatzy
train
def score_small_straight_yatzy(dice: List[int]) -> int:
    """ Small straight scoring according to yatzy rules

    In yatzy the small straight is exactly 1-2-3-4-5 and scores the dice sum.
    """
    if _are_two_sets_equal({1, 2, 3, 4, 5}, set(dice)):
        return sum(dice)
    return 0
python
{ "resource": "" }
q44445
score_large_straight_yahtzee
train
def score_large_straight_yahtzee(dice: List[int]) -> int:
    """ Large straight scoring according to regular yahtzee rules

    A large straight is exactly 1-5 or 2-6 and scores a fixed constant.
    """
    global CONSTANT_SCORES_YAHTZEE
    faces = set(dice)
    for straight in ({1, 2, 3, 4, 5}, {2, 3, 4, 5, 6}):
        if _are_two_sets_equal(straight, faces):
            return CONSTANT_SCORES_YAHTZEE[Category.LARGE_STRAIGHT]
    return 0
python
{ "resource": "" }
q44446
score_large_straight_yatzy
train
def score_large_straight_yatzy(dice: List[int]) -> int:
    """ Large straight scoring according to yatzy rules

    The large straight is exactly 2-3-4-5-6 and scores the dice sum.
    """
    if _are_two_sets_equal({2, 3, 4, 5, 6}, set(dice)):
        return sum(dice)
    return 0
python
{ "resource": "" }
q44447
select_dict
train
def select_dict(coll, key, value):
    """ Given an iterable of dictionaries, return the dictionaries
    where the values at a given key match the given value. If the
    value is an iterable of objects, the function will consider any
    to be a match. This is especially useful when calling REST APIs
    which return arrays of JSON objects. When such a response is
    converted to a Python list of dictionaries, it may be easily
    filtered using this function.

    :param iter coll: An iterable containing dictionaries
    :param obj key: A key to search in each dictionary
    :param value: A value or iterable of values to match
    :type value: obj or iter
    :returns: A list of dictionaries matching the query
    :rtype: list

    :Example:

    ::

        >>> dicts = [
        ...     {'hi': 'bye'},
        ...     {10: 2, 30: 4},
        ...     {'hi': 'hello', 'bye': 'goodbye'},
        ... ]
        >>> select_dict(dicts, 'hi', 'bye')
        [{'hi': 'bye'}]
        >>> select_dict(dicts, 'hi', ('bye', 'hello'))
        [{'hi': 'bye'}, {'hi': 'hello', 'bye': 'goodbye'}]
    """
    # BUG FIX: on Python 3 strings have __iter__, so a plain string value
    # degraded into a substring/per-character membership test (e.g. a
    # value of 'bye' also matched dicts whose value was 'b'); treat str
    # and bytes as scalar values.
    if getattr(value, '__iter__', None) and not isinstance(value, (str, bytes)):
        iterable = value
    else:
        iterable = [value]
    return [v for v in coll if key in v and v[key] in iterable]
python
{ "resource": "" }
q44448
inside_brain
train
def inside_brain(stat_dset,atlas=None,p=0.001):
    '''calculates the percentage of voxels above a statistical threshold inside a brain mask vs. outside it

    if ``atlas`` is ``None``, it will try to find ``TT_N27``'''
    atlas = find_atlas(atlas)
    if atlas==None:
        return None
    mask_dset = nl.suffix(stat_dset,'_atlasfrac')
    # Build a brain mask from the atlas (values > 100 map to 2, background
    # to 1 via '1+step(a-100)') resampled onto the stat dataset's grid.
    nl.run(['3dfractionize','-template',nl.strip_subbrick(stat_dset),'-input',nl.calc([atlas],'1+step(a-100)',datum='short'),'-preserve','-clip','0.2','-prefix',mask_dset],products=mask_dset,quiet=True,stderr=None)
    # ROI stats of the p-thresholded stat map within each mask value.
    s = nl.roi_stats(mask_dset,nl.thresh(stat_dset,p))
    # NOTE(review): assumes roi_stats keys 1/2 are outside-/inside-brain
    # (matching the 1/2 mask values above) -- confirm against roi_stats.
    return 100.0 * s[2]['nzvoxels'] / (s[1]['nzvoxels'] + s[2]['nzvoxels'])
python
{ "resource": "" }
q44449
auto_qc
train
def auto_qc(dset, inside_perc=60, atlas=None, p=0.001):
    '''returns ``False`` if ``dset`` fails minimum checks, or returns a float from ``0.0`` to ``100.0`` describing data quality'''
    with nl.notify('Running quality check on %s:' % dset):
        if not os.path.exists(dset):
            nl.notify('Error: cannot find the file!', level=nl.level.error)
            return False
        info = nl.dset_info(dset)
        if not info:
            nl.notify('Error: could not read the dataset!', level=nl.level.error)
            # BUG FIX: the original fell through here and then crashed
            # dereferencing ``info.subbricks`` on None.
            return False
        if any(['stat' in x for x in info.subbricks]):
            with nl.notify('Statistical results detected...'):
                inside = inside_brain(dset, atlas=atlas, p=p)
                # BUG FIX: the original notify had no '%' argument, so it
                # printed the literal format string.
                nl.notify('%.1f significant voxels inside brain' % inside)
                if inside < inside_perc:
                    nl.notify('Warning: below quality threshold!', level=nl.level.warning)
                    # return False
                nl.notify('Looks ok')
                return inside
        if len(info.subbricks) > 1:
            with nl.notify('Time-series detected...'):
                return_val = True
                (cost, overlap) = atlas_overlap(dset)
                if cost > 0.15 or overlap < 80:
                    nl.notify('Warning: does not appear to conform to brain dimensions', level=nl.level.warning)
                    return_val = False
                has_outcount = len(info.subbricks) > 5
                if has_outcount:
                    (oc, perc_outliers) = outcount(dset)
                    if perc_outliers > 0.1:
                        nl.notify('Warning: large amount of outlier time points', level=nl.level.warning)
                        return_val = False
                if return_val:
                    nl.notify('Looks ok')
                    scores = [100 * (1 - cost), overlap]
                    if has_outcount:
                        # BUG FIX: ``perc_outliers`` was referenced
                        # unconditionally even when outcount() never ran
                        # (2-5 subbricks), raising NameError.
                        # NOTE(review): 100*perc_outliers gets SMALLER the
                        # cleaner the data, which drags min() down -- the
                        # intended term may be 100*(1-perc_outliers);
                        # preserved as written, confirm.
                        scores.append(100 * perc_outliers)
                    return min(scores)
                return False
        with nl.notify('Single brain image detected...'):
            (cost, overlap) = atlas_overlap(dset)
            # Be more lenient if it's not an EPI, cuz who knows what else is in this image
            if cost > 0.45 or overlap < 70:
                nl.notify('Warning: does not appear to conform to brain dimensions', level=nl.level.warning)
                return False
            nl.notify('Looks ok')
            return min(100 * (1 - cost), overlap)
python
{ "resource": "" }
q44450
StoneRedis._multi_lpop_pipeline
train
def _multi_lpop_pipeline(self, pipe, queue, number):
    ''' Pops multiple elements from a list in a given pipeline

    Queues an LRANGE of the first ``number`` items followed by an LTRIM
    that drops exactly those items from the list.
    '''
    last = number - 1
    pipe.lrange(queue, 0, last)
    pipe.ltrim(queue, number, -1)
python
{ "resource": "" }
q44451
StoneRedis.multi_lpop
train
def multi_lpop(self, queue, number, transaction=False):
    ''' Pops multiple elements from a list

    This operation will be atomic if transaction=True is passed

    :returns: the list of popped elements ([] when nothing was returned)
    '''
    pipe = self.pipeline(transaction=transaction)
    pipe.multi()
    self._multi_lpop_pipeline(pipe, queue, number)
    try:
        # execute() returns one result per queued command; the LRANGE
        # result is the first.
        return pipe.execute()[0]
    except IndexError:
        return []
    # CLEANUP: the original also had ``except: raise`` (a no-op) and
    # wrapped the pipeline construction in the try block unnecessarily.
python
{ "resource": "" }
q44452
StoneRedis._multi_rpush_pipeline
train
def _multi_rpush_pipeline(self, pipe, queue, values, bulk_size=0):
    ''' Pushes multiple elements to a list in a given pipeline

    If bulk_size is set it will execute the pipeline every bulk_size elements
    '''
    cont = 0
    for value in values:
        pipe.rpush(queue, value)
        # BUG FIX: ``cont`` was never incremented, so ``cont % bulk_size``
        # was always 0 and the pipeline executed on every single element.
        cont += 1
        if bulk_size != 0 and cont % bulk_size == 0:
            pipe.execute()
python
{ "resource": "" }
q44453
StoneRedis.multi_rpush
train
def multi_rpush(self, queue, values, bulk_size=0, transaction=False):
    ''' Pushes multiple elements to a list

    If bulk_size is set it will execute the pipeline every bulk_size elements
    This operation will be atomic if transaction=True is passed
    '''
    # Check that what we receive is iterable
    if not hasattr(values, '__iter__'):
        raise ValueError('Expected an iterable')
    pipe = self.pipeline(transaction=transaction)
    pipe.multi()
    self._multi_rpush_pipeline(pipe, queue, values, bulk_size)
    pipe.execute()
python
{ "resource": "" }
q44454
StoneRedis.rpush_limit
train
def rpush_limit(self, queue, value, limit=100000):
    ''' Pushes an element to a list in an atomic way until it reaches certain size

    Once limit is reached, the function will lpop the oldest elements
    This operation runs in LUA, so is always atomic

    :param queue: name of the redis list
    :param value: element to push
    :param limit: maximum list length to maintain
    '''
    lua = '''
    local queue = KEYS[1]
    local max_size = tonumber(KEYS[2])
    local table_len = 1
    local redis_queue_len = tonumber(redis.call('LLEN', queue))
    local total_size = redis_queue_len + table_len
    local from = 0
    if total_size >= max_size then
        -- Delete the same amount of data we are inserting. Even better, limit the queue to the specified size
        redis.call('PUBLISH', 'DEBUG', 'trim')
        if redis_queue_len - max_size + table_len > 0 then
            from = redis_queue_len - max_size + table_len
        else
            from = 0
        end
        redis.call('LTRIM', queue, from, redis_queue_len)
    end
    redis.call('RPUSH', queue, ARGV[1])
    return 1
    '''
    try:
        # Fast path: the script was registered by a previous call.
        self.rpush_limit_script([queue, limit], [value])
    except AttributeError:
        # First call: the script attribute does not exist yet.
        if self.logger:
            self.logger.info('Script not registered... registering')
        # If the script is not registered, register it
        self.rpush_limit_script = self.register_script(lua)
        self.rpush_limit_script([queue, limit], [value])
python
{ "resource": "" }
q44455
StoneRedis.get_lock
train
def get_lock(self, lockname, locktime=60, auto_renewal=False):
    ''' Gets a lock and returns if it can be stablished. Returns false otherwise

    :param lockname: name of the lock to acquire
    :param locktime: lock expiry in seconds
    :param auto_renewal: whether redis_lock should keep renewing the expiry
    :returns: the redis_lock.Lock on success, False otherwise
    '''
    pid = os.getpid()
    caller = inspect.stack()[0][3]
    try:
        rl = redis_lock.Lock(self, lockname, expire=locktime, auto_renewal=auto_renewal)
    except Exception:
        # Narrowed from a bare except (which also swallowed SystemExit /
        # KeyboardInterrupt); lock construction failure is best-effort.
        if self.logger:
            self.logger.error('Process {0} ({1}) could not get lock {2}. Going ahead without locking!!! {3}'.format(pid, caller, lockname, traceback.format_exc()))
        return False

    try:
        # Non-blocking: report failure immediately instead of waiting.
        lock = rl.acquire(blocking=False)
    except RedisError:
        return False

    return rl if lock else False
python
{ "resource": "" }
q44456
StoneRedis.wait_for_lock
train
def wait_for_lock(self, lockname, locktime=60, auto_renewal=False):
    ''' Gets a lock or waits until it is able to get it

    :param lockname: name of the lock to acquire
    :param locktime: lock expiry in seconds
    :param auto_renewal: whether redis_lock should keep renewing the expiry
    :returns: the redis_lock.Lock, or False when it could not be created
    '''
    pid = os.getpid()
    caller = inspect.stack()[0][3]
    try:
        # rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ])
        rl = redis_lock.Lock(self, lockname, expire=locktime, auto_renewal=auto_renewal)
    except AssertionError:
        # Construction failed: proceed without locking (best-effort).
        if self.logger:
            self.logger.error('Process {0} ({1}) could not get lock {2}. Going ahead without locking!!! {3}'.format(pid, caller, lockname, traceback.format_exc()))
        return False
    cont = 1
    t0 = time.time()
    lock = None
    # Poll every 50ms until the lock is acquired.
    while not lock:
        time.sleep(.05)
        cont += 1
        if cont % 20 == 0:
            # Roughly once a second, report that we are still waiting.
            if self.logger:
                self.logger.debug('Process {0} ({1}) waiting for lock {2}. {3} seconds elapsed.'.format(pid, caller, lockname, time.time() - t0))
        # lock = rl.lock(lockname, locktime_ms)
        try:
            lock = rl.acquire()
        except RedisError:
            # Transient redis failure: keep retrying.
            pass
    if self.logger:
        self.logger.debug('Process {0} ({1}) got lock {2} for {3} seconds'.format(pid, caller, lockname, locktime))
    return rl
python
{ "resource": "" }
q44457
StoneRedis.release_lock
train
def release_lock(self, lock, force=False):
    '''Release a previously acquired lock.

    :param lock: lock object returned by get_lock()/wait_for_lock(); may be
        False or None, in which case nothing happens
    :param force: NOTE(review): accepted but never used -- confirm whether a
        forced release should bypass the _held check
    '''
    pid = os.getpid()
    caller = inspect.stack()[0][3]
    # Only release locks we actually hold. NOTE(review): ``_held`` is a
    # private attribute of redis_lock.Lock -- relies on library internals.
    if lock and lock._held:
        lock.release()
        if self.logger:
            self.logger.debug('Process {0} ({1}) released lock'.format(pid, caller))
python
{ "resource": "" }
q44458
StoneRedis.pipeline
train
def pipeline(self, transaction=True, shard_hint=None):
    '''Create a pipeline object that supports the StoneRedis custom methods.

    :param transaction: whether the pipeline is wrapped in MULTI/EXEC
    :param shard_hint: optional shard hint forwarded to the pipeline
    :return: a StonePipeline bound to this client's connection pool
    '''
    return StonePipeline(
        connection_pool=self.connection_pool,
        response_callbacks=self.response_callbacks,
        transaction=transaction,
        shard_hint=shard_hint,
        logger=self.logger,
    )
python
{ "resource": "" }
q44459
StonePipeline.multi_lpop
train
def multi_lpop(self, queue, number, transaction=False):
    '''Pop multiple elements from a list inside this pipeline.

    :param queue: name of the Redis list
    :param number: number of elements to pop
    :param transaction: unused here; kept for interface compatibility
    '''
    # The previous ``try: ... except: raise`` wrapper was a no-op, and its
    # bare except would also have intercepted SystemExit/KeyboardInterrupt
    # before re-raising; call the helper directly instead.
    self._multi_lpop_pipeline(self, queue, number)
python
{ "resource": "" }
q44460
StonePipeline.multi_rpush
train
def multi_rpush(self, queue, values, bulk_size=0, transaction=False): ''' Pushes multiple elements to a list ''' # Check that what we receive is iterable if hasattr(values, '__iter__'): self._multi_rpush_pipeline(self, queue, values, 0) else: raise ValueError('Expected an iterable')
python
{ "resource": "" }
q44461
_deserialize_dict
train
def _deserialize_dict(data, boxed_type):
    """Deserialize every value of *data* into *boxed_type*.

    :param data: dict whose values should be deserialized
    :type data: dict
    :param boxed_type: class literal used to deserialize each value
    :return: a new dict with the same keys and deserialized values
    :rtype: dict
    """
    result = {}
    for key, value in six.iteritems(data):
        result[key] = _deserialize(value, boxed_type)
    return result
python
{ "resource": "" }
q44462
ObserverStore.remove
train
def remove(self, what, call):
    """Remove a previously registered observer.

    what: (string | array) state fields being observed
    call: (function) the callback that was registered; it takes two
        parameters: previousValue and actualValue
    """
    entry = {
        "observing": what,
        "type": observerTypeEnum.typeOf(what),
        "call": call,
    }
    self._observers.remove(entry)
python
{ "resource": "" }
q44463
ObserverStore.getObservers
train
def getObservers(self):
    """Get the list of observers subscribed to this instance.

    :return: list of dicts, each with ``observing`` and ``call`` keys
    :rtype: list
    """
    return [
        {"observing": entry["observing"], "call": entry["call"]}
        for entry in self._observers
    ]
python
{ "resource": "" }
q44464
add_job
train
def add_job(session, command_line, name = 'job', dependencies = [], array = None, exec_dir=None, log_dir = None, stop_on_failure = False, **kwargs):
    """Create a Job in the database and register its dependencies and array jobs.

    :param session: SQLAlchemy session used for all inserts
    :param command_line: command line that the job will execute
    :param name: human-readable name of the job
    :param dependencies: ids of jobs this job depends on
        (NOTE(review): mutable default argument; safe only while never mutated)
    :param array: optional (start, stop, step) tuple describing the array range
    :param exec_dir: directory in which the job is executed
    :param log_dir: directory for the job's log files
    :param stop_on_failure: whether this job should fail when a dependency fails
    :param kwargs: extra keyword arguments stored with the job
    :return: the newly created and committed Job instance
    """
    job = Job(command_line=command_line, name=name, exec_dir=exec_dir, log_dir=log_dir, array_string=array, stop_on_failure=stop_on_failure, kwargs=kwargs)
    session.add(job)
    # flush + refresh so the auto-generated unique id is available below
    session.flush()
    session.refresh(job)

    # by default id and unique id are identical, but the id might be overwritten later on
    job.id = job.unique

    for d in dependencies:
        if d == job.unique:
            logger.warn("Adding self-dependency of job %d is not allowed" % d)
            continue
        depending = list(session.query(Job).filter(Job.unique == d))
        if len(depending):
            session.add(JobDependence(job.unique, depending[0].unique))
        else:
            logger.warn("Could not find dependent job with id %d in database" % d)

    if array:
        (start, stop, step) = array
        # add array jobs (stop is inclusive)
        for i in range(start, stop+1, step):
            session.add(ArrayJob(i, job.unique))

    session.commit()
    return job
python
{ "resource": "" }
q44465
Job.submit
train
def submit(self, new_queue=None):
    """Mark this job (and all of its array jobs) as 'submitted'.

    Resets results, machine names and timing information, and optionally
    moves the job to a different queue.

    :param new_queue: if given, the job is re-assigned to this queue
    """
    if new_queue is not None:
        self.queue_name = new_queue
    # reset the job itself
    self.status = 'submitted'
    self.result = None
    self.machine_name = None
    # reset every array job the same way
    for array_job in self.array:
        array_job.status, array_job.result, array_job.machine_name = 'submitted', None, None
    # record the new submission time and clear old timing information
    self.submit_time = datetime.now()
    self.start_time = None
    self.finish_time = None
python
{ "resource": "" }
q44466
Job.queue
train
def queue(self, new_job_id=None, new_job_name=None, queue_name=None):
    """Set this job's status to 'queued', or 'waiting' while dependencies run.

    :param new_job_id: grid job id to adopt (when the job runs in the grid)
    :param new_job_name: new name for the job
    :param queue_name: name of the queue the job was submitted to
    """
    # adopt grid-assigned identifiers, when given
    if new_job_id is not None:
        self.id = new_job_id
    if new_job_name is not None:
        self.name = new_job_name
    if queue_name is not None:
        self.queue_name = queue_name

    self.result = None
    new_status = 'queued'
    # downgrade to 'waiting' while any dependency is unfinished; fail outright
    # when stop_on_failure is set and a dependency already failed
    for dependency in self.get_jobs_we_wait_for():
        if dependency.status not in ('success', 'failure'):
            new_status = 'waiting'
        elif self.stop_on_failure and dependency.status == 'failure':
            new_status = 'failure'

    # push already-queued dependents back to 'waiting' (or 'failure')
    for dependent in self.get_jobs_waiting_for_us():
        if dependent.status == 'queued':
            dependent.status = 'failure' if new_status == 'failure' else 'waiting'

    self.status = new_status
    for array_job in self.array:
        if array_job.status not in ('success', 'failure'):
            array_job.status = new_status
python
{ "resource": "" }
q44467
Job.execute
train
def execute(self, array_id=None, machine_name=None):
    """Mark this job (or one of its array jobs) as 'executing'.

    :param array_id: id of the array job that started, if any
    :param machine_name: name of the machine the job runs on
    """
    self.status = 'executing'
    if array_id is not None:
        # only the matching array job is updated
        for array_job in self.array:
            if array_job.id != array_id:
                continue
            array_job.status = 'executing'
            if machine_name is not None:
                array_job.machine_name = machine_name
            array_job.start_time = datetime.now()
    elif machine_name is not None:
        self.machine_name = machine_name

    if self.start_time is None:
        self.start_time = datetime.now()

    # work around stale state: sometimes the 'finish' command did not run for
    # array jobs, so close out any dependency still flagged as 'executing'
    for dependency in self.get_jobs_we_wait_for():
        if dependency.array and dependency.status == 'executing':
            dependency.finish(0, -1)
python
{ "resource": "" }
q44468
Job.finish
train
def finish(self, result, array_id=None):
    """Mark this job (or one of its array jobs) as finished.

    :param result: exit code; 0 means 'success', anything else 'failure'
    :param array_id: id of the array job that finished, if any
    """
    new_status = 'success' if result == 0 else 'failure'
    new_result = result
    finished = True
    if array_id is not None:
        for array_job in self.array:
            if array_job.id == array_id:
                array_job.status = new_status
                array_job.result = result
                array_job.finish_time = datetime.now()
            if array_job.status not in ('success', 'failure'):
                # at least one array job is still running
                finished = False
            elif new_result == 0:
                # propagate the first non-zero result of any array job
                new_result = array_job.result

    if finished:
        # no array jobs, or all of them are done: finalize the whole job
        self.status = 'success' if new_result == 0 else 'failure'
        self.result = new_result
        self.finish_time = datetime.now()
        # re-queue jobs that were waiting for this one
        for dependent in self.get_jobs_waiting_for_us():
            if dependent.status == 'waiting':
                dependent.queue()
python
{ "resource": "" }
q44469
Job.refresh
train
def refresh(self):
    """Re-derive this job's status from the states of its array jobs.

    Only acts when the job is executing and has array jobs; once every
    array job has finished, the overall status and result are updated.
    """
    if not (self.status == 'executing' and self.array):
        return
    overall = 0
    for array_job in self.array:
        if array_job.status == 'failure' and overall is not None:
            overall = array_job.result
        elif array_job.status not in ('success', 'failure'):
            # still running somewhere -- the job is not finished yet
            overall = None
    if overall is not None:
        self.status = 'success' if overall == 0 else 'failure'
        self.result = overall
python
{ "resource": "" }
q44470
Job.get_array
train
def get_array(self): """Returns the array arguments for the job; usually a string.""" # In python 2, the command line is unicode, which needs to be converted to string before pickling; # In python 3, the command line is bytes, which can be pickled directly return loads(self.array_string) if isinstance(self.array_string, bytes) else loads(self.array_string.encode())
python
{ "resource": "" }
q44471
next_departures
train
def next_departures(bus_number, stop_code, date, time, nb_departure, db_file):
    """Return the next departure times of a bus line at a stop.

    Filters the GTFS data to the services active on *date* (using the
    calendar table, minus calendar_dates exceptions), then walks the
    stop_times ordered by departure time.

    :param bus_number: route id of the bus line (e.g. '51')
    :param stop_code: public stop code (e.g. '51176')
    :param date: service date as 'YYYYMMDD'
    :param time: (hour, minute) pair of zero-padded strings used as the
        lower bound for departures
    :param nb_departure: maximum number of departures to return
    :param db_file: path to the GTFS sqlite database
        (NOTE(review): unused here; the peewee models are bound elsewhere)
    :return: list of 'HH:MM' departure time strings
    """
    # weekday column name in the calendar table (e.g. 'wednesday')
    day_of_week = datetime.datetime.strptime(
        date, "%Y%m%d").strftime("%A").lower()

    # dates on which a service is explicitly disabled (holidays, etc.)
    subquery_days_off = CalendarDate.select(CalendarDate.service_id)\
        .where(
            date == CalendarDate.date
        )

    # services active on that weekday and date range, minus the days off
    subquery = Calendar.select(Calendar.service_id)\
        .where(
            (date >= Calendar.start_date) &
            (date <= Calendar.end_date) &
            (getattr(Calendar, day_of_week) == 1) &
            Calendar.service_id.not_in(subquery_days_off)
        )

    # departure times for the requested route/stop restricted to the
    # active services, ordered chronologically
    query_result = Trip.select(StopTime.departure_time)\
        .join(StopTime, on=(Trip.trip_id == StopTime.trip_id))\
        .join(Stop, on=(StopTime.stop_id == Stop.stop_id))\
        .where(
            (Trip.route_id == bus_number) &
            (Stop.stop_code == stop_code) &
            (Trip.service_id.in_(subquery)))\
        .order_by(StopTime.departure_time)

    result = []
    departures_listed = 0
    for i in query_result.dicts():
        dep_time = i['departure_time'].split(':')
        # string comparison is safe because GTFS times are zero padded
        if dep_time[0] == time[0] and dep_time[1] >= time[1]:
            result.append("{0}:{1}".format(dep_time[0], dep_time[1]))
            departures_listed += 1
        elif dep_time[0] > time[0]:
            result.append("{0}:{1}".format(dep_time[0], dep_time[1]))
            departures_listed += 1
        # BUG FIX: was ``departures_listed is nb_departure`` -- identity
        # comparison between ints only works for CPython's small-int cache,
        # so the limit was ignored for values above 256.
        if departures_listed == nb_departure:
            break
    return result
python
{ "resource": "" }
q44472
random_ipv4
train
def random_ipv4(cidr='10.0.0.0/8'):
    """Return a random IPv4 address from the given CIDR block.

    :key str cidr: CIDR block
    :returns: An IPv4 address from the given CIDR block
    :rtype: ipaddress.IPv4Address
    """
    # Python 2 compatibility: ipaddress wants text, so coerce to unicode
    # when that builtin exists.
    try:
        cidr_text = unicode(cidr)
    except NameError:
        cidr_text = cidr
    network = ipaddress.ip_network(cidr_text)
    # skip the network address; randrange's exclusive upper bound also
    # skips the broadcast address
    lo = int(network.network_address) + 1
    hi = int(network.broadcast_address)
    return ipaddress.ip_address(random.randrange(lo, hi))
python
{ "resource": "" }
q44473
_compile_qt_resources
train
def _compile_qt_resources():
    """Compile the PyQt5 resources file, if one is configured.

    Runs ``pyrcc5`` on the configured source file, writing the generated
    Python module to the configured target path. Does nothing when no Qt
    resource source is configured.
    """
    if config.QT_RES_SRC():
        # fail early if the pyrcc5 executable is not available
        epab.utils.ensure_exe('pyrcc5')
        LOGGER.info('compiling Qt resources')
        elib_run.run(f'pyrcc5 {config.QT_RES_SRC()} -o {config.QT_RES_TGT()}')
python
{ "resource": "" }
q44474
to_json
train
def to_json(msg):
    """Return a JSON string representation of the given Herald message.

    :param msg: a Herald message bean exposing headers, subject, content
        and metadata
    :return: the JSON string
    """
    result = {}
    # herald specification version
    #result[herald.MESSAGE_HERALD_VERSION] = herald.HERALD_SPECIFICATION_VERSION
    # headers -- NOTE(review): ``get(key) or None`` also maps falsy header
    # values (0, '', False) to None; confirm this is intended
    result[herald.MESSAGE_HEADERS] = {}
    if msg.headers is not None:
        for key in msg.headers:
            result[herald.MESSAGE_HEADERS][key] = msg.headers.get(key) or None
    # subject
    result[herald.MESSAGE_SUBJECT] = msg.subject
    # content: plain strings pass through, other values are converted to
    # their Jabsorb representation
    if msg.content is not None:
        if isinstance(msg.content, str):
            result[herald.MESSAGE_CONTENT] = msg.content
        else:
            result[herald.MESSAGE_CONTENT] = jabsorb.to_jabsorb(msg.content)
    # metadata (same falsy-to-None caveat as for the headers)
    result[herald.MESSAGE_METADATA] = {}
    if msg.metadata is not None:
        for key in msg.metadata:
            result[herald.MESSAGE_METADATA][key] = msg.metadata.get(key) or None
    return json.dumps(result, default=herald.utils.json_converter)
python
{ "resource": "" }
q44475
from_json
train
def from_json(json_string):
    """Build a MessageReceived bean from the given JSON string.

    :param json_string: JSON representation of a Herald message
    :return: a MessageReceived bean, or None if the string could not be
        parsed or is not a supported Herald message
    """
    # parse the provided json_message
    try:
        parsed_msg = json.loads(json_string)
    except ValueError as ex:
        # if the provided json_message is not a valid JSON
        return None
    except TypeError as ex:
        # if json_message not string or buffer
        return None

    herald_version = None
    # check that this is a Herald message of a supported specification version
    if herald.MESSAGE_HEADERS in parsed_msg:
        if herald.MESSAGE_HERALD_VERSION in parsed_msg[herald.MESSAGE_HEADERS]:
            herald_version = parsed_msg[herald.MESSAGE_HEADERS].get(herald.MESSAGE_HERALD_VERSION)
    if herald_version is None or herald_version != herald.HERALD_SPECIFICATION_VERSION:
        _logger.error("Herald specification of the received message is not supported!")
        return None

    # construct the MessageReceived bean from the parsed fields
    msg = herald.beans.MessageReceived(uid=(parsed_msg[herald.MESSAGE_HEADERS].get(herald.MESSAGE_HEADER_UID) or None),
                                       subject=parsed_msg[herald.MESSAGE_SUBJECT],
                                       content=None,
                                       sender_uid=(parsed_msg[herald.MESSAGE_HEADERS].get(herald.MESSAGE_HEADER_SENDER_UID) or None),
                                       reply_to=(parsed_msg[herald.MESSAGE_HEADERS].get(herald.MESSAGE_HEADER_REPLIES_TO) or None),
                                       access=None,
                                       timestamp=(parsed_msg[herald.MESSAGE_HEADERS].get(herald.MESSAGE_HEADER_TIMESTAMP) or None)
                                       )
    # content: plain strings pass through, other values come from Jabsorb
    try:
        if herald.MESSAGE_CONTENT in parsed_msg:
            parsed_content = parsed_msg[herald.MESSAGE_CONTENT]
            if parsed_content is not None:
                if isinstance(parsed_content, str):
                    msg.set_content(parsed_content)
                else:
                    msg.set_content(jabsorb.from_jabsorb(parsed_content))
    except KeyError as ex:
        _logger.error("Error retrieving message content! " + str(ex))

    # copy remaining headers that were not consumed by the constructor
    if herald.MESSAGE_HEADERS in parsed_msg:
        for key in parsed_msg[herald.MESSAGE_HEADERS]:
            if key not in msg._headers:
                msg._headers[key] = parsed_msg[herald.MESSAGE_HEADERS][key]
    # metadata
    if herald.MESSAGE_METADATA in parsed_msg:
        for key in parsed_msg[herald.MESSAGE_METADATA]:
            if key not in msg._metadata:
                msg._metadata[key] = parsed_msg[herald.MESSAGE_METADATA][key]
    return msg
python
{ "resource": "" }
q44476
Naming.new_type
train
def new_type(type_name: str, prefix: str or None = None) -> str:
    """Create a resource type name, optionally qualified with a prefix.

    Following JSON-LD, prefixes disambiguate types that share a name but
    live under different URIs (e.g. ``devices:Accept`` vs
    ``projects:Accept``); unambiguous types need no prefix.

    :param type_name: plain type name; must not already contain a prefix
    :param prefix: optional namespace to prepend
    :raises TypeError: if ``type_name`` is already prefixed
    """
    if Naming.TYPE_PREFIX in type_name:
        raise TypeError('Cannot create new type: type {} is already prefixed.'.format(type_name))
    if prefix is None:
        return type_name
    return prefix + Naming.TYPE_PREFIX + type_name
python
{ "resource": "" }
q44477
Naming.hid
train
def hid(manufacturer: str, serial_number: str, model: str) -> str:
    """Compute the HID from the given device properties.

    The result is built from URL-safe words, so it can be used in an URI.
    """
    parts = (manufacturer, serial_number, model)
    return '-'.join(Naming.url_word(part) for part in parts)
python
{ "resource": "" }
q44478
get_membership_document
train
def get_membership_document(membership_type: str, current_block: dict, identity: Identity, salt: str, password: str) -> Membership: """ Get a Membership document :param membership_type: "IN" to ask for membership or "OUT" to cancel membership :param current_block: Current block data :param identity: Identity document :param salt: Passphrase of the account :param password: Password of the account :rtype: Membership """ # get current block BlockStamp timestamp = BlockUID(current_block['number'], current_block['hash']) # create keys from credentials key = SigningKey.from_credentials(salt, password) # create identity document membership = Membership( version=10, currency=current_block['currency'], issuer=key.pubkey, membership_ts=timestamp, membership_type=membership_type, uid=identity.uid, identity_ts=identity.timestamp, signature=None ) # sign document membership.sign([key]) return membership
python
{ "resource": "" }
q44479
getvar
train
def getvar(syntree, targetvar):
    """Scan an ast.Module for *targetvar* and return its literal value.

    Only handles single direct assignments of Python literal types; see the
    docs on ast.literal_eval for more info:
    http://docs.python.org/2/library/ast.html#ast.literal_eval

    Args:
        syntree: ast.Module object
        targetvar: name of global variable to return

    Returns:
        Value of targetvar if found in syntree, or None if not found.
    """
    for node in syntree.body:
        if not isinstance(node, ast.Assign):
            continue
        for target in node.targets:
            # BUG FIX: tuple/attribute/subscript targets (e.g. ``a, b = ...``)
            # have no ``.id`` attribute and used to raise AttributeError;
            # only plain name targets are considered now.
            if isinstance(target, ast.Name) and target.id == targetvar:
                return ast.literal_eval(node.value)
python
{ "resource": "" }
q44480
findObjects
train
def findObjects(path):
    """Recursively find object directories in a pairtree.

    Walks *path* and collects directories whose names are not pairtree
    "shorty" components.

    :param path: root of the pairtree (or a subtree of it)
    :return: list of paths of object directories found
    """
    if not os.path.isdir(path):
        return []
    found = []
    for item in os.listdir(path):
        fullPath = os.path.join(path, item)
        if not os.path.isdir(fullPath):
            # a split end: a file inside the tree means this directory is the
            # object itself (a normalize option might be worth considering)
            return [path]
        if isShorty(item):
            found.extend(findObjects(fullPath))
        else:
            found.append(fullPath)
    return found
python
{ "resource": "" }
q44481
get_pair_path
train
def get_pair_path(meta_id):
    """Return the pairtree path (including the id directory) for *meta_id*."""
    return os.path.join(pair_tree_creator(meta_id), meta_id)
python
{ "resource": "" }
q44482
pair_tree_creator
train
def pair_tree_creator(meta_id):
    """Split *meta_id* into two-character chunks and join them as a path.

    The result is wrapped in path separators, e.g. ``abc`` becomes
    ``/ab/c/`` (using the platform separator).
    """
    chunks = [meta_id[i:i + 2] for i in range(0, len(meta_id), 2)]
    return os.sep + os.sep.join(chunks) + os.sep
python
{ "resource": "" }
q44483
deSanitizeString
train
def deSanitizeString(name):
    """Undo the pairtree-identifier sanitization applied by sanitizeString."""
    text = name
    # pass 1: restore the simple single-character substitutions
    for original, substitute in (("/", "="), (":", "+"), (".", ",")):
        text = text.replace(substitute, original)
    # restore control characters (ASCII 0-32); sanitizerNum was added during
    # sanitization, so it must be subtracted here
    for code in range(0, 33):
        text = text.replace(hex(code + sanitizerNum).replace('0x', '^'), chr(code))
    # pass 2: restore the caret-escaped specials; '^5e' -> '^' must come last
    # so restored carets are not re-interpreted as escape prefixes
    for original, escape in (
        ('"', '^22'), ('<', '^3c'), ('?', '^3f'), ('*', '^2a'),
        ('=', '^3d'), ('+', '^2b'), ('>', '^3e'), ('|', '^7c'),
        (',', '^2c'), ('^', '^5e'),
    ):
        text = text.replace(escape, original)
    return text
python
{ "resource": "" }
q44484
sanitizeString
train
def sanitizeString(name):
    """Escape characters in *name* so it can be split into a pairtree path."""
    text = name
    # pass 1: caret-escape the special characters; '^' itself must be escaped
    # first so the escapes added below are not re-escaped
    for char, escape in (
        ('^', '^5e'), ('"', '^22'), ('<', '^3c'), ('?', '^3f'),
        ('*', '^2a'), ('=', '^3d'), ('+', '^2b'), ('>', '^3e'),
        ('|', '^7c'), (',', '^2c'),
    ):
        text = text.replace(char, escape)
    # escape control characters (ASCII 0-32); sanitizerNum offsets the code
    # to avoid ambiguity at de-sanitization (e.g. '^x1' followed by letter
    # 'e' vs a real '^x1e')
    for code in range(0, 33):
        text = text.replace(chr(code), hex(code + sanitizerNum).replace('0x', '^'))
    # pass 2: substitutions for path-hostile characters
    # / -> =   : -> +   . -> ,
    for char, substitute in (("/", "="), (":", "+"), (".", ",")):
        text = text.replace(char, substitute)
    return text
python
{ "resource": "" }
q44485
toPairTreePath
train
def toPairTreePath(name):
    """Sanitize *name* and split it into a pairtree path (with trailing separator)."""
    cleaned = sanitizeString(name)
    chunks = [cleaned[i:i + 2] for i in range(0, len(cleaned), 2)]
    return os.sep.join(chunks) + os.sep
python
{ "resource": "" }
q44486
create_paired_dir
train
def create_paired_dir(output_dir, meta_id, static=False, needwebdir=True): """Creates the meta or static dirs. Adds an "even" or "odd" subdirectory to the static path based on the meta-id. """ # get the absolute root path root_path = os.path.abspath(output_dir) # if it's a static directory, add even and odd if static: # determine whether meta-id is odd or even if meta_id[-1].isdigit(): last_character = int(meta_id[-1]) else: last_character = ord(meta_id[-1]) if last_character % 2 == 0: num_dir = 'even' else: num_dir = 'odd' # add odd or even to the path, based on the meta-id output_path = os.path.join(root_path, num_dir) # if it's a meta directory, output as normal else: output_path = root_path # if it doesn't already exist, create the output path (includes even/odd) if not os.path.exists(output_path): os.mkdir(output_path) # add the pairtree to the output path path_name = add_to_pairtree(output_path, meta_id) # add the meta-id directory to the end of the pairpath meta_dir = os.path.join(path_name, meta_id) os.mkdir(meta_dir) # if we are creating static output if static and needwebdir: # add the web path to the output directory os.mkdir(os.path.join(meta_dir, 'web')) static_dir = os.path.join(meta_dir, 'web') return static_dir # else we are creating meta output or don't need web directory else: return meta_dir
python
{ "resource": "" }
q44487
add_to_pairtree
train
def add_to_pairtree(output_path, meta_id):
    """Create the pairtree directory structure for *meta_id* under *output_path*.

    Each missing directory along the pair path is created in turn.

    :return: the full path of the deepest pairtree directory created
    """
    path_append = ''
    # walk the pair path one component at a time, creating as we go
    for pair_dir in pair_tree_creator(meta_id).split(os.sep):
        path_append = os.path.join(path_append, pair_dir)
        combined_path = os.path.join(output_path, path_append)
        if not os.path.exists(combined_path):
            os.mkdir(combined_path)
    return combined_path
python
{ "resource": "" }
q44488
get_pairtree_prefix
train
def get_pairtree_prefix(pairtree_store):
    """Read and return the prefix from the store's ``pairtree_prefix`` file.

    :param pairtree_store: root directory of the pairtree store
    :return: the prefix string, stripped of surrounding whitespace
    """
    prefix_file = os.path.join(pairtree_store, 'pairtree_prefix')
    with open(prefix_file, 'r') as handle:
        return handle.read().strip()
python
{ "resource": "" }
q44489
Document.parse_field
train
def parse_field(cls: Type[DocumentType], field_name: str, line: str) -> Any:
    """
    Parse a document field with its registered regular expression.

    :param field_name: Name of the field (key into ``cls.fields_parsers``)
    :param line: Line string to parse
    :raises MalformedDocumentError: if the line does not match the pattern
    :return: the first captured group of the match
    """
    match = cls.fields_parsers[field_name].match(line)
    if match is None:
        raise MalformedDocumentError(field_name)
    return match.group(1)
python
{ "resource": "" }
q44490
Document.sha_hash
train
def sha_hash(self) -> str:
    """
    Return the uppercase hex SHA-256 digest of the signed raw document.

    :return: hex digest string in upper case
    """
    raw_bytes = self.signed_raw().encode("ascii")
    return hashlib.sha256(raw_bytes).hexdigest().upper()
python
{ "resource": "" }
q44491
SavageLogMixin.build_row_dict
train
def build_row_dict(cls, row, dialect, deleted=False, user_id=None, use_dirty=True):
    """
    Builds a dictionary of archive data from row which is suitable for insert.

    NOTE: If `deleted` is True, version ID will be set to an AsIs SQL construct.

    :param row: instance of :class:`~SavageModelMixin`
    :param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
    :param deleted: whether or not the row is deleted (defaults to False)
    :param user_id: ID of user that is performing the update on this row (defaults to None)
    :param use_dirty: whether to use the dirty fields from row or not (defaults to True)
    :return: a dictionary of archive table column names to values, suitable for insert
    :rtype: dict
    """
    data = {
        'data': row.to_archivable_dict(dialect, use_dirty=use_dirty),
        'deleted': deleted,
        'updated_at': datetime.now(),
        # deletions record the next version via raw SQL; otherwise the row's
        # own version id is archived
        'version_id': current_version_sql(as_is=True) if deleted else row.version_id
    }
    # copy every versioned column value from the row
    for col_name in row.version_columns:
        data[col_name] = utils.get_column_attribute(row, col_name, use_dirty=use_dirty)
    if user_id is not None:
        data['user_id'] = user_id
    return data
python
{ "resource": "" }
q44492
SavageLogMixin.bulk_archive_rows
train
def bulk_archive_rows(cls, rows, session, user_id=None, chunk_size=1000, commit=True):
    """
    Bulk archives data previously written to DB.

    :param rows: iterable of previously saved model instances to archive
    :param session: DB session to use for inserts
    :param user_id: ID of user responsible for row modifications
    :param chunk_size: number of rows per INSERT statement (defaults to 1000)
    :param commit: whether to commit the session at the end (defaults to True)
    :return:
    """
    dialect = utils.get_dialect(session)
    to_insert_dicts = []
    for row in rows:
        row_dict = cls.build_row_dict(row, user_id=user_id, dialect=dialect)
        to_insert_dicts.append(row_dict)
        if len(to_insert_dicts) < chunk_size:
            continue
        # Insert a batch of rows
        session.execute(insert(cls).values(to_insert_dicts))
        to_insert_dicts = []
    # Insert final batch of rows (if any)
    if to_insert_dicts:
        session.execute(insert(cls).values(to_insert_dicts))
    if commit:
        session.commit()
python
{ "resource": "" }
q44493
SavageLogMixin._validate
train
def _validate(cls, engine, *version_cols):
    """
    Validates the archive table.

    Validates the following criteria:
        - all version columns exist in the archive table
        - the python types of the user table and archive table columns are the same
        - a user_id column exists
        - there is a unique constraint on version and the other versioned
          columns from the user table

    :param engine: instance of :class:`~sqlalchemy.engine.Engine`
    :param *version_cols: instances of :class:`~InstrumentedAttribute` from
        the user table corresponding to the columns that versioning pivots around
    :raises: :class:`~LogTableCreationError`
    """
    cls._version_col_names = set()
    for version_column_ut in version_cols:
        # Make sure all version columns exist on this table
        version_col_name = version_column_ut.key
        version_column_at = getattr(cls, version_col_name, None)
        if not isinstance(version_column_at, InstrumentedAttribute):
            raise LogTableCreationError("Log table needs {} column".format(version_col_name))
        # Make sure the type of the user table and log table columns are the same
        version_col_at_t = version_column_at.property.columns[0].type.__class__
        version_col_ut_t = version_column_ut.property.columns[0].type.__class__
        if version_col_at_t != version_col_ut_t:
            raise LogTableCreationError(
                "Type of column {} must match in log and user table".format(version_col_name)
            )
        cls._version_col_names.add(version_col_name)

    # Ensure user added a user_id column
    # TODO: should user_id column be optional?
    user_id = getattr(cls, 'user_id', None)
    if not isinstance(user_id, InstrumentedAttribute):
        raise LogTableCreationError("Log table needs user_id column")

    # Check the unique constraint on the versioned columns
    version_col_names = list(cls._version_col_names) + ['version_id']
    if not utils.has_constraint(cls, engine, *version_col_names):
        raise LogTableCreationError("There is no unique constraint on the version columns")
python
{ "resource": "" }
q44494
self_aware
train
def self_aware(fn):
    '''
    decorating a function with this allows it to
    refer to itself as 'self' inside the function body.

    For generator functions, each call creates the generator and injects it
    into the generator frame's builtins under the name ``self``. For plain
    functions, the globals are frozen via strict_globals and ``self`` is
    bound to the function object itself.
    '''
    if isgeneratorfunction(fn):
        @wraps(fn)
        def wrapper(*a, **k):
            generator = fn(*a, **k)
            # guard against frames/builtins that are absent or read-only
            if hasattr(
                generator, 'gi_frame'
            ) and hasattr(
                generator.gi_frame, 'f_builtins'
            ) and hasattr(
                generator.gi_frame.f_builtins, '__setitem__'
            ):
                generator.gi_frame.f_builtins['self'] = generator
            # NOTE(review): the wrapper never returns the created generator,
            # so callers receive None -- confirm this is intentional.
        return wrapper
    else:
        fn = strict_globals(**fn.__globals__)(fn)
        fn.__globals__['self'] = fn
        return fn
python
{ "resource": "" }
q44495
FetchTransformSaveApp._infinite_iterator
train
def _infinite_iterator(self): """this iterator wraps the "_basic_iterator" when the configuration specifies that the "number_of_submissions" is set to "forever". Whenever the "_basic_iterator" is exhausted, it is called again to restart the iteration. It is up to the implementation of the innermost iterator to define what starting over means. Some iterators may repeat exactly what they did before, while others may iterate over new values""" while True: for crash_id in self._basic_iterator(): if self._filter_disallowed_values(crash_id): continue yield crash_id
python
{ "resource": "" }
q44496
FetchTransformSaveApp._limited_iterator
train
def _limited_iterator(self):
    """Yield at most "number_of_submissions" crash ids.

    This is the iterator for the case when "number_of_submissions" is set to
    an integer. It runs the innermost iterator, restarting it on exhaustion
    (what starting over means is up to the innermost iterator), until the
    configured count has been yielded. Disallowed values are skipped, and
    ``None`` values are yielded but not counted against the limit.
    """
    i = 0
    while True:
        for crash_id in self._basic_iterator():
            if self._filter_disallowed_values(crash_id):
                continue
            if crash_id is None:
                # it's ok to yield None, however, we don't want it to
                # be counted as a yielded value
                yield crash_id
                continue
            if i == int(self.config.number_of_submissions):
                # break out of inner loop, abandoning the wrapped iter
                break
            i += 1
            yield crash_id
        # repeat the quit test, to break out of the outer loop and
        # if necessary, prevent recycling the wrapped iter
        if i == int(self.config.number_of_submissions):
            break
python
{ "resource": "" }
q44497
FetchTransformSaveApp._transform
train
def _transform(self, crash_id):
    """Copy one crash's raw data from the source to the destination, unchanged.

    This default transform function only transfers raw data from the source
    to the destination without changing the data. While this may be good
    enough for the raw crashmover, the processor overrides this method to
    create and save processed crashes. Read failures fall back to empty
    data; write failures are logged and leave the source copy in place.
    """
    try:
        raw_crash = self.source.get_raw_crash(crash_id)
    except Exception as x:
        self.config.logger.error(
            "reading raw_crash: %s",
            str(x),
            exc_info=True
        )
        raw_crash = {}
    try:
        dumps = self.source.get_raw_dumps(crash_id)
    except Exception as x:
        self.config.logger.error(
            "reading dump: %s",
            str(x),
            exc_info=True
        )
        dumps = {}
    try:
        self.destination.save_raw_crash(raw_crash, dumps, crash_id)
        self.config.logger.info('saved - %s', crash_id)
    except Exception as x:
        self.config.logger.error(
            "writing raw: %s",
            str(x),
            exc_info=True
        )
    else:
        # only remove from the source after a successful write
        try:
            self.source.remove(crash_id)
        except Exception as x:
            self.config.logger.error(
                "removing raw: %s",
                str(x),
                exc_info=True
            )
python
{ "resource": "" }
q44498
FetchTransformSaveApp._setup_source_and_destination
train
def _setup_source_and_destination(self):
    """Instantiate the source and destination crash storage systems.

    The concrete classes come from configuration. Failures are logged as
    critical and re-raised, since the app cannot run without both storages.
    """
    try:
        self.source = self.config.source.crashstorage_class(
            self.config.source,
            quit_check_callback=self.quit_check
        )
    except Exception:
        self.config.logger.critical(
            'Error in creating crash source',
            exc_info=True
        )
        raise
    try:
        self.destination = self.config.destination.crashstorage_class(
            self.config.destination,
            quit_check_callback=self.quit_check
        )
    except Exception:
        self.config.logger.critical(
            'Error in creating crash destination',
            exc_info=True
        )
        raise
python
{ "resource": "" }
q44499
FetchTransformSaveApp.main
train
def main(self):
    """Entry point: set up storages and run the threaded task manager.

    Sets up the threaded task manager plus the source and destination
    crashstorage systems; that starts a flock of threads that shepherd
    crashes from the source to the destination. Blocks until the task
    manager is done, then closes down.
    """
    self._setup_task_manager()
    self._setup_source_and_destination()
    # blocks until the task manager is told to quit
    self.task_manager.blocking_start(waiting_func=self.waiting_func)
    self.close()
    self.config.logger.info('done.')
python
{ "resource": "" }