_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q37500
URL.qs_add
train
def qs_add(self, *args, **kwargs):
    '''Add value to QuerySet MultiDict'''
    # Work on a copy so the original URL object stays immutable.
    updated = self.query.copy()
    if args:
        for key, value in MultiDict(args[0]).items():
            updated.add(key, value)
    for key, value in kwargs.items():
        updated.add(key, value)
    return self._copy(query=updated)
python
{ "resource": "" }
q37501
URL.qs_delete
train
def qs_delete(self, *keys):
    '''Delete value from QuerySet MultiDict'''
    # Copy first; missing keys are silently ignored.
    updated = self.query.copy()
    for key in set(keys):
        try:
            del updated[key]
        except KeyError:
            pass
    return self._copy(query=updated)
python
{ "resource": "" }
q37502
URL.qs_get
train
def qs_get(self, key, default=None):
    '''Get a value from QuerySet MultiDict'''
    # Simple delegation to the underlying MultiDict lookup.
    result = self.query.get(key, default=default)
    return result
python
{ "resource": "" }
q37503
warehouse_query
train
def warehouse_query(line, cell):
    "my cell magic"
    from IPython import get_ipython
    # First token on the magic line names the warehouse variable in the shell.
    tokens = line.split()
    warehouse_name = tokens.pop(0)
    warehouse = get_ipython().ev(warehouse_name)
    warehouse.query(cell).close()
python
{ "resource": "" }
q37504
list_product_releases
train
def list_product_releases(page_size=200, page_index=0, sort="", q=""):
    """ List all ProductReleases """
    raw = list_product_releases_raw(page_size, page_index, sort, q)
    # Falsy payloads fall through and return None, matching the raw call.
    return utils.format_json_list(raw) if raw else None
python
{ "resource": "" }
q37505
update_release
train
def update_release(id, **kwargs):
    """ Update an existing ProductRelease with new information """
    raw = update_release_raw(id, **kwargs)
    # Only format when the update returned a payload.
    return utils.format_json(raw) if raw else None
python
{ "resource": "" }
q37506
Library.get
train
def get(self, identifier):
    """get provider by id"""
    # First provider whose identifier matches, or None.
    matches = (p for p in self._providers if p.identifier == identifier)
    return next(matches, None)
python
{ "resource": "" }
q37507
Library.list_song_standby
train
def list_song_standby(self, song, onlyone=True):
    """try to list all valid standby

    Search a song in all providers. The typical usage scenario is when a
    song is not available in one provider, we can try to acquire it from
    other providers.

    Standby choosing strategy: search from all providers, select two song
    from each provide. Those standby song should have same title and
    artist name.

    TODO: maybe we should read a strategy from user config, user knows
    which provider owns copyright about an artist.

    FIXME: this method will send several network requests,
    which may block the caller.

    :param song: song model
    :param exclude: exclude providers list
    :return: list of songs (maximum count: 2)
    """
    def get_score(standby):
        # Start from a perfect score and subtract per-field mismatch.
        score = 1
        # Weight relationship:
        # title + album > artist
        # artist > title > album
        if song.artists_name != standby.artists_name:
            score -= 0.4
        if song.title != standby.title:
            score -= 0.3
        if song.album_name != standby.album_name:
            score -= 0.2
        return score

    # Search every provider except the song's own source.
    valid_sources = [p.identifier for p in self.list()
                     if p.identifier != song.source]
    q = '{} {}'.format(song.title, song.artists_name)
    standby_list = []
    for result in self.search(q, source_in=valid_sources, limit=10):
        # Take at most two candidates from each provider's result.
        for standby in result.songs[:2]:
            standby_list.append(standby)
    # Best-matching candidates first.
    standby_list = sorted(
        standby_list,
        key=lambda standby: get_score(standby),
        reverse=True
    )
    valid_standby_list = []
    for standby in standby_list:
        if standby.url:
            valid_standby_list.append(standby)
            # A perfect match -- or caller only wants one -- stops early.
            if get_score(standby) == 1 or onlyone:
                break
        if len(valid_standby_list) >= 2:
            break
    return valid_standby_list
python
{ "resource": "" }
q37508
import_class_by_string
train
def import_class_by_string(name):
    """Return a class by importing its module from a fully qualified string."""
    # Split 'pkg.mod.Class' into module path and final attribute name.
    parts = name.split('.')
    attr_name = parts.pop()
    # __import__ returns the top-level package; walk down from there.
    obj = __import__('.'.join(parts))
    for attr in parts[1:] + [attr_name]:
        obj = getattr(obj, attr)
    return obj
python
{ "resource": "" }
q37509
SimSymbolicDbgMemory.get_unconstrained_bytes
train
def get_unconstrained_bytes(self, name, bits, source=None, key=None, inspect=True, events=True, **kwargs):
    """
    Get some consecutive unconstrained bytes.

    :param name: Name of the unconstrained variable
    :param bits: Size of the unconstrained variable
    :param source: Where those bytes are read from. Currently it is only used in under-constrained symbolic
                   execution so that we can track the allocation depth.
    :param key: Passed through to the solver's Unconstrained constructor.
    :param inspect: Whether solver inspection hooks fire for this variable.
    :param events: Whether solver events fire for this variable.
    :return: The generated variable
    """
    if (self.category == 'mem' and
            options.CGC_ZERO_FILL_UNCONSTRAINED_MEMORY in self.state.options):
        # CGC binaries zero-fill the memory for any allocated region
        # Reference: (https://github.com/CyberGrandChallenge/libcgc/blob/master/allocate.md)
        return self.state.solver.BVV(0, bits)
    elif options.SPECIAL_MEMORY_FILL in self.state.options and self.state._special_memory_filler is not None:
        # A user-supplied filler takes precedence over a plain unconstrained value.
        return self.state._special_memory_filler(name, bits, self.state)
    else:
        if options.UNDER_CONSTRAINED_SYMEXEC in self.state.options:
            if source is not None and type(source) is int:
                # Track how deep in the allocation chain this read originates.
                alloc_depth = self.state.uc_manager.get_alloc_depth(source)
                kwargs['uc_alloc_depth'] = 0 if alloc_depth is None else alloc_depth + 1
        r = self.state.solver.Unconstrained(name, bits, key=key, inspect=inspect, events=events, **kwargs)
        return r
python
{ "resource": "" }
q37510
BuildConfigurationRest.build_type
train
def build_type(self, build_type):
    """
    Sets the build_type of this BuildConfigurationRest.

    :param build_type: The build_type of this BuildConfigurationRest.
    :type: str
    :raises ValueError: if the value is not one of the allowed build types.
    """
    valid = ["MVN", "NPM"]
    if build_type in valid:
        self._build_type = build_type
    else:
        raise ValueError(
            "Invalid value for `build_type` ({0}), must be one of {1}"
            .format(build_type, valid)
        )
python
{ "resource": "" }
q37511
failUnlessWarns
train
def failUnlessWarns(self, category, message, filename, f, *args, **kwargs):
    """
    Fail if the given function doesn't generate the specified warning when
    called. It calls the function, checks the warning, and forwards the
    result of the function if everything is fine.

    @param category: the category of the warning to check.
    @param message: the output message of the warning to check.
    @param filename: the filename where the warning should come from.
    @param f: the function which is supposed to generate the warning.
    @type f: any callable.
    @param args: the arguments to C{f}.
    @param kwargs: the keywords arguments to C{f}.

    @return: the result of the original function C{f}.
    """
    warningsShown = []
    # Capture every warn_explicit call instead of letting it print.
    def warnExplicit(*args):
        warningsShown.append(args)

    origExplicit = warnings.warn_explicit
    try:
        warnings.warn_explicit = warnExplicit
        result = f(*args, **kwargs)
    finally:
        # Always restore the real hook, even if f raised.
        warnings.warn_explicit = origExplicit

    if not warningsShown:
        self.fail("No warnings emitted")
    first = warningsShown[0]
    # All captured warnings must agree on (message, category).
    for other in warningsShown[1:]:
        if other[:2] != first[:2]:
            self.fail("Can't handle different warnings")
    gotMessage, gotCategory, gotFilename, lineno = first[:4]
    self.assertEqual(gotMessage, message)
    self.assertIdentical(gotCategory, category)

    # Use starts with because of .pyc/.pyo issues.
    self.failUnless(
        filename.startswith(gotFilename),
        'Warning in %r, expected %r' % (gotFilename, filename))

    # It would be nice to be able to check the line number as well, but
    # different configurations actually end up reporting different line
    # numbers (generally the variation is only 1 line, but that's enough
    # to fail the test erroneously...).
    # self.assertEqual(lineno, xxx)

    return result
python
{ "resource": "" }
q37512
cases.cases
train
def cases(self, env, data):
    '''Calls each nested handler until one of them returns nonzero result.
    If any handler returns `None`, it is interpreted as "request does not
    match, the handler has nothing to do with it and `web.cases` should try
    to call the next handler".'''
    for current in self.handlers:
        # Snapshot env/data state around each handler invocation.
        env._push()
        data._push()
        try:
            outcome = current(env, data)
        finally:
            env._pop()
            data._pop()
        if outcome is not None:
            return outcome
python
{ "resource": "" }
q37513
create_win32tz_map
train
def create_win32tz_map(windows_zones_xml):
    """Creates a map between Windows and Olson timezone names.

    Args:
      windows_zones_xml: The CLDR XML mapping.

    Yields:
      (win32_name, territory, olson_name, comment)
    """
    coming_comment = None
    win32_name = None
    territory = None
    parser = genshi.input.XMLParser(StringIO(windows_zones_xml))
    map_zones = {}
    zone_comments = {}
    for kind, data, _ in parser:
        if kind == genshi.core.START and str(data[0]) == "mapZone":
            attrs = data[1]
            # "type" may list several Olson names; keep only the first.
            win32_name, territory, olson_name = (
                attrs.get("other"), attrs.get("territory"),
                attrs.get("type").split(" ")[0])
            map_zones[(win32_name, territory)] = olson_name
        elif kind == genshi.core.END and str(data) == "mapZone" and win32_name:
            # Attach the XML comment that immediately preceded this mapZone.
            if coming_comment:
                zone_comments[(win32_name, territory)] = coming_comment
                coming_comment = None
            win32_name = None
        elif kind == genshi.core.COMMENT:
            coming_comment = data.strip()
        elif kind in (genshi.core.START, genshi.core.END, genshi.core.COMMENT):
            # Any other structural event invalidates a pending comment.
            coming_comment = None
    for win32_name, territory in sorted(map_zones):
        yield (win32_name, territory, map_zones[(win32_name, territory)],
               zone_comments.get((win32_name, territory), None))
python
{ "resource": "" }
q37514
update_stored_win32tz_map
train
def update_stored_win32tz_map():
    """Downloads the cldr win32 timezone map and stores it in win32tz_map.py.

    Returns True when a new map was written, False when the stored map is
    already up to date (same md5 of the source XML).
    """
    windows_zones_xml = download_cldr_win32tz_map_xml()
    # Hash the raw bytes so we can skip rewriting an unchanged map.
    source_hash = hashlib.md5(windows_zones_xml).hexdigest()
    if hasattr(windows_zones_xml, "decode"):
        windows_zones_xml = windows_zones_xml.decode("utf-8")
    map_zones = create_win32tz_map(windows_zones_xml)
    map_dir = os.path.dirname(os.path.abspath(__file__))
    map_filename = os.path.join(map_dir, "win32tz_map.py")
    if os.path.exists(map_filename):
        # NOTE(review): bare reload() is Python 2 only -- confirm target runtime.
        reload(win32tz_map)
        current_hash = getattr(win32tz_map, "source_hash", None)
        if current_hash == source_hash:
            return False
    map_file = open(map_filename, "w")
    comment = "Map between Windows and Olson timezones taken from %s" % (
        _CLDR_WINZONES_URL,)
    comment2 = "Generated automatically from datetime_tz.py"
    map_file.write("'''%s\n" % comment)
    map_file.write("%s'''\n" % comment2)
    map_file.write("source_hash = '%s' # md5 sum of xml source data\n" % (
        source_hash))
    map_file.write("win32timezones = {\n")
    for win32_name, territory, olson_name, comment in map_zones:
        # '001' is the CLDR "world" territory: key by bare win32 name.
        if territory == '001':
            map_file.write("  %r: %r, # %s\n" % (
                str(win32_name), str(olson_name), comment or ""))
        else:
            map_file.write("  %r: %r, # %s\n" % (
                (str(win32_name), str(territory)), str(olson_name),
                comment or ""))
    map_file.write("}\n")
    map_file.close()
    return True
python
{ "resource": "" }
q37515
SimDbgMemory.load_objects
train
def load_objects(self, addr, num_bytes, ret_on_segv=False):
    """
    Load memory objects from paged memory.

    :param addr: Address to start loading.
    :param num_bytes: Number of bytes to load.
    :param bool ret_on_segv: True if you want load_bytes to return directly when a SIGSEV is triggered, otherwise
                             a SimSegfaultError will be raised.
    :return: list of tuples of (addr, memory_object)
    :rtype: tuple
    """
    result = []
    end = addr + num_bytes
    for page_addr in self._containing_pages(addr, end):
        try:
            page = self._get_page(page_addr // self._page_size)
        except KeyError:
            # Missing page: only a fault when segfault modeling is enabled;
            # otherwise silently skip the hole.
            if self.allow_segv:
                if ret_on_segv:
                    break
                raise SimSegfaultError(addr, 'read-miss')
            else:
                continue
        # Page exists but may not be readable under the current permissions.
        if self.allow_segv and not page.concrete_permissions & DbgPage.PROT_READ:
            if ret_on_segv:
                break
            raise SimSegfaultError(addr, 'non-readable')
        result.extend(page.load_slice(self.state, addr, end))
    return result
python
{ "resource": "" }
q37516
SimDbgMemory.permissions
train
def permissions(self, addr, permissions=None):
    """
    Returns the permissions for a page at address `addr`.

    If optional argument permissions is given, set page permissions to that
    prior to returning permissions.

    :raises SimMemoryError: for symbolic addresses, missing pages, or an
                            unrecognized permissions argument type.
    """
    if self.state.solver.symbolic(addr):
        raise SimMemoryError(
            "page permissions cannot currently be looked up for symbolic addresses")

    # Concretize a claripy bitvector address before paging math.
    if isinstance(addr, claripy.ast.bv.BV):
        addr = self.state.solver.eval(addr)

    page_num = addr // self._page_size

    try:
        page = self._get_page(page_num)
    except KeyError:
        raise SimMemoryError("page does not exist at given address")

    # Set permissions for the page
    if permissions is not None:
        # NOTE(review): `long` implies Python 2 -- confirm target runtime.
        if isinstance(permissions, (int, long)):
            # 3 bits: read / write / execute.
            permissions = claripy.BVV(permissions, 3)

        if not isinstance(permissions, claripy.ast.bv.BV):
            raise SimMemoryError(
                "Unknown permissions argument type of {0}.".format(
                    type(permissions)))

        page.permissions = permissions

    return page.permissions
python
{ "resource": "" }
q37517
call_interval
train
def call_interval(freq, **kwargs):
    """Decorator for the CallInterval wrapper"""
    # Returns a decorator that wraps the target callable in a CallInterval.
    def decorator(func):
        return CallInterval(func, freq, **kwargs)
    return decorator
python
{ "resource": "" }
q37518
ProgressSection.add
train
def add(self, *args, **kwargs):
    """Add a new record to the section"""
    # Refuse new records once the section's start record is marked done,
    # unless this add is itself the 'done' action.
    if self.start and self.start.state == 'done' and kwargs.get('log_action') != 'done':
        raise ProgressLoggingError("Can't add -- process section is done")

    self.augment_args(args, kwargs)

    kwargs['log_action'] = kwargs.get('log_action', 'add')

    rec = Process(**kwargs)
    self._session.add(rec)

    # Remember the latest record so update() can amend it later.
    self.rec = rec

    if self._logger:
        self._logger.info(self.rec.log_str)

    self._session.commit()

    # A fresh record invalidates any add_update() tracking id.
    self._ai_rec_id = None

    return self.rec.id
python
{ "resource": "" }
q37519
ProgressSection.update
train
def update(self, *args, **kwargs):
    """Update the last section record"""
    self.augment_args(args, kwargs)

    kwargs['log_action'] = kwargs.get('log_action', 'update')

    if not self.rec:
        # No record to amend yet: fall back to creating one.
        return self.add(**kwargs)
    else:
        for k, v in kwargs.items():
            # Don't update object; use whatever was set in the original record
            if k not in ('source', 's_vid', 'table', 't_vid', 'partition', 'p_vid'):
                setattr(self.rec, k, v)

        self._session.merge(self.rec)

        if self._logger:
            self._logger.info(self.rec.log_str)

        self._session.commit()

        self._ai_rec_id = None

        return self.rec.id
python
{ "resource": "" }
q37520
ProgressSection.add_update
train
def add_update(self, *args, **kwargs):
    """A records is added, then on subsequent calls, updated"""
    if self._ai_rec_id:
        # Preserve the tracked id across update(), which clears it.
        saved_id = self._ai_rec_id
        self.update(*args, **kwargs)
        self._ai_rec_id = saved_id
    else:
        self._ai_rec_id = self.add(*args, **kwargs)
    return self._ai_rec_id
python
{ "resource": "" }
q37521
ProgressSection.update_done
train
def update_done(self, *args, **kwargs):
    """Clear out the previous update"""
    # Force the state and drop the tracked record afterwards.
    kwargs.update(state='done')
    self.update(*args, **kwargs)
    self.rec = None
python
{ "resource": "" }
q37522
ProgressSection.done
train
def done(self, *args, **kwargs):
    """Mark the whole ProgressSection as done"""
    kwargs['state'] = 'done'
    # Record the terminal action, then sweep the whole group to 'done'.
    pr_id = self.add(*args, log_action='done', **kwargs)
    (self._session.query(Process)
        .filter(Process.group == self._group)
        .update({Process.state: 'done'}))
    self.start.state = 'done'
    self._session.commit()
    return pr_id
python
{ "resource": "" }
q37523
ProcessLogger.start
train
def start(self, phase, stage, **kwargs):
    """Start a new routine, stage or phase"""
    # Each call opens a fresh section bound to this logger's session.
    section = ProgressSection(
        self, self._session, phase, stage, self._logger, **kwargs)
    return section
python
{ "resource": "" }
q37524
ProcessLogger.clean
train
def clean(self):
    """Delete all of the records"""
    # Deleting seems to be really weird and unreliable, so do both a bulk
    # delete and a per-record delete.
    query = self._session.query(Process).filter(Process.d_vid == self._d_vid)
    query.delete(synchronize_session='fetch')
    for record in self.records:
        self._session.delete(record)
    self._session.commit()
python
{ "resource": "" }
q37525
ProcessLogger.build
train
def build(self):
    """Access build configuration values as attributes. See self.process for a usage example"""
    from ambry.orm.config import BuildConfigGroupAccessor
    # Lightweight accessor -- constructing it per call is cheap, no caching.
    return BuildConfigGroupAccessor(self.dataset, 'buildstate', self._session)
python
{ "resource": "" }
q37526
BuildEnvironmentRest.system_image_type
train
def system_image_type(self, system_image_type):
    """
    Sets the system_image_type of this BuildEnvironmentRest.

    :param system_image_type: The system_image_type of this BuildEnvironmentRest.
    :type: str
    :raises ValueError: if the value is not one of the allowed image types.
    """
    valid = ["DOCKER_IMAGE", "VIRTUAL_MACHINE_RAW",
             "VIRTUAL_MACHINE_QCOW2", "LOCAL_WORKSPACE"]
    if system_image_type in valid:
        self._system_image_type = system_image_type
    else:
        raise ValueError(
            "Invalid value for `system_image_type` ({0}), must be one of {1}"
            .format(system_image_type, valid)
        )
python
{ "resource": "" }
q37527
ExtDoc.group_by_source
train
def group_by_source(self):
    """Return a dict of all of the docs, with the source associated with the doc as a key"""
    from collections import defaultdict
    grouped = defaultdict(list)
    # Docs without a 'source' entry are skipped entirely.
    for _, doc in self.items():
        if 'source' in doc:
            grouped[doc.source].append(dict(doc.items()))
    return grouped
python
{ "resource": "" }
q37528
FilePath.preauthChild
train
def preauthChild(self, path):
    """
    Use me if `path' might have slashes in it, but you know they're safe.

    (NOT slashes at the beginning. It still needs to be a _child_).
    """
    newpath = abspath(joinpath(self.path, normpath(path)))
    # NOTE(review): this prefix test also accepts a *sibling* whose name
    # merely starts with self.path (e.g. '/foo' matching '/foobar');
    # comparing against self.path + os.sep would be stricter -- confirm
    # whether callers can ever hit that case.
    if not newpath.startswith(self.path):
        raise InsecurePath("%s is not a child of %s" % (newpath, self.path))
    return self.clonePath(newpath)
python
{ "resource": "" }
q37529
FilePath.childSearchPreauth
train
def childSearchPreauth(self, *paths):
    """Return my first existing child with a name in 'paths'.

    paths is expected to be a list of *pre-secured* path fragments; in most
    cases this will be specified by a system administrator and not an
    arbitrary user.

    If no appropriately-named children exist, this will return None.
    """
    base = self.path
    for name in paths:
        candidate = joinpath(base, name)
        if exists(candidate):
            return self.clonePath(candidate)
python
{ "resource": "" }
q37530
FilePath.siblingExtensionSearch
train
def siblingExtensionSearch(self, *exts):
    """Attempt to return a path with my name, given multiple possible
    extensions.

    Each extension in exts will be tested and the first path which exists
    will be returned. If no path exists, None will be returned. If '' is in
    exts, then if the file referred to by this path exists, 'self' will be
    returned.

    The extension '*' has a magic meaning, which means "any path that
    begins with self.path+'.' is acceptable".
    """
    p = self.path
    for ext in exts:
        # Empty extension: the path itself, if it exists.
        if not ext and self.exists():
            return self
        if ext == '*':
            prefix = basename(p) + '.'
            directory = dirname(p)
            for entry in listdir(directory):
                if entry.startswith(prefix):
                    return self.clonePath(joinpath(directory, entry))
        # Deliberately falls through for '*' as well, matching the
        # original behavior (checks a literal p+'*' path).
        candidate = p + ext
        if exists(candidate):
            return self.clonePath(candidate)
python
{ "resource": "" }
q37531
FilePath.globChildren
train
def globChildren(self, pattern):
    """
    Assuming I am representing a directory, return a list of FilePaths
    representing my children that match the given pattern.
    """
    import glob
    # Avoid doubling the separator when the path already ends with '/'.
    if self.path[-1] == '/':
        full = self.path + pattern
    else:
        full = slash.join([self.path, pattern])
    return map(self.clonePath, glob.glob(full))
python
{ "resource": "" }
q37532
FilePath.create
train
def create(self):
    """Exclusively create a file, only if this file previously did not
    exist.
    """
    flags = os.O_EXCL | os.O_CREAT | os.O_RDWR
    fd = os.open(self.path, flags)
    # XXX TODO: 'name' attribute of returned files is not mutable or
    # settable via fdopen, so this file is slighly less functional than the
    # one returned from 'open' by default. send a patch to Python...
    return os.fdopen(fd, 'w+b')
python
{ "resource": "" }
q37533
FilePath.temporarySibling
train
def temporarySibling(self):
    """
    Create a path naming a temporary sibling of this path in a secure
    fashion.
    """
    # Random prefix keeps the sibling unguessable; same directory as self.
    sibling = self.parent().child(_secureEnoughString() + self.basename())
    sibling.requireCreate()
    return sibling
python
{ "resource": "" }
q37534
start
train
def start():
    """
    Start recording stats. Call this from a benchmark script when your setup
    is done. Call this at most once.

    @raise RuntimeError: Raised if the parent process responds with anything
        other than an acknowledgement of this message.
    """
    os.write(BenchmarkProcess.BACKCHANNEL_OUT, BenchmarkProcess.START)
    response = util.untilConcludes(
        os.read, BenchmarkProcess.BACKCHANNEL_IN, 1)
    if response == BenchmarkProcess.START:
        return
    raise RuntimeError(
        "Parent process responded with %r instead of START " % (response,))
python
{ "resource": "" }
q37535
main
train
def main():
    """
    Run me with the filename of a benchmark script as an argument. I will
    time it and append the results to a file named output in the current
    working directory.
    """
    name = sys.argv[1]
    workdir = filepath.FilePath('.stat').temporarySibling()
    workdir.makedirs()
    runner = makeBenchmarkRunner(workdir, sys.argv[1:])
    try:
        bench(name, workdir, runner)
    finally:
        # Always clean up the scratch directory, even on failure.
        workdir.remove()
python
{ "resource": "" }
q37536
BasicProcess.spawn
train
def spawn(cls, executable, args, path, env, spawnProcess=None):
    """
    Run an executable with some arguments in the given working directory with
    the given environment variables.

    Returns a Deferred which fires with a two-tuple of (exit status, output
    list) if the process terminates without timing out or being killed by a
    signal. Otherwise, the Deferred errbacks with either L{error.TimeoutError}
    if any 10 minute period passes with no events or L{ProcessDied} if it is
    killed by a signal.

    On success, the output list is of two-tuples of (file descriptor, bytes).
    """
    d = defer.Deferred()
    protocol = cls(d, filepath.FilePath(path))
    launcher = spawnProcess if spawnProcess is not None else reactor.spawnProcess
    launcher(
        protocol, executable, [executable] + args, path=path, env=env,
        childFDs={0: 'w', 1: 'r', 2: 'r',
                  cls.BACKCHANNEL_OUT: 'r',
                  cls.BACKCHANNEL_IN: 'w'})
    return d
python
{ "resource": "" }
q37537
get
train
def get(orcid_id):
    """ Get an author based on an ORCID identifier. """
    url = ORCID_PUBLIC_BASE_URL + unicode(orcid_id)
    response = requests.get(url, headers=BASE_HEADERS)
    return Author(response.json())
python
{ "resource": "" }
q37538
create_product
train
def create_product(name, abbreviation, **kwargs):
    """ Create a new Product """
    raw = create_product_raw(name, abbreviation, **kwargs)
    # Only format when creation returned a payload.
    return utils.format_json(raw) if raw else None
python
{ "resource": "" }
q37539
update_product
train
def update_product(product_id, **kwargs):
    """ Update a Product with new information """
    payload = update_product_raw(product_id, **kwargs)
    return utils.format_json(payload) if payload else None
python
{ "resource": "" }
q37540
get_product
train
def get_product(id=None, name=None):
    """ Get a specific Product by name or ID """
    payload = get_product_raw(id, name)
    return utils.format_json(payload) if payload else None
python
{ "resource": "" }
q37541
list_versions_for_product
train
def list_versions_for_product(id=None, name=None, page_size=200, page_index=0, sort='', q=''):
    """ List all ProductVersions for a given Product """
    payload = list_versions_for_product_raw(
        id, name, page_size, page_index, sort, q)
    return utils.format_json_list(payload) if payload else None
python
{ "resource": "" }
q37542
list_products
train
def list_products(page_size=200, page_index=0, sort="", q=""):
    """ List all Products """
    payload = list_products_raw(page_size, page_index, sort, q)
    return utils.format_json_list(payload) if payload else None
python
{ "resource": "" }
q37543
DstkGeocoder.geocode
train
def geocode(self):
    """A Generator that reads from the address generators and returns geocode
    results. The generator yields (address, geocode_results, object)."""
    batch = []
    data_map = {}
    for address, obj in self.gen:
        batch.append(address)
        data_map[address] = obj
        # Flush a full batch to the geocoding service.
        if len(batch) >= self.submit_size:
            results = self._send(batch)
            batch = []
            for addr, result in results.items():
                yield (addr, result, data_map[addr])
    # Flush the final partial batch, if any.
    if batch:
        for addr, result in self._send(batch).items():
            yield (addr, result, data_map[addr])
python
{ "resource": "" }
q37544
migrate
train
def migrate(connection, dsn):
    """ Collects all migrations and applies missed.

    Args:
        connection (sqlalchemy connection):
        dsn: data source name -- used only for error reporting here.
    """
    all_migrations = _get_all_migrations()
    logger.debug('Collected migrations: {}'.format(all_migrations))
    for version, modname in all_migrations:
        # Apply only migrations that were skipped and are not newer than
        # the version the models expect.
        if _is_missed(connection, version) and version <= SCHEMA_VERSION:
            logger.info('Missed migration: {} migration is missed. Migrating...'.format(version))
            module = __import__(modname, fromlist='dummy')
            # run each migration under its own transaction. This allows us to apply valid migrations
            # and break on invalid.
            trans = connection.begin()
            try:
                module.Migration().migrate(connection)
                _update_version(connection, version)
                trans.commit()
            except:
                trans.rollback()
                logger.error("Failed to migrate '{}' on {} ".format(version, dsn))
                raise
python
{ "resource": "" }
q37545
get_stored_version
train
def get_stored_version(connection):
    """ Returns database version.

    Args:
        connection (sqlalchemy connection):

    Raises:
        VersionIsNotStored: when the version has never been written --
            user_version pragma is 0 (sqlite) or the user_version table is
            missing/empty (postgresql).

    Returns:
        int: version of the database.
    """
    engine_name = connection.engine.name
    if engine_name == 'sqlite':
        version = connection.execute('PRAGMA user_version').fetchone()[0]
        # SQLite's pragma defaults to 0, which means "never stored".
        if version == 0:
            raise VersionIsNotStored
        return version
    if engine_name == 'postgresql':
        try:
            row = connection\
                .execute('SELECT version FROM {}.user_version;'.format(POSTGRES_SCHEMA_NAME))\
                .fetchone()
        except ProgrammingError:
            # This happens when the user_version table doesn't exist
            raise VersionIsNotStored
        if not row:
            raise VersionIsNotStored
        return row[0]
    raise DatabaseError('Do not know how to get version from {} engine.'.format(engine_name))
python
{ "resource": "" }
q37546
_validate_version
train
def _validate_version(connection, dsn):
    """ Performs on-the-fly schema updates based on the models version.

    Raises:
        DatabaseError: if user uses old sqlite database.
    """
    try:
        version = get_stored_version(connection)
    except VersionIsNotStored:
        # Brand-new database: stamp it with the current schema version.
        logger.debug('Version not stored in the db: assuming new database creation.')
        version = SCHEMA_VERSION
        _update_version(connection, version)

    assert isinstance(version, int)

    # Versions in (10, 100) belong to the legacy SQLite layout.
    if 10 < version < 100:
        raise DatabaseError('You are trying to open an old SQLite database.')

    if _migration_required(connection):
        migrate(connection, dsn)
python
{ "resource": "" }
q37547
_migration_required
train
def _migration_required(connection):
    """ Returns True if ambry models do not match to db tables. Otherwise returns False. """
    stored = get_stored_version(connection)
    actual = SCHEMA_VERSION
    assert isinstance(stored, int)
    assert isinstance(actual, int)
    # A database newer than the code is a hard error, not a migration.
    assert stored <= actual, \
        'Db version can not be greater than models version. Update your source code.'
    return stored < actual
python
{ "resource": "" }
q37548
_update_version
train
def _update_version(connection, version):
    """ Updates version in the db to the given version.

    Args:
        connection (sqlalchemy connection): sqlalchemy session where to update version.
        version (int): version of the migration.

    Raises:
        DatabaseMissingError: for engines other than sqlite/postgresql.
    """
    if connection.engine.name == 'sqlite':
        # SQLite stores the version in the built-in user_version pragma.
        connection.execute('PRAGMA user_version = {}'.format(version))
    elif connection.engine.name == 'postgresql':
        # Ensure the schemas and the version table exist before writing.
        connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_SCHEMA_NAME)))
        connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_PARTITION_SCHEMA_NAME)))
        connection.execute('CREATE TABLE IF NOT EXISTS {}.user_version(version INTEGER NOT NULL);'
                           .format(POSTGRES_SCHEMA_NAME))
        # upsert.
        if connection.execute('SELECT * FROM {}.user_version;'.format(POSTGRES_SCHEMA_NAME)).fetchone():
            # update
            connection.execute('UPDATE {}.user_version SET version = {};'
                               .format(POSTGRES_SCHEMA_NAME, version))
        else:
            # insert
            connection.execute('INSERT INTO {}.user_version (version) VALUES ({})'
                               .format(POSTGRES_SCHEMA_NAME, version))
    else:
        raise DatabaseMissingError('Do not know how to migrate {} engine.'
                                   .format(connection.engine.driver))
python
{ "resource": "" }
q37549
_get_all_migrations
train
def _get_all_migrations():
    """ Returns sorted list of all migrations.

    Returns:
        list of (int, str) tuples: first elem of the tuple is migration
        number, second is module name.
    """
    from . import migrations
    prefix = migrations.__name__ + '.'
    found = []
    for _, modname, _ in pkgutil.iter_modules(migrations.__path__, prefix):
        # Module names look like '<pkg>.<NNN>_description'.
        version = int(modname.split('.')[-1].split('_')[0])
        found.append((version, modname))
    return sorted(found, key=lambda item: item[0])
python
{ "resource": "" }
q37550
Database.create
train
def create(self):
    """Create the database from the base SQL.

    Returns True when the database was created, False if it already existed.
    """
    if self.exists():
        return False
    self._create_path()
    self.create_tables()
    return True
python
{ "resource": "" }
q37551
Database._create_path
train
def _create_path(self): """Create the path to hold the database, if one wwas specified.""" if self.driver == 'sqlite' and 'memory' not in self.dsn and self.dsn != 'sqlite://': dir_ = os.path.dirname(self.path) if dir_ and not os.path.exists(dir_): try: # Multiple process may try to make, so it could already # exist os.makedirs(dir_) except Exception: pass if not os.path.exists(dir_): raise Exception("Couldn't create directory " + dir_)
python
{ "resource": "" }
q37552
Database.exists
train
def exists(self):
    """Return True if the database exists, or for Sqlite, which will create
    the file on the first reference, the file has been initialized with the
    root config.
    """
    if self.driver == 'sqlite' and not os.path.exists(self.path):
        return False

    self.engine  # touch the property to initialize the engine

    try:
        # Since we are using the connection, rather than the session, need to
        # explicitly set the search path.
        from sqlalchemy.engine.reflection import Inspector
        inspector = Inspector.from_engine(self.engine)
        # Presence of the 'config' table marks an initialized database.
        return 'config' in inspector.get_table_names(schema=self._schema)
    finally:
        self.close_connection()
python
{ "resource": "" }
q37553
Database.engine
train
def engine(self):
    """return the SqlAlchemy engine for this database."""
    if not self._engine:
        if 'postgres' in self.driver:
            if 'connect_args' not in self.engine_kwargs:
                # Tag connections so they are identifiable in pg_stat_activity.
                self.engine_kwargs['connect_args'] = {
                    'application_name': '{}:{}'.format(self._application_prefix, os.getpid())
                }

            # For most use, a small pool is good to prevent connection exhaustion, but these settings may
            # be too low for the main public web application.
            self._engine = create_engine(self.dsn, echo=self._echo,
                                         pool_size=5, max_overflow=5,
                                         **self.engine_kwargs)
        else:
            self._engine = create_engine(
                self.dsn, echo=self._echo, **self.engine_kwargs)

        #
        # Disconnect connections that have a different PID from the one they were created in.
        # This protects against re-use in multi-processing.
        #
        @event.listens_for(self._engine, 'connect')
        def connect(dbapi_connection, connection_record):
            # Remember the PID that created this DBAPI connection.
            connection_record.info['pid'] = os.getpid()

        @event.listens_for(self._engine, 'checkout')
        def checkout(dbapi_connection, connection_record, connection_proxy):
            from sqlalchemy.exc import DisconnectionError
            pid = os.getpid()
            if connection_record.info['pid'] != pid:
                # Invalidate the connection; the pool will open a fresh one.
                connection_record.connection = connection_proxy.connection = None
                raise DisconnectionError(
                    "Connection record belongs to pid %s, attempting to check out in pid %s" %
                    (connection_record.info['pid'], pid))

        if self.driver == 'sqlite':
            @event.listens_for(self._engine, 'connect')
            def pragma_on_connect(dbapi_con, con_record):
                """ISSUE some Sqlite pragmas when the connection is created."""
                # dbapi_con.execute('PRAGMA foreign_keys = ON;')
                # Not clear that there is a performance improvement.
                # dbapi_con.execute('PRAGMA journal_mode = WAL')
                dbapi_con.execute('PRAGMA synchronous = OFF')
                dbapi_con.execute('PRAGMA temp_store = MEMORY')
                dbapi_con.execute('PRAGMA cache_size = 500000')
                if self._foreign_keys:
                    dbapi_con.execute('PRAGMA foreign_keys=ON')

        # Validate/migrate the schema the first time the engine is built.
        with self._engine.connect() as conn:
            _validate_version(conn, self.dsn)

    return self._engine
python
{ "resource": "" }
q37554
Database.connection
train
def connection(self):
    """Return an SqlAlchemy connection."""
    # Lazily open and cache a single connection per Database instance.
    if not self._connection:
        logger.debug('Opening connection to: {}'.format(self.dsn))
        self._connection = self.engine.connect()
        logger.debug('Opened connection to: {}'.format(self.dsn))
    return self._connection
python
{ "resource": "" }
q37555
Database.session
train
def session(self):
    """Return a (cached) SqlAlchemy session bound to this database's engine.

    When a schema is configured, an ``after_begin`` listener is attached so
    that every new transaction sets ``search_path`` to that schema
    (Postgres-style databases).
    """
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.event import listen

    if not self.Session:
        self.Session = sessionmaker(bind=self.engine)

    if not self._session:
        self._session = self.Session()
        # set the search path
        if self._schema:
            def after_begin(session, transaction, connection):
                session.execute('SET search_path TO {}'.format(self._schema))

            listen(self._session, 'after_begin', after_begin)

    return self._session
python
{ "resource": "" }
q37556
Database.metadata
train
def metadata(self):
    """Build and return a reflected SqlAlchemy MetaData object bound to this engine."""
    from sqlalchemy import MetaData

    md = MetaData(bind=self.engine, schema=self._schema)
    md.reflect(self.engine)
    return md
python
{ "resource": "" }
q37557
Database._add_config_root
train
def _add_config_root(self):
    """Ensure the root dataset exists; it holds configuration values for the database.

    If a dataset with the root id is already present this is a no-op (the
    session is simply closed); otherwise the root record is created and
    committed.
    """
    try:
        self.session.query(Dataset).filter_by(id=ROOT_CONFIG_NAME).one()
        self.close_session()
    except NoResultFound:
        # No root record yet: create the fixed root dataset row.
        o = Dataset(
            id=ROOT_CONFIG_NAME,
            vid=ROOT_CONFIG_NAME_V,
            name=ROOT_CONFIG_NAME,
            vname=ROOT_CONFIG_NAME_V,
            fqname='datasetroot-0.0.0~' + ROOT_CONFIG_NAME_V,
            cache_key=ROOT_CONFIG_NAME,
            version='0.0.0',
            source=ROOT_CONFIG_NAME,
            dataset=ROOT_CONFIG_NAME,
            revision=1,
        )
        self.session.add(o)
        self.commit()
python
{ "resource": "" }
q37558
Database.new_dataset
train
def new_dataset(self, *args, **kwargs):
    """ Creates a new dataset

    :param args: Positional args passed to the Dataset constructor.
    :param kwargs: Keyword args passed to the Dataset constructor.

    :return: :class:`ambry.orm.Dataset`
    :raises: :class:`ambry.orm.ConflictError` if the a Dataset records already exists
        with the given vid
    """
    ds = Dataset(*args, **kwargs)

    try:
        self.session.add(ds)
        self.session.commit()
        # Give the new dataset a back-reference to this database.
        ds._database = self
        return ds
    except IntegrityError as e:
        # A duplicate key violation -- surface as a domain-level ConflictError.
        self.session.rollback()
        raise ConflictError(
            "Can't create dataset '{}'; one probably already exists: {} ".format(str(ds), e))
python
{ "resource": "" }
q37559
Database.root_dataset
train
def root_dataset(self):
    """Return the root dataset, which holds configuration values for the library."""
    root = self.dataset(ROOT_CONFIG_NAME_V)
    root._database = self
    return root
python
{ "resource": "" }
q37560
Database.dataset
train
def dataset(self, ref, load_all=False, exception=True):
    """Return a dataset, given a vid or id.

    :param ref: Vid, id, vname or name for a dataset. For an unversioned id
        or name, the dataset with the largest revision number is returned.
    :param load_all: Use a query that eagerly loads everything.
    :param exception: When True, raise NotFoundError on a miss; otherwise
        return None.
    :return: :class:`ambry.orm.Dataset`
    """
    ref = str(ref)

    def by_vid():
        # Exact match on the versioned id.
        try:
            return self.session.query(Dataset).filter(Dataset.vid == ref).one()
        except NoResultFound:
            return None

    def by_id():
        # Unversioned id: take the highest revision.
        try:
            return (self.session
                    .query(Dataset)
                    .filter(Dataset.id == ref)
                    .order_by(Dataset.revision.desc())
                    .first())
        except NoResultFound:
            return None

    def by_vname():
        # Exact match on the versioned name.
        try:
            return self.session.query(Dataset).filter(Dataset.vname == ref).one()
        except NoResultFound:
            return None

    def by_name():
        # Unversioned name: take the highest revision.
        try:
            return (self.session
                    .query(Dataset)
                    .filter(Dataset.name == ref)
                    .order_by(Dataset.revision.desc())
                    .first())
        except NoResultFound:
            return None

    for lookup in (by_vid, by_id, by_vname, by_name):
        ds = lookup()
        if ds is not None:
            ds._database = self
            return ds

    if exception:
        raise NotFoundError('No dataset in library for vid : {} '.format(ref))
    return None
python
{ "resource": "" }
q37561
BaseMigration.create_table
train
def create_table(table, connection, schema=None):
    """Create a single table, primarily used in migrations.

    :param table: the ORM class whose table should be created.
    :param connection: an open SqlAlchemy connection.
    :param schema: optional schema name (Postgres); temporarily applied to
        every known table so that foreign-key references resolve.
    """
    orig_schemas = {}

    # These schema shenanigans are almost certainly wrong, but they are
    # expedient. For Postgres, it puts the library tables in the Library
    # schema. We need to change the schema for all tables in case the table
    # we are creating references another table.
    if schema:
        connection.execute("SET search_path TO {}".format(schema))

        # BUG FIX: the loop variable used to shadow the ``table`` parameter,
        # so the create() below operated on the *last* entry of ALL_TABLES
        # instead of the requested table.
        for other in ALL_TABLES:
            orig_schemas[other.__table__] = other.__table__.schema
            other.__table__.schema = schema

    table.__table__.create(bind=connection.engine)

    # We have to put the schemas back because when installing to a warehouse,
    # the same library classes can be used to access a Sqlite database, which
    # does not handle schemas.
    if schema:
        for it, orig_schema in list(orig_schemas.items()):
            it.schema = orig_schema
python
{ "resource": "" }
q37562
return_locals
train
def return_locals(func):
    '''Decorator: call *func* and return its local variables as a dict.

    The wrapped function's own parameters (positional, keyword-only,
    *args and **kwargs names) are excluded from the result.
    '''
    @functools.wraps(func)
    def wrap(*args, **kwargs):
        frames = []

        def tracer(frame, event, arg):  # pragma: no cover
            # coverage does not work in this function because the tracer
            # is deactivated here
            frames.append(frame)
            sys.settrace(old_tracer)
            if old_tracer is not None:
                return old_tracer(frame, event, arg)

        old_tracer = sys.gettrace()
        # tracer is activated on next call, return or exception
        sys.settrace(tracer)
        try:
            func(*args, **kwargs)
        finally:
            sys.settrace(old_tracer)
        assert len(frames) == 1

        # BUG FIX: inspect.getargspec() was removed in Python 3.11 and never
        # reported keyword-only arguments; prefer getfullargspec() and fall
        # back for Python 2.
        try:
            argspec = inspect.getfullargspec(func)
            argnames = list(argspec.args) + list(argspec.kwonlyargs)
            varkw = argspec.varkw
        except AttributeError:  # Python 2: only getargspec exists
            argspec = inspect.getargspec(func)
            argnames = list(argspec.args)
            varkw = argspec.keywords
        if argspec.varargs is not None:
            argnames.append(argspec.varargs)
        if varkw is not None:
            argnames.append(varkw)

        return {name: value
                for name, value in frames.pop(0).f_locals.items()
                if name not in argnames}
    return wrap
python
{ "resource": "" }
q37563
generate_repo_list
train
def generate_repo_list(product_name=None, product_version=None, product_milestone=None):
    """
    Generates list of artifacts for offline repository.

    Looks up the product version and milestone through the PNC API, then
    prints the identifier of every artifact built in that milestone.
    Exits the process with status 1 when inputs are invalid or lookups fail.

    :param product_name: PNC product name.
    :param product_version: version string of the product.
    :param product_milestone: milestone string within that version.
    """
    if not validate_input_parameters(product_name, product_version, product_milestone):
        sys.exit(1)

    # Resolve the product version record for the given product.
    product_version = pnc_api.product_versions.get_all(q="version=='"+ product_version + "';product.name=='"+product_name+"'")
    if not product_version.content:
        logging.error('Specified product version not found.')
        sys.exit(1)
    product_version_id = product_version.content[0].id

    # Resolve the milestone within that product version.
    milestone = pnc_api.product_milestones.get_all(q="version=='"+ product_milestone + "';productVersion.id=='"+str(product_version_id)+"'")
    if not milestone.content:
        logging.error('Specified milestone not found.')
        sys.exit(1)
    milestone_id = milestone.content[0].id

    builds = get_all_successful_builds(milestone_id)
    if not builds:
        logging.warning('No builds performed in the milestone.')
        return

    # Print every built artifact's identifier, one per line.
    for build in builds:
        built_artifacts = get_all_artifacts(build.id)
        for artifact in built_artifacts:
            print(artifact.identifier)
python
{ "resource": "" }
q37564
DelayedCall.cancel
train
def cancel(self):
    """Unschedule this call

    @raise AlreadyCancelled: Raised if this call has already been
    unscheduled.

    @raise AlreadyCalled: Raised if this call has already been made.
    """
    # Guard clauses: a call can only be cancelled once, and not after firing.
    if self.cancelled:
        raise error.AlreadyCancelled
    if self.called:
        raise error.AlreadyCalled
    self.canceller(self)
    self.cancelled = 1
    if self.debug:
        # Preserve a printable form before dropping the call's references.
        self._str = str(self)
    del self.func, self.args, self.kw
python
{ "resource": "" }
q37565
DelayedCall.reset
train
def reset(self, secondsFromNow):
    """Reschedule this call for a different time

    @type secondsFromNow: C{float}
    @param secondsFromNow: The number of seconds from the time of the
    C{reset} call at which this call will be scheduled.

    @raise AlreadyCancelled: Raised if this call has been cancelled.
    @raise AlreadyCalled: Raised if this call has already been made.
    """
    if self.cancelled:
        raise error.AlreadyCancelled
    elif self.called:
        raise error.AlreadyCalled
    else:
        # self.seconds is an optional alternate clock; fall back to the
        # reactor clock (base.seconds) when unset -- presumably for tests.
        if self.seconds is None:
            new_time = base.seconds() + secondsFromNow
        else:
            new_time = self.seconds() + secondsFromNow
        if new_time < self.time:
            # Moving earlier than currently scheduled: clear any accumulated
            # delay and ask the scheduler to re-sort this call.
            self.delayed_time = 0
            self.time = new_time
            self.resetter(self)
        else:
            # Moving later: record the extra delay without re-sorting now.
            self.delayed_time = new_time - self.time
python
{ "resource": "" }
q37566
DelayedCall.delay
train
def delay(self, secondsLater):
    """Reschedule this call for a later time

    @type secondsLater: C{float}
    @param secondsLater: The number of seconds after the originally
    scheduled time for which to reschedule this call.

    @raise AlreadyCancelled: Raised if this call has been cancelled.
    @raise AlreadyCalled: Raised if this call has already been made.
    """
    # Guard clauses: reject operations on dead calls up front.
    if self.cancelled:
        raise error.AlreadyCancelled
    if self.called:
        raise error.AlreadyCalled
    self.delayed_time += secondsLater
    if self.delayed_time < 0:
        # The accumulated delay went negative; fold it into the scheduled
        # time and re-sort this call in the scheduler.
        self.activate_delay()
        self.resetter(self)
python
{ "resource": "" }
q37567
main
train
def main():
    """
    Start the AMP server and the reactor.
    """
    startLogging(stdout)
    checker = InMemoryUsernamePasswordDatabaseDontUse()
    # NOTE(review): hard-coded demo credentials -- example code only.
    checker.addUser("testuser", "examplepass")
    realm = AdditionRealm()
    factory = CredAMPServerFactory(Portal(realm, [checker]))
    # Listen on TCP port 7805 and run the reactor until stopped.
    reactor.listenTCP(7805, factory)
    reactor.run()
python
{ "resource": "" }
q37568
LandingPageDetailView.set_meta
train
def set_meta(self, instance):
    """Populate django-meta attributes from a LandingPageModel instance."""
    self.title = instance.title
    self.use_title_tag = True
python
{ "resource": "" }
q37569
FieldPerm.check
train
def check(self, field):
    '''Returns permissions determined by object itself'''
    # Explicit permissions win; otherwise defer to the parent form's.
    if self.permissions is not None:
        return self.permissions
    return field.parent.permissions
python
{ "resource": "" }
q37570
View._load_view
train
def _load_view(self, template_engine_name, template_dir):
    """Resolve a view class by template-engine name and return an instance.

    Looks up ``rails.views.<engine>`` and instantiates the
    ``<Engine>View`` class found there with *template_dir*.
    """
    module_name = template_engine_name.lower()
    class_name = "{}View".format(template_engine_name.title())
    try:
        view_module = import_module("rails.views.{}".format(module_name))
    except ImportError:
        raise Exception("Template engine '{}' not found in 'rails.views'".format(module_name))
    return getattr(view_module, class_name)(template_dir)
python
{ "resource": "" }
q37571
terminate
train
def terminate(pid, sig, timeout):
    '''Terminates process with PID `pid` and returns True if process
    finished during `timeout`. Current user must have permission to
    access process information.

    :param pid: process id to signal.
    :param sig: signal number to send (e.g. signal.SIGTERM).
    :param timeout: seconds to wait for the process to exit.
    :returns: True if the process exited within the timeout, else False.
    '''
    os.kill(pid, sig)
    start = time.time()
    while True:
        try:
            # This is required if it's our child to avoid zombie. Also
            # is_running() returns True for zombie process.
            _, status = os.waitpid(pid, os.WNOHANG)
        except OSError as exc:
            # ECHILD means it's not our child; fall through to is_running().
            if exc.errno != errno.ECHILD:  # pragma: nocover
                raise
        else:
            # A non-zero wait status means the child terminated.
            if status:
                return True
        if not is_running(pid):
            return True
        if time.time()-start>=timeout:
            return False
        time.sleep(0.1)
python
{ "resource": "" }
q37572
doublefork
train
def doublefork(pidfile, logfile, cwd, umask):  # pragma: nocover
    '''Daemonize current process.

    After first fork we return to the shell and removing our self from
    controling terminal via `setsid`. After second fork we are not session
    leader any more and cant get controlling terminal when opening files.

    :param pidfile: path where the daemon's pid is written.
    :param logfile: path that stdout/stderr are redirected to, or None to
        leave the standard streams untouched.
    :param cwd: working directory for the daemon.
    :param umask: umask applied after daemonizing.
    '''
    try:
        if os.fork():
            os._exit(os.EX_OK)
    except OSError as e:
        sys.exit('fork #1 failed: ({}) {}'.format(e.errno, e.strerror))

    # Detach from the controlling terminal by starting a new session.
    os.setsid()
    os.chdir(cwd)
    os.umask(umask)

    try:
        if os.fork():
            os._exit(os.EX_OK)
    except OSError as e:
        sys.exit('fork #2 failed: ({}) {}'.format(e.errno, e.strerror))

    if logfile is not None:
        # Redirect stdin to /dev/null, and stdout/stderr to an unbuffered
        # log file so daemon output is captured immediately.
        si = open('/dev/null')
        if six.PY2:
            so = open(logfile, 'a+', 0)
        else:
            so = io.open(logfile, 'ab+', 0)
            so = io.TextIOWrapper(so, write_through=True, encoding="utf-8")
        os.dup2(si.fileno(), 0)
        os.dup2(so.fileno(), 1)
        os.dup2(so.fileno(), 2)
        sys.stdin = si
        sys.stdout = sys.stderr = so

    with open(pidfile, 'w') as f:
        f.write(str(os.getpid()))
python
{ "resource": "" }
q37573
Partitions.partition
train
def partition(self, id_):
    """Get a partition by the id number.

    Arguments:
        id_ -- a partition id value, vid, name string, PartitionIdentity
               or PartialPartitionName.

    Returns:
        A partitions.Partition object, or None when nothing matches.

    Throws:
        a Sqlalchemy exception if the partition is not unique

    Because this method works on the bundle, the id_ ( without version
    information ) is equivalent to the vid ( with version information )
    """
    from ..orm import Partition as OrmPartition
    from sqlalchemy import or_
    from ..identity import PartialPartitionName

    # Normalize the reference to a plain id / name string.
    if isinstance(id_, PartitionIdentity):
        id_ = id_.id_
    elif isinstance(id_, PartialPartitionName):
        id_ = id_.promote(self.bundle.identity.name)

    session = self.bundle.dataset._database.session

    # First try matching on the id or vid.
    q = session\
        .query(OrmPartition)\
        .filter(OrmPartition.d_vid == self.bundle.dataset.vid)\
        .filter(or_(OrmPartition.id == str(id_).encode('ascii'),
                    OrmPartition.vid == str(id_).encode('ascii')))

    try:
        orm_partition = q.one()
        return self.bundle.wrap_partition(orm_partition)
    except NoResultFound:
        orm_partition = None

    if not orm_partition:
        # Fall back to matching on the partition name.
        q = session\
            .query(OrmPartition)\
            .filter(OrmPartition.d_vid == self.bundle.dataset.vid)\
            .filter(OrmPartition.name == str(id_).encode('ascii'))

        try:
            orm_partition = q.one()
            return self.bundle.wrap_partition(orm_partition)
        except NoResultFound:
            orm_partition = None

    return orm_partition
python
{ "resource": "" }
q37574
Partitions._find_orm
train
def _find_orm(self, pnq):
    """Return a Partition query from the database based on a PartitionNameQuery.

    An ORM object is returned, so changes can be persisted.

    :param pnq: a PartitionNameQuery; fields equal to NameQuery.ANY are not
        filtered on. fqname/vname/name are most specific and, when given,
        supersede the individual component fields.
    :return: a SqlAlchemy query over OrmPartition, ordered by vid and segment.
    """
    # import sqlalchemy.orm.exc
    from ambry.orm import Partition as OrmPartition  # , Table
    from sqlalchemy.orm import joinedload  # , joinedload_all

    assert isinstance(pnq, PartitionNameQuery), "Expected PartitionNameQuery, got {}".format(type(pnq))

    pnq = pnq.with_none()

    q = self.bundle.dataset._database.session.query(OrmPartition)

    if pnq.fqname is not NameQuery.ANY:
        q = q.filter(OrmPartition.fqname == pnq.fqname)
    elif pnq.vname is not NameQuery.ANY:
        q = q.filter(OrmPartition.vname == pnq.vname)
    elif pnq.name is not NameQuery.ANY:
        q = q.filter(OrmPartition.name == str(pnq.name))
    else:
        if pnq.time is not NameQuery.ANY:
            q = q.filter(OrmPartition.time == pnq.time)

        if pnq.space is not NameQuery.ANY:
            q = q.filter(OrmPartition.space == pnq.space)

        if pnq.grain is not NameQuery.ANY:
            q = q.filter(OrmPartition.grain == pnq.grain)

        if pnq.format is not NameQuery.ANY:
            q = q.filter(OrmPartition.format == pnq.format)

        if pnq.segment is not NameQuery.ANY:
            q = q.filter(OrmPartition.segment == pnq.segment)

        if pnq.table is not NameQuery.ANY:
            if pnq.table is None:
                # BUG FIX: the old code used ``OrmPartition.t_id is None``,
                # a Python identity test that always evaluates to False, so
                # the filter silently matched nothing. Use SQL ``IS NULL``.
                q = q.filter(OrmPartition.t_id.is_(None))
            else:
                tr = self.bundle.table(pnq.table)

                if not tr:
                    raise ValueError("Didn't find table named {} in {} bundle path = {}".format(
                        pnq.table, pnq.vname, self.bundle.database.path))

                q = q.filter(OrmPartition.t_vid == tr.vid)

    ds = self.bundle.dataset

    q = q.filter(OrmPartition.d_vid == ds.vid)

    q = q.order_by(
        OrmPartition.vid.asc()).order_by(
        OrmPartition.segment.asc())

    q = q.options(joinedload(OrmPartition.table))

    return q
python
{ "resource": "" }
q37575
Partitions.new_db_from_pandas
train
def new_db_from_pandas(self, frame, table=None, data=None, load=True, **kwargs):
    """Create a new db partition from a pandas data frame.

    If the table does not exist, it will be created

    :param frame: the pandas DataFrame to load.
    :param table: name of the destination table.
    :param data: extra data passed through to new_partition().
    :param load: when True, also insert the frame's rows into the partition.
    :return: the new partition.
    """
    from ..orm import Column

    # Create the table from the information in the data frame.
    with self.bundle.session:
        sch = self.bundle.schema
        t = sch.new_table(table)

        # The frame's index becomes the primary-key column.
        if frame.index.name:
            id_name = frame.index.name
        else:
            id_name = 'id'

        sch.add_column(t, id_name,
                       datatype=Column.convert_numpy_type(frame.index.dtype),
                       is_primary_key=True)

        # NOTE(review): DataFrame.convert_objects() was deprecated and later
        # removed in modern pandas -- confirm the pandas version targeted.
        for name, type_ in zip([row for row in frame.columns],
                               [row for row in frame.convert_objects(convert_numeric=True,
                                                                     convert_dates=True).dtypes]):
            sch.add_column(t, name, datatype=Column.convert_numpy_type(type_))

        sch.write_schema()

    p = self.new_partition(table=table, data=data, **kwargs)

    if load:
        pk_name = frame.index.name
        with p.inserter(table) as ins:
            for i, row in frame.iterrows():
                d = dict(row)
                # Carry the index value through as the primary key.
                d[pk_name] = i
                ins.insert(d)

    return p
python
{ "resource": "" }
q37576
update_project
train
def update_project(id, **kwargs):
    """
    Update an existing Project with new information
    """
    response = update_project_raw(id, **kwargs)
    if not response:
        return None
    return utils.format_json(response)
python
{ "resource": "" }
q37577
get_project
train
def get_project(id=None, name=None):
    """
    Get a specific Project by ID or name
    """
    response = get_project_raw(id, name)
    if not response:
        return None
    return utils.format_json(response)
python
{ "resource": "" }
q37578
delete_project
train
def delete_project(id=None, name=None):
    """
    Delete a Project by ID or name.
    """
    response = delete_project_raw(id, name)
    if not response:
        return None
    return utils.format_json(response)
python
{ "resource": "" }
q37579
list_projects
train
def list_projects(page_size=200, page_index=0, sort="", q=""):
    """
    List all Projects
    """
    response = list_projects_raw(page_size=page_size, page_index=page_index, sort=sort, q=q)
    if not response:
        return None
    return utils.format_json_list(response)
python
{ "resource": "" }
q37580
BaseSession.set_handler
train
def set_handler(self, handler):
    """
    Set transport handler

    @param handler: Handler, should derive from the
                    C{sockjs.cyclone.transports.base.BaseTransportMixin}
    """
    if self.handler is not None:
        raise Exception('Attempted to overwrite BaseSession handler')

    self.handler = handler
    self.transport_name = handler.name

    # Capture connection info from the first handler that attaches.
    if self.conn_info is None:
        self.conn_info = handler.get_conn_info()

    self.stats.sessionOpened(self.transport_name)

    return True
python
{ "resource": "" }
q37581
BaseSession.delayed_close
train
def delayed_close(self):
    """ Delayed close - won't close immediately, but on the next reactor loop. """
    # Mark as closing now so is_closed() reflects the pending shutdown,
    # then schedule the actual close for the next reactor iteration.
    self.state = SESSION_STATE.CLOSING
    reactor.callLater(0, self.close)
python
{ "resource": "" }
q37582
BaseSession.is_closed
train
def is_closed(self):
    """ Check if session was closed. """
    return self.state in (SESSION_STATE.CLOSED, SESSION_STATE.CLOSING)
python
{ "resource": "" }
q37583
SessionMixin._random_key
train
def _random_key(self): """ Return random session key """ hashstr = '%s%s' % (random.random(), self.time_module.time()) return hashlib.md5(hashstr).hexdigest()
python
{ "resource": "" }
q37584
SessionMixin.promote
train
def promote(self):
    """
    Mark object as alive, so it won't be collected during next
    run of the garbage collector.
    """
    # Sessions without an expiry never need promotion.
    if self.expiry is None:
        return
    self.promoted = self.time_module.time() + self.expiry
python
{ "resource": "" }
q37585
Session.close
train
def close(self, code=3000, message='Go away!'):
    """ Close session.

    @param code: Closing code
    @param message: Closing message
    """
    if self.state != SESSION_STATE.CLOSED:
        # Notify handler: send a SockJS disconnect frame before closing.
        if self.handler is not None:
            self.handler.send_pack(proto.disconnect(code, message))

    super(Session, self).close(code, message)
python
{ "resource": "" }
q37586
CensusStateGeoid.parser
train
def parser(cls, v):
    """Ensure that the upstream parser gets two digits.

    State FIPS codes are two digits; zero-pad single-digit values
    (e.g. 6 -> '06') before delegating to geoid.census.State.parse.
    """
    return geoid.census.State.parse(str(v).zfill(2))
python
{ "resource": "" }
q37587
ConfigReader.get_dependency_structure
train
def get_dependency_structure(self, artifact=None, include_dependencies=False):
    """
    Reads dependency structure. If an artifact is passed in you get only its dependencies
    otherwise the complete structure is returned.

    :param artifact: an artifact task or artifact name if only an artifact's deps are needed
    :param include_dependencies: flag to include also dependencies in returned artifacts and
                                 their dependencies in dependencies dict
    :return: tuple of artifact names list and dependencies dictionary where value is a Task list
    """
    artifacts = []
    dependencies_dict = {}
    if artifact:
        # Accept either a task object or its name.
        if isinstance(artifact, str):
            artifact = self.get_tasks().get_task(artifact)
        artifacts.append(artifact.name)
        dependencies_dict[artifact.name] = artifact.ordered_dependencies()
        if include_dependencies:
            for dep in dependencies_dict[artifact.name]:
                artifacts.append(dep.name)
                dependencies_dict[dep.name] = dep.ordered_dependencies()
    else:
        # BUG FIX: dict.iteritems() does not exist on Python 3; items()
        # behaves the same here and also works on Python 2.
        for key, task in self.get_tasks().tasks.items():
            artifacts.append(task.name)
            dependencies_dict[task.name] = task.ordered_dependencies()
    return artifacts, dependencies_dict
python
{ "resource": "" }
q37588
transform_generator
train
def transform_generator(fn):
    """Mark *fn* as a transform generator.

    Transform pipes marked this way are called to create the real transform
    instead of being used directly. Returns *fn* unchanged (usable as a
    decorator).
    """
    # A function's __dict__ is writable on both Python 2 and 3, so the old
    # six-based version branch is unnecessary.
    fn.__dict__['is_transform_generator'] = True
    return fn
python
{ "resource": "" }
q37589
is_transform_generator
train
def is_transform_generator(fn):
    """Return True if *fn* has been marked with @transform_generator.

    BUG FIX: the old Python 2 branch *assigned* the flag instead of reading
    it, which both mutated the function and implicitly returned None.
    """
    try:
        return fn.__dict__.get('is_transform_generator', False)
    except AttributeError:
        # Objects without a __dict__ (ints, builtins, ...) are never marked.
        return False
python
{ "resource": "" }
q37590
nullify
train
def nullify(v):
    """Convert empty strings and strings with only spaces to None values. """
    stripped = v.strip() if isinstance(v, six.string_types) else v
    return None if stripped is None or stripped == '' else stripped
python
{ "resource": "" }
q37591
parse_int
train
def parse_int(v, header_d):
    """Parse as an integer, or a subclass of Int."""
    value = nullify(v)
    if value is None:
        return None
    try:
        # Going through float() allows strings like '2.0' to be converted;
        # int('2.134') alone would fail. Round before truncating.
        return int(round(float(value), 0))
    except (TypeError, ValueError):
        raise CastingError(int, header_d, value, 'Failed to cast to integer')
python
{ "resource": "" }
q37592
_parse_text
train
def _parse_text(v, header_d):
    """ Parses unicode. Note: unicode types for py2 and str types for py3. """
    value = nullify(v)
    if value is None:
        return None
    try:
        return six.text_type(value).strip()
    except Exception as exc:
        raise CastingError(six.text_type, header_d, value, str(exc))
python
{ "resource": "" }
q37593
_parse_binary
train
def _parse_binary(v, header_d): """ Parses binary string. Note: <str> for py2 and <binary> for py3. """ # This is often a no-op, but it ocassionally converts numbers into strings v = nullify(v) if v is None: return None if six.PY2: try: return six.binary_type(v).strip() except UnicodeEncodeError: return six.text_type(v).strip() else: # py3 try: return six.binary_type(v, 'utf-8').strip() except UnicodeEncodeError: return six.text_type(v).strip()
python
{ "resource": "" }
q37594
parseJuiceHeaders
train
def parseJuiceHeaders(lines):
    """
    Parse a list of header lines into a JuiceBox.

    Lines beginning with a space continue the previous header's value.
    Returns a (body_length, box) tuple, where body_length is taken from the
    LENGTH header (popped from the box, defaulting to 0).

    @param lines: a list of lines.
    """
    box = JuiceBox()
    currentKey = None
    for line in lines:
        if line[0] == ' ':
            # Continuation line: append to the previous header's value.
            assert currentKey is not None
            box[currentKey] += '\r\n' + line[1:]
            continue
        parts = line.split(': ', 1)
        if len(parts) != 2:
            raise MalformedJuiceBox("Wrong number of parts: %r" % (line,))
        currentKey, value = parts
        currentKey = normalizeKey(currentKey)
        box[currentKey] = value
    return int(box.pop(LENGTH, 0)), box
python
{ "resource": "" }
q37595
DispatchMixin.lookupFunction
train
def lookupFunction(self, proto, name, namespace):
    """Return a callable to invoke when executing the named command.
    """
    # Prefer a transaction-aware (auto-dispatch) implementation first,
    # falling back to the "regular" method naming scheme.
    autoImpl = getattr(self, self.autoDispatchPrefix + name, None)
    if autoImpl is not None:
        # pass the namespace along
        return self._auto(autoImpl, proto, namespace)

    assert namespace is None, 'Old-style parsing'
    # Fall back to simplistic command dispatching - we probably want to get
    # rid of this eventually, there's no reason to do extra work and write
    # fewer docs all the time.
    return getattr(self, self.baseDispatchPrefix + name, None)
python
{ "resource": "" }
q37596
Juice._switchTo
train
def _switchTo(self, newProto, clientFactory=None):
    """
    Switch this Juice instance to a new protocol. You need to do this
    'simultaneously' on both ends of a connection; the easiest way to do
    this is to use a subclass of ProtocolSwitchCommand.
    """
    assert self.innerProtocol is None, "Protocol can only be safely switched once."
    # Stop line-based parsing before the new protocol takes over the wire.
    self.setRawMode()
    self.innerProtocolClientFactory = clientFactory
    self.innerProtocol = newProto
    newProto.makeConnection(self.transport)
python
{ "resource": "" }
q37597
Juice.sendPacket
train
def sendPacket(self, completeBox):
    """
    Send a juice.Box to my peer.

    Note: transport.write is never called outside of this method. While a
    TLS negotiation is being started, boxes are buffered instead of
    written.
    """
    assert not self.__locked, "You cannot send juice packets when a connection is locked"
    if self._startingTLSBuffer is not None:
        self._startingTLSBuffer.append(completeBox)
        return
    if debug:
        log.msg("Juice send: %s" % pprint.pformat(dict(completeBox.iteritems())))
    self.transport.write(completeBox.serialize())
python
{ "resource": "" }
q37598
Plot.dataframe
train
def dataframe(self, measure, p_dim, s_dim=None, filtered_dims=None, unstack=False,
              df_class=None, add_code=False):
    """ Yield rows in a reduced format, with one dimension as an index, one measure column
    per secondary dimension, and all other dimensions filtered.

    :param measure: The column name of a measure
    :param p_dim: The primary dimension. This will be the index of the dataframe.
    :param s_dim: a secondary dimension. The returned frame will be unstacked on this dimension
    :param filtered_dims: A dict of dimension column names that are filtered, mapped to the
        dimension value to select.
    :param unstack: When True, unstack on the secondary dimension.
    :param df_class: Optional dataframe class passed through to the analysis layer.
    :param add_code: When substituting a label for a column, also add the code value.
    :return: a dataframe

    BUG FIX: the original signature was missing ``measure``, ``p_dim`` and
    ``s_dim`` (documented but never declared), so the body raised NameError
    immediately. Also avoids the mutable ``{}`` default and the py3-invalid
    ``list + dict_keys`` concatenation.
    """
    from six import text_type

    filtered_dims = filtered_dims or {}

    measure = self.table.column(measure)
    p_dim = self.table.column(p_dim)

    assert measure
    assert p_dim

    if s_dim:
        s_dim = self.table.column(s_dim)

    def maybe_quote(v):
        # Quote string values so they survive the eval'd predicate below.
        from six import string_types
        if isinstance(v, string_types):
            return '"{}"'.format(v)
        else:
            return v

    # dict_keys cannot be concatenated to a list on Python 3; wrap in list().
    # (The old code also added the filtered dim keys twice.)
    all_dims = [p_dim.name] + list(filtered_dims.keys())

    if s_dim:
        all_dims.append(s_dim.name)

    all_dims = [text_type(c) for c in all_dims]

    # "primary_dimensions" means something different here: all of the
    # dimensions in the dataset that do not have children.
    primary_dims = [text_type(c.name) for c in self.primary_dimensions]

    if set(all_dims) != set(primary_dims):
        raise ValueError("The primary, secondary and filtered dimensions must cover all dimensions" +
                         " {} != {}".format(sorted(all_dims), sorted(primary_dims)))

    columns = []

    p_dim_label = None
    s_dim_label = None

    if p_dim.label:
        # For geographic datasets, also need the gvid
        if p_dim.type_is_gvid:
            columns.append(p_dim.name)

        p_dim = p_dim_label = p_dim.label
        columns.append(p_dim_label.name)
    else:
        columns.append(p_dim.name)

    if s_dim:
        if s_dim.label:
            s_dim = s_dim_label = s_dim.label
            columns.append(s_dim_label.name)
        else:
            columns.append(s_dim.name)

    columns.append(measure.name)

    # Create the predicate to filter out the filtered dimensions.
    if filtered_dims:
        code = ' and '.join("row.{} == {}".format(k, maybe_quote(v))
                            for k, v in filtered_dims.items())
        # NOTE(review): eval on caller-supplied dimension keys/values; do not
        # pass untrusted input here.
        predicate = eval('lambda row: {}'.format(code))
    else:
        predicate = lambda row: True

    df = self.analysis.dataframe(predicate, columns=columns, df_class=df_class)

    if unstack:
        # Need to set the s_dim in the index to get a hierarchical index,
        # required for unstacking. The final df has only p_dim as an index.
        if s_dim:
            df = df.set_index([p_dim.name, s_dim.name])
            df = df.unstack()
            df.columns = df.columns.get_level_values(1)
        else:
            # Can't actually unstack without a second dimension.
            df = df.set_index(p_dim.name)

    # NOTE(review): reset_index() returns a new frame; its result is
    # discarded here, as in the original -- confirm whether
    # ``df = df.reset_index()`` was intended before changing.
    df.reset_index()

    return df
python
{ "resource": "" }
q37599
DatasetSQLiteIndex._index_document
train
def _index_document(self, document, force=False):
    """ Adds document to the index.

    :param document: dict providing ``vid``, ``title``, ``keywords`` and
        ``doc`` keys, bound as named parameters into the INSERT.
    :param force: accepted for interface compatibility; not used here.
    """
    query = text("""
    INSERT INTO dataset_index(vid, title, keywords, doc)
    VALUES(:vid, :title, :keywords, :doc);
    """)
    self.backend.library.database.connection.execute(query, **document)
python
{ "resource": "" }