_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q40500
Protocol._filter_attrs
train
def _filter_attrs(self, feature, request):
    """
    Strip the feature down according to the request parameters.

    When an ``attrs`` query parameter is present, keep only the listed
    properties; when ``no_geom`` is truthy, drop the geometry entirely.
    """
    params = request.params
    if 'attrs' in params:
        wanted = params['attrs'].split(',')
        original = feature.properties
        feature.properties = {
            key: original[key] for key in wanted if key in original
        }
    if asbool(params.get('no_geom', False)):
        feature.geometry = None
    return feature
python
{ "resource": "" }
q40501
Protocol._get_order_by
train
def _get_order_by(self, request):
    """
    Build an SQLAlchemy order_by clause from the request.

    The sort column comes from the ``sort`` (or fallback ``order_by``)
    parameter; ``dir=DESC`` selects descending order. Returns ``None``
    when no valid sortable attribute is given.
    """
    attr_name = request.params.get('sort', request.params.get('order_by'))
    if attr_name is None or not hasattr(self.mapped_class, attr_name):
        return None
    column = getattr(self.mapped_class, attr_name)
    descending = request.params.get('dir', '').upper() == 'DESC'
    return desc(column) if descending else asc(column)
python
{ "resource": "" }
q40502
Protocol._query
train
def _query(self, request, filter=None):
    """
    Build a query from the filter and request params and execute it.

    Honours ``maxfeatures`` and ``limit`` (``limit`` wins when both are
    present), ``offset``, and any sort parameters.
    """
    params = request.params
    limit = None
    offset = None
    if 'maxfeatures' in params:
        limit = int(params['maxfeatures'])
    if 'limit' in params:
        limit = int(params['limit'])
    if 'offset' in params:
        offset = int(params['offset'])
    if filter is None:
        filter = create_filter(request, self.mapped_class, self.geom_attr)
    query = self.Session().query(self.mapped_class)
    if filter is not None:
        query = query.filter(filter)
    order_by = self._get_order_by(request)
    if order_by is not None:
        query = query.order_by(order_by)
    return query.limit(limit).offset(offset).all()
python
{ "resource": "" }
q40503
Protocol.count
train
def count(self, request, filter=None):
    """
    Return how many records match the given (or request-derived) filter.
    """
    if filter is None:
        filter = create_filter(request, self.mapped_class, self.geom_attr)
    query = self.Session().query(self.mapped_class)
    if filter is not None:
        query = query.filter(filter)
    return query.count()
python
{ "resource": "" }
q40504
Protocol.read
train
def read(self, request, filter=None, id=None):
    """
    Query the database and return a Feature (when an identifier is
    given) or a FeatureCollection (filter-based query).

    Returns ``HTTPNotFound`` when an explicit id does not exist.
    """
    if id is not None:
        obj = self.Session().query(self.mapped_class).get(id)
        if obj is None:
            return HTTPNotFound()
        # FIXME: we return a Feature here, not a mapped object, do
        # we really want that?
        return self._filter_attrs(obj.__geo_interface__, request)
    features = [
        self._filter_attrs(o.__geo_interface__, request)
        for o in self._query(request, filter)
        if o is not None
    ]
    return FeatureCollection(features)
python
{ "resource": "" }
q40505
Protocol.create
train
def create(self, request):
    """
    Read a GeoJSON feature collection from the request body and create
    (or update) the corresponding objects in the database.

    Returns ``HTTPMethodNotAllowed`` for read-only protocols and
    ``HTTPBadRequest`` when the body is not a FeatureCollection.
    On success the response status is set to 201 and the saved
    collection (or ``None`` if it is empty) is returned.
    """
    if self.readonly:
        return HTTPMethodNotAllowed(headers={'Allow': 'GET, HEAD'})
    collection = loads(request.body, object_hook=GeoJSON.to_instance)
    if not isinstance(collection, FeatureCollection):
        return HTTPBadRequest()
    session = self.Session()
    objects = []
    for feature in collection.features:
        create = False
        obj = None
        # Features carrying an id may refer to existing rows; those are
        # updated in place instead of inserted.
        if hasattr(feature, 'id') and feature.id is not None:
            obj = session.query(self.mapped_class).get(feature.id)
        # The hook runs before the insert/update decision is acted on;
        # obj is None here for brand-new features.
        if self.before_create is not None:
            self.before_create(request, feature, obj)
        if obj is None:
            obj = self.mapped_class(feature)
            create = True
        else:
            obj.__update__(feature)
        if create:
            session.add(obj)
        objects.append(obj)
    session.flush()
    collection = FeatureCollection(objects) if len(objects) > 0 else None
    request.response.status_int = 201
    return collection
python
{ "resource": "" }
q40506
Protocol.update
train
def update(self, request, id):
    """
    Read a GeoJSON feature from the request body and apply it to the
    database object identified by ``id``.
    """
    if self.readonly:
        return HTTPMethodNotAllowed(headers={'Allow': 'GET, HEAD'})
    session = self.Session()
    target = session.query(self.mapped_class).get(id)
    if target is None:
        return HTTPNotFound()
    feature = loads(request.body, object_hook=GeoJSON.to_instance)
    if not isinstance(feature, Feature):
        return HTTPBadRequest()
    hook = self.before_update
    if hook is not None:
        hook(request, feature, target)
    target.__update__(feature)
    session.flush()
    request.response.status_int = 200
    return target
python
{ "resource": "" }
q40507
Protocol.delete
train
def delete(self, request, id):
    """
    Remove the targeted feature from the database.

    Returns 204 on success, ``HTTPNotFound`` for an unknown id, and
    ``HTTPMethodNotAllowed`` for read-only protocols.
    """
    if self.readonly:
        return HTTPMethodNotAllowed(headers={'Allow': 'GET, HEAD'})
    session = self.Session()
    target = session.query(self.mapped_class).get(id)
    if target is None:
        return HTTPNotFound()
    hook = self.before_delete
    if hook is not None:
        hook(request, target)
    session.delete(target)
    return Response(status_int=204)
python
{ "resource": "" }
q40508
uuid_from_kronos_time
train
def uuid_from_kronos_time(time, _type=UUIDType.RANDOM):
    """
    Generate a UUID embedding the specified Kronos time.

    `time` is shifted into the UUID timestamp epoch by adding
    ``UUID_TIME_OFFSET``; `_type` selects how the non-time bytes are
    filled (random by default — presumably ``UUIDType`` also offers
    lowest/highest variants for range scans; TODO confirm).

    (Fixed stale docstring: the previous text referred to a `lowest`
    parameter that does not exist.)
    """
    return timeuuid_from_time(int(time) + UUID_TIME_OFFSET, type=_type)
python
{ "resource": "" }
q40509
PluginsController.by
train
def by(self, technology):
    """
    Return the plugins registered in PedalPi for the given technology.

    :param PluginTechnology technology: technology identifier (enum
        member or its string value, compared case-insensitively)
    """
    is_lv2 = (
        technology == PluginTechnology.LV2
        or str(technology).upper() == PluginTechnology.LV2.value.upper()
    )
    return self.lv2_builder.all if is_lv2 else []
python
{ "resource": "" }
q40510
PluginsController.reload_lv2_plugins_data
train
def reload_lv2_plugins_data(self):
    """
    Scan the system for LV2 audio plugins and persist the metadata
    pluginsmanager needs to generate audio plugin instances.
    """
    self._dao.save(self.lv2_builder.lv2_plugins_data())
python
{ "resource": "" }
q40511
MultiDict.invert
train
def invert(self):
    '''
    Invert by swapping each value with its key.

    Returns
    -------
    MultiDict
        Inverted multi-dict.

    Examples
    --------
    >>> MultiDict({1: {1}, 2: {1,2,3}, 4: set()}).invert()
    MultiDict({1: {1,2}, 2: {2}, 3: {2}})
    '''
    result = defaultdict(set)
    # NOTE(review): values are used as dict keys below, so items()
    # presumably yields one (key, value) pair per individual value of
    # each multi-value entry — confirm against MultiDict.items.
    for k, val in self.items():
        result[val].add(k)
    return MultiDict(dict(result))
python
{ "resource": "" }
q40512
_send_with_auth
train
def _send_with_auth(values, secret_key, url):
    """Send dictionary of JSON serializable `values` as a POST body to `url`
    along with `auth_token` that's generated from `secret_key` and `values`

    scheduler.auth.create_token expects a JSON serializable payload, so we
    send a dictionary. On the receiving end of the POST request, the Flask
    view will have access to a werkzeug.datastructures.ImmutableMultiDict.
    The easiest and most surefire way to ensure that the payload sent to
    create_token will be consistent on both ends is to generate an
    ImmutableMultiDict using the werkzeug.Request.

    NOTE(review): mutates the caller's `values` dict (adds 'auth_token');
    uses Python 2-only APIs (urllib.urlencode, urllib2, StringIO) — a
    Python 3 port would need urllib.parse/urllib.request and io.BytesIO.
    """
    data = urllib.urlencode(values)
    # Simulate a Flask request because that is what will be unpacked when the
    # request is received on the other side
    request = Request.from_values(
        content_length=len(data),
        input_stream=StringIO(data),
        content_type='application/x-www-form-urlencoded',
        method='POST')
    # Add the auth_token, re-encode, and send
    values['auth_token'] = create_token(secret_key, dict(request.form))
    data = urllib.urlencode(values)
    req = urllib2.Request(url, data)
    response = urllib2.urlopen(req)
    return json.loads(response.read())
python
{ "resource": "" }
q40513
schedule
train
def schedule(code, interval, secret_key=None, url=None):
    """Schedule a string of `code` to be executed every `interval`.

    An `interval` of 0 means the event runs exactly once and is not
    rescheduled. Falls back to the default key/URL when none are given.
    """
    secret_key = secret_key or default_key()
    url = url or default_url()
    payload = {'interval': interval, 'code': code}
    return _send_with_auth(payload, secret_key, '%s/schedule' % url)
python
{ "resource": "" }
q40514
cancel
train
def cancel(task_id, secret_key=None, url=None):
    """Cancel the scheduled task identified by `task_id`."""
    secret_key = secret_key or default_key()
    url = url or default_url()
    return _send_with_auth({'id': task_id}, secret_key, '%s/cancel' % url)
python
{ "resource": "" }
q40515
Teams.get
train
def get(cls, session, team_id):
    """Return a specific team.

    Args:
        session (requests.sessions.Session): Authenticated session.
        team_id (int): The ID of the team to get.

    Returns:
        helpscout.models.Person: A person singleton representing the
            team, if existing. Otherwise ``None``.
    """
    endpoint = '/teams/%d.json' % team_id
    return cls(endpoint, singleton=True, session=session)
python
{ "resource": "" }
q40516
Teams.get_members
train
def get_members(cls, session, team_or_id):
    """List the members for the team.

    Args:
        team_or_id (helpscout.models.Person or int): Team record or the
            ID of the team whose members should be listed.

    Returns:
        RequestPaginator(output_type=helpscout.models.Users): Users
            iterator.
    """
    team_id = team_or_id.id if isinstance(team_or_id, Person) else team_or_id
    return cls(
        '/teams/%d/members.json' % team_id,
        session=session,
        out_type=User,
    )
python
{ "resource": "" }
q40517
write_mzxml
train
def write_mzxml(filename, df, info=None, precision='f'):
    """ Precision is either f or d.

    NOTE(review): unimplemented stub — the loop below reads
    ``df.columns`` without using the result and never writes anything
    to ``filename``; `info` and `precision` are ignored.
    """
    for r in df.values:
        df.columns
        pass
python
{ "resource": "" }
q40518
MzML.read_binary
train
def read_binary(self, ba, param_groups=None):
    """
    Decode a <binaryDataArray> XML node into a numpy array.

    ba - binaryDataArray XML node (``None`` yields an empty list)
    param_groups - optional container of referenceableParamGroup nodes,
        consulted when ``ba`` references its cvParams indirectly.
    """
    if ba is None:
        return []
    # Resolve cvParams either from a referenced param group or inline.
    pgr = ba.find('m:referenceableParamGroupRef', namespaces=self.ns)
    if pgr is not None and param_groups is not None:
        q = 'm:referenceableParamGroup[@id="' + pgr.get('ref') + '"]'
        pg = param_groups.find(q, namespaces=self.ns)
    else:
        pg = ba
    # MS:1000574 = zlib compression, MS:1000576 = no compression.
    if pg.find('m:cvParam[@accession="MS:1000574"]',
               namespaces=self.ns) is not None:
        compress = True
    elif pg.find('m:cvParam[@accession="MS:1000576"]',
                 namespaces=self.ns) is not None:
        compress = False
    else:
        # FIX: previously fell through leaving `compress` unbound,
        # raising NameError below. Default to uncompressed.
        # TODO: should the other record be checked instead?
        compress = False
    # MS:1000521 = 32-bit float, MS:1000523 = 64-bit float.
    if pg.find('m:cvParam[@accession="MS:1000521"]',
               namespaces=self.ns) is not None:
        dtype = 'f'
    elif pg.find('m:cvParam[@accession="MS:1000523"]',
                 namespaces=self.ns) is not None:
        dtype = 'd'
    else:
        # FIX: previously fell through leaving `dtype` unbound,
        # raising NameError below. Default to 64-bit floats.
        # TODO: should the other record be checked instead?
        dtype = 'd'
    datatext = ba.find('m:binary', namespaces=self.ns).text
    if compress:
        rawdata = zlib.decompress(base64.b64decode(datatext))
    else:
        rawdata = base64.b64decode(datatext)
    # np.fromstring is removed in modern numpy; frombuffer is the
    # replacement (copy keeps the previous writable-array behavior).
    return np.frombuffer(rawdata, dtype=dtype).copy()
python
{ "resource": "" }
q40519
pretty_memory_info
train
def pretty_memory_info():
    '''
    Pretty format memory info.

    Returns
    -------
    str
        Memory info, e.g. ``'5MB memory usage'``.

    Examples
    --------
    >>> pretty_memory_info()
    '5MB memory usage'
    '''
    rss_bytes = psutil.Process(os.getpid()).memory_info().rss
    return '{}MB memory usage'.format(int(rss_bytes / 2**20))
python
{ "resource": "" }
q40520
invert
train
def invert(series):
    '''
    Swap index with values of series.

    Parameters
    ----------
    series : ~pandas.Series
        Series to swap on, must have a name.

    Returns
    -------
    ~pandas.Series
        Series after swap.
    '''
    frame = series.reset_index()
    #TODO alt is to to_frame and then use som dataframe methods
    frame.set_index(series.name, inplace=True)
    return frame[frame.columns[0]]
python
{ "resource": "" }
q40521
split
train
def split(series):
    '''
    Split array-like values across rows.

    The index is dropped, but this may change in the future.

    Parameters
    ----------
    series : ~pandas.Series[~pytil.numpy.ArrayLike]
        Series with array-like values.

    Returns
    -------
    ~pandas.Series
        Series with one row per element of each original value, e.g.
        ``pd.Series([[1,2],[3]])`` becomes four rows: 1, 2, 3.
    '''
    frame = series.apply(list).to_frame('column')
    exploded = df_.split_array_like(frame, 'column')['column']
    exploded.name = series.name
    return exploded
python
{ "resource": "" }
q40522
equals
train
def equals(series1, series2, ignore_order=False, ignore_index=False, all_close=False, _return_reason=False):
    '''
    Get whether 2 series are equal.

    ``NaN`` is considered equal to ``NaN`` and `None`.

    Parameters
    ----------
    series1 : pandas.Series
        Series to compare.
    series2 : pandas.Series
        Series to compare.
    ignore_order : bool
        Ignore order of values (and index).
    ignore_index : bool
        Ignore index values and name.
    all_close : bool
        If `False`, values must match exactly, if `True`, floats are
        compared as if compared with `numpy.isclose`.
    _return_reason : bool
        Internal. If `True`, return ``(equal, reason)`` where reason is
        `None` or a short explanation of the difference; else return
        just the bool.

    Returns
    -------
    bool
        Whether they are equal (after ignoring according to the
        parameters).

    Notes
    -----
    All values (including those of indices) must be copyable and
    ``__eq__`` must be such that a copy must equal its original. A value
    must equal itself unless it's ``NaN``. Values needn't be orderable
    or hashable (however pandas requires index values to be orderable
    and hashable). By consequence, this is not an efficient function,
    but it is flexible.
    '''
    outcome = _equals(series1, series2, ignore_order, ignore_index, all_close)
    return outcome if _return_reason else outcome[0]
python
{ "resource": "" }
q40523
assert_equals
train
def assert_equals(actual, expected, ignore_order=False, ignore_index=False, all_close=False):
    '''
    Assert 2 series are equal, with better hints at where they differ
    than a plain ``assert equals(...)``. See `equals` for detailed
    parameter doc.

    Parameters
    ----------
    actual : ~pandas.Series
    expected : ~pandas.Series
    ignore_order : bool
    ignore_index : bool
    all_close : bool
    '''
    is_equal, reason = equals(
        actual, expected, ignore_order, ignore_index, all_close,
        _return_reason=True
    )
    assert is_equal, '{}\n\n{}\n\n{}'.format(
        reason, actual.to_string(), expected.to_string()
    )
python
{ "resource": "" }
q40524
decompress
train
def decompress(zdata):
    """
    Unserializes an AstonFrame.

    Layout after zlib decompression: 4-byte little-endian length of the
    JSON column list, 4-byte little-endian length of the float32 index
    block, then the JSON columns, the index values, and the float64
    data values.

    Parameters
    ----------
    zdata : bytes

    Returns
    -------
    Trace or Chromatogram
        A Trace when there is exactly one column, else a Chromatogram.
    """
    data = zlib.decompress(zdata)
    lc = struct.unpack('<L', data[0:4])[0]
    li = struct.unpack('<L', data[4:8])[0]
    c = json.loads(data[8:8 + lc].decode('utf-8'))
    # FIX: np.fromstring is removed in modern numpy; frombuffer is the
    # replacement (copy keeps the previous writable-array behavior).
    i = np.frombuffer(data[8 + lc:8 + lc + li], dtype=np.float32).copy()
    v = np.frombuffer(data[8 + lc + li:], dtype=np.float64).copy()
    if len(c) == 1:
        return Trace(v, i, name=c[0])
    else:
        return Chromatogram(v.reshape(len(i), len(c)), i, c)
python
{ "resource": "" }
q40525
Trace._apply_data
train
def _apply_data(self, f, ts, reverse=False):
    """
    Helper for the arithmetic operations: apply `f` columnwise between
    this trace's values and `ts`.

    `ts` may be a scalar (broadcast across all times), ``None``, or
    another trace (re-timed onto this index when the indices differ).
    With `reverse`, `f` receives its operands swapped.
    """
    # TODO: needs to catch np numeric types?
    if isinstance(ts, (int, float)):
        other = ts * np.ones(self.shape[0])
    elif ts is None:
        other = None
    elif np.array_equal(ts.index, self.index):
        other = ts.values
    else:
        other = ts._retime(self.index)
    if reverse:
        merged = np.apply_along_axis(f, 0, other, self.values)
    else:
        merged = np.apply_along_axis(f, 0, self.values, other)
    return Trace(merged, self.index, name=self.name)
python
{ "resource": "" }
q40526
Chromatogram.traces
train
def traces(self):
    """
    Decompose the Chromatogram into a list of single-column Traces.

    Returns
    -------
    list
    """
    return [
        Trace(column_values, self.index, name=column)
        for column_values, column in zip(self.values.T, self.columns)
    ]
python
{ "resource": "" }
q40527
Chromatogram.as_sound
train
def as_sound(self, filename, speed=60, cutoff=50):
    """
    Convert AstonFrame into a WAV file.

    Parameters
    ----------
    filename : str
        Name of wavfile to create.
    speed : float, optional
        How much to speed up for sound recording, e.g. a value of 60
        will turn an hour-long AstonFrame into a minute-long sound clip.
    cutoff : float, optional
        m/z's under this value will be clipped out.
    """
    # make a 1d array for the sound
    def to_t(t):
        return (t - self.index[0]) / speed

    wav_len = int(to_t(self.index[-1]) * 60 * 44100)
    wav = np.zeros(wav_len)

    # create an artificial array to interpolate times out of
    tmask = np.linspace(0, 1, self.shape[0])

    # come up with a mapping from mz to tone
    min_hz, max_hz = 50, 1000
    min_mz, max_mz = min(self.columns), max(self.columns)

    def mz_to_wv(mz):
        """ Maps a wavelength/mz to a tone. """
        try:
            mz = float(mz)
        except (TypeError, ValueError):
            # FIX: was a bare `except:`; only conversion failures are
            # expected here, and a bare except also swallowed
            # KeyboardInterrupt/SystemExit.
            return 100
        wv = (mz * (max_hz - min_hz) - max_hz * min_mz + min_hz * max_mz) \
            / (max_mz - min_mz)
        return int(44100 / wv)

    # go through each trace and map it into the sound array
    for i, mz in enumerate(self.columns):
        if float(mz) < cutoff:
            # clip out mz/wv below a certain threshold
            # handy if data has low level noise
            continue
        print(str(i) + '/' + str(self.shape[1]))
        inter_x = np.linspace(0, 1, wav[::mz_to_wv(mz)].shape[0])
        wav[::mz_to_wv(mz)] += np.interp(inter_x, tmask, self.values[:, i])

    # scale the new array and write it out
    scaled = wav / np.max(np.abs(wav))
    scaled = scipy.signal.fftconvolve(scaled, np.ones(5) / 5, mode='same')
    scaled = np.int16(scaled * 32767)
    scipy.io.wavfile.write(filename, 44100, scaled)
python
{ "resource": "" }
q40528
Chromatogram.scan
train
def scan(self, t, dt=None, aggfunc=None):
    """
    Returns the spectrum from a specific time.

    Parameters
    ----------
    t : float
        Time of interest.
    dt : float, optional
        When given, all spectra between `t` and `t + dt` are combined
        (summed by default, or via `aggfunc`).
    aggfunc : callable, optional
        Aggregation applied to the selected rows instead of a sum.
    """
    start = (np.abs(self.index - t)).argmin()
    if dt is None:
        # only take the spectrum at the nearest time
        mz_abn = self.values[start, :].copy()
    else:
        # combine all the spectra over the requested range
        stop = (np.abs(self.index - t - dt)).argmin()
        start, stop = min(start, stop), max(start, stop)
        window = self.values[start:stop + 1, :].copy()
        mz_abn = window.sum(axis=0) if aggfunc is None else aggfunc(window)
    if isinstance(mz_abn, scipy.sparse.spmatrix):
        mz_abn = mz_abn.toarray()[0]
    return Scan(self.columns, mz_abn)
python
{ "resource": "" }
q40529
CassandraStorage.setup_cassandra
train
def setup_cassandra(self, namespaces):
    """
    Set up a connection to the specified Cassandra cluster and create
    the specified keyspaces if they don't exist.

    All namespace sessions — and finally the Cluster itself — are shut
    down at interpreter exit.
    """
    connections_to_shutdown = []
    self.cluster = Cluster(self.hosts)
    for namespace_name in namespaces:
        keyspace = '%s_%s' % (self.keyspace_prefix, namespace_name)
        namespace = Namespace(self.cluster, keyspace,
                              self.replication_factor, self.read_size)
        connections_to_shutdown.append(namespace.session)
        self.namespaces[namespace_name] = namespace
    # Shutdown Cluster instance after shutting down all Sessions.
    connections_to_shutdown.append(self.cluster)

    # Shut down all connections to Cassandra before exiting the Python
    # interpreter. FIX: the previous `atexit.register(lambda:
    # map(...))` builds a lazy iterator on Python 3, so shutdown() was
    # never actually called; an explicit loop works on both 2 and 3.
    def _shutdown_connections():
        for connection in connections_to_shutdown:
            connection.shutdown()

    atexit.register(_shutdown_connections)
python
{ "resource": "" }
q40530
index
train
def index(environment, start_response, headers):
    """
    Report the status of this Kronos instance and its backends.

    Doesn't expect any URL parameters.
    """
    response = {'service': 'kronosd',
                'version': kronos.__version__,
                'id': settings.node['id'],
                'storage': {},
                SUCCESS_FIELD: True}

    # Probe every configured storage backend for liveness.
    for name, backend in router.get_backends():
        response['storage'][name] = {
            'alive': backend.is_alive(),
            'backend': settings.storage[name]['backend'],
        }

    start_response('200 OK', headers)
    return response
python
{ "resource": "" }
q40531
MerkleTree.build
train
def build(self):
    """Builds the tree bottom-up from the leaves that have been added.

    The tree is stored flat in ``self.nodes``: the bottom ``2**order``
    slots hold the leaf hashes, and each level above holds the SHA-256
    of its children's concatenated digests (empty children skipped).
    """
    self.order = MerkleTree.get_order(len(self.leaves))
    leaf_count = 2 ** self.order
    self.nodes = [b''] * 2 * leaf_count

    # Drop the leaf hashes into the bottom level.
    for slot in range(0, leaf_count):
        if slot >= len(self.leaves):
            break
        self.nodes[slot + leaf_count - 1] = self.leaves[slot].get_hash()

    # Hash pairs of children up through each level to the root.
    for level in range(1, self.order + 1):
        width = 2 ** (self.order - level)
        for offset in range(0, width):
            node = width + offset - 1
            digest = hashlib.sha256()
            left = self.nodes[MerkleTree.get_left_child(node)]
            if len(left) > 0:
                digest.update(left)
            right = self.nodes[MerkleTree.get_right_child(node)]
            if len(right) > 0:
                digest.update(right)
            self.nodes[node] = digest.digest()
python
{ "resource": "" }
q40532
MerkleTree.get_branch
train
def get_branch(self, i):
    """Gets the branch associated with leaf ``i``.

    Walks from the leaf's node up to the root, recording at each level
    the (left, right) sibling pair containing the current node.

    :param i: the leaf identifying the branch to retrieve
    """
    branch = MerkleBranch(self.order)
    node = i + 2 ** self.order - 1
    for level in range(0, self.order):
        if self.is_left(node):
            pair = (self.nodes[node], self.nodes[node + 1])
        else:
            pair = (self.nodes[node - 1], self.nodes[node])
        branch.set_row(level, pair)
        node = MerkleTree.get_parent(node)
    return branch
python
{ "resource": "" }
q40533
MerkleTree.verify_branch
train
def verify_branch(leaf, branch, root):
    """Verify that `branch` connects `leaf` to `root`.

    Hashes the leaf and checks that it appears in the bottom row of the
    branch, then re-hashes each row upward, requiring each level's
    digest to appear in the row above, and finally compares the last
    digest against `root`.

    :param leaf: the leaf to check
    :param branch: a list of tuples (pairs) of the nodes in the branch,
        ordered from leaf to root.
    :param root: the root node
    :returns: True when the branch is consistent, False otherwise
    """
    # just check the hashes are correct
    try:
        lh = leaf.get_hash()
    except Exception:
        # FIX: narrowed from a bare `except:` so KeyboardInterrupt /
        # SystemExit still propagate; any failure to hash the leaf
        # still means verification fails.
        return False
    for i in range(0, branch.get_order()):
        if branch.get_left(i) != lh and branch.get_right(i) != lh:
            return False
        h = hashlib.sha256()
        if len(branch.get_left(i)) > 0:
            h.update(branch.get_left(i))
        if len(branch.get_right(i)) > 0:
            h.update(branch.get_right(i))
        lh = h.digest()
    if root != lh:
        return False
    return True
python
{ "resource": "" }
q40534
Model.save
train
def save(self):
    """Save this entry.

    A new id is generated (and stored on :attr:`id`) when the entry
    does not have one yet. Pre-save processing of the saved fields can
    be customised by overriding :meth:`prepare_save`; follow-up actions
    by defining :meth:`post_save`.
    """
    entry_id = self.id or self.objects.id(self.name)
    self.objects[entry_id] = self.prepare_save(dict(self))
    self.id = entry_id
    self.post_save()
    return entry_id
python
{ "resource": "" }
q40535
Manager.get_many
train
def get_many(self, ids):
    """Get several entries at once."""
    entries = []
    for entry_id, fields in zip(ids, self.api.mget(ids)):
        entries.append(self.instance(entry_id, **fields))
    return entries
python
{ "resource": "" }
q40536
Manager.create
train
def create(self, **fields):
    """Create, save, and return a new entry."""
    new_entry = self.instance(**fields)
    new_entry.save()
    return new_entry
python
{ "resource": "" }
q40537
generous_parse_uri
train
def generous_parse_uri(uri):
    """Parse `uri` leniently, returning a urlparse.ParseResult.

    A bare relative path (no scheme) is first converted to an absolute
    ``file://`` URI, so the result always carries a scheme. On Windows
    the absolute path is normalised to unix form first.
    """
    result = urlparse(uri)
    if result.scheme != '':
        return result
    abspath = os.path.abspath(result.path)
    if IS_WINDOWS:
        abspath = windows_to_unix_path(abspath)
    return urlparse("file://{}".format(abspath))
python
{ "resource": "" }
q40538
get_config_value
train
def get_config_value(key, config_path=None, default=None):
    """Get a configuration value.

    Preference:
    1. From environment
    2. From JSON configuration file supplied in ``config_path`` argument
    3. The default supplied to the function

    :param key: name of lookup value
    :param config_path: path to JSON configuration file
    :param default: default fall back value
    :returns: value associated with the key
    """
    if config_path is None:
        config_path = DEFAULT_CONFIG_PATH
    # File value overrides the default; the environment overrides both.
    file_value = get_config_value_from_file(
        key=key,
        config_path=config_path,
        default=default,
    )
    return os.environ.get(key, file_value)
python
{ "resource": "" }
q40539
timestamp
train
def timestamp(datetime_obj):
    """Return Unix timestamp as float.

    The number of seconds that have elapsed since January 1, 1970.
    """
    epoch = datetime.datetime(1970, 1, 1)
    return (datetime_obj - epoch).total_seconds()
python
{ "resource": "" }
q40540
name_is_valid
train
def name_is_valid(name):
    """Return True if the dataset name is valid.

    The name can only be 80 characters long.
    Valid characters: Alpha numeric characters [0-9a-zA-Z]
    Valid special characters: - _ .
    """
    return (
        len(name) <= MAX_NAME_LENGTH
        and bool(NAME_VALID_CHARS_REGEX.match(name))
    )
python
{ "resource": "" }
q40541
BaseStorageBroker.get_admin_metadata
train
def get_admin_metadata(self):
    """Return the admin metadata as a dictionary."""
    logger.debug("Getting admin metadata")  # FIX: log typo "metdata"
    text = self.get_text(self.get_admin_metadata_key())
    return json.loads(text)
python
{ "resource": "" }
q40542
BaseStorageBroker.get_manifest
train
def get_manifest(self):
    """Return the manifest as a dictionary."""
    logger.debug("Getting manifest")
    return json.loads(self.get_text(self.get_manifest_key()))
python
{ "resource": "" }
q40543
BaseStorageBroker.put_admin_metadata
train
def put_admin_metadata(self, admin_metadata):
    """Store the admin metadata."""
    logger.debug("Putting admin metadata")  # FIX: log typo "metdata"
    self.put_text(self.get_admin_metadata_key(), json.dumps(admin_metadata))
python
{ "resource": "" }
q40544
BaseStorageBroker.put_manifest
train
def put_manifest(self, manifest):
    """Store the manifest (pretty-printed, keys sorted)."""
    logger.debug("Putting manifest")
    serialized = json.dumps(manifest, indent=2, sort_keys=True)
    self.put_text(self.get_manifest_key(), serialized)
python
{ "resource": "" }
q40545
BaseStorageBroker.put_readme
train
def put_readme(self, content):
    """Store the readme descriptive metadata."""
    logger.debug("Putting readme")
    self.put_text(self.get_readme_key(), content)
python
{ "resource": "" }
q40546
BaseStorageBroker.update_readme
train
def update_readme(self, content):
    """Replace the readme, backing up the previous version first."""
    logger.debug("Updating readme")
    readme_key = self.get_readme_key()

    # Preserve the old README under a timestamped key before it is
    # overwritten.
    previous_content = self.get_readme_content()
    backup_key = readme_key + "-{}".format(
        timestamp(datetime.datetime.now())
    )
    logger.debug("README.yml backup key: {}".format(backup_key))
    self.put_text(backup_key, previous_content)

    self.put_text(readme_key, content)
python
{ "resource": "" }
q40547
BaseStorageBroker.put_overlay
train
def put_overlay(self, overlay_name, overlay):
    """Store the overlay."""
    logger.debug("Putting overlay: {}".format(overlay_name))
    self.put_text(
        self.get_overlay_key(overlay_name),
        json.dumps(overlay, indent=2)
    )
python
{ "resource": "" }
q40548
BaseStorageBroker.item_properties
train
def item_properties(self, handle):
    """Return properties of the item with the given handle."""
    logger.debug("Getting properties for handle: {}".format(handle))
    props = {}
    props['size_in_bytes'] = self.get_size_in_bytes(handle)
    props['utc_timestamp'] = self.get_utc_timestamp(handle)
    props['hash'] = self.get_hash(handle)
    props['relpath'] = self.get_relpath(handle)
    logger.debug("{} properties: {}".format(handle, props))
    return props
python
{ "resource": "" }
q40549
BaseStorageBroker._document_structure
train
def _document_structure(self):
    """Write out the dataset structure description and dtool README."""
    logger.debug("Documenting dataset structure")
    structure_text = json.dumps(
        self._structure_parameters, indent=2, sort_keys=True
    )
    self.put_text(self.get_structure_key(), structure_text)
    self.put_text(self.get_dtool_readme_key(), self._dtool_readme_txt)
python
{ "resource": "" }
q40550
DiskStorageBroker.list_dataset_uris
train
def list_dataset_uris(cls, base_uri, config_path):
    """Return list containing URIs in location given by base_uri."""
    parsed = generous_parse_uri(base_uri)
    base_path = parsed.path
    if IS_WINDOWS:
        base_path = unix_to_windows_path(parsed.path, parsed.netloc)
    uris = []
    # Only directories that carry admin metadata count as datasets.
    for entry in os.listdir(base_path):
        candidate = os.path.join(base_path, entry)
        if not os.path.isdir(candidate):
            continue
        broker = cls(candidate, config_path)
        if not broker.has_admin_metadata():
            continue
        uris.append(broker.generate_uri(
            name=entry,
            uuid=None,
            base_uri=base_uri
        ))
    return uris
python
{ "resource": "" }
q40551
DiskStorageBroker.put_text
train
def put_text(self, key, text):
    """Write `text` to the file whose path is `key`."""
    with open(key, "w") as out_file:
        out_file.write(text)
python
{ "resource": "" }
q40552
DiskStorageBroker.get_utc_timestamp
train
def get_utc_timestamp(self, handle):
    """Return the UTC timestamp of the item's file mtime."""
    fpath = self._fpath_from_handle(handle)
    mtime = os.stat(fpath).st_mtime
    return timestamp(datetime.datetime.utcfromtimestamp(mtime))
python
{ "resource": "" }
q40553
DiskStorageBroker.get_hash
train
def get_hash(self, handle):
    """Return the hash of the item's file."""
    return DiskStorageBroker.hasher(self._fpath_from_handle(handle))
python
{ "resource": "" }
q40554
is_parans_exp
train
def is_parans_exp(istr):
    """
    Determines if an expression is a valid function "call": a leading
    alphanumeric name (or a leading parenthesis) followed by a
    parenthesized section whose outer parenthesis closes only at the
    very end of the string.
    """
    head = istr.split('(')[0]
    if (not head.isalnum() and head != '(') or istr[-1] != ')':
        return False
    # Walk the interior; the outer parenthesis must not close before
    # the final character.
    depth = 1
    for ch in '('.join(istr[:-1].split('(')[1:]):
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
        if depth == 0:
            return False
    return True
python
{ "resource": "" }
q40555
parse_ion_string
train
def parse_ion_string(istr, analyses, twin=None):
    """
    Recursive string parser that handles "ion" strings.

    Grammar handled (in order): unary +/-, function calls and
    parenthesized groups, shortcut names, `!`-prefixed numeric literals
    and constants (!pi, !e), plain trace lookups, then the binary
    operators / * + - ^ (each level split by `tokenize`).
    """
    if istr.strip() == '':
        return Trace()

    # remove (unnessary?) pluses from the front
    # TODO: plus should be abs?
    istr = istr.lstrip('+')

    # invert it if preceded by a minus sign
    if istr[0] == '-':
        return -parse_ion_string(istr[1:], analyses, twin)

    # this is a function or paranthesized expression
    if is_parans_exp(istr):
        if ')' not in istr:
            # unbalanced parantheses
            pass
        fxn = istr.split('(')[0]
        args = istr[istr.find('(') + 1:istr.find(')')].split(',')
        if fxn == '':
            # strip out the parantheses and continue below
            istr = args[0]
        else:
            # named functions: only the first argument is evaluated;
            # the function itself is not applied yet (see FIXME).
            ts = parse_ion_string(args[0], analyses, twin)
            # FIXME
            return ts
            # return fxn_resolver(ts, fxn, *args[1:])

    # all the complicated math is gone, so simple lookup
    if set(istr).intersection(set('+-/*()')) == set():
        if istr in SHORTCUTS:
            # allow some shortcuts to pull out common ions
            return parse_ion_string(SHORTCUTS[istr], analyses, twin)
        elif istr[0] == '!' and all(i in '0123456789.' for i in istr[1:]):
            # TODO: should this handle negative numbers?
            return float(istr[1:])
        elif istr == '!pi':
            return np.pi
        elif istr == '!e':
            return np.e
        else:
            return trace_resolver(istr, analyses, twin)

    # go through and handle operators, lowest precedence first so the
    # recursion evaluates higher-precedence subexpressions deeper down
    for token in '/*+-^':
        if len(tokenize(istr, token)) != 1:
            ts = tokenize(istr, token)
            s = parse_ion_string(ts[0], analyses, twin)
            for t in ts[1:]:
                if token == '/':
                    s /= parse_ion_string(t, analyses, twin)
                elif token == '*':
                    s *= parse_ion_string(t, analyses, twin)
                elif token == '+':
                    s += parse_ion_string(t, analyses, twin)
                elif token == '-':
                    s -= parse_ion_string(t, analyses, twin)
                elif token == '^':
                    s **= parse_ion_string(t, analyses, twin)
            return s
    raise Exception('Parser hit a point it shouldn\'t have!')
python
{ "resource": "" }
q40556
_validate_and_get_value
train
def _validate_and_get_value(options, options_name, key, _type): """ Check that `options` has a value for `key` with type `_type`. Return that value. `options_name` is a string representing a human-readable name for `options` to be used when printing errors. """ if isinstance(options, dict): has = lambda k: k in options get = lambda k: options[k] elif isinstance(options, object): has = lambda k: hasattr(options, k) get = lambda k: getattr(options, k) else: raise ImproperlyConfigured( '`{}` must be a dictionary-like object.'.format(options_name)) if not has(key): raise ImproperlyConfigured( '`{}` must be specified in `{}`'.format(key, options_name)) value = get(key) if not isinstance(value, _type): raise ImproperlyConfigured( '`{}` in `{}` must be a {}'.format(key, options_name, repr(_type))) return value
python
{ "resource": "" }
q40557
validate_event_and_assign_id
train
def validate_event_and_assign_id(event):
    """
    Ensure that the event has a valid time and assign it a time-ordered id.

    Mutates `event` in place: fills in TIMESTAMP_FIELD with the current
    time (in kronos time units) when absent, and stores a uuid1-like id
    under ID_FIELD.

    Returns a ``(_id, event)`` tuple where ``_id`` is the uuid object and
    ``event`` is the same (mutated) dict.

    Raises InvalidEventTime when a supplied time is not an integer.
    """
    event_time = event.get(TIMESTAMP_FIELD)
    if event_time is None:
        # No time supplied: stamp the event with "now" in kronos time.
        event[TIMESTAMP_FIELD] = event_time = epoch_time_to_kronos_time(time.time())
    elif type(event_time) not in (int, long):  # `long`: Python 2 only
        raise InvalidEventTime(event_time)
    # Generate a uuid1-like sequence from the event time with the non-time bytes
    # set to random values.
    _id = uuid_from_kronos_time(event_time)
    event[ID_FIELD] = str(_id)
    return _id, event
python
{ "resource": "" }
q40558
validate_stream
train
def validate_stream(stream):
    """
    Check that the stream name is well-formed: it must match STREAM_REGEX
    and be at most MAX_STREAM_LENGTH characters.  Raises InvalidStreamName
    otherwise.
    """
    too_long = len(stream) > MAX_STREAM_LENGTH
    if too_long or not STREAM_REGEX.match(stream):
        raise InvalidStreamName(stream)
python
{ "resource": "" }
q40559
validate_storage_settings
train
def validate_storage_settings(storage_class, settings):
    """
    Given a `storage_class` and a dictionary of `settings` used to
    initialize it, verify that every setting is recognised and valid.

    The `backend` key is exempt from validation.  Raises
    ImproperlyConfigured when `settings` is not a dict, contains a key
    that `storage_class.SETTINGS_VALIDATORS` does not know about, or holds
    a value its validator rejects.  Raises NotImplementedError when the
    storage class does not declare `SETTINGS_VALIDATORS`.
    """
    if not isinstance(settings, dict):
        raise ImproperlyConfigured(
            '{}: storage class settings must be a dict'.format(storage_class))
    if not hasattr(storage_class, 'SETTINGS_VALIDATORS'):
        raise NotImplementedError(
            '{}: storage class must define `SETTINGS_VALIDATORS`'.format(
                storage_class))
    settings_validators = getattr(storage_class, 'SETTINGS_VALIDATORS')
    settings = settings.copy()
    settings.pop('backend', None)  # No need to validate the `backend` key.
    invalid_settings = set(settings.keys()) - set(settings_validators.keys())
    if invalid_settings:
        raise ImproperlyConfigured(
            '{}: invalid settings: {}'.format(storage_class, invalid_settings))
    # BUGFIX: `.items()` instead of the Python 2-only `.iteritems()` so the
    # function also runs on Python 3 (same behavior on Python 2).
    for setting, value in settings.items():
        if not settings_validators[setting](value):
            raise ImproperlyConfigured(
                '{}: invalid value for {}'.format(storage_class, setting))
python
{ "resource": "" }
q40560
BaseModel.from_api
train
def from_api(cls, **kwargs):
    """Create a new instance from API arguments.

    This will switch camelCase keys into snake_case for instantiation.
    It will also identify any ``Instance`` or ``List`` properties, and
    instantiate the proper objects using the values. The end result being
    a fully Objectified and Pythonified API response.

    Properties the API sends that this model does not declare are logged
    and dropped rather than raising to the caller.

    Returns:
        BaseModel: Instantiated model using the API values.
    """
    # Convert camelCase API keys to snake_case and drop empty values.
    vals = cls.get_non_empty_vals({
        cls._to_snake_case(k): v for k, v in kwargs.items()
    })
    remove = []
    for attr, val in vals.items():
        try:
            vals[attr] = cls._parse_property(attr, val)
        except HelpScoutValidationException:
            # Unknown/invalid property: remember it for removal (cannot
            # delete while iterating) and log for debugging.
            remove.append(attr)
            logger.info(
                'Unexpected property received in API response',
                exc_info=True,
            )
    for attr in remove:
        del vals[attr]
    return cls(**cls.get_non_empty_vals(vals))
python
{ "resource": "" }
q40561
BaseModel.to_api
train
def to_api(self):
    """Return a dictionary to send to the API.

    Keys are camelCased; nested objects and lists are converted
    recursively via ``_to_api_value``.

    Returns:
        dict: Mapping representing this object that can be sent to the
        API.
    """
    return {
        self._to_camel_case(name): self._to_api_value(prop_type,
                                                      getattr(self, name))
        for name, prop_type in self._props.items()
    }
python
{ "resource": "" }
q40562
BaseModel._to_api_value
train
def _to_api_value(self, attribute_type, value):
    """Return a parsed value for the API.

    Falsy values map to None; ``Instance`` and ``List`` properties are
    serialized recursively; everything else is serialized by its
    property type.
    """
    if not value:
        result = None
    elif isinstance(attribute_type, properties.Instance):
        result = value.to_api()
    elif isinstance(attribute_type, properties.List):
        result = self._parse_api_value_list(value)
    else:
        result = attribute_type.serialize(value)
    return result
python
{ "resource": "" }
q40563
BaseModel._parse_api_value_list
train
def _parse_api_value_list(self, values): """Return a list field compatible with the API.""" try: return [v.to_api() for v in values] # Not models except AttributeError: return list(values)
python
{ "resource": "" }
q40564
BaseModel._parse_property
train
def _parse_property(cls, name, value):
    """Parse a property received from the API into an internal object.

    Args:
        name (str): Name of the property on the object.
        value (mixed): The unparsed API value.

    Raises:
        HelpScoutValidationException: In the event that the property name
            is not found.  (Not raised directly here -- presumably raised
            by the nested ``from_api``/parse helpers; confirm.)

    Returns:
        mixed: A value compatible with the internal models, or None when
        the property is not declared on this model.
    """
    prop = cls._props.get(name)
    return_value = value
    if not prop:
        # Property not declared on this model: log at debug level and map
        # the value to None so the caller can discard it.
        logger.debug(
            '"%s" with value "%s" is not a valid property for "%s".' % (
                name, value, cls,
            ),
        )
        return_value = None
    elif isinstance(prop, properties.Instance):
        # Nested object: recurse into the referenced model class.
        return_value = prop.instance_class.from_api(**value)
    elif isinstance(prop, properties.List):
        return_value = cls._parse_property_list(prop, value)
    elif isinstance(prop, properties.Color):
        return_value = cls._parse_property_color(value)
    return return_value
python
{ "resource": "" }
q40565
BaseModel._parse_property_list
train
def _parse_property_list(prop, value): """Parse a list property and return a list of the results.""" attributes = [] for v in value: try: attributes.append( prop.prop.instance_class.from_api(**v), ) except AttributeError: attributes.append(v) return attributes
python
{ "resource": "" }
q40566
BaseModel._to_snake_case
train
def _to_snake_case(string):
    """Return a snake cased version of a camel cased input string.

    Args:
        string (str): A camel cased string.

    Returns:
        str: A snake cased string.
    """
    replacement = r'\1_\2'
    partial = REGEX_CAMEL_FIRST.sub(replacement, string)
    return REGEX_CAMEL_SECOND.sub(replacement, partial).lower()
python
{ "resource": "" }
q40567
BaseModel._to_camel_case
train
def _to_camel_case(string): """Return a camel cased version of the input string. Args: string (str): A snake cased string. Returns: str: A camel cased string. """ components = string.split('_') return '%s%s' % ( components[0], ''.join(c.title() for c in components[1:]), )
python
{ "resource": "" }
q40568
TrigramsDB.save
train
def save(self, output=None):
    """
    Write the serialized database to `output`.

    When `output` is not given, the ``dbfile`` set in the constructor is
    used; when neither exists this is a silent no-op.
    """
    target = output
    if target is None:
        if self.dbfile is None:
            return
        target = self.dbfile
    with open(target, 'w') as fh:
        fh.write(self._dump())
python
{ "resource": "" }
q40569
TrigramsDB.generate
train
def generate(self, **kwargs):
    """ Generate some text from the database.

    By default only 70 words are generated, but you can change
    this using keyword arguments.

    Keyword arguments:

    - ``wlen``: maximum length (words)
    - ``words``: a list of words to use to begin the text with

    Returns the generated text as a space-joined string; returns ``''``
    when the database is empty and fewer than two seed words were given.
    """
    words = list(map(self._sanitize, kwargs.get('words', [])))
    max_wlen = kwargs.get('wlen', 70)
    wlen = len(words)
    if wlen < 2:
        # Need two seed words to look up a trigram; fill in the missing ones.
        if not self._db:
            return ''
        if wlen == 0:
            # Pick a random "word1<sep>word2" key as the seed pair.
            # NOTE(review): sample() over dict keys requires Python 2
            # (dict_keys is not a sequence on Python 3) -- confirm.
            words = sample(self._db.keys(), 1)[0].split(self._WSEP)
        elif wlen == 1:
            # Pick a random second word among keys starting with the seed.
            spl = [k for k in self._db.keys()
                   if k.startswith(words[0]+self._WSEP)]
            words.append(sample(spl, 1)[0].split(self._WSEP)[1])
        wlen = 2
    while wlen < max_wlen:
        next_word = self._get(words[-2], words[-1])
        if next_word is None:
            # Dead end: no trigram continues this pair.
            break
        words.append(next_word)
        wlen += 1
    return ' '.join(words)
python
{ "resource": "" }
q40570
TrigramsDB._load
train
def _load(self): """ Load the database from its ``dbfile`` if it has one """ if self.dbfile is not None: with open(self.dbfile, 'r') as f: self._db = json.loads(f.read()) else: self._db = {}
python
{ "resource": "" }
q40571
TrigramsDB._get
train
def _get(self, word1, word2): """ Return a possible next word after ``word1`` and ``word2``, or ``None`` if there's no possibility. """ key = self._WSEP.join([self._sanitize(word1), self._sanitize(word2)]) key = key.lower() if key not in self._db: return return sample(self._db[key], 1)[0]
python
{ "resource": "" }
q40572
TrigramsDB._insert
train
def _insert(self, trigram): """ Insert a trigram in the DB """ words = list(map(self._sanitize, trigram)) key = self._WSEP.join(words[:2]).lower() next_word = words[2] self._db.setdefault(key, []) # we could use a set here, but sets are not serializables in JSON. This # is the same reason we use dicts instead of defaultdicts. if next_word not in self._db[key]: self._db[key].append(next_word)
python
{ "resource": "" }
q40573
planetType
train
def planetType(temperature, mass, radius):
    """ Returns the planet type as 'temperatureType massType'.

    Prefers mass for the size classification, falling back to radius;
    returns None when both are missing (np.nan).
    """
    # NOTE(review): `is np.nan` is an identity check -- it only detects
    # values that are literally the np.nan singleton.  A computed
    # float('nan') would slip through; confirm callers always use np.nan
    # as the missing-value sentinel before relying on this.
    if mass is not np.nan:
        sizeType = planetMassType(mass)
    elif radius is not np.nan:
        sizeType = planetRadiusType(radius)
    else:
        # Neither mass nor radius known: cannot classify.
        return None
    return '{0} {1}'.format(planetTempType(temperature), sizeType)
python
{ "resource": "" }
q40574
split_array_like
train
def split_array_like(df, columns=None):
    '''
    Split cells with array-like values along the row axis.

    For each listed column, every row is repeated once per element of its
    array-like cell, with the cell replaced by the individual elements;
    values in the other columns are repeated unchanged.  Column names and
    dtypes are maintained; the index is dropped.

    Parameters
    ----------
    df : ~pandas.DataFrame
        Data frame whose ``df[columns]`` cells hold array-like values.
    columns : ~typing.Collection[str] or str or None
        Column(s) whose values to split.  Defaults to ``df.columns``.

    Returns
    -------
    ~pandas.DataFrame
        Data frame with the array-like values split across rows.
    '''
    original_dtypes = df.dtypes
    if columns is None:
        columns = df.columns
    elif isinstance(columns, str):
        columns = [columns]
    for col in columns:
        # Repeat each row once per element of its cell in `col`, then
        # overwrite that column with the flattened elements.
        repeat_counts = df[col].apply(len).values
        rows = np.repeat(df.values, repeat_counts, axis=0)
        rows[:, df.columns.get_loc(col)] = np.concatenate(df[col].tolist())
        df = pd.DataFrame(rows, columns=df.columns)
    # np.repeat upcast everything to object; restore the original dtypes.
    for position, dtype in enumerate(original_dtypes):
        df.iloc[:, position] = df.iloc[:, position].astype(dtype)
    return df
python
{ "resource": "" }
q40575
equals
train
def equals(df1, df2, ignore_order=set(), ignore_indices=set(), all_close=False, _return_reason=False):
    '''
    Get whether 2 data frames are equal.

    ``NaN`` is considered equal to ``NaN`` and `None`.

    Parameters
    ----------
    df1 : ~pandas.DataFrame
        Data frame to compare.
    df2 : ~pandas.DataFrame
        Data frame to compare.
    ignore_order : ~typing.Set[int]
        Axi in which to ignore order.
    ignore_indices : ~typing.Set[int]
        Axi of which to ignore the index.  E.g. ``{1}`` allows differences
        in ``df.columns.name`` and ``df.columns.equals(df2.columns)``.
    all_close : bool
        If `False`, values must match exactly; if `True`, floats are
        compared as if with `numpy.isclose`.
    _return_reason : bool
        Internal.  If `True`, return ``(equal, reason)`` where ``reason``
        is `None` when equal or a short explanation otherwise; if `False`,
        return just the bool.

    Returns
    -------
    bool
        Whether they are equal (after ignoring according to the
        parameters).

    Notes
    -----
    All values (including those of indices) must be copyable and support
    ``__eq__`` such that a copy equals its original; a value must equal
    itself unless it's ``NaN``.  Values need not be orderable or hashable,
    which makes this flexible but not efficient.
    '''
    verdict = _equals(df1, df2, ignore_order, ignore_indices, all_close)
    if _return_reason:
        return verdict
    return verdict[0]
python
{ "resource": "" }
q40576
_try_mask_first_row
train
def _try_mask_first_row(row, values, all_close, ignore_order):
    '''
    Mask the first unmasked row of `values` that matches `row`.

    row : 1d array
        Row to search for.
    values : 2d masked array
        Each row is either fully masked or not masked at all; fully
        masked rows have already been consumed by earlier matches.
    all_close : bool
        Compare floats with np.isclose instead of ==.
    ignore_order : bool
        Ignore column order.

    Return whether a row was masked. If False, masked nothing.
    '''
    for row2 in values:
        mask = ma.getmaskarray(row2)
        assert mask.sum() in (0, len(mask))  # sanity check: all or none masked
        if mask[0]:
            # Row already fully masked (consumed earlier); skip it.
            # Note: at this point row2's mask is either all False or all True
            continue
        # mask each value of row1 in row2
        if _try_mask_row(row, row2, all_close, ignore_order):
            return True
    # row did not match
    return False
python
{ "resource": "" }
q40577
_try_mask_row
train
def _try_mask_row(row1, row2, all_close, ignore_order):
    '''
    If each value in row1 matches a value in row2, mask all of row2.

    row1 : 1d array
    row2 : 1d masked array whose mask is all False on entry
    ignore_order : bool
        Ignore column order.
    all_close : bool
        Compare with np.isclose instead of ==.

    Return whether the row was masked.  On failure row2 is left with its
    mask fully cleared.
    '''
    if ignore_order:
        # Greedily mask each row1 value's first match in row2; on any
        # miss, roll back the partial masking before reporting failure.
        for value1 in row1:
            if not _try_mask_first_value(value1, row2, all_close):
                row2.mask = ma.nomask
                return False
    else:
        # Positional comparison; only mask once the whole row matched.
        for value1, value2 in zip(row1, row2):
            if not _value_equals(value1, value2, all_close):
                return False
        row2[:] = ma.masked
    assert row2.mask.all()  # sanity check
    return True
python
{ "resource": "" }
q40578
_try_mask_first_value
train
def _try_mask_first_value(value, row, all_close):
    '''
    Mask the first value in `row` equal to `value`.

    value : ~typing.Any
    row : 1d masked array
    all_close : bool
        Compare with np.isclose instead of ==.

    Return whether a value was masked.
    '''
    for idx, candidate in enumerate(row):
        if not _value_equals(value, candidate, all_close):
            continue
        row[idx] = ma.masked
        return True
    return False
python
{ "resource": "" }
q40579
_value_equals
train
def _value_equals(value1, value2, all_close): ''' Get whether 2 values are equal value1, value2 : ~typing.Any all_close : bool compare with np.isclose instead of == ''' if value1 is None: value1 = np.nan if value2 is None: value2 = np.nan are_floats = np.can_cast(type(value1), float) and np.can_cast(type(value2), float) if all_close and are_floats: return np.isclose(value1, value2, equal_nan=True) else: if are_floats: return value1 == value2 or (value1 != value1 and value2 != value2) else: return value1 == value2
python
{ "resource": "" }
q40580
assert_equals
train
def assert_equals(df1, df2, ignore_order=set(), ignore_indices=set(), all_close=False, _return_reason=False):
    '''
    Assert 2 data frames are equal.

    A more verbose form of ``assert equals(df1, df2, ...)``: on failure the
    assertion message contains the reason and both data frames.  See
    `equals` for the meaning of the parameters (`_return_reason` is
    accepted for signature symmetry but unused here).

    Parameters
    ----------
    df1 : ~pandas.DataFrame
        Actual data frame.
    df2 : ~pandas.DataFrame
        Expected data frame.
    ignore_order : ~typing.Set[int]
    ignore_indices : ~typing.Set[int]
    all_close : bool
    '''
    ok, reason = equals(df1, df2, ignore_order, ignore_indices, all_close,
                        _return_reason=True)
    assert ok, '{}\n\n{}\n\n{}'.format(reason, df1.to_string(), df2.to_string())
python
{ "resource": "" }
q40581
is_equal_strings_ignore_case
train
def is_equal_strings_ignore_case(first, second):
    """Compare two strings ignoring case; two falsy values (None, '')
    also count as equal."""
    if not (first and second):
        # At least one is falsy: equal only when both are falsy.
        return not (first or second)
    return first.upper() == second.upper()
python
{ "resource": "" }
q40582
Dataset.find_dimension_by_name
train
def find_dimension_by_name(self, dim_name):
    """Return the first dimension whose name matches `dim_name`
    (case-insensitively), or None when there is no match."""
    matching = (dim for dim in self.dimensions
                if is_equal_strings_ignore_case(dim.name, dim_name))
    return next(matching, None)
python
{ "resource": "" }
q40583
Dataset.find_dimension_by_id
train
def find_dimension_by_id(self, dim_id):
    """Return the first dimension whose id matches `dim_id`
    (case-insensitively), or None when there is no match."""
    matching = (dim for dim in self.dimensions
                if is_equal_strings_ignore_case(dim.id, dim_id))
    return next(matching, None)
python
{ "resource": "" }
q40584
DatasetUpload.save_to_json
train
def save_to_json(self):
    """Serialize this DatasetUpload into the JSON payload expected by the
    upload endpoint (API field names, file property flattened to a dict)."""
    payload = {
        'DatasetId': self.dataset,
        'Name': self.name,
        'Description': self.description,
        'Source': self.source,
        'PubDate': self.publication_date,
        'AccessedOn': self.accessed_on,
        'Url': self.dataset_ref,
        'UploadFormatType': self.upload_format_type,
        'Columns': self.columns,
        'FileProperty': self.file_property.__dict__,
        'FlatDSUpdateOptions': self.flat_ds_update_options,
        'Public': self.public,
    }
    return json.dumps(payload)
python
{ "resource": "" }
q40585
Proxy.keyspace
train
def keyspace(self, keyspace):
    """
    Convenient, consistent access to a sub-set of all keys.

    A literal format spec is used as-is; otherwise `keyspace` is treated
    as the name of a registered keyspace and looked up.
    """
    if FORMAT_SPEC.search(keyspace):
        spec = keyspace
    else:
        spec = self._keyspaces[keyspace]
    return KeyspacedProxy(self, spec)
python
{ "resource": "" }
q40586
dircmp.phase3
train
def phase3(self):
    """
    Find out differences between common files.

    Unlike the stock dircmp.phase3, this forces content comparison with
    shallow=False (stat-only comparison is not good enough here).
    """
    same, diff, funny = filecmp.cmpfiles(
        self.left, self.right, self.common_files, shallow=False)
    self.same_files = same
    self.diff_files = diff
    self.funny_files = funny
python
{ "resource": "" }
q40587
ElasticSearchStorage._insert
train
def _insert(self, namespace, stream, events, configuration):
    """
    Bulk-index events into Elasticsearch.

    `namespace` acts as db for different streams.
    `stream` is the name of a stream and `events` is an iterable of
    ``(_id, event_dict)`` pairs to insert.  `configuration` is accepted
    for interface compatibility but unused here.
    """
    # One ES index per namespace, chosen by the index manager.
    index = self.index_manager.get_index(namespace)
    # Time buckets touched by this batch; registered as aliases below so
    # time-bounded queries can locate the right indices.
    start_dts_to_add = set()

    def actions():
        # Decorate each event with ES routing metadata and a
        # logstash-style timestamp derived from the kronos time encoded
        # in its id.
        for _id, event in events:
            dt = kronos_time_to_datetime(uuid_to_kronos_time(_id))
            start_dts_to_add.add(_round_datetime_down(dt))
            event['_index'] = index
            event['_type'] = stream
            event[LOGSTASH_TIMESTAMP_FIELD] = dt.isoformat()
            yield event

    # streaming_bulk is lazy; list() forces the whole batch through.
    list(es_helpers.streaming_bulk(self.es,
                                   actions(),
                                   chunk_size=1000,
                                   refresh=self.force_refresh))
    self.index_manager.add_aliases(namespace, index, start_dts_to_add)
python
{ "resource": "" }
q40588
TraceFile.scan
train
def scan(self, t, dt=None, aggfunc=None):
    """
    Return the spectrum at time `t` (optionally aggregated over a window
    `dt` with `aggfunc`), delegating to the underlying data object.
    """
    spectrum = self.data.scan(t, dt, aggfunc)
    return spectrum
python
{ "resource": "" }
q40589
_generate_storage_broker_lookup
train
def _generate_storage_broker_lookup():
    """Return a dict mapping storage-broker keys to their classes, as
    discovered from the "dtool.storage_brokers" entry points."""
    loaded = (ep.load() for ep in iter_entry_points("dtool.storage_brokers"))
    return {broker.key: broker for broker in loaded}
python
{ "resource": "" }
q40590
_get_storage_broker
train
def _get_storage_broker(uri, config_path):
    """Instantiate the storage broker class appropriate for the scheme of
    `uri` (after sanitising it)."""
    clean_uri = dtoolcore.utils.sanitise_uri(uri)
    scheme = dtoolcore.utils.generous_parse_uri(clean_uri).scheme
    StorageBroker = _generate_storage_broker_lookup()[scheme]
    return StorageBroker(clean_uri, config_path)
python
{ "resource": "" }
q40591
_admin_metadata_from_uri
train
def _admin_metadata_from_uri(uri, config_path):
    """Fetch and return the admin metadata stored at `uri`."""
    broker = _get_storage_broker(dtoolcore.utils.sanitise_uri(uri),
                                 config_path)
    return broker.get_admin_metadata()
python
{ "resource": "" }
q40592
_is_dataset
train
def _is_dataset(uri, config_path):
    """Return whether `uri` points at a dataset, i.e. whether its storage
    location holds admin metadata."""
    broker = _get_storage_broker(dtoolcore.utils.sanitise_uri(uri),
                                 config_path)
    return broker.has_admin_metadata()
python
{ "resource": "" }
q40593
generate_admin_metadata
train
def generate_admin_metadata(name, creator_username=None):
    """Build and return the administrative metadata dict for a new proto
    dataset.

    :param name: dataset name (validated; DtoolCoreInvalidNameError on
        failure)
    :param creator_username: defaults to the current user
    """
    if not dtoolcore.utils.name_is_valid(name):
        raise DtoolCoreInvalidNameError()
    if creator_username is None:
        creator_username = dtoolcore.utils.getuser()
    now = datetime.datetime.utcnow()
    return {
        "uuid": str(uuid.uuid4()),
        "dtoolcore_version": __version__,
        "name": name,
        "type": "protodataset",
        "creator_username": creator_username,
        "created_at": dtoolcore.utils.timestamp(now),
    }
python
{ "resource": "" }
q40594
_generate_uri
train
def _generate_uri(admin_metadata, base_uri):
    """Return dataset URI.

    :param admin_metadata: dataset administrative metadata
    :param base_uri: base URI from which to derive dataset URI
    :returns: dataset URI
    """
    StorageBroker = _get_storage_broker(base_uri, config_path=None)
    return StorageBroker.generate_uri(
        admin_metadata["name"], admin_metadata["uuid"], base_uri)
python
{ "resource": "" }
q40595
copy
train
def copy(src_uri, dest_base_uri, config_path=None, progressbar=None):
    """Copy a dataset to another location and freeze the result.

    :param src_uri: URI of dataset to be copied
    :param dest_base_uri: base of URI for copy target
    :param config_path: path to dtool configuration file
    :returns: URI of new dataset
    """
    source = DataSet.from_uri(src_uri)
    target = _copy_create_proto_dataset(
        source, dest_base_uri, config_path, progressbar)
    _copy_content(source, target, progressbar)
    target.freeze(progressbar=progressbar)
    return target.uri
python
{ "resource": "" }
q40596
copy_resume
train
def copy_resume(src_uri, dest_base_uri, config_path=None, progressbar=None):
    """Resume copying a dataset to another location.

    Items already present at the destination with the same size as in the
    source are skipped; everything else is copied across, then the
    destination dataset is frozen.

    :param src_uri: URI of dataset to be copied
    :param dest_base_uri: base of URI for copy target
    :param config_path: path to dtool configuration file (unused here)
    :returns: URI of new dataset
    """
    source = DataSet.from_uri(src_uri)
    # Derive the URI of the (already existing) destination proto dataset.
    dest_uri = _generate_uri(source._admin_metadata, dest_base_uri)
    target = ProtoDataSet.from_uri(dest_uri)
    _copy_content(source, target, progressbar)
    target.freeze(progressbar=progressbar)
    return target.uri
python
{ "resource": "" }
q40597
_BaseDataSet.update_name
train
def update_name(self, new_name):
    """Rename the proto dataset, persisting the change when admin metadata
    has already been written to storage.

    :param new_name: the new name of the proto dataset
    :raises DtoolCoreInvalidNameError: if the name is not valid
    """
    if not dtoolcore.utils.name_is_valid(new_name):
        raise DtoolCoreInvalidNameError()
    self._admin_metadata['name'] = new_name
    if self._storage_broker.has_admin_metadata():
        # Already persisted once: write the updated metadata back.
        self._storage_broker.put_admin_metadata(self._admin_metadata)
python
{ "resource": "" }
q40598
_BaseDataSet._put_overlay
train
def _put_overlay(self, overlay_name, overlay): """Store overlay so that it is accessible by the given name. :param overlay_name: name of the overlay :param overlay: overlay must be a dictionary where the keys are identifiers in the dataset :raises: TypeError if the overlay is not a dictionary, ValueError if identifiers in overlay and dataset do not match """ if not isinstance(overlay, dict): raise TypeError("Overlay must be dict") if set(self._identifiers()) != set(overlay.keys()): raise ValueError("Overlay keys must be dataset identifiers") self._storage_broker.put_overlay(overlay_name, overlay)
python
{ "resource": "" }
q40599
_BaseDataSet.generate_manifest
train
def generate_manifest(self, progressbar=None):
    """Build and return the manifest dict describing every item currently
    held by the storage broker."""
    if progressbar:
        progressbar.label = "Generating manifest"
    items = {}
    for handle in self._storage_broker.iter_item_handles():
        identifier = dtoolcore.utils.generate_identifier(handle)
        items[identifier] = self._storage_broker.item_properties(handle)
        if progressbar:
            progressbar.item_show_func = lambda x: handle
            progressbar.update(1)
    return {
        "items": items,
        "dtoolcore_version": __version__,
        "hash_function": self._storage_broker.hasher.name,
    }
python
{ "resource": "" }