_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q38900
IndexPage.run
train
def run(self):
    """
    Index the document into the local Elasticsearch instance.

    Since ids are predictable, we won't index anything twice.
    """
    with self.input().open() as handle:
        body = json.loads(handle.read())
    es = elasticsearch.Elasticsearch()
    # ``doc_id`` rather than ``id`` to avoid shadowing the builtin.
    doc_id = body.get('_id')
    es.index(index='frontpage', doc_type='html', id=doc_id, body=body)
python
{ "resource": "" }
q38901
DailyIndex.requires
train
def requires(self):
    """
    Declare one IndexPage dependency per configured newspaper,
    all for this task's date.
    """
    return (IndexPage(url=url, date=self.date) for url in NEWSPAPERS)
python
{ "resource": "" }
q38902
Query.get_name
train
def get_name(self, data):
    """
    For non-specific queries, this will return the actual name
    in the result.
    """
    if self.node.specific_attribute:
        return self.node.name
    label = data.get(self.predicate_var)
    if str(RDF.type) in (self.node.name, label):
        return '$schema'
    if label.startswith(PRED):
        label = label[len(PRED):]
    return label
python
{ "resource": "" }
q38903
Query.project
train
def project(self, q, parent=False):
    """
    Figure out which attributes should be returned for the current
    level of the query.

    :param q: the query object being built up.
    :param parent: when True, also project the parent's variable.
    """
    if self.parent:
        # Was a stray debug ``print``; use the module logger like
        # ``Query.query`` does.
        log.debug("Projecting: %r %r %r",
                  self.parent.var, self.predicate, self.var)
    q = q.project(self.var, append=True)
    if parent and self.parent:
        q = q.project(self.parent.var, append=True)
    if not self.node.specific_attribute:
        q = q.project(self.predicate, append=True)
    for child in self.children:
        if child.node.leaf:
            q = child.project(q)
    return q
python
{ "resource": "" }
q38904
Query.filter
train
def filter(self, q, parents=None):
    """
    Apply any filters to the query.
    """
    if self.node.leaf and self.node.filtered:
        # TODO: subject filters?
        q = q.where((self.parent.var, self.predicate, self.var))
        # TODO: inverted nodes
        q = self.filter_value(q, self.var)
    elif self.parent is not None:
        q = q.where((self.parent.var, self.predicate, self.var))
    if parents is not None:
        refs = [URIRef(p) for p in parents]
        q = q.filter(self.parent.var.in_(*refs))
    # TODO: forbidden nodes.
    for child in self.children:
        q = child.filter(q)
    return q
python
{ "resource": "" }
q38905
Query.query
train
def query(self, parents=None):
    """
    Compose the query and generate SPARQL.
    """
    # TODO: benchmark single-query strategy
    q = Select([])
    q = self.project(q, parent=True)
    q = self.filter(q, parents=parents)
    if self.parent is None:
        subq = self.filter(Select([self.var]), parents=parents)
        subq = subq.offset(self.node.offset)
        subq = subq.limit(self.node.limit)
        subq = subq.distinct()
        # TODO: sorting.
        subq = subq.order_by(desc(self.var))
        q = q.where(subq)
    # if hasattr(self.context, 'identifier'):
    #     q._where = graph(self.context.identifier, q._where)
    log.debug("Compiled query: %r", q.compile())
    return q
python
{ "resource": "" }
q38906
Query.base_object
train
def base_object(self, data):
    """
    Make sure to return all the existing filter fields
    for query results.
    """
    record = {'id': data.get(self.id)}
    if self.parent is not None:
        record['$parent'] = data.get(self.parent.id)
    return record
python
{ "resource": "" }
q38907
Query.execute
train
def execute(self, parents=None):
    """
    Run the data query and construct entities from its results.

    :param parents: optional iterable of parent ids used to constrain
        the query.
    :return: an ``OrderedDict`` mapping entity id to entity dict.
    """
    results = OrderedDict()
    for row in self.query(parents=parents).execute(self.context.graph):
        data = {k: v.toPython() for (k, v) in row.asdict().items()}
        # ``ent_id`` rather than ``id`` to avoid shadowing the builtin.
        ent_id = data.get(self.id)
        if ent_id not in results:
            results[ent_id] = self.base_object(data)
        for child in self.children:
            if child.id in data:
                name = child.get_name(data)
                value = data.get(child.id)
                if child.node.many and \
                        child.node.op not in [OP_IN, OP_NIN]:
                    # Multi-valued attribute: accumulate into a list.
                    if name not in results[ent_id]:
                        results[ent_id][name] = [value]
                    else:
                        results[ent_id][name].append(value)
                else:
                    results[ent_id][name] = value
    return results
python
{ "resource": "" }
q38908
Query.collect
train
def collect(self, parents=None):
    """
    Given re-constructed entities, conduct queries for child entities
    and merge them into the current level's object graph.
    """
    results = self.execute(parents=parents)
    ids = results.keys()
    for child in self.nested():
        name = child.node.name
        for child_data in child.collect(parents=ids).values():
            parent_id = child_data.pop('$parent', None)
            if child.node.many:
                results[parent_id].setdefault(name, []).append(child_data)
            else:
                results[parent_id][name] = child_data
    return results
python
{ "resource": "" }
q38909
_download_and_decompress_if_necessary
train
# NOTE(review): source line was flattened; the nesting of ``move``/``os.remove``
# relative to the ``with zipfile.ZipFile`` block cannot be recovered with
# certainty, so the code is left byte-identical. Behavior visible here:
# downloads ``download_url`` to a temp file, then depending on the URL and
# target-filename extensions either (a) unzips it, picking the member matching
# ``filename`` or else the biggest member, (b) gunzips it into ``full_path``,
# (c) converts the first HTML table to CSV via pandas, or (d) plain-moves it.
# ``move`` is presumably shutil.move — confirm against file-level imports.
def _download_and_decompress_if_necessary( full_path, download_url, timeout=None, use_wget_if_available=False): """ Downloads remote file at `download_url` to local file at `full_path` """ logger.info("Downloading %s to %s", download_url, full_path) filename = os.path.split(full_path)[1] base_name, ext = os.path.splitext(filename) tmp_path = _download_to_temp_file( download_url=download_url, timeout=timeout, base_name=base_name, ext=ext, use_wget_if_available=use_wget_if_available) if download_url.endswith("zip") and not filename.endswith("zip"): logger.info("Decompressing zip into %s...", filename) with zipfile.ZipFile(tmp_path) as z: names = z.namelist() assert len(names) > 0, "Empty zip archive" if filename in names: chosen_filename = filename else: # If zip archive contains multiple files, choose the biggest. biggest_size = 0 chosen_filename = names[0] for info in z.infolist(): if info.file_size > biggest_size: chosen_filename = info.filename biggest_size = info.file_size extract_path = z.extract(chosen_filename) move(extract_path, full_path) os.remove(tmp_path) elif download_url.endswith("gz") and not filename.endswith("gz"): logger.info("Decompressing gzip into %s...", filename) with gzip.GzipFile(tmp_path) as src: contents = src.read() os.remove(tmp_path) with open(full_path, 'wb') as dst: dst.write(contents) elif download_url.endswith(("html", "htm")) and full_path.endswith(".csv"): logger.info("Extracting HTML table into CSV %s...", filename) df = pd.read_html(tmp_path, header=0)[0] df.to_csv(full_path, sep=',', index=False, encoding='utf-8') else: move(tmp_path, full_path)
python
{ "resource": "" }
q38910
fetch_and_transform
train
def fetch_and_transform(
        transformed_filename,
        transformer,
        loader,
        source_filename,
        source_url,
        subdir=None):
    """
    Fetch a remote file from `source_url`, save it locally as
    `source_filename` and then use the `loader` and `transformer`
    function arguments to turn this saved data into an in-memory object.
    """
    transformed_path = build_path(transformed_filename, subdir)
    if os.path.exists(transformed_path):
        logger.info("Cached data file: %s", transformed_path)
        result = loader(transformed_path)
    else:
        source_path = fetch_file(source_url, source_filename, subdir)
        logger.info("Generating data file %s from %s",
                    transformed_path, source_path)
        result = transformer(source_path, transformed_path)
    assert os.path.exists(transformed_path)
    return result
python
{ "resource": "" }
q38911
fetch_csv_dataframe
train
def fetch_csv_dataframe(
        download_url,
        filename=None,
        subdir=None,
        **pandas_kwargs):
    """
    Download a remote file from `download_url` and save it locally as
    `filename`. Load that local file as a CSV into Pandas using extra
    keyword arguments such as sep='\t'.
    """
    local_path = fetch_file(
        download_url=download_url,
        filename=filename,
        decompress=True,
        subdir=subdir)
    return pd.read_csv(local_path, **pandas_kwargs)
python
{ "resource": "" }
q38912
getLoader
train
def getLoader(*a, **kw):
    """
    Deprecated. Don't use this.
    """
    warn("xmantissa.publicweb.getLoader is deprecated, use "
         "PrivateApplication.getDocFactory or SiteTemplateResolver."
         "getDocFactory.",
         category=DeprecationWarning, stacklevel=2)
    # Delegate to the real implementation under an alias so the local
    # name does not shadow this deprecated shim.
    from xmantissa.webtheme import getLoader as _realGetLoader
    return _realGetLoader(*a, **kw)
python
{ "resource": "" }
q38913
_CustomizingResource.locateChild
train
# NOTE(review): Python 2-only syntax — ``def finishLocating((nextRes,
# nextPath))`` uses tuple parameter unpacking, which does not parse under
# Python 3. Left byte-identical; a restyle would require porting. The callback
# wraps each located child in a _CustomizingResource (or its ICustomizable
# customization) so customization follows the resource tree.
def locateChild(self, ctx, segments): """ Return a Deferred which will fire with the customized version of the resource being located. """ D = defer.maybeDeferred( self.currentResource.locateChild, ctx, segments) def finishLocating((nextRes, nextPath)): custom = ixmantissa.ICustomizable(nextRes, None) if custom is not None: return (custom.customizeFor(self.forWho), nextPath) self.currentResource = nextRes if nextRes is None: return (nextRes, nextPath) return (_CustomizingResource(nextRes, self.forWho), nextPath) return D.addCallback(finishLocating)
python
{ "resource": "" }
q38914
_PublicPageMixin.render_authenticateLinks
train
def render_authenticateLinks(self, ctx, data):
    """
    For unauthenticated users, add login and signup links to the given
    tag. For authenticated users, remove the given tag from the output.

    When necessary, the I{signup-link} pattern will be loaded from the
    tag. Each copy of it will have I{prompt} and I{url} slots filled.
    The list of copies will be added as children of the tag.
    """
    if self.username is not None:
        return ''
    # there is a circular import here which should probably be avoidable,
    # since we don't actually need signup links on the signup page. on the
    # other hand, maybe we want to eventually put those there for
    # consistency. for now, this import is easiest, and although it's a
    # "friend" API, which I dislike, it doesn't seem to cause any real
    # problems... -glyph
    from xmantissa.signup import _getPublicSignupInfo
    signupPattern = inevow.IQ(ctx.tag).patternGenerator('signup-link')
    links = []
    for (prompt, url) in _getPublicSignupInfo(self.store):
        links.append(
            signupPattern.fillSlots('prompt', prompt).fillSlots('url', url))
    return ctx.tag[links]
python
{ "resource": "" }
q38915
_PublicPageMixin.render_startmenu
train
def render_startmenu(self, ctx, data):
    """
    For authenticated users, add the start-menu style navigation to the
    given tag. For unauthenticated users, remove the given tag from the
    output.

    @see L{xmantissa.webnav.startMenu}
    """
    if self.username is None:
        return ''
    translator = self._getViewerPrivateApplication()
    navigation = translator.getPageComponents().navigation
    return startMenu(translator, navigation, ctx.tag)
python
{ "resource": "" }
q38916
_PublicPageMixin.render_settingsLink
train
def render_settingsLink(self, ctx, data):
    """
    For authenticated users, add the URL of the settings page to the
    given tag. For unauthenticated users, remove the given tag from the
    output.
    """
    if self.username is None:
        return ''
    translator = self._getViewerPrivateApplication()
    settings = translator.getPageComponents().settings
    return settingsLink(translator, settings, ctx.tag)
python
{ "resource": "" }
q38917
_PublicPageMixin.render_applicationNavigation
train
def render_applicationNavigation(self, ctx, data):
    """
    For authenticated users, add primary application navigation to the
    given tag. For unauthenticated users, remove the given tag from the
    output.

    @see L{xmantissa.webnav.applicationNavigation}
    """
    if self.username is None:
        return ''
    translator = self._getViewerPrivateApplication()
    navigation = translator.getPageComponents().navigation
    return applicationNavigation(ctx, translator, navigation)
python
{ "resource": "" }
q38918
_PublicPageMixin.render_search
train
def render_search(self, ctx, data):
    """
    Render some UI for performing searches, if we know about a search
    aggregator.
    """
    if self.username is None:
        return ''
    translator = self._getViewerPrivateApplication()
    aggregator = translator.getPageComponents().searchAggregator
    if aggregator is None or not aggregator.providers():
        return ''
    return ctx.tag.fillSlots(
        'form-action', translator.linkTo(aggregator.storeID))
python
{ "resource": "" }
q38919
_PublicPageMixin.getHeadContent
train
def getHeadContent(self, req):
    """
    Retrieve a list of header content from all installed themes on the
    site store.
    """
    site = ixmantissa.ISiteURLGenerator(self.store)
    for theme in getInstalledThemes(self.store):
        yield theme.head(req, site)
python
{ "resource": "" }
q38920
_OfferingsFragment.data_offerings
train
# NOTE(review): flattened source; the try/except placement around the share
# lookup is reconstructed by readers from statement order, so the code is
# left byte-identical. For each InstalledOffering: offerings still using the
# deprecated IPublicPage adaptation are yielded with a DeprecationWarning;
# otherwise the offering is yielded only if its app store exposes a default
# share to the everyone role (NoSuchShare offerings are skipped).
def data_offerings(self, ctx, data): """ Generate a list of installed offerings. @return: a generator of dictionaries mapping 'name' to the name of an offering installed on the store. """ for io in self.original.store.query(offering.InstalledOffering): pp = ixmantissa.IPublicPage(io.application, None) if pp is not None and getattr(pp, 'index', True): warn("Use the sharing system to provide public pages," " not IPublicPage", category=DeprecationWarning, stacklevel=2) yield {'name': io.offeringName} else: s = io.application.open() try: pp = getEveryoneRole(s).getShare(getDefaultShareID(s)) yield {'name': io.offeringName} except NoSuchShare: continue
python
{ "resource": "" }
q38921
_PublicFrontPage._getAppStoreResource
train
# NOTE(review): Python 2 code — uses the ``unicode`` builtin; and the exact
# nesting of the two ``return`` statements inside the flattened conditionals
# cannot be recovered with certainty, so the code is left byte-identical.
# Looks up an InstalledOffering by name: deprecated IPublicPage offerings get
# pp.getResource() (with a DeprecationWarning), others a SharingIndex over the
# opened app store; returns None when no offering matches.
def _getAppStoreResource(self, ctx, name): """ Customize child lookup such that all installed offerings on the site store that this page is viewing are given an opportunity to display their own page. """ offer = self.frontPageItem.store.findFirst( offering.InstalledOffering, offering.InstalledOffering.offeringName == unicode(name, 'ascii')) if offer is not None: pp = ixmantissa.IPublicPage(offer.application, None) if pp is not None: warn("Use the sharing system to provide public pages," " not IPublicPage", category=DeprecationWarning, stacklevel=2) return pp.getResource() return SharingIndex(offer.application.open(), self.webViewer) return None
python
{ "resource": "" }
q38922
_PublicFrontPage.child_
train
def child_(self, ctx):
    """
    If the root resource is requested, return the primary application's
    front page, if a primary application has been chosen. Otherwise
    return 'self', since this page can render a simple index.
    """
    app = self.frontPageItem.defaultApplication
    if app is None:
        return self.webViewer.wrapModel(
            _OfferingsFragment(self.frontPageItem))
    index = SharingIndex(app.open(), self.webViewer)
    return index.locateChild(ctx, [''])[0]
python
{ "resource": "" }
q38923
AnonymousSite.rootChild_resetPassword
train
def rootChild_resetPassword(self, req, webViewer):
    """
    Return a page which will allow the user to re-set their password.
    """
    # Imported locally (as in the rest of this module) to dodge a
    # circular import with xmantissa.signup.
    from xmantissa.signup import PasswordResetResource as _ResetPage
    return _ResetPage(self.store)
python
{ "resource": "" }
q38924
BaseConfigurator.get_configurable_by_name
train
def get_configurable_by_name(self, name):
    """
    Returns the registered configurable with the specified name or
    ``None`` if no such configurator exists.

    :param name: name of the configurable to look up.
    """
    # ``next`` with a default stops at the first match instead of
    # building the full list of matches just to take its head.
    return next(
        (c for c in self.configurables if c.name == name), None)
python
{ "resource": "" }
q38925
Project.results
train
def results(self, trial_ids):
    """
    Accepts a sequence of trial ids and returns a pandas dataframe
    with the schema trial_id, iteration?, *metric_schema_union
    where iteration is an optional column that specifies the iteration
    when a user logged a metric, if the user supplied one. The iteration
    column is added if any metric was logged with an iteration. Then,
    every metric name that was ever logged is a column in the
    metric_schema_union.
    """
    metadata_folder = os.path.join(self.log_dir, constants.METADATA_FOLDER)
    # TODO: various file-creation corner cases like the result file not
    # always existing if stuff is not logged and etc should be ironed out
    # (would probably be easier if we had a centralized Sync class which
    # relied on some formal remote store semantics).
    frames = []
    for trial_id in trial_ids:
        # TODO constants should just contain the recipes for filename
        # construction instead of this multi-file implicit constraint
        result_file = os.path.join(
            metadata_folder, trial_id + "_" + constants.RESULT_SUFFIX)
        assert os.path.isfile(result_file), result_file
        frames.append(pd.read_json(result_file, typ='frame', lines=True))
    return pd.concat(frames, axis=0, ignore_index=True, sort=False)
python
{ "resource": "" }
q38926
Project.fetch_artifact
train
def fetch_artifact(self, trial_id, prefix):
    """
    Verifies that all children of the artifact prefix path are available
    locally. Fetches them if not.

    Returns the local path to the given trial's artifacts at the
    specified prefix, which is always just {log_dir}/{trial_id}/{prefix}
    """
    # TODO: general windows concern: local prefix will be in
    # backslashes but remote dirs will be expecting /
    # TODO: having s3 logic split between project and sync.py
    # worries me
    local_path = os.path.join(self.log_dir, trial_id, prefix)
    if self.upload_dir:
        remote_path = '/'.join([self.upload_dir, trial_id, prefix])
        _remote_to_local_sync(remote_path, local_path)
    return local_path
python
{ "resource": "" }
q38927
MetaData._load
train
def _load(self):
    """
    Load provenance info from the main store.
    """
    graph = self.context.parent.graph.get_context(self.context.identifier)
    data = {}
    for (_, pred, obj) in graph.triples((self.context.identifier, None, None)):
        if pred.startswith(META):
            # Strip the META namespace prefix to recover the bare name.
            data[pred[len(META):]] = obj.toPython()
    return data
python
{ "resource": "" }
q38928
MetaData.generate
train
def generate(self):
    """
    Add provenance info to the context graph.
    """
    marker = (self.context.identifier, RDF.type, META.Provenance)
    if marker not in self.context.graph:
        self.context.graph.add(marker)
    for name, value in self.data.items():
        # Replace any previous value for this META predicate.
        pattern = (self.context.identifier, META[name], None)
        if pattern in self.context.graph:
            self.context.graph.remove(pattern)
        self.context.graph.add(
            (self.context.identifier, META[name], Literal(value)))
python
{ "resource": "" }
q38929
jsonresolver_loader
train
def jsonresolver_loader(url_map):
    """Jsonresolver hook for funders resolving."""
    def endpoint(doi_code):
        pid_value = "10.13039/{0}".format(doi_code)
        resolver = Resolver(pid_type='frdoi', object_type='rec',
                            getter=Record.get_record)
        _, record = resolver.resolve(pid_value)
        return record
    pattern = '/10.13039/<doi_code>'
    for host in ('doi.org', 'dx.doi.org'):
        url_map.add(Rule(pattern, endpoint=endpoint, host=host))
python
{ "resource": "" }
q38930
SR830.snap
train
def snap(self, *args):
    """Records up to 6 parameters at a time.

    :param args: Specifies the values to record. Valid ones are 'X',
        'Y', 'R', 'Theta', 'AuxIn1', 'AuxIn2', 'AuxIn3', 'AuxIn4',
        'Ref', 'CH1' and 'CH2'. If none are given 'X' and 'Y' are
        used.
    :raises ValueError: if more than 6 parameters are requested.
    :raises KeyError: if an argument is not one of the names above
        (note: the name is case-sensitive; the docstring previously
        advertised 'theta', which the parameter map rejects).
    """
    # TODO: Do not use transport directly.
    params = {'X': 1, 'Y': 2, 'R': 3, 'Theta': 4, 'AuxIn1': 5,
              'AuxIn2': 6, 'AuxIn3': 7, 'AuxIn4': 8, 'Ref': 9,
              'CH1': 10, 'CH2': 11}
    if not args:
        args = ['X', 'Y']
    if len(args) > 6:
        raise ValueError('Too many parameters (max: 6).')
    cmd = 'SNAP? ' + ','.join(map(lambda x: str(params[x]), args))
    result = self.transport.ask(cmd)
    return map(float, result.split(','))
python
{ "resource": "" }
q38931
SR830.trace
train
def trace(self, buffer, start, length=1):
    """Reads the points stored in the channel buffer.

    :param buffer: Selects the channel buffer (either 1 or 2).
    :param start: Selects the bin where the reading starts.
    :param length: The number of bins to read.

    .. todo:: Use binary command TRCB to speed up data transmission.
    """
    # TODO: Do not use transport directly.
    query = 'TRCA? {0}, {1}, {2}'.format(buffer, start, length)
    reply = self.transport.ask(query)
    # Result format: "1.0e-004,1.2e-004,". Strip trailing comma then split.
    return (float(chunk) for chunk in reply.strip(',').split(','))
python
{ "resource": "" }
q38932
isAppStore
train
def isAppStore(s):
    """
    Return whether the given store is an application store or not.

    @param s: A Store.
    """
    parent = s.parent
    if parent is None:
        return False
    substore = parent.getItemByID(s.idInParent)
    matches = parent.query(
        InstalledOffering,
        InstalledOffering.application == substore)
    return matches.count() > 0
python
{ "resource": "" }
q38933
RouterHandler.send_file
train
def send_file(self, file):
    """
    Send a file to the client, it is a convenient method to avoid
    duplicated code

    :param file: path of the file to stream back to the client.
    """
    if self.logger:
        self.logger.debug("[ioc.extra.tornado.RouterHandler] send file %s" % file)
    self.send_file_header(file)
    # ``with`` guarantees the handle is closed even if ``write`` raises
    # (the original leaked the handle on error).
    with open(file, 'rb') as fp:
        self.write(fp.read())
python
{ "resource": "" }
q38934
triplify_object
train
def triplify_object(binding):
    """
    Create bi-directional bindings for object relationships.
    """
    triples = []
    if binding.uri:
        triples.append((binding.subject, RDF.type, binding.uri))
    if binding.parent is not None:
        # Arrays are transparent: link to the array's own parent.
        parent = binding.parent.subject
        if binding.parent.is_array:
            parent = binding.parent.parent.subject
        triples.append((parent, binding.predicate, binding.subject))
        if binding.reverse is not None:
            triples.append((binding.subject, binding.reverse, parent))
    for child in binding.properties:
        _, child_triples = triplify(child)
        triples.extend(child_triples)
    return binding.subject, triples
python
{ "resource": "" }
q38935
triplify
train
def triplify(binding):
    """
    Recursively generate RDF statement triples from the data and schema
    supplied to the application.
    """
    if binding.data is None:
        return None, []
    if binding.is_object:
        return triplify_object(binding)
    if binding.is_array:
        triples = []
        for item in binding.items:
            _, item_triples = triplify(item)
            triples.extend(item_triples)
        return None, triples
    # Leaf value: attach to the parent subject, optionally inverted.
    subject = binding.parent.subject
    triples = [(subject, binding.predicate, binding.object)]
    if binding.reverse is not None:
        triples.append((binding.object, binding.reverse, subject))
    return subject, triples
python
{ "resource": "" }
q38936
_candidate_type_names
train
def _candidate_type_names(python_type_representation): """Generator which yields possible type names to look up in the conversion dictionary. Parameters ---------- python_type_representation : object Any Python object which represents a type, such as `int`, `dtype('int8')`, `np.int8`, or `"int8"`. """ # if we get a single character code we should normalize to a NumPy type # using np.typeDict, which maps string representations of types to NumPy # type objects if python_type_representation in np.typeDict: python_type_representation = np.typeDict[python_type_representation] yield python_type_representation.__name__ # if we get a dtype object i.e. dtype('int16'), then pull out its name if hasattr(python_type_representation, 'name'): yield python_type_representation.name # convert Python types by adding their type's name if hasattr(python_type_representation, '__name__'): yield python_type_representation.__name__ # for a dtype like dtype('S3') need to access dtype.type.__name__ # to get 'string_' if hasattr(python_type_representation, 'type'): if hasattr(python_type_representation.type, '__name__'): yield python_type_representation.type.__name__ yield str(python_type_representation)
python
{ "resource": "" }
q38937
Droplet.fetch
train
def fetch(self):
    """
    Fetch & return a new `Droplet` object representing the droplet's
    current state

    :rtype: Droplet
    :raises DOAPIError: if the API endpoint replies with an error (e.g.,
        if the droplet no longer exists)
    """
    manager = self.doapi_manager
    payload = manager.request(self.url)["droplet"]
    return manager._droplet(payload)
python
{ "resource": "" }
q38938
Droplet.fetch_all_neighbors
train
def fetch_all_neighbors(self):
    r"""
    Returns a generator that yields all of the droplets running on the
    same physical server as the droplet

    :rtype: generator of `Droplet`\ s
    :raises DOAPIError: if the API endpoint replies with an error
    """
    manager = self.doapi_manager
    pages = manager.paginate(self.url + '/neighbors', 'droplets')
    return map(manager._droplet, pages)
python
{ "resource": "" }
q38939
Droplet.fetch_all_snapshots
train
def fetch_all_snapshots(self):
    r"""
    Returns a generator that yields all of the snapshot images created
    from the droplet

    :rtype: generator of `Image`\ s
    :raises DOAPIError: if the API endpoint replies with an error
    """
    manager = self.doapi_manager
    for payload in manager.paginate(self.url + '/snapshots', 'snapshots'):
        yield Image(payload, doapi_manager=manager)
python
{ "resource": "" }
q38940
Droplet.fetch_all_backups
train
def fetch_all_backups(self):
    r"""
    Returns a generator that yields all of the backup images created
    from the droplet

    :rtype: generator of `Image`\ s
    :raises DOAPIError: if the API endpoint replies with an error
    """
    manager = self.doapi_manager
    for payload in manager.paginate(self.url + '/backups', 'backups'):
        yield Image(payload, doapi_manager=manager)
python
{ "resource": "" }
q38941
Droplet.fetch_all_kernels
train
def fetch_all_kernels(self):
    r"""
    Returns a generator that yields all of the kernels available to the
    droplet

    :rtype: generator of `Kernel`\ s
    :raises DOAPIError: if the API endpoint replies with an error
    """
    manager = self.doapi_manager
    for payload in manager.paginate(self.url + '/kernels', 'kernels'):
        yield Kernel(payload, doapi_manager=manager)
python
{ "resource": "" }
q38942
Droplet.restore
train
def restore(self, image):
    """
    Restore the droplet to the specified backup image

    A Droplet restoration will rebuild an image using a backup image.
    The image ID that is passed in must be a backup of the current
    Droplet instance. The operation will leave any embedded SSH keys
    intact. [APIDocs]_

    :param image: an image ID, an image slug, or an `Image` object
        representing a backup image of the droplet
    :type image: integer, string, or `Image`
    :return: an `Action` representing the in-progress operation on the
        droplet
    :rtype: Action
    :raises DOAPIError: if the API endpoint replies with an error
    """
    image_id = image.id if isinstance(image, Image) else image
    return self.act(type='restore', image=image_id)
python
{ "resource": "" }
q38943
Droplet.resize
train
def resize(self, size, disk=None):
    """
    Resize the droplet

    :param size: a size slug or a `Size` object representing the size to
        resize to
    :type size: string or `Size`
    :param bool disk: Set to `True` for a permanent resize, including
        disk changes
    :return: an `Action` representing the in-progress operation on the
        droplet
    :rtype: Action
    :raises DOAPIError: if the API endpoint replies with an error
    """
    slug = size.slug if isinstance(size, Size) else size
    extra = {} if disk is None else {"disk": disk}
    return self.act(type='resize', size=slug, **extra)
python
{ "resource": "" }
q38944
Droplet.rebuild
train
def rebuild(self, image):
    """
    Rebuild the droplet with the specified image

    A rebuild action functions just like a new create. [APIDocs]_

    :param image: an image ID, an image slug, or an `Image` object
        representing the image the droplet should use as a base
    :type image: integer, string, or `Image`
    :return: an `Action` representing the in-progress operation on the
        droplet
    :rtype: Action
    :raises DOAPIError: if the API endpoint replies with an error
    """
    image_id = image.id if isinstance(image, Image) else image
    return self.act(type='rebuild', image=image_id)
python
{ "resource": "" }
q38945
Droplet.change_kernel
train
def change_kernel(self, kernel):
    """
    Change the droplet's kernel

    :param kernel: a kernel ID or `Kernel` object representing the new
        kernel
    :type kernel: integer or `Kernel`
    :return: an `Action` representing the in-progress operation on the
        droplet
    :rtype: Action
    :raises DOAPIError: if the API endpoint replies with an error
    """
    kernel_id = kernel.id if isinstance(kernel, Kernel) else kernel
    return self.act(type='change_kernel', kernel=kernel_id)
python
{ "resource": "" }
q38946
minter
train
def minter(record_uuid, data, pid_type, key):
    """Mint PIDs for a record."""
    pid = PersistentIdentifier.create(
        pid_type,
        data[key],
        object_type='rec',
        object_uuid=record_uuid,
        status=PIDStatus.REGISTERED,
    )
    # Additionally register a PID for every non-empty alternate identifier.
    for scheme, identifier in data['identifiers'].items():
        if not identifier:
            continue
        PersistentIdentifier.create(
            scheme,
            identifier,
            object_type='rec',
            object_uuid=record_uuid,
            status=PIDStatus.REGISTERED,
        )
    return pid
python
{ "resource": "" }
q38947
date_range
train
def date_range(start_date, end_date, increment, period):
    """
    Generate `date` objects between `start_date` and `end_date` in
    `increment` `period` intervals.
    """
    # ``current`` instead of ``next`` to avoid shadowing the builtin.
    step = relativedelta.relativedelta(**{period: increment})
    current = start_date
    while current <= end_date:
        yield current
        current += step
python
{ "resource": "" }
q38948
send_message
train
def send_message(frm=None, to=None, text=None):
    """Shortcut to send a sms using libnexmo api.

    :param frm: The originator of the message
    :param to: The message's recipient
    :param text: The text message body

    Example usage:

    >>> send_message(to='+33123456789', text='My sms message body')
    """
    assert to is not None
    assert text is not None
    sender = frm if frm is not None else settings.NEXMO_DEFAULT_FROM
    client = nexmo.Client(key=settings.NEXMO_API_KEY,
                          secret=settings.NEXMO_API_SECRET)
    return client.send_message({
        'from': sender,
        'to': to,
        'text': text,
    })
python
{ "resource": "" }
q38949
filtered
train
def filtered(f):
    '''
    Decorator function that wraps functions returning pandas dataframes,
    such that the dataframe is filtered according to left and right
    bounds set.
    '''
    def _filter(f, self, *args, **kwargs):
        frame = f(self, *args, **kwargs)
        wrapped = type(self)(frame)
        # Propagate the bounds onto the freshly wrapped result.
        wrapped._lbound = self._lbound
        wrapped._rbound = self._rbound
        return wrapped
    if not HAS_DECORATOR:
        def err_func(*args, **kwargs):
            raise RuntimeError("`pip install decorator` required")
        return err_func
    return decorator(_filter, f)
python
{ "resource": "" }
q38950
Result.to_datetime
train
def to_datetime(self, column):
    '''
    This function converts epoch timestamps to datetimes.

    :param column: column to convert from current state -> datetime
    '''
    if column not in self:
        return
    if self[column].dtype in NUMPY_NUMERICAL:
        # Numeric columns hold epoch seconds.
        self[column] = pd.to_datetime(self[column], unit='s')
    else:
        self[column] = pd.to_datetime(self[column], utc=True)
python
{ "resource": "" }
q38951
Result.set_date_bounds
train
def set_date_bounds(self, date):
    '''
    Pass in the date used in the original query.

    :param date: Date (date range) that was queried:
        date -> 'd', '~d', 'd~', 'd~d'
        d -> '%Y-%m-%d %H:%M:%S,%f', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d'
    :raises ValueError: if ``date`` contains more than one ``~``.
    '''
    if date is None:
        return
    parts = date.split('~')
    if len(parts) == 1:
        # A single date bounds both ends.
        self._lbound = ts2dt(date)
        self._rbound = ts2dt(date)
    elif len(parts) == 2:
        if parts[0] != '':
            self._lbound = ts2dt(parts[0])
        if parts[1] != '':
            self._rbound = ts2dt(parts[1])
    else:
        # ValueError (a subclass of Exception, so existing callers that
        # caught Exception still work) is the conventional type here.
        raise ValueError('Date %s is not in the correct format' % date)
python
{ "resource": "" }
q38952
Result.check_in_bounds
train
def check_in_bounds(self, date):
    '''Check that left and right bounds are sane

    :param date: date to validate left/right bounds for
    '''
    when = Timestamp(date)
    if self._lbound is not None and when < self._lbound:
        return False
    if self._rbound is not None and when > self._rbound:
        return False
    return True
python
{ "resource": "" }
q38953
Result.on_date
train
def on_date(self, date, only_count=False):
    '''
    Filters out only the rows that match the specified date.
    Works only on a Result that has _start and _end columns.

    :param date: date can be anything Pandas.Timestamp supports parsing
    :param only_count: return back only the match count
    '''
    if not self.check_in_bounds(date):
        raise ValueError('Date %s is not in the queried range.' % date)
    when = Timestamp(date)
    # Row is active on ``when`` if it started on or before it and has
    # either not ended yet (null _end) or ended strictly after it.
    mask = (self._start <= when) & ((self._end > when) | self._end_isnull)
    if only_count:
        return np.sum(mask)
    return self.filter(mask)
python
{ "resource": "" }
q38954
Result.history
train
def history(self, dates=None, linreg_since=None, lin_reg_days=20):
    '''
    Works only on a Result that has _start and _end columns.

    :param dates: list of dates to query
    :param linreg_since: estimate future values using linear regression.
    :param lin_reg_days: number of past days to use as prediction basis
    '''
    dates = dates or self.get_dates_range()
    counts = [self.on_date(day, only_count=True) for day in dates]
    series = Series(counts, index=dates)
    if linreg_since is not None:
        series = self._linreg_future(series, linreg_since, lin_reg_days)
    return series.sort_index()
python
{ "resource": "" }
q38955
Result._linreg_future
train
# NOTE(review): Python 2 idiom — ``np.array(map(dt2ts, hist.index))`` relies
# on ``map`` returning a list; under Python 3 it would build a useless 0-d
# object array. Left byte-identical because the file targets Python 2.
# Fits counts over the last ``days`` days with a least-squares line, then
# overwrites every series entry dated after ``since`` with the (non-negative,
# clamped-at-zero) predicted value.
def _linreg_future(self, series, since, days=20): ''' Predicts future using linear regression. :param series: A series in which the values will be places. The index will not be touched. Only the values on dates > `since` will be predicted. :param since: The starting date from which the future will be predicted. :param days: Specifies how many past days should be used in the linear regression. ''' last_days = pd.date_range(end=since, periods=days) hist = self.history(last_days) xi = np.array(map(dt2ts, hist.index)) A = np.array([xi, np.ones(len(hist))]) y = hist.values w = np.linalg.lstsq(A.T, y)[0] for d in series.index[series.index > since]: series[d] = w[0] * dt2ts(d) + w[1] series[d] = 0 if series[d] < 0 else series[d] return series
python
{ "resource": "" }
q38956
Result.get_dates_range
train
def get_dates_range(self, scale='auto', start=None, end=None,
                    date_max='2010-01-01'):
    '''
    Returns a list of dates sampled according to the specified
    parameters.

    :param scale: {'auto', 'maximum', 'daily', 'weekly', 'monthly',
        'quarterly', 'yearly'}
        Scale specifies the sampling intervals.
        'auto' will heuristically choose a scale for quick processing
    :param start: First date that will be included.
    :param end: Last date that will be included
    :param date_max: fallback start date when none can be determined.
    '''
    if scale not in ['auto', 'maximum', 'daily', 'weekly', 'monthly',
                     'quarterly', 'yearly']:
        raise ValueError('Incorrect scale: %s' % scale)
    start = Timestamp(start or self._start.min() or date_max)
    # FIXME: start != start is true for NaN objects... is NaT the same?
    start = Timestamp(date_max) if repr(start) == 'NaT' else start
    end = Timestamp(end or max(Timestamp(self._end.max()),
                               self._start.max()))
    # FIXME: end != end ?
    end = datetime.utcnow() if repr(end) == 'NaT' else end
    start = start if self.check_in_bounds(start) else self._lbound
    end = end if self.check_in_bounds(end) else self._rbound

    if scale == 'auto':
        scale = self._auto_select_scale(start, end)
    if scale == 'maximum':
        start_dts = list(self._start.dropna().values)
        end_dts = list(self._end.dropna().values)
        # Comprehensions instead of bare map/filter so a real list is
        # returned on Python 3 as well (py2 map/filter return lists,
        # py3 return iterators).
        dts = [Timestamp(dt) for dt in set(start_dts + end_dts)]
        return [ts for ts in dts
                if self.check_in_bounds(ts) and start <= ts <= end]

    freq = dict(daily='D', weekly='W', monthly='M', quarterly='3M',
                yearly='12M')
    offset = dict(daily=off.Day(n=0), weekly=off.Week(),
                  monthly=off.MonthEnd(), quarterly=off.QuarterEnd(),
                  yearly=off.YearEnd())
    # for some reason, weekly date range gives one week less:
    end_ = end + off.Week() if scale == 'weekly' else end
    ret = list(pd.date_range(start + offset[scale], end_,
                             freq=freq[scale]))
    ret = [dt for dt in ret if dt <= end]
    ret = [start] + ret if ret and start < ret[0] else ret
    ret = ret + [end] if ret and end > ret[-1] else ret
    return [ts for ts in ret if self.check_in_bounds(ts)]
python
{ "resource": "" }
q38957
Result._auto_select_scale
train
def _auto_select_scale(self, start=None, end=None, ideal=300): ''' Guess what a good timeseries scale might be, given a particular data set, attempting to make the total number of x values as close to `ideal` as possible This is a helper for plotting ''' start = start or self._start.min() end = end or max(self._end.max(), self._start.max()) daily_count = (end - start).days if daily_count <= ideal: return 'daily' elif daily_count / 7 <= ideal: return 'weekly' elif daily_count / 30 <= ideal: return 'monthly' elif daily_count / 91 <= ideal: return 'quarterly' else: return 'yearly'
python
{ "resource": "" }
q38958
Result.filter_oids
train
def filter_oids(self, oids):
    '''
    Keep only the rows whose _oid is among the given oids.

    :param oids: iterable of oids to include
    '''
    wanted = set(oids)
    mask = self['_oid'].map(lambda oid: oid in wanted)
    return self[mask]
python
{ "resource": "" }
q38959
Result.unfinished_objects
train
def unfinished_objects(self):
    '''
    Keep all versions of those objects that have at least one version
    with `_end == None` or with `_end` past the right cutoff.
    '''
    open_mask = self._end_isnull
    if self._rbound is not None:
        open_mask = open_mask | (self._end > self._rbound)
    unfinished = set(self[open_mask]._oid.tolist())
    return self[self._oid.apply(lambda oid: oid in unfinished)]
python
{ "resource": "" }
q38960
Result.last_chain
train
def last_chain(self):
    '''
    Keep only the last chain for each object.

    A chain is a series of consecutive versions where the `_end` of
    one version is the `_start` of the next.
    '''
    cols = self.columns.tolist()
    i_oid = cols.index('_oid')
    i_start = cols.index('_start')
    i_end = cols.index('_end')

    # Collect the sets of start/end stamps per object.
    starts, ends = {}, {}
    for row in self.values:
        oid = row[i_oid]
        starts.setdefault(oid, set()).add(row[i_start])
        ends.setdefault(oid, set()).add(row[i_end])

    # The last chain begins just after the latest `_end` that no other
    # version starts from (excluding the object's final end).
    cutoffs = {}
    for oid, end_set in ends.items():
        final = pd.NaT if pd.NaT in end_set else max(end_set)
        breaks = end_set - starts[oid] - set([final])
        cutoffs[oid] = max(breaks) if breaks else None

    keep = [row for row in self.values
            if cutoffs[row[i_oid]] is None
            or cutoffs[row[i_oid]] < row[i_start]]
    return pd.DataFrame(keep, columns=cols)
python
{ "resource": "" }
q38961
Result.one_version
train
def one_version(self, index=0):
    '''
    Keep only a single version for each object.

    :param index: List-like index of the version. 0 == first; -1 == last
    '''
    def pick(group):
        # nth start (by sort order) identifies the wanted version
        nth_start = sorted(group._start.tolist())[index]
        return group[group._start == nth_start]

    return pd.concat([pick(group) for _, group in self.groupby(self._oid)])
python
{ "resource": "" }
q38962
Result.started_after
train
def started_after(self, date):
    '''
    Keep only those objects whose first version started after the
    specified date.

    :param date: date string to use in the comparison
    '''
    threshold = Timestamp(date)
    first_starts = self.groupby(self._oid).apply(lambda g: g._start.min())
    started = set(first_starts[first_starts > threshold].index.tolist())
    return self[self._oid.apply(lambda oid: oid in started)]
python
{ "resource": "" }
q38963
Result.object_apply
train
def object_apply(self, function):
    '''
    Group by _oid, apply `function` to each group, and concatenate the
    results.

    :param function: func that takes a DataFrame and returns a DataFrame
    '''
    pieces = [function(group) for _, group in self.groupby(self._oid)]
    return pd.concat(pieces)
python
{ "resource": "" }
q38964
IIIVZincBlendeQuaternary._has_x
train
def _has_x(self, kwargs): '''Returns True if x is explicitly defined in kwargs''' return (('x' in kwargs) or (self._element_x in kwargs) or (self._type == 3 and self._element_1mx in kwargs))
python
{ "resource": "" }
q38965
IIIVZincBlendeQuaternary._get_x
train
def _get_x(self, kwargs): ''' Returns x if it is explicitly defined in kwargs. Otherwise, raises TypeError. ''' if 'x' in kwargs: return round(float(kwargs['x']), 6) elif self._element_x in kwargs: return round(float(kwargs[self._element_x]), 6) elif self._type == 3 and self._element_1mx in kwargs: return round(1. - float(kwargs[self._element_1mx]), 6) else: raise TypeError()
python
{ "resource": "" }
q38966
IIIVZincBlendeQuaternary._has_y
train
def _has_y(self, kwargs): '''Returns True if y is explicitly defined in kwargs''' return (('y' in kwargs) or (self._element_y in kwargs) or (self._type == 3 and self._element_1my in kwargs))
python
{ "resource": "" }
q38967
IIIVZincBlendeQuaternary._get_y
train
def _get_y(self, kwargs): ''' Returns y if it is explicitly defined in kwargs. Otherwise, raises TypeError. ''' if 'y' in kwargs: return round(float(kwargs['y']), 6) elif self._element_y in kwargs: return round(float(kwargs[self._element_y]), 6) elif self._type == 3 and self._element_1my in kwargs: return round(1. - float(kwargs[self._element_1my]), 6) else: raise TypeError()
python
{ "resource": "" }
q38968
IIIVZincBlendeQuaternary._has_z
train
def _has_z(self, kwargs): ''' Returns True if type is 1 or 2 and z is explicitly defined in kwargs. ''' return ((self._type == 1 or self._type ==2) and (('z' in kwargs) or (self._element_z in kwargs)))
python
{ "resource": "" }
q38969
IIIVZincBlendeQuaternary._get_z
train
def _get_z(self, kwargs): ''' Returns z if type is 1 or 2 and z is explicitly defined in kwargs. Otherwise, raises TypeError. ''' if self._type == 1 or self._type == 2: if 'z' in kwargs: return round(float(kwargs['z']), 6) elif self._element_z in kwargs: return round(float(kwargs[self._element_z]), 6) raise TypeError()
python
{ "resource": "" }
q38970
DatabaseObject.db
train
def db(cls, path=None):
    """
    Returns a pymongo Collection object from the current database
    connection. If the connection is in test mode, the collection comes
    from the test database instead.

    @param path: dotted "database.collection" path; when None the
        class's PATH attribute is used
    @raise Exception: if neither cls.PATH nor path is usable
    """
    path = path if path is not None else cls.PATH
    if path is None:
        raise Exception("No database specified")
    if "." not in path:
        raise Exception(('invalid path "%s"; database paths must be ' +
                         'of the form "database.collection"') % (path,))
    if CONNECTION.test_mode:
        return CONNECTION.get_connection()[TEST_DATABASE_NAME][path]
    (db, coll) = path.split('.', 1)
    return CONNECTION.get_connection()[db][coll]
python
{ "resource": "" }
q38971
DatabaseObject.rename
train
def rename(self, new_id):
    """
    Renames the DatabaseObject to have ID_KEY new_id. This is the only
    way DatabaseObject allows changing the ID_KEY of an object; trying
    to modify ID_KEY through the dictionary raises an exception.

    @param new_id: the new value for ID_KEY

    NOTE: This is actually a create followed by a delete.
    WARNING: If the system fails during a rename, data may be
    duplicated.
    """
    previous_id = dict.__getitem__(self, ID_KEY)
    dict.__setitem__(self, ID_KEY, new_id)
    # save under the new id first, then drop the old record
    self._collection.save(self)
    self._collection.remove({ID_KEY: previous_id})
python
{ "resource": "" }
q38972
DatabaseObject.copy
train
def copy(self, new_id=None, attribute_overrides={}):
    """
    Copies the DatabaseObject under the ID_KEY new_id.

    @param new_id: the value for ID_KEY of the copy; when None, the
        copy is created with a random ID_KEY
    @param attribute_overrides: dictionary of attribute names -> values
        to override in the copy
    """
    data = dict(self)
    data.update(attribute_overrides)
    if new_id is None:
        # let the backend assign a fresh random id
        del data[ID_KEY]
        return self.create(data, random_id=True, path=self.PATH)
    data[ID_KEY] = new_id
    return self.create(data, path=self.PATH)
python
{ "resource": "" }
q38973
raxml_alignment
train
def raxml_alignment(align_obj, raxml_model="GTRCAT", params={}, SuppressStderr=True, SuppressStdout=True): """Run raxml on alignment object align_obj: Alignment object params: you can set any params except -w and -n returns: tuple (phylonode, parsimonyphylonode, log likelihood, total exec time) """ # generate temp filename for output params["-w"] = "/tmp/" params["-n"] = get_tmp_filename().split("/")[-1] params["-m"] = raxml_model params["-p"] = randint(1,100000) ih = '_input_as_multiline_string' seqs, align_map = align_obj.toPhylip() #print params["-n"] # set up command raxml_app = Raxml( params=params, InputHandler=ih, WorkingDir=None, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout) # run raxml ra = raxml_app(seqs) # generate tree tree_node = DndParser(ra["Result"]) # generate parsimony tree parsimony_tree_node = DndParser(ra["ParsimonyTree"]) # extract log likelihood from log file log_file = ra["Log"] total_exec_time = exec_time = log_likelihood = 0.0 for line in log_file: exec_time, log_likelihood = map(float, line.split()) total_exec_time += exec_time # remove output files ra.cleanUp() return tree_node, parsimony_tree_node, log_likelihood, total_exec_time
python
{ "resource": "" }
q38974
insert_sequences_into_tree
train
def insert_sequences_into_tree(seqs, moltype, params={}, write_log=True):
    """Insert sequences into Tree.

    aln: an xxx.Alignment object, or data that can be used to build one.
    moltype: cogent.core.moltype.MolType object
    params: dict of parameters to pass in to the RAxML app controller.
    write_log: when True, RAxML's stdout is saved to a log file under
    the working directory (params["-w"]).

    The result will be a tree.
    """
    ih = '_input_as_multiline_string'

    raxml_app = Raxml(params=params,
                      InputHandler=ih,
                      WorkingDir=None,
                      SuppressStderr=False,
                      SuppressStdout=False,
                      HALT_EXEC=False)

    raxml_result = raxml_app(seqs)

    # write a log file
    if write_log:
        log_fp = join(params["-w"],'log_raxml_'+split(get_tmp_filename())[-1])
        log_file=open(log_fp,'w')
        log_file.write(raxml_result['StdOut'].read())
        log_file.close()

    '''
    # getting setup since parsimony doesn't output tree..only jplace, however
    # it is currently corrupt

    # use guppy to convert json file into a placement tree
    guppy_params={'tog':None}

    new_tree=build_tree_from_json_using_params(raxml_result['json'].name, \
                                               output_dir=params["-w"], \
                                               params=guppy_params)
    '''
    # get tree from 'Result Names'
    new_tree=raxml_result['Result'].readlines()
    # strip RAxML's inner-node labels such as "[I12]" before parsing
    filtered_tree=re.sub('\[I\d+\]','',str(new_tree))
    tree = DndParser(filtered_tree, constructor=PhyloNode)

    raxml_result.cleanUp()

    return tree
python
{ "resource": "" }
q38975
Raxml._format_output
train
def _format_output(self, outfile_name, out_type): """ Prepend proper output prefix to output filename """ outfile_name = self._absolute(outfile_name) outparts = outfile_name.split("/") outparts[-1] = self._out_format % (out_type, outparts[-1] ) return '/'.join(outparts)
python
{ "resource": "" }
q38976
Raxml._checkpoint_out_filenames
train
def _checkpoint_out_filenames(self):
    """
    RAxML generates many checkpoint files, so walk the working
    directory to collect the names of all of them.

    @raise ValueError: when no output name (-n) was specified
    """
    out_filenames = []
    if self.Parameters['-n'].isOn():
        out_name = str(self.Parameters['-n'].Value)
        walk_root = self.WorkingDir
        if self.Parameters['-w'].isOn():
            walk_root = str(self.Parameters['-w'].Value)
        for dpath, dnames, dfiles in walk(walk_root):
            # only the top-level directory is inspected
            if dpath == walk_root:
                # NOTE(review): assumes walk_root ends with a path
                # separator -- confirm, since names are concatenated.
                for gen_file in dfiles:
                    if out_name in gen_file and "checkpoint" in gen_file:
                        out_filenames.append(walk_root + gen_file)
                break
    else:
        # Python-3-compatible raise (was: raise ValueError, "...")
        raise ValueError("No output file specified.")
    return out_filenames
python
{ "resource": "" }
q38977
Raxml._handle_app_result_build_failure
train
def _handle_app_result_build_failure(self,out,err,exit_status,result_paths):
    """ Raise an informative ApplicationError when output files are missing.

    The original wrapped the detailed raise in a try whose bare except
    caught the ApplicationError itself, so the stderr detail was always
    discarded; now the generic message is only used when stderr cannot
    be read.
    """
    try:
        error_text = err.read()
    except Exception:
        raise ApplicationError('RAxML failed to run properly.')
    raise ApplicationError(
        'RAxML failed to produce an output file due to the following error: \n\n%s '
        % error_text)
python
{ "resource": "" }
q38978
Queue.names
train
def names(self):
    """
    Returns the list of available queue names. Only queues with at
    least one enqueued item show up.

    :raises ConnectionError: if the queue is not connected, or the
        redis server is unreachable.
    """
    if not self.connected:
        raise ConnectionError('Queue is not connected')
    try:
        keys = self.rdb.keys("retaskqueue-*")
    except redis.exceptions.ConnectionError as err:
        raise ConnectionError(str(err))
    # strip the 12-character "retaskqueue-" prefix
    return [key[12:] for key in keys]
python
{ "resource": "" }
q38979
Queue.length
train
def length(self):
    """
    Gives the length of the queue.

    :raises ConnectionError: if the queue is not connected, or the
        redis server is unreachable.
    """
    if not self.connected:
        raise ConnectionError('Queue is not connected')
    try:
        return self.rdb.llen(self._name)
    except redis.exceptions.ConnectionError as err:
        raise ConnectionError(str(err))
python
{ "resource": "" }
q38980
Queue.connect
train
def connect(self):
    """
    Creates the connection with the redis server.
    Return ``True`` if the connection works, else returns
    ``False``. It does not take any arguments.

    :return: ``Boolean`` value

    .. note::

       After creating the ``Queue`` object the user should call
       the ``connect`` method to create the connection.

    .. doctest::

       >>> from retask import Queue
       >>> q = Queue('test')
       >>> q.connect()
       True
    """
    config = self.config
    self.rdb = redis.Redis(config['host'], config['port'], config['db'],
                           config['password'])
    try:
        # info() round-trips to the server; its return value is not
        # needed, only the connectivity check (unused local removed).
        self.rdb.info()
    except redis.ConnectionError:
        return False
    self.connected = True
    return True
python
{ "resource": "" }
q38981
Queue.send
train
def send(self, task, result, expire=60):
    """
    Sends the result back to the producer. This should be called if
    only you want to return the result in async manner.

    :arg task: ::class:`~retask.task.Task` object
    :arg result: Result data to be send back. Should be in JSON
        serializable.
    :arg expire: Time in seconds after the key expires. Default is 60
        seconds.
    """
    payload = json.dumps(result)
    self.rdb.lpush(task.urn, payload)
    # let redis reclaim the result if nobody picks it up in time
    self.rdb.expire(task.urn, expire)
python
{ "resource": "" }
q38982
Queue.find
train
def find(self, obj):
    """Returns the index of the given object in the queue, it might be
    string which will be searched inside each task.

    :arg obj: object we are looking
    :return: -1 if the object is not found or else the location of the
        task
    """
    if not self.connected:
        raise ConnectionError('Queue is not connected')
    needle = str(obj)
    items = self.rdb.lrange(self._name, 0, -1)
    for index, item in enumerate(items):
        if item.find(needle) != -1:
            return index
    return -1
python
{ "resource": "" }
q38983
Job.result
train
def result(self):
    """
    Returns the result from the worker for this job. This is used to
    pass result in async way. ``None`` is returned when no result has
    arrived yet; the first successful read is cached.
    """
    if self.__result:
        return self.__result
    raw = self.rdb.rpop(self.urn)
    if not raw:
        return None
    self.rdb.delete(self.urn)
    self.__result = json.loads(raw)
    return self.__result
python
{ "resource": "" }
q38984
Job.wait
train
def wait(self, wait_time=0):
    """
    Blocking call to check if the worker returns the result. One can
    use job.result after this call returns ``True``.

    :arg wait_time: Time in seconds to wait, default is infinite.
    :return: `True` or `False`.

    .. note::

       This is a blocking call, you can specify the wait_time argument
       for timeout.
    """
    if self.__result:
        return True
    raw = self.rdb.brpop(self.urn, wait_time)
    if not raw:
        return False
    self.rdb.delete(self.urn)
    # brpop returns a (key, value) pair; only the value is the payload
    self.__result = json.loads(raw[1])
    return True
python
{ "resource": "" }
q38985
PushConnection.messages_in_flight
train
def messages_in_flight(self):
    """
    Returns True if there are messages waiting to be sent or that
    we're still waiting to see if errors occur for.
    """
    self.prune_sent()
    return (not self.send_queue.empty()) or len(self.sent) > 0
python
{ "resource": "" }
q38986
SQLAlchemyMiddleware.db
train
def db(self, connection_string=None):
    """Gets the SQLAlchemy session for this request, creating and
    caching one per connection string."""
    conn_str = connection_string or self.settings["db"]
    if not hasattr(self, "_db_conns"):
        self._db_conns = {}
    if conn_str not in self._db_conns:
        self._db_conns[conn_str] = oz.sqlalchemy.session(
            connection_string=conn_str)
    return self._db_conns[conn_str]
python
{ "resource": "" }
q38987
SQLAlchemyMiddleware._sqlalchemy_on_finish
train
def _sqlalchemy_on_finish(self):
    """
    Closes the sqlalchemy transaction. Rolls back if an error occurred.
    """
    if hasattr(self, "_db_conns"):
        try:
            # 2xx/3xx responses commit; anything else rolls back.
            if self.get_status() >= 200 and self.get_status() <= 399:
                for db_conn in self._db_conns.values():
                    db_conn.commit()
            else:
                for db_conn in self._db_conns.values():
                    db_conn.rollback()
        except:
            # bare except: log everything, then re-raise; the finally
            # block below still closes every connection.
            tornado.log.app_log.warning("Error occurred during database transaction cleanup: %s", str(sys.exc_info()[0]))
            raise
        finally:
            for db_conn in self._db_conns.values():
                try:
                    db_conn.close()
                except:
                    # a close failure must not mask the original error
                    tornado.log.app_log.warning("Error occurred when closing the database connection", exc_info=True)
python
{ "resource": "" }
q38988
SQLAlchemyMiddleware._sqlalchemy_on_connection_close
train
def _sqlalchemy_on_connection_close(self):
    """
    Rollsback and closes the active session, since the client
    disconnected before the request could be completed.
    """
    if hasattr(self, "_db_conns"):
        try:
            for db_conn in self._db_conns.values():
                db_conn.rollback()
        except:
            # bare except: log everything, then re-raise; the finally
            # block below still closes every connection.
            tornado.log.app_log.warning("Error occurred during database transaction cleanup: %s", str(sys.exc_info()[0]))
            raise
        finally:
            for db_conn in self._db_conns.values():
                try:
                    db_conn.close()
                except:
                    # a close failure must not mask the original error
                    tornado.log.app_log.warning("Error occurred when closing the database connection", exc_info=True)
python
{ "resource": "" }
q38989
use_settings
train
def use_settings(**kwargs):
    '''
    Context manager to temporarily override settings
    '''
    from omnic import singletons
    singletons.settings.use_settings_dict(kwargs)
    try:
        yield
    finally:
        # Restore even when the with-body raises; previously the
        # previous settings were never restored on exceptions.
        singletons.settings.use_previous_settings()
python
{ "resource": "" }
q38990
GitHub2GitLab.add_key
train
def add_key(self):
    "Add ssh key to gitlab if necessary"
    try:
        with open(self.args.ssh_public_key) as f:
            public_key = f.read().strip()
    except (IOError, OSError):
        # narrowed from a bare except: only a missing/unreadable key
        # file should be treated as "no key"
        log.debug("No key found in {}".format(self.args.ssh_public_key))
        return None
    g = self.gitlab
    url = g['url'] + "/user/keys"
    query = {'private_token': g['token']}
    keys = requests.get(url, params=query).json()
    log.debug("looking for '" + public_key + "' in " + str(keys))
    if (list(filter(lambda key: key['key'] == public_key, keys))):
        log.debug(self.args.ssh_public_key + " already exists")
        return None
    else:
        name = 'github2gitlab'
        log.info("add " + name + " ssh public key from " +
                 self.args.ssh_public_key)
        query['title'] = name
        query['key'] = public_key
        result = requests.post(url, query)
        if result.status_code != requests.codes.created:
            log.warn('Key {} already in GitLab. '
                     'Possible under a different user. Skipping...'
                     .format(self.args.ssh_public_key))
        return public_key
python
{ "resource": "" }
q38991
GitHub2GitLab.add_project
train
def add_project(self):
    "Create project in gitlab if it does not exist"
    g = self.gitlab
    url = g['url'] + "/projects/" + g['repo']
    query = {'private_token': g['token']}
    if requests.get(url, params=query).status_code == requests.codes.ok:
        log.debug("project " + url + " already exists")
        return None
    log.info("add project " + g['repo'])
    url = g['url'] + "/projects"
    query['public'] = 'true'
    query['namespace'] = g['namespace']
    query['name'] = g['name']
    result = requests.post(url, params=query)
    if result.status_code != requests.codes.created:
        raise ValueError(result.text)
    log.debug("project " + g['repo'] + " added: " + result.text)
    return result.json()
python
{ "resource": "" }
q38992
GitHub2GitLab.unprotect_branches
train
def unprotect_branches(self):
    "Unprotect branches of the GitLab project; return how many were unprotected"
    g = self.gitlab
    url = g['url'] + "/projects/" + g['repo'] + "/repository/branches"
    query = {'private_token': g['token']}
    response = requests.get(url, params=query)
    response.raise_for_status()
    unprotected = 0
    for branch in response.json():
        if not branch['protected']:
            continue
        r = requests.put(url + "/" + branch['name'] + "/unprotect",
                         params=query)
        r.raise_for_status()
        unprotected += 1
    return unprotected
python
{ "resource": "" }
q38993
GitHub2GitLab.json_loads
train
def json_loads(payload):
    "Log the payload that cannot be parsed"
    try:
        parsed = json.loads(payload)
    except ValueError as e:
        log.error("unable to json.loads(" + payload + ")")
        raise e
    return parsed
python
{ "resource": "" }
q38994
SeqPrep._unassembled_reads1_out_file_name
train
def _unassembled_reads1_out_file_name(self): """Checks file name is set for reads1 output. Returns absolute path.""" if self.Parameters['-1'].isOn(): unassembled_reads1 = self._absolute( str(self.Parameters['-1'].Value)) else: raise ValueError("No reads1 (flag: -1) output path specified") return unassembled_reads1
python
{ "resource": "" }
q38995
SeqPrep._unassembled_reads2_out_file_name
train
def _unassembled_reads2_out_file_name(self): """Checks if file name is set for reads2 output. Returns absolute path.""" if self.Parameters['-2'].isOn(): unassembled_reads2 = self._absolute( str(self.Parameters['-2'].Value)) else: raise ValueError("No reads2 (flag -2) output path specified") return unassembled_reads2
python
{ "resource": "" }
q38996
SeqPrep._discarded_reads1_out_file_name
train
def _discarded_reads1_out_file_name(self): """Checks if file name is set for discarded reads1 output. Returns absolute path.""" if self.Parameters['-3'].isOn(): discarded_reads1 = self._absolute(str(self.Parameters['-3'].Value)) else: raise ValueError( "No discarded-reads1 (flag -3) output path specified") return discarded_reads1
python
{ "resource": "" }
q38997
SeqPrep._discarded_reads2_out_file_name
train
def _discarded_reads2_out_file_name(self): """Checks if file name is set for discarded reads2 output. Returns absolute path.""" if self.Parameters['-4'].isOn(): discarded_reads2 = self._absolute(str(self.Parameters['-4'].Value)) else: raise ValueError( "No discarded-reads2 (flag -4) output path specified") return discarded_reads2
python
{ "resource": "" }
q38998
SeqPrep._assembled_out_file_name
train
def _assembled_out_file_name(self): """Checks file name is set for assembled output. Returns absolute path.""" if self.Parameters['-s'].isOn(): assembled_reads = self._absolute(str(self.Parameters['-s'].Value)) else: raise ValueError( "No assembled-reads (flag -s) output path specified") return assembled_reads
python
{ "resource": "" }
q38999
SeqPrep._pretty_alignment_out_file_name
train
def _pretty_alignment_out_file_name(self): """Checks file name is set for pretty alignment output. Returns absolute path.""" if self.Parameters['-E'].isOn(): pretty_alignment = self._absolute(str(self.Parameters['-E'].Value)) else: raise ValueError( "No pretty-=alignment (flag -E) output path specified") return pretty_alignment
python
{ "resource": "" }