_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q16700
PayuManifest.make_link
train
def make_link(self, filepath): """ Payu integration function for creating symlinks in work directories which point back to the original file. """ # Check file exists. It may have been deleted but still in manifest if not os.path.exists(self.fullpath(filepath)): print('File not found: {filepath}'.format( filepath=self.fullpath(filepath))) if self.contains(filepath): print('removing from manifest') self.delete(filepath) self.needsync = True else: try: destdir = os.path.dirname(filepath) # Make destination directory if not already exists # Necessary because sometimes this is called before # individual model setup if not os.path.exists(destdir): os.makedirs(destdir) if self.copy_file(filepath): shutil.copy(self.fullpath(filepath), filepath) perm = (stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR) os.chmod(filepath, perm) else: make_symlink(self.fullpath(filepath), filepath) except Exception: action = 'copying' if self.copy_file else 'linking' print('payu: error: {action} orig: {orig} ' 'local: {local}'.format(action=action, orig=self.fullpath(filepath), local=filepath)) raise
python
{ "resource": "" }
q16701
Manifest.add_filepath
train
def add_filepath(self, manifest, filepath, fullpath, copy=False):
    """Delegate to PayuManifest.add_filepath for the named manifest.

    Prevents outside code from directly calling anything in
    PayuManifest.  The path is normalised first, and a link is only
    created when the path was actually added to the manifest.
    """
    target = self.manifests[manifest]
    normalised = os.path.normpath(filepath)
    added = target.add_filepath(normalised, fullpath, copy)
    if added:
        # Only link if filepath was added
        target.make_link(normalised)
python
{ "resource": "" }
q16702
commit_hash
train
def commit_hash(dir='.'):
    """Return the commit hash for HEAD of the checkout in *dir*.

    Returns None when ``git rev-parse`` fails (e.g. *dir* is not a git
    repository).
    """
    try:
        # Silence git's stderr chatter; we only care about the hash.
        with open(os.devnull, 'w') as devnull:
            output = subprocess.check_output(
                ['git', 'rev-parse', 'HEAD'],
                cwd=dir,
                stderr=devnull,
            )
    except subprocess.CalledProcessError:
        return None
    if sys.version_info.major > 2:
        # check_output returns bytes on Python 3.
        output = output.decode('ascii')
    return output.strip()
python
{ "resource": "" }
q16703
Runlog.create_manifest
train
def create_manifest(self):
    """Construct the list of files to be tracked by the runlog.

    Populates ``self.manifest`` with: the experiment's top-level config
    file (if present), each model's required and optional config files,
    and the paths of the experiment's file manifests.
    """
    config_path = os.path.join(self.expt.control_path, DEFAULT_CONFIG_FNAME)
    self.manifest = []
    if os.path.isfile(config_path):
        self.manifest.append(config_path)
    for model in self.expt.models:
        # Both mandatory and optional config files are tracked.
        config_files = model.config_files + model.optional_config_files
        self.manifest.extend(os.path.join(model.control_path, f)
                             for f in config_files)
    # Add file manifests to runlog manifest
    for mf in self.expt.manifest:
        self.manifest.append(mf.path)
python
{ "resource": "" }
q16704
Runlog.push
train
def push(self):
    """Push the changes to the remote repository.

    Usage: payu push

    This command pushes local runlog changes to the remote runlog
    repository, currently named `payu`, using the SSH key associated
    with this experiment.  For an experiment `test`, it is equivalent
    to the following command::

        ssh-agent bash -c "
            ssh-add $HOME/.ssh/payu/id_rsa_payu_test
            git push --all payu
        "
    """
    expt_name = self.config.get('name', self.expt.name)
    default_ssh_key = 'id_rsa_payu_' + expt_name
    ssh_key = self.config.get('sshid', default_ssh_key)
    ssh_key_path = os.path.join(os.path.expanduser('~'), '.ssh',
                                'payu', ssh_key)
    # Fail hard when the key is missing; `payu ghsetup` creates it.
    if not os.path.isfile(ssh_key_path):
        print('payu: error: Github SSH key {key} not found.'
              ''.format(key=ssh_key_path))
        print('payu: error: Run `payu ghsetup` to generate a new key.')
        sys.exit(-1)
    # Wrap the push in ssh-agent so the per-experiment key is used.
    cmd = ('ssh-agent bash -c "ssh-add {key}; git push --all payu"'
           ''.format(key=ssh_key_path))
    subprocess.check_call(shlex.split(cmd), cwd=self.expt.control_path)
python
{ "resource": "" }
q16705
add_expiration_postfix
train
def add_expiration_postfix(expiration):
    """Format the expiration version, appending a version postfix if needed.

    A bare major version ("57") becomes "57.0a1"; a "<major>.0" version
    becomes "<major>.0a1"; anything else is returned unchanged.

    :param expiration: the expiration version string.
    :return: the (possibly) modified expiration string.
    """
    is_bare_major = re.match(r'^[1-9][0-9]*$', expiration)
    if is_bare_major:
        return expiration + ".0a1"
    is_major_dot_zero = re.match(r'^[1-9][0-9]*\.0$', expiration)
    if is_major_dot_zero:
        return expiration + "a1"
    return expiration
python
{ "resource": "" }
q16706
load_yaml_file
train
def load_yaml_file(filename):
    """Load a YAML file from disk; raise ParserError on failure.

    :param filename: path of the YAML file to read.
    :return: the deserialized YAML content.
    :raises ParserError: if the file cannot be opened or parsed.
    """
    try:
        with open(filename, 'r') as f:
            return yaml.safe_load(f)
    except IOError as e:
        # str(e), not e.message: the `.message` attribute was removed in
        # Python 3 and raises AttributeError there.
        raise ParserError('Error opening ' + filename + ': ' + str(e))
    except (ValueError, yaml.YAMLError) as e:
        # yaml.safe_load reports malformed documents via yaml.YAMLError,
        # which is NOT a ValueError; catch both so the docstring's
        # "ParserError on failure" contract actually holds.
        raise ParserError('Error parsing processes in {}: {}'
                          .format(filename, str(e)))
python
{ "resource": "" }
q16707
StringTable.writeDefinition
train
def writeDefinition(self, f, name):
    """Writes the string table to a file as a C const char array.

    This writes out the string table as one single C char array for
    memory size reasons, separating the individual strings with '\\0'
    characters. This way we can index directly into the string array
    and avoid the additional storage costs for the pointers to them
    (and potential extra relocations for those).

    :param f: the output stream.
    :param name: the name of the output array.
    """
    # Python 3: dict.items() is a view with no .sort(); use sorted()
    # (ordering by offset, as before).
    entries = sorted(self.table.items(), key=lambda x: x[1])

    # Avoid null-in-string warnings with GCC and potentially
    # overlong string constants; write everything out the long way.
    def explodeToCharArray(string):
        def toCChar(s):
            if s == "'":
                return "'\\''"
            else:
                return "'%s'" % s
        return ", ".join(map(toCChar, string))

    f.write("const char %s[] = {\n" % name)
    for (string, offset) in entries:
        # "*/" inside the string would terminate the C comment early.
        if "*/" in string:
            raise ValueError(
                "String in string table contains unexpected sequence"
                " '*/': %s" % string)
        e = explodeToCharArray(string)
        if e:
            # Reuse the already-exploded form instead of recomputing it.
            f.write(" /* %5d - \"%s\" */ %s, '\\0',\n"
                    % (offset, string, e))
        else:
            f.write(" /* %5d - \"%s\" */ '\\0',\n" % (offset, string))
    f.write("};\n\n")
python
{ "resource": "" }
q16708
comments
train
def comments(context, obj):
    """ Render comments for obj.

    Looks up admin LogEntry rows attached to *obj* (matched by content
    type and primary key) whose action_flag is COMMENT, and returns the
    template context for rendering them.
    """
    content_type = ContentType.objects.get_for_model(obj.__class__)
    # Only log entries explicitly flagged as comments are shown.
    comment_list = LogEntry.objects.filter(
        content_type=content_type,
        object_id=obj.pk,
        action_flag=COMMENT
    )
    return {
        'obj': obj,
        'comment_list': comment_list,
        # Passed through from the enclosing template context.
        'is_admin': context['is_admin'],
    }
python
{ "resource": "" }
q16709
get_disk_quota
train
def get_disk_quota(username, machine_name=None):
    """ Returns disk quota for username in KB

    Returns the string 'Account not found' when no active account
    exists for *username*, and False when the account has no quota set.
    (NOTE(review): mixed return types — str / bool / int — callers must
    handle all three.)
    """
    try:
        # Only consider accounts that have not been deleted.
        ua = Account.objects.get(
            username=username, date_deleted__isnull=True)
    except Account.DoesNotExist:
        return 'Account not found'
    result = ua.get_disk_quota()
    if result is None:
        return False
    # 1048576 == 1024**2; presumably the stored quota is in GB and this
    # converts it to KB — TODO confirm against Account.get_disk_quota.
    return result * 1048576
python
{ "resource": "" }
q16710
snap_to_beginning_of_week
train
def snap_to_beginning_of_week(day, weekday_start="Sunday"):
    """ Get the first day of the current week.

    :param day: The input date to snap.
    :param weekday_start: Either "Monday" or "Sunday", indicating the
        first day of the week.
    :returns: A date representing the first day of the current week.
    """
    # Use `==`, not `is`: string identity relies on CPython interning,
    # is not guaranteed, and raises SyntaxWarning on modern Pythons.
    if weekday_start == "Sunday":
        # weekday() is 0 for Monday; shift so Sunday counts as day 0.
        delta_days = (day.weekday() + 1) % 7
    else:
        delta_days = day.weekday()
    return day - timedelta(days=delta_days)
python
{ "resource": "" }
q16711
get_last_week_range
train
def get_last_week_range(weekday_start="Sunday"):
    """Gets the date for the first and the last day of the previous
    complete week.

    :param weekday_start: Either "Monday" or "Sunday", indicating the
        first day of the week.
    :returns: A tuple containing two date objects, for the first and the
        last day of the week respectively.
    """
    # Snap today to the start of the current week, then step back one
    # full week to land on the previous complete week.
    start_of_week = (snap_to_beginning_of_week(date.today(), weekday_start)
                     - timedelta(weeks=1))
    end_of_week = start_of_week + timedelta(days=6)
    return (start_of_week, end_of_week)
python
{ "resource": "" }
q16712
get_last_month_range
train
def get_last_month_range():
    """Gets the date for the first and the last day of the previous
    complete month.

    :returns: A tuple containing two date objects, for the first and the
        last day of the month respectively.
    """
    # One day before the start of this month is the last day of the
    # previous month; snapping that gives the previous month's start.
    end_of_last_month = (snap_to_beginning_of_month(date.today())
                         - timedelta(days=1))
    start_of_last_month = snap_to_beginning_of_month(end_of_last_month)
    return (start_of_last_month, end_of_last_month)
python
{ "resource": "" }
q16713
read_main_summary
train
def read_main_summary(spark,
                      submission_date_s3=None,
                      sample_id=None,
                      mergeSchema=True,
                      path='s3://telemetry-parquet/main_summary/v4'):
    """ Efficiently read main_summary parquet data.

    Read data from the given path, optionally filtering to a specified
    set of partition values first. This can save time, particularly if
    `mergeSchema` is True.

    Args:
        spark: Spark session
        submission_date_s3: Optional list of values to filter the
            `submission_date_s3` partition. Default is to read all
            partitions. Each value should be in the form `YYYYMMDD`.
        sample_id: Optional list of values to filter the `sample_id`
            partition. Default is to read all partitions.
        mergeSchema (bool): Determines whether or not to merge the
            schemas of the resulting parquet files (ie. whether to
            support schema evolution or not). Default is to merge
            schemas.
        path (str): Location (disk or S3) from which to read data.
            Default is to read from the "production" location on S3.

    Returns:
        A DataFrame loaded from the specified partitions.
    """
    base_path = path
    # Specifying basePath retains the partition fields even
    # if we read a bunch of paths separately.
    reader = spark.read.option("basePath", base_path)
    if mergeSchema:
        reader = reader.option("mergeSchema", "true")
    # Case 1: filter by date only — read just those date partitions.
    if submission_date_s3 is not None and sample_id is None:
        paths = ["{}/submission_date_s3={}/".format(base_path, s)
                 for s in submission_date_s3]
        return reader.parquet(*paths)
    # Case 2: filter by both date and sample — read the cross product
    # of (date, sample_id) partition directories.
    if submission_date_s3 is not None and sample_id is not None:
        paths = []
        for sd in submission_date_s3:
            for si in sample_id:
                paths.append("{}/submission_date_s3={}/sample_id={}/".format(
                    base_path, sd, si))
        return reader.parquet(*paths)
    # Case 3: filter by sample only — read everything, then filter.
    if submission_date_s3 is None and sample_id is not None:
        # Ugh, why? We would have to iterate the entire path to identify
        # all the submission_date_s3 partitions, which may end up being
        # slower.
        data = reader.parquet(base_path)
        sids = ["{}".format(s) for s in sample_id]
        criteria = "sample_id IN ({})".format(",".join(sids))
        return data.where(criteria)
    # Neither partition is filtered.
    return reader.parquet(base_path)
python
{ "resource": "" }
q16714
sampler
train
def sampler(dataframe, modulo, column="client_id", sample_id=42):
    """ Collect a sample of clients given an input column

    Filter dataframe based on the modulus of the CRC32 of a given string
    column matching a given sample_id. if dataframe has already been
    filtered by sample_id, then modulo should be a multiple of 100,
    column should be "client_id", and the given sample_id should match
    the value previously used, optionally plus multiples of 100.

    Args:
        dataframe: A Dataframe to be sampled
        modulo (int): selects a 1/modulo sampling of dataframe
        column (str): name of a string column to sample on
        sample_id (int): modulus result to select for sampling

    Returns:
        A DataFrame sampled on the given inputs.
    """
    # NOTE(review): relies on module-level `udf` and `crc32` (presumably
    # pyspark.sql.functions.udf and zlib.crc32) — confirm at import site.
    # `& 0xffffffff` normalises the CRC to an unsigned 32-bit value.
    return dataframe \
        .withColumn(
            "sampler",
            udf(lambda key: (crc32(key or "") & 0xffffffff) % modulo)(column),
        ).where("sampler = %s" % sample_id).drop("sampler")
python
{ "resource": "" }
q16715
progress
train
def progress(request):
    """ Check status of task.

    GET with ?delete clears all cache models and renders a confirmation
    page.  POST with a `task_id` returns the task's state as JSON.
    Other requests return None.
    """
    if 'delete' in request.GET:
        # Flush every cache table before reporting.
        models.MachineCache.objects.all().delete()
        models.InstituteCache.objects.all().delete()
        models.PersonCache.objects.all().delete()
        models.ProjectCache.objects.all().delete()
        return render(
            template_name='main.html',
            context={'content': 'Deleted'},
            request=request)
    if request.method == 'POST':
        if 'task_id' in request.POST:
            result = Task.AsyncResult(request.POST['task_id'])
            if result.failed():
                # Hide the failure details; report readiness only.
                value = {
                    'info': {},
                    'ready': result.ready(),
                }
            else:
                value = {
                    'info': result.info,
                    'ready': result.ready(),
                }
            return HttpResponse(
                json.dumps(value), content_type="application/json")
    # Unsupported request shape: no response produced.
    return None
python
{ "resource": "" }
q16716
synchronise
train
def synchronise(func):
    """ If task already queued, running, or finished, don't restart.

    Decorator: the wrapped task-launching view is guarded by a cache
    lock keyed on (date, function name, args).  If the lock is free the
    task is started and its id stored; otherwise the existing task's
    result is returned (or None once it has finished and been forgotten).
    """
    def inner(request, *args):
        # One lock per function+args per day.
        lock_id = '%s-%s-built-%s' % (
            datetime.date.today(), func.__name__,
            ",".join([str(a) for a in args]))
        if cache.add(lock_id, 'true', LOCK_EXPIRE):
            # We won the lock: launch the task and record its id.
            result = func(request, *args)
            cache.set(lock_id, result.task_id)
        else:
            task_id = cache.get(lock_id)
            if not task_id:
                return None
            # NOTE(review): the lock value is cleared before checking
            # readiness; confirm this is the intended hand-off protocol.
            cache.set(lock_id, "")
            result = Task.AsyncResult(task_id)
            if result.ready():
                result.forget()
                return None
        return result
    return inner
python
{ "resource": "" }
q16717
_group_by_size_greedy
train
def _group_by_size_greedy(obj_list, tot_groups): """Partition a list of objects in even buckets The idea is to choose the bucket for an object in a round-robin fashion. The list of objects is sorted to also try to keep the total size in bytes as balanced as possible. :param obj_list: a list of dict-like objects with a 'size' property :param tot_groups: number of partitions to split the data into. :return: a list of lists, one for each partition. """ sorted_list = sorted(obj_list, key=lambda x: x['size'], reverse=True) groups = [[] for _ in range(tot_groups)] for index, obj in enumerate(sorted_list): current_group = groups[index % len(groups)] current_group.append(obj) return groups
python
{ "resource": "" }
q16718
_group_by_equal_size
train
def _group_by_equal_size(obj_list, tot_groups, threshold=pow(2, 32)): """Partition a list of objects evenly and by file size Files are placed according to largest file in the smallest bucket. If the file is larger than the given threshold, then it is placed in a new bucket by itself. :param obj_list: a list of dict-like objects with a 'size' property :param tot_groups: number of partitions to split the data :param threshold: the maximum size of each bucket :return: a list of lists, one for each partition """ sorted_obj_list = sorted([(obj['size'], obj) for obj in obj_list], reverse=True) groups = [(random.random(), []) for _ in range(tot_groups)] if tot_groups <= 1: groups = _group_by_size_greedy(obj_list, tot_groups) return groups heapq.heapify(groups) for obj in sorted_obj_list: if obj[0] > threshold: heapq.heappush(groups, (obj[0], [obj[1]])) else: size, files = heapq.heappop(groups) size += obj[0] files.append(obj[1]) heapq.heappush(groups, (size, files)) groups = [group[1] for group in groups] return groups
python
{ "resource": "" }
q16719
Dataset.select
train
def select(self, *properties, **aliased_properties):
    """Specify which properties of the dataset must be returned

    Property extraction is based on `JMESPath <http://jmespath.org>`_
    expressions.  This method returns a new Dataset narrowed down by the
    given selection; the receiver is left untouched.

    :param properties: JMESPath to use for the property extraction.
        The JMESPath string will be used as a key in the output
        dictionary.
    :param aliased_properties: Same as properties, but the output
        dictionary will contain the parameter name instead of the
        JMESPath string.
    """
    if not properties and not aliased_properties:
        return self
    # Plain properties are keyed by themselves; aliases override.
    requested = {prop: prop for prop in properties}
    requested.update(aliased_properties)
    for key in requested:
        if key in self.selection:
            raise Exception(
                'The property {} has already been selected'.format(key))
    combined = self.selection.copy()
    combined.update(requested)
    return self._copy(selection=combined)
python
{ "resource": "" }
q16720
Dataset.where
train
def where(self, **kwargs):
    """Return a new Dataset refined using the given condition

    :param kwargs: a map of `dimension` => `condition` to filter the
        elements of the dataset. `condition` can either be an exact
        value or a callable returning a boolean value. If `condition`
        is a value, it is converted to a string, then sanitized.
        If `condition` is a callable, note that it will be passed
        sanitized values -- i.e., characters outside [a-zA-Z0-9_.] are
        converted to `_`.
    """
    updated = copy(self.clauses)
    for dimension, condition in kwargs.items():
        if dimension in self.clauses:
            raise Exception(
                'There should be only one clause for {}'.format(dimension))
        if dimension not in self.schema:
            raise Exception(
                'The dimension {} doesn\'t exist'.format(dimension))
        callable_condition = (isfunction(condition)
                              or isinstance(condition, functools.partial))
        if callable_condition:
            updated[dimension] = condition
        else:
            # Exact values are sanitized and wrapped in an equality check.
            sanitized = self._sanitize_dimension(str(condition))
            updated[dimension] = functools.partial(
                (lambda x, y: x == y), sanitized)
    return self._copy(clauses=updated)
python
{ "resource": "" }
q16721
Dataset.summaries
train
def summaries(self, sc, limit=None):
    """Summary of the files contained in the current dataset

    Every item in the summary is a dict containing a key name and the
    corresponding size of the key item in bytes, e.g.::

        {'key': 'full/path/to/my/key', 'size': 200}

    :param limit: Max number of objects to retrieve
    :return: An iterable of summaries
    """
    clauses = copy(self.clauses)
    schema = self.schema
    if self.prefix:
        schema = ['prefix'] + schema
        # Add a clause for the prefix that always returns True, in case
        # the output is not filtered at all (so that we do a scan/filter
        # on the prefix directory)
        clauses['prefix'] = lambda x: True
    # Scan the bucket tree concurrently, then list the matching keys.
    with futures.ThreadPoolExecutor(self.max_concurrency) as executor:
        scanned = self._scan(schema, [self.prefix], clauses, executor)
    keys = sc.parallelize(scanned).flatMap(self.store.list_keys)
    # take() when a limit is requested, otherwise materialise everything.
    return keys.take(limit) if limit else keys.collect()
python
{ "resource": "" }
q16722
Dataset.records
train
def records(self, sc, group_by='greedy', limit=None, sample=1, seed=42,
            decode=None, summaries=None):
    """Retrieve the elements of a Dataset

    :param sc: a SparkContext object
    :param group_by: specifies a partition strategy for the objects
    :param limit: maximum number of objects to retrieve
    :param decode: an optional transformation to apply to the objects
        retrieved
    :param sample: percentage of results to return. Useful to return a
        sample of the dataset. This parameter is ignored when `limit`
        is set.
    :param seed: initialize internal state of the random number
        generator (42 by default). This is used to make the dataset
        sampling reproducible. It can be set to None to obtain
        different samples.
    :param summaries: an iterable containing a summary for each item in
        the dataset. If None, it will be computed calling the summaries
        dataset.
    :return: a Spark rdd containing the elements retrieved
    """
    decode = decode or message_parser.parse_heka_message
    summaries = summaries or self.summaries(sc, limit)
    # Calculate the sample if summaries is not empty and limit is not set
    if summaries and limit is None and sample != 1:
        if sample < 0 or sample > 1:
            raise ValueError('sample must be between 0 and 1')
        print(
            "WARNING: THIS IS NOT A REPRESENTATIVE SAMPLE.\n"
            "This 'sampling' is based on s3 files and is highly\n"
            "susceptible to skew. Use only for quicker performance\n"
            "while prototyping."
        )
        # We want this sample to be reproducible.
        # See https://bugzilla.mozilla.org/show_bug.cgi?id=1318681
        # Save/restore the global RNG state so callers are unaffected.
        seed_state = random.getstate()
        try:
            random.seed(seed)
            summaries = random.sample(summaries,
                                      int(len(summaries) * sample))
        finally:
            random.setstate(seed_state)
    # Obtain size in MB
    total_size = functools.reduce(lambda acc, item: acc + item['size'],
                                  summaries, 0)
    total_size_mb = total_size / float(1 << 20)
    print("fetching %.5fMB in %s files..."
          % (total_size_mb, len(summaries)))
    # Partition the file summaries with the requested strategy; 10x the
    # default parallelism partitions are created.
    if group_by == 'equal_size':
        groups = _group_by_equal_size(summaries, 10*sc.defaultParallelism)
    elif group_by == 'greedy':
        groups = _group_by_size_greedy(summaries, 10*sc.defaultParallelism)
    else:
        raise Exception("group_by specification is invalid")
    self._compile_selection()
    keys = (
        sc.parallelize(groups, len(groups))
        .flatMap(lambda x: x)
        .map(lambda x: x['key'])
    )
    # Fetch each key from the store and decode it into dicts.
    file_handles = keys.map(self.store.get_key)
    # decode(fp: file-object) -> list[dict]
    data = file_handles.flatMap(decode)
    return data.map(self._apply_selection)
python
{ "resource": "" }
q16723
Dataset.dataframe
train
def dataframe(self, spark, group_by='greedy', limit=None, sample=1,
              seed=42, decode=None, summaries=None, schema=None,
              table_name=None):
    """Convert RDD returned from records function to a dataframe

    :param spark: a SparkSession object
    :param group_by: specifies a partition strategy for the objects
    :param limit: maximum number of objects to retrieve
    :param decode: an optional transformation to apply to the objects
        retrieved
    :param sample: percentage of results to return. Useful to return a
        sample of the dataset. This parameter is ignored when 'limit'
        is set.
    :param seed: initialize internal state of the random number
        generator (42 by default). This is used to make the dataset
        sampling reproducible. It can be set to None to obtain
        different samples.
    :param summaries: an iterable containing the summary for each item
        in the dataset. If None, it will be computed calling the
        summaries dataset.
    :param schema: a Spark schema that overrides automatic conversion
        to a dataframe
    :param table_name: allows resulting dataframe to easily be queried
        using SparkSQL
    :return: a Spark DataFrame
    """
    rdd = self.records(spark.sparkContext, group_by, limit, sample,
                       seed, decode, summaries)
    if not schema:
        # Infer the schema from the record dicts via Row objects.
        df = rdd.map(lambda d: Row(**d)).toDF()
    else:
        df = spark.createDataFrame(rdd, schema=schema)
    if table_name:
        # Register as a temp view so it is reachable from SparkSQL.
        df.createOrReplaceTempView(table_name)
    return df
python
{ "resource": "" }
q16724
Dataset.from_source
train
def from_source(source_name):
    """Create a Dataset configured for the given source_name

    This is particularly convenient when the user doesn't know the
    list of dimensions or the bucket name, but only the source name.

    Usage example::

        records = Dataset.from_source('telemetry').where(
            docType='main',
            submissionDate='20160701',
            appUpdateChannel='nightly'
        )
    """
    # Source definitions live in a shared metadata bucket on S3.
    meta_bucket = 'net-mozaws-prod-us-west-2-pipeline-metadata'
    store = S3Store(meta_bucket)

    try:
        source = json.loads(
            store.get_key('sources.json').read().decode('utf-8'))[source_name]
    except KeyError:
        raise Exception('Unknown source {}'.format(source_name))

    # The per-source schema file lists the partition dimensions in order.
    schema = store.get_key('{}/schema.json'.format(
        source['metadata_prefix'])).read().decode('utf-8')
    dimensions = [f['field_name']
                  for f in json.loads(schema)['dimensions']]
    return Dataset(source['bucket'], dimensions, prefix=source['prefix'])
python
{ "resource": "" }
q16725
send_bounced_warning
train
def send_bounced_warning(person, leader_list):
    """Sends an email to each project leader for person informing them
    that person's email has bounced"""
    context = CONTEXT.copy()
    context['person'] = person
    # One email per (leader, project) pair in leader_list.
    for lp in leader_list:
        leader = lp['leader']
        context['project'] = lp['project']
        context['receiver'] = leader
        to_email = leader.email
        subject = render_to_string(
            'karaage/people/emails/bounced_email_subject.txt', context)
        body = render_to_string(
            'karaage/people/emails/bounced_email_body.txt', context)
        # Subjects must be single-line; strip template newlines.
        send_mail(
            subject.replace('\n', ''), body,
            settings.ACCOUNTS_EMAIL, [to_email])
        # Record the notification against the leader in the audit log.
        log.change(
            leader,
            'Sent email about bounced emails from %s' % person)
python
{ "resource": "" }
q16726
send_reset_password_email
train
def send_reset_password_email(person):
    """Sends an email to user allowing them to set their password."""
    # NOTE(review): on Django >= 2.2 urlsafe_base64_encode already
    # returns str, so the .decode("ascii") would fail — this code
    # presumably targets an older Django; confirm.
    uid = urlsafe_base64_encode(force_bytes(person.pk)).decode("ascii")
    token = default_token_generator.make_token(person)
    url = '%s/persons/reset/%s/%s/' % (
        settings.REGISTRATION_BASE_URL, uid, token)
    context = CONTEXT.copy()
    context.update({
        'url': url,
        'receiver': person,
    })
    to_email = person.email
    subject, body = render_email('reset_password', context)
    send_mail(subject, body, settings.ACCOUNTS_EMAIL, [to_email])
python
{ "resource": "" }
q16727
send_confirm_password_email
train
def send_confirm_password_email(person):
    """Sends an email to user allowing them to confirm their password."""
    # Build the login URL for this person's username.
    url = '%s/profile/login/%s/' % (
        settings.REGISTRATION_BASE_URL, person.username)
    context = CONTEXT.copy()
    context.update({
        'url': url,
        'receiver': person,
    })
    subject, body = render_email('confirm_password', context)
    send_mail(subject, body, settings.ACCOUNTS_EMAIL, [person.email])
python
{ "resource": "" }
q16728
StateWaitingForApproval.check_can_approve
train
def check_can_approve(self, request, application, roles):
    """ Check the person's authorization.

    True iff the requesting user is among the people authorised for
    this application.
    """
    try:
        # .get() raises Person.DoesNotExist when the user is absent.
        persons = self.get_authorised_persons(application)
        persons.get(pk=request.user.pk)
    except Person.DoesNotExist:
        return False
    return True
python
{ "resource": "" }
q16729
StateWaitingForApproval.enter_state
train
def enter_state(self, request, application):
    """ This is becoming the new current state.

    Notify every authorised person that the application awaits their
    approval.
    """
    recipients = self.get_email_persons(application)
    link, is_secret = self.get_request_email_link(application)
    emails.send_request_email(
        self.authorised_text,
        self.authorised_role,
        recipients,
        application,
        link,
        is_secret)
python
{ "resource": "" }
q16730
StateWithSteps.add_step
train
def add_step(self, step, step_id):
    """Register *step* under *step_id*.

    Steps run in insertion order, so the first step added becomes the
    initial step.
    """
    # Reject duplicate ids and non-Step objects up front.
    assert step_id not in self._steps
    assert step_id not in self._order
    assert isinstance(step, Step)

    self._steps[step_id] = step
    self._order.append(step_id)
python
{ "resource": "" }
q16731
_init_datastores
train
def _init_datastores():
    """ Initialize all datastores.

    Instantiates every datastore configured in settings.DATASTORES and
    appends it to the module-level _DATASTORES list.  Also honours the
    deprecated MACHINE_CATEGORY_DATASTORES setting (ldap entries only),
    emitting a deprecation warning when present.
    """
    global _DATASTORES
    array = settings.DATASTORES
    for config in array:
        # ENGINE names the datastore class; _lookup resolves it.
        cls = _lookup(config['ENGINE'])
        ds = _get_datastore(cls, DataStore, config)
        _DATASTORES.append(ds)
    legacy_settings = getattr(settings, 'MACHINE_CATEGORY_DATASTORES', None)
    if legacy_settings is not None:
        warnings.warn(
            "MACHINE_CATEGORY_DATASTORES is deprecated, "
            "please change to use DATASTORES",
        )
        # Only the 'ldap' category is migrated from the legacy setting.
        for name in ['ldap']:
            array = settings.MACHINE_CATEGORY_DATASTORES.get(name, [])
            for config in array:
                cls = _lookup(config['ENGINE'])
                ds = _get_datastore(cls, DataStore, config)
                _DATASTORES.append(ds)
python
{ "resource": "" }
q16732
get_group_details
train
def get_group_details(group):
    """ Get group details.

    Collects the group's details from every configured datastore,
    tagging each entry with the datastore's description.
    """
    details = []
    for datastore in _get_datastores():
        entry = datastore.get_group_details(group)
        entry['datastore'] = datastore.config['DESCRIPTION']
        details.append(entry)
    return details
python
{ "resource": "" }
q16733
set_project_pid
train
def set_project_pid(project, old_pid, new_pid):
    """ Project's PID was changed.

    Saves the project and propagates the PID rename to every datastore.
    """
    for datastore in _get_datastores():
        # Persist the project first, then apply the rename.
        datastore.save_project(project)
        datastore.set_project_pid(project, old_pid, new_pid)
python
{ "resource": "" }
q16734
add_accounts_to_group
train
def add_accounts_to_group(accounts_query, group):
    """ Add accounts to group.

    Only accounts that have not been deleted are considered.
    """
    for account in accounts_query.filter(date_deleted__isnull=True):
        add_account_to_group(account, group)
python
{ "resource": "" }
q16735
remove_accounts_from_group
train
def remove_accounts_from_group(accounts_query, group):
    """ Remove accounts from group.

    Only accounts that have not been deleted are considered.
    """
    for account in accounts_query.filter(date_deleted__isnull=True):
        remove_account_from_group(account, group)
python
{ "resource": "" }
q16736
add_accounts_to_project
train
def add_accounts_to_project(accounts_query, project):
    """ Add accounts to project.

    Only accounts that have not been deleted are considered.
    """
    for account in accounts_query.filter(date_deleted__isnull=True):
        add_account_to_project(account, project)
python
{ "resource": "" }
q16737
remove_accounts_from_project
train
def remove_accounts_from_project(accounts_query, project):
    """ Remove accounts from project.

    Only accounts that have not been deleted are considered.
    """
    for account in accounts_query.filter(date_deleted__isnull=True):
        remove_account_from_project(account, project)
python
{ "resource": "" }
q16738
add_accounts_to_institute
train
def add_accounts_to_institute(accounts_query, institute):
    """ Add accounts to institute.

    Only accounts that have not been deleted are considered.
    """
    for account in accounts_query.filter(date_deleted__isnull=True):
        add_account_to_institute(account, institute)
python
{ "resource": "" }
q16739
remove_accounts_from_institute
train
def remove_accounts_from_institute(accounts_query, institute):
    """ Remove accounts from institute.

    Only accounts that have not been deleted are considered.
    """
    for account in accounts_query.filter(date_deleted__isnull=True):
        remove_account_from_institute(account, institute)
python
{ "resource": "" }
q16740
MamDataStoreBase._filter_string
train
def _filter_string(value): """ Filter the string so MAM doesn't have heart failure.""" if value is None: value = "" # replace whitespace with space value = value.replace("\n", " ") value = value.replace("\t", " ") # CSV seperator value = value.replace("|", " ") # remove leading/trailing whitespace value = value.strip() # hack because MAM doesn't quote sql correctly value = value.replace("\\", "") # Used for stripping non-ascii characters value = ''.join(c for c in value if 31 < ord(c) < 127) return value
python
{ "resource": "" }
q16741
MamDataStoreBase.get_user
train
def get_user(self, username):
    """ Get the user details from MAM.

    Returns the single matching record dict, or None when the user does
    not exist.  Raises RuntimeError on multiple matches or a username
    mismatch in the returned record.
    """
    cmd = ["glsuser", "-u", username, "--raw"]
    results = self._read_output(cmd)

    if len(results) == 0:
        return None
    elif len(results) > 1:
        # glsuser should match at most one user for an exact -u query.
        logger.error(
            "Command returned multiple results for '%s'." % username)
        raise RuntimeError(
            "Command returned multiple results for '%s'." % username)

    the_result = results[0]
    the_name = the_result["Name"]
    # Sanity check: MAM usernames are compared case-insensitively.
    if username.lower() != the_name.lower():
        logger.error(
            "We expected username '%s' but got username '%s'."
            % (username, the_name))
        raise RuntimeError(
            "We expected username '%s' but got username '%s'."
            % (username, the_name))

    return the_result
python
{ "resource": "" }
q16742
MamDataStoreBase.get_user_balance
train
def get_user_balance(self, username):
    """ Get the user balance details from MAM.

    Returns the raw list of balance records, or None when the user has
    none.
    """
    output = self._read_output(["gbalance", "-u", username, "--raw"])
    if len(output) == 0:
        return None
    return output
python
{ "resource": "" }
q16743
MamDataStoreBase.get_users_in_project
train
def get_users_in_project(self, projectname):
    """ Get list of users in project from MAM.

    Raises RuntimeError when the project does not exist in MAM.
    """
    ds_project = self.get_project(projectname)
    if ds_project is None:
        logger.error(
            "Project '%s' does not exist in MAM" % projectname)
        raise RuntimeError(
            "Project '%s' does not exist in MAM" % projectname)
    users_field = ds_project["Users"]
    if users_field == "":
        # No members recorded for this project.
        return []
    return users_field.lower().split(",")
python
{ "resource": "" }
q16744
MamDataStoreBase.get_projects_in_user
train
def get_projects_in_user(self, username):
    """ Get list of projects in user from MAM.

    Derived from the user's balance records; an unknown user yields an
    empty list.
    """
    ds_balance = self.get_user_balance(username)
    if ds_balance is None:
        return []
    return [bal["Name"] for bal in ds_balance]
python
{ "resource": "" }
q16745
MamDataStoreBase.get_account_details
train
def get_account_details(self, account):
    """ Get the account details.

    Returns an empty dict when the account is unknown to MAM.
    """
    details = self.get_user(account.username)
    return {} if details is None else details
python
{ "resource": "" }
q16746
MamDataStoreBase.get_project_details
train
def get_project_details(self, project):
    """ Get the project details.

    Returns an empty dict when the project is unknown to MAM.
    """
    details = self.get_project(project.pid)
    return {} if details is None else details
python
{ "resource": "" }
q16747
MamDataStoreBase.delete_institute
train
def delete_institute(self, institute):
    """ Called when institute is deleted.

    Removes the matching Organization record from MAM.
    """
    name = institute.name
    logger.debug("institute_deleted '%s'" % name)
    # institute deleted
    self._call(["goldsh", "Organization", "Delete", "Name==%s" % name])
    logger.debug("returning")
python
{ "resource": "" }
q16748
MamDataStore71.add_account_to_project
train
def add_account_to_project(self, account, project):
    """ Add account to project.

    MAM exit code 74 is ignored (presumably "user already a member" —
    TODO confirm against the MAM gchproject documentation).
    """
    cmd = [
        "gchproject",
        "--add-user", account.username,
        "-p", project.pid,
    ]
    self._call(cmd, ignore_errors=[74])
python
{ "resource": "" }
q16749
show
train
def show(fig, width=600):
    """
    Renders a Matplotlib figure in Zeppelin.

    :param fig: a Matplotlib figure
    :param width: the width in pixel of the rendered figure, defaults to 600

    Usage example::

        import matplotlib.pyplot as plt
        from moztelemetry.zeppelin import show

        fig = plt.figure()
        plt.plot([1, 2, 3])
        show(fig)
    """
    img = StringIO()
    fig.savefig(img, format='svg')
    img.seek(0)
    # NOTE(review): `.buf` exists on the Python 2 StringIO.StringIO object;
    # io.StringIO has no such attribute -- confirm which StringIO is
    # imported at module level.
    print("%html <div style='width:{}px'>{}</div>".format(width, img.buf))
python
{ "resource": "" }
q16750
default_hub
train
def default_hub(hub_name, genome, email, short_label=None, long_label=None):
    """
    Returns a fully-connected set of hub components using default filenames.

    Parameters
    ----------
    hub_name : str
        Name of the hub
    genome : str
        Assembly name (hg38, dm6, etc)
    email : str
        Email to include with hub.
    short_label : str
        Short label for the hub. If None, defaults to the value of `hub_name`
    long_label : str
        Long label for the hub. If None, defaults to the value of
        `short_label`.

    Returns
    -------
    (hub, genomes_file, genome, trackdb) tuple of connected components.
    """
    if short_label is None:
        short_label = hub_name
    if long_label is None:
        long_label = short_label
    hub = Hub(
        hub=hub_name,
        short_label=short_label,
        long_label=long_label,
        email=email)
    genome = Genome(genome)
    genomes_file = GenomesFile()
    trackdb = TrackDb()
    # wire the components together: hub -> genomes file -> genome -> trackDb
    hub.add_genomes_file(genomes_file)
    genomes_file.add_genome(genome)
    genome.add_trackdb(trackdb)
    return hub, genomes_file, genome, trackdb
python
{ "resource": "" }
q16751
from_files
train
def from_files(filenames, strict_type_checks=True):
    """Return an iterator that provides a sequence of Histograms for
    the histograms defined in filenames.

    :param filenames: iterable of histogram definition file paths.
    :param strict_type_checks: when True, the whitelist is loaded and
        each Histogram is validated strictly.
    :raises ParserError: on duplicate names, unordered parser output,
        non-contiguous use counters, or orphaned whitelist entries.
    """
    if strict_type_checks:
        load_whitelist()

    all_histograms = OrderedDict()

    for filename in filenames:
        parser = FILENAME_PARSERS[os.path.basename(filename)]
        histograms = parser(filename, strict_type_checks)

        # OrderedDicts are important, because then the iteration order over
        # the parsed histograms is stable, which makes the insertion into
        # all_histograms stable, which makes ordering in generated files
        # stable, which makes builds more deterministic.
        if not isinstance(histograms, OrderedDict):
            raise ParserError("Histogram parser did not provide an OrderedDict.")

        for name, definition in iteritems(histograms):
            if name in all_histograms:
                raise ParserError('Duplicate histogram name "%s".' % name)
            all_histograms[name] = definition

    # We require that all USE_COUNTER2_* histograms be defined in a contiguous
    # block.  Materialize the filter result: on Python 3, filter() returns a
    # lazy iterator that is always truthy and cannot be indexed.
    use_counter_indices = list(filter(
        lambda x: x[1].startswith("USE_COUNTER2_"),
        enumerate(iterkeys(all_histograms))))
    if use_counter_indices:
        lower_bound = use_counter_indices[0][0]
        upper_bound = use_counter_indices[-1][0]
        n_counters = upper_bound - lower_bound + 1
        if n_counters != len(use_counter_indices):
            raise ParserError("Use counter histograms must be defined in a contiguous block.")

    # Check that histograms that were removed from Histograms.json etc.
    # are also removed from the whitelists.
    if whitelists is not None:
        # .values() works on both Python 2 and 3 (.itervalues() is py2-only)
        all_whitelist_entries = itertools.chain.from_iterable(whitelists.values())
        orphaned = set(all_whitelist_entries) - set(iterkeys(all_histograms))
        if len(orphaned) > 0:
            msg = 'The following entries are orphaned and should be removed from ' \
                  'histogram-whitelists.json:\n%s'
            raise ParserError(msg % (', '.join(sorted(orphaned))))

    for name, definition in iteritems(all_histograms):
        yield Histogram(name, definition, strict_type_checks=strict_type_checks)
python
{ "resource": "" }
q16752
Histogram.ranges
train
def ranges(self):
    """Return an array of lower bounds for each bucket in the histogram.

    Dispatches on the histogram kind: everything except 'exponential'
    uses linearly-spaced buckets.

    :raises ParserError: for an unknown histogram kind.
    """
    bucket_fns = {
        'boolean': linear_buckets,
        'flag': linear_buckets,
        'count': linear_buckets,
        'enumerated': linear_buckets,
        'categorical': linear_buckets,
        'linear': linear_buckets,
        'exponential': exponential_buckets,
    }

    if self._kind not in bucket_fns:
        raise ParserError('Unknown kind "%s" for histogram "%s".' % (self._kind, self._name))

    fn = bucket_fns[self._kind]
    return fn(self.low(), self.high(), self.n_buckets())
python
{ "resource": "" }
q16753
apply_extra_context
train
def apply_extra_context(extra_context, context):
    """
    Copy every item of ``extra_context`` into ``context`` in place.

    Callable values are invoked and their return value is stored
    instead of the callable itself.
    """
    for key, value in six.iteritems(extra_context):
        context[key] = value() if callable(value) else value
python
{ "resource": "" }
q16754
get_model_and_form_class
train
def get_model_and_form_class(model, form_class):
    """
    Returns a model and form class based on the model and form_class
    parameters that were passed to the generic view.

    If ``form_class`` is given then its associated model will be returned
    along with ``form_class`` itself.  Otherwise, if ``model`` is given,
    ``model`` itself will be returned along with a ``ModelForm`` class
    created from ``model``.

    :raises GenericViewError: when neither argument is supplied.
    """
    if form_class:
        return form_class._meta.model, form_class
    if model:
        # The inner Meta class fails if model = model is used for some reason.
        tmp_model = model

        # TODO: we should be able to construct a ModelForm without creating
        # and passing in a temporary inner class.
        class Meta:
            model = tmp_model
        class_name = model.__name__ + 'Form'
        form_class = ModelFormMetaclass(
            class_name, (ModelForm,), {'Meta': Meta})
        return model, form_class
    raise GenericViewError("Generic view must be called with either a model or"
                           " form_class argument.")
python
{ "resource": "" }
q16755
redirect
train
def redirect(post_save_redirect, obj):
    """
    Returns a HttpResponseRedirect to ``post_save_redirect``.

    ``post_save_redirect`` should be a string, and can contain named string-
    substitution place holders of ``obj`` field names.

    If ``post_save_redirect`` is None, then redirect to ``obj``'s URL returned
    by ``get_absolute_url()``.  If ``obj`` has no ``get_absolute_url`` method,
    then raise ImproperlyConfigured.

    This function is meant to handle the post_save_redirect parameter to the
    ``create_object`` and ``update_object`` views.
    """
    if post_save_redirect:
        # %-interpolate field names from the object's attribute dict
        return HttpResponseRedirect(post_save_redirect % obj.__dict__)
    elif hasattr(obj, 'get_absolute_url'):
        return HttpResponseRedirect(obj.get_absolute_url())
    else:
        raise ImproperlyConfigured(
            "No URL to redirect to. Either pass a post_save_redirect"
            " parameter to the generic view or define a get_absolute_url"
            " method on the Model.")
python
{ "resource": "" }
q16756
lookup_object
train
def lookup_object(model, object_id, slug, slug_field):
    """
    Return the ``model`` object with the passed ``object_id``.  If
    ``object_id`` is None, then return the object whose ``slug_field``
    equals the passed ``slug``.

    :raises GenericViewError: if neither an object_id nor a slug/slug_field
        pair was supplied.
    :raises Http404: if no matching object exists.
    """
    lookup_kwargs = {}
    if object_id:
        lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id
    elif slug and slug_field:
        lookup_kwargs['%s__exact' % slug_field] = slug
    else:
        raise GenericViewError(
            "Generic view must be called with either an object_id or a"
            " slug/slug_field.")
    try:
        return model.objects.get(**lookup_kwargs)
    except ObjectDoesNotExist:
        raise Http404("No %s found for %s" %
                      (model._meta.verbose_name, lookup_kwargs))
python
{ "resource": "" }
q16757
create_object
train
def create_object(
        request, model=None, template_name=None, template_loader=loader,
        extra_context=None, post_save_redirect=None, login_required=False,
        context_processors=None, form_class=None):
    """
    Generic object-creation function.

    Templates: ``<app_label>/<model_name>_form.html``
    Context:
        form
            the form for the object
    """
    if extra_context is None:
        extra_context = {}
    if login_required and not request.user.is_authenticated:
        return redirect_to_login(request.path)

    model, form_class = get_model_and_form_class(model, form_class)
    if request.method == 'POST':
        form = form_class(request.POST, request.FILES)
        if form.is_valid():
            new_object = form.save()
            msg = ugettext("The %(verbose_name)s was created successfully.") %\
                {"verbose_name": model._meta.verbose_name}
            messages.success(request, msg, fail_silently=True)
            return redirect(post_save_redirect, new_object)
        # invalid POST falls through and re-renders the bound form below
    else:
        form = form_class()

    # Create the template, context, response
    if not template_name:
        template_name = "%s/%s_form.html" % (
            model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = {
        'form': form,
    }
    apply_extra_context(extra_context, c)
    return HttpResponse(t.render(context=c, request=request))
python
{ "resource": "" }
q16758
update_object
train
def update_object(
        request, model=None, object_id=None, slug=None, slug_field='slug',
        template_name=None, template_loader=loader, extra_context=None,
        post_save_redirect=None, login_required=False,
        context_processors=None, template_object_name='object',
        form_class=None):
    """
    Generic object-update function.

    Templates: ``<app_label>/<model_name>_form.html``
    Context:
        form
            the form for the object
        object
            the original object being edited
    """
    if extra_context is None:
        extra_context = {}
    if login_required and not request.user.is_authenticated:
        return redirect_to_login(request.path)

    model, form_class = get_model_and_form_class(model, form_class)
    obj = lookup_object(model, object_id, slug, slug_field)

    if request.method == 'POST':
        form = form_class(request.POST, request.FILES, instance=obj)
        if form.is_valid():
            obj = form.save()
            msg = ugettext("The %(verbose_name)s was updated successfully.") %\
                {"verbose_name": model._meta.verbose_name}
            messages.success(request, msg, fail_silently=True)
            return redirect(post_save_redirect, obj)
        # invalid POST falls through and re-renders the bound form below
    else:
        form = form_class(instance=obj)

    if not template_name:
        template_name = "%s/%s_form.html" % (
            model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = {
        'form': form,
        template_object_name: obj,
    }
    apply_extra_context(extra_context, c)
    response = HttpResponse(t.render(context=c, request=request))
    return response
python
{ "resource": "" }
q16759
delete_object
train
def delete_object(
        request, model, post_delete_redirect, object_id=None, slug=None,
        slug_field='slug', template_name=None, template_loader=loader,
        extra_context=None, login_required=False, context_processors=None,
        template_object_name='object'):
    """
    Generic object-delete function.

    The given template will be used to confirm deletion if this view is
    fetched using GET; for safety, deletion will only be performed if this
    view is POSTed.

    Templates: ``<app_label>/<model_name>_confirm_delete.html``
    Context:
        object
            the original object being deleted
    """
    if extra_context is None:
        extra_context = {}
    if login_required and not request.user.is_authenticated:
        return redirect_to_login(request.path)

    obj = lookup_object(model, object_id, slug, slug_field)

    if request.method == 'POST':
        obj.delete()
        msg = ugettext("The %(verbose_name)s was deleted.") %\
            {"verbose_name": model._meta.verbose_name}
        messages.success(request, msg, fail_silently=True)
        return HttpResponseRedirect(post_delete_redirect)
    else:
        if not template_name:
            template_name = "%s/%s_confirm_delete.html" % (
                model._meta.app_label, model._meta.object_name.lower())
        t = template_loader.get_template(template_name)
        c = {
            template_object_name: obj,
        }
        apply_extra_context(extra_context, c)
        response = HttpResponse(t.render(context=c, request=request))
        return response
python
{ "resource": "" }
q16760
get_url
train
def get_url(request, application, roles, label=None):
    """Retrieve an application link that will work for the current user.

    Decides between the normal (login-protected) URL and the
    secret-token URL based on who is viewing the application.
    """
    args = []
    if label is not None:
        args.append(label)

    # don't use secret_token unless we have to
    if 'is_admin' in roles:
        # Administrators can access anything without secrets
        require_secret = False
    elif 'is_applicant' not in roles:
        # we never give secrets to anybody but the applicant
        require_secret = False
    elif not request.user.is_authenticated:
        # If applicant is not logged in, we redirect them to secret URL
        require_secret = True
    elif request.user != application.applicant:
        # If logged in as different person, we redirect them to secret
        # URL. This could happen if the application was open with a different
        # email address, and the applicant is logged in when accessing it.
        require_secret = True
    else:
        # otherwise redirect them to URL that requires correct login.
        require_secret = False

    # return required url
    if not require_secret:
        url = reverse(
            'kg_application_detail',
            args=[application.pk, application.state] + args)
    else:
        url = reverse(
            'kg_application_unauthenticated',
            args=[application.secret_token, application.state] + args)
    return url
python
{ "resource": "" }
q16761
get_admin_email_link
train
def get_admin_email_link(application):
    """Return ``(url, is_secret)`` for an admin-facing application link.

    Admin links never use the secret token, so ``is_secret`` is False.
    """
    url = '{0}/applications/{1}/'.format(
        settings.ADMIN_BASE_URL, application.pk)
    return url, False
python
{ "resource": "" }
q16762
get_registration_email_link
train
def get_registration_email_link(application):
    """Return ``(url, is_secret)`` for a link emailed to logged-in users.

    These links never use the secret token, so ``is_secret`` is False.
    """
    url = '{0}/applications/{1}/'.format(
        settings.REGISTRATION_BASE_URL, application.pk)
    return url, False
python
{ "resource": "" }
q16763
get_email_link
train
def get_email_link(application): """ Retrieve a link that can be emailed to the applicant. """ # don't use secret_token unless we have to if (application.content_type.model == 'person' and application.applicant.has_usable_password()): url = '%s/applications/%d/' % ( settings.REGISTRATION_BASE_URL, application.pk) is_secret = False else: url = '%s/applications/%s/' % ( settings.REGISTRATION_BASE_URL, application.secret_token) is_secret = True return url, is_secret
python
{ "resource": "" }
q16764
StateMachine.start
train
def start(self, request, application, extra_roles=None): """ Continue the state machine at first state. """ # Get the authentication of the current user roles = self._get_roles_for_request(request, application) if extra_roles is not None: roles.update(extra_roles) # Ensure current user is authenticated. If user isn't applicant, # leader, delegate or admin, they probably shouldn't be here. if 'is_authorised' not in roles: return HttpResponseForbidden('<h1>Access Denied</h1>') # Go to first state. return self._next(request, application, roles, self._first_state)
python
{ "resource": "" }
q16765
StateMachine.process
train
def process(
        self, request, application, expected_state, label,
        extra_roles=None):
    """Process the view request at the application's current state.

    GET requests never change state; POST requests may trigger a state
    transition via :meth:`_next`.
    """
    # Get the authentication of the current user
    roles = self._get_roles_for_request(request, application)
    if extra_roles is not None:
        roles.update(extra_roles)

    # Ensure current user is authenticated. If user isn't applicant,
    # leader, delegate or admin, they probably shouldn't be here.
    if 'is_authorised' not in roles:
        return HttpResponseForbidden('<h1>Access Denied</h1>')

    # If user didn't supply state on URL, redirect to full URL.
    if expected_state is None:
        url = get_url(request, application, roles, label)
        return HttpResponseRedirect(url)

    # Check that the current state is valid.
    if application.state not in self._config:
        raise RuntimeError("Invalid current state '%s'" % application.state)

    # If state user expected is different to state we are in, warn user
    # and jump to expected state.
    if expected_state != application.state:
        # post data will be lost
        if request.method == "POST":
            messages.warning(
                request,
                "Discarding request and jumping to current state.")
        # note we discard the label, it probably isn't relevant for new
        # state
        url = get_url(request, application, roles)
        return HttpResponseRedirect(url)

    # Get the current state for this application
    state_config = self._config[application.state]

    # Finally do something
    instance = load_state_instance(state_config)
    if request.method == "GET":
        # if method is GET, state does not ever change.
        response = instance.get_next_config(request, application, label, roles)
        assert isinstance(response, HttpResponse)
        return response
    elif request.method == "POST":
        # if method is POST, it can return a HttpResponse or a string
        response = instance.get_next_config(request, application, label, roles)
        if isinstance(response, HttpResponse):
            # If it returned a HttpResponse, state not changed, just
            # display
            return response
        else:
            # If it returned a string, lookit up in the actions for this
            # state
            next_config = response
            # Go to the next state
            return self._next(request, application, roles, next_config)
    else:
        # Shouldn't happen, user did something weird
        return HttpResponseBadRequest("<h1>Bad Request</h1>")
python
{ "resource": "" }
q16766
StateMachine._get_roles_for_request
train
def _get_roles_for_request(request, application):
    """Return the set of role names the requesting user has.

    Note: takes no ``self`` -- presumably decorated as a staticmethod at
    the (unseen) class definition; confirm against the full source.
    """
    roles = application.get_roles_for_person(request.user)
    if common.is_admin(request):
        roles.add("is_admin")
        roles.add('is_authorised')
    return roles
python
{ "resource": "" }
q16767
StateMachine._next
train
def _next(self, request, application, roles, next_config): """ Continue the state machine at given state. """ # we only support state changes for POST requests if request.method == "POST": key = None # If next state is a transition, process it while True: # We do not expect to get a direct state transition here. assert next_config['type'] in ['goto', 'transition'] while next_config['type'] == 'goto': key = next_config['key'] next_config = self._config[key] instance = load_instance(next_config) if not isinstance(instance, Transition): break next_config = instance.get_next_config(request, application, roles) # lookup next state assert key is not None state_key = key # enter that state instance.enter_state(request, application) application.state = state_key application.save() # log details log.change(application.application_ptr, "state: %s" % instance.name) # redirect to this new state url = get_url(request, application, roles) return HttpResponseRedirect(url) else: return HttpResponseBadRequest("<h1>Bad Request</h1>")
python
{ "resource": "" }
q16768
State.get_next_action
train
def get_next_action(self, request, application, label, roles): """ Django view method. We provide a default detail view for applications. """ # We only provide a view for when no label provided if label is not None: return HttpResponseBadRequest("<h1>Bad Request</h1>") # only certain actions make sense for default view actions = self.get_actions(request, application, roles) # process the request in default view if request.method == "GET": context = self.context context.update({ 'application': application, 'actions': actions, 'state': self.name, 'roles': roles}) return render( template_name='kgapplications/common_detail.html', context=context, request=request) elif request.method == "POST": for action in actions: if action in request.POST: return action # we don't know how to handle this request. return HttpResponseBadRequest("<h1>Bad Request</h1>")
python
{ "resource": "" }
q16769
Histogram.get_value
train
def get_value(self, only_median=False, autocast=True):
    """
    Returns a scalar for flag and count histograms. Otherwise it returns
    either the raw histogram represented as a pandas Series or just the
    median if only_median is True.

    If autocast is disabled the underlying pandas series is always
    returned as is.
    """
    if not autocast:
        return self.buckets
    if self.kind in ["exponential", "linear", "enumerated", "boolean"]:
        return float(self.percentile(50)) if only_median else self.buckets
    elif self.kind == "categorical" and not only_median:
        return self.buckets
    elif self.kind == "count":
        return int(self.buckets[0])
    elif self.kind == "flag":
        return self.buckets[1] == 1
    else:
        # unreachable for known kinds; note that a categorical histogram
        # with only_median=True also lands here and trips the assert
        assert(False)
python
{ "resource": "" }
q16770
Histogram.percentile
train
def percentile(self, percentile):
    """Return the nth percentile of the histogram.

    Uses linear interpolation within the bucket containing the
    requested mass, except at the last bucket or an empty bucket,
    where the bucket's lower boundary is returned.
    """
    assert(percentile >= 0 and percentile <= 100)
    assert(self.kind in ["exponential", "linear", "enumerated", "boolean"])

    # 100.0: a float literal keeps this correct under Python 2 integer
    # division (50 / 100 == 0 there); identical behavior on Python 3
    fraction = percentile / 100.0
    to_count = fraction * self.buckets.sum()
    percentile_bucket = 0

    for percentile_bucket in range(len(self.buckets)):
        freq = self.buckets.values[percentile_bucket]
        if to_count - freq <= 0:
            break
        to_count -= freq

    percentile_lower_boundary = self.buckets.index[percentile_bucket]
    percentile_frequency = self.buckets.values[percentile_bucket]

    if percentile_bucket == len(self.buckets) - 1 or percentile_frequency == 0:
        return percentile_lower_boundary

    width = self.buckets.index[percentile_bucket + 1] - self.buckets.index[percentile_bucket]
    return percentile_lower_boundary + width * to_count / percentile_frequency
python
{ "resource": "" }
q16771
BaseTrack.tracktype
train
def tracktype(self, tracktype):
    """
    When setting the track type, the valid parameters for this track type
    need to be set as well.
    """
    self._tracktype = tracktype
    if tracktype is not None:
        # normalize bed/wig variants (e.g. "bigBed 6 +") to the canonical
        # keys used by constants.track_typespecific_fields
        if 'bed' in tracktype.lower():
            tracktype = 'bigBed'
        elif 'wig' in tracktype.lower():
            tracktype = 'bigWig'
        self.params.update(constants.track_typespecific_fields[tracktype])
python
{ "resource": "" }
q16772
BaseTrack.add_subgroups
train
def add_subgroups(self, subgroups):
    """
    Update the subgroup assignments for this track.

    Note that in contrast to :class:`CompositeTrack`, which takes a list
    of :class:`SubGroupDefinition` objects describing the allowed
    subgroups, this takes a single dict mapping subgroup names to the
    values for this particular track, e.g.
    ``{'celltype': 'K562', 'treatment': 'a'}``.
    """
    new_groups = {} if subgroups is None else subgroups
    assert isinstance(new_groups, dict)
    self.subgroups.update(new_groups)
python
{ "resource": "" }
q16773
BaseTrack._str_subgroups
train
def _str_subgroups(self): """ helper function to render subgroups as a string """ if not self.subgroups: return "" return ['subGroups %s' % ' '.join(['%s=%s' % (k, v) for (k, v) in self.subgroups.items()])]
python
{ "resource": "" }
q16774
CompositeTrack.add_subgroups
train
def add_subgroups(self, subgroups):
    """
    Set the allowed subgroups for this composite.

    Note that in contrast to :meth:`BaseTrack.add_subgroups`, which takes
    a dict of assignments for one track, this takes a list of
    :class:`SubGroupDefinition` objects describing the allowed subgroups.

    :param subgroups: List of SubGroupDefinition objects.
    """
    if subgroups is None:
        subgroups = {}
    registry = {}
    for definition in subgroups:
        assert isinstance(definition, SubGroupDefinition)
        registry[definition.name] = definition
    self.subgroups = registry
python
{ "resource": "" }
q16775
CompositeTrack.add_view
train
def add_view(self, view):
    """
    Attach a ViewTrack to this composite and record it in ``self.views``.

    :param view: A ViewTrack object.
    """
    # add_child() (HubComponent) returns its argument, so we can register
    # the view in the component tree and in the view list in one step
    self.views.append(self.add_child(view))
python
{ "resource": "" }
q16776
CompositeTrack._str_subgroups
train
def _str_subgroups(self):
    """
    Renders the composite's subgroup definitions to a list of trackDb
    ``subGroupN ...`` lines.
    """
    s = []
    i = 0
    # if there are any views, there must be a subGroup1 view View tag=val
    # as the first one. So create it automatically here
    if len(self.views) > 0:
        # (the generator's `i` is scoped to the genexp and does not
        # clobber the outer counter)
        mapping = dict((i.view, i.view) for i in self.views)
        view_subgroup = SubGroupDefinition(
            name='view',
            label='Views',
            mapping=mapping)
        i += 1
        s.append('subGroup%s %s' % (i, view_subgroup))
    for subgroup in self.subgroups.values():
        i += 1
        s.append('subGroup%s %s' % (i, subgroup))
    return s
python
{ "resource": "" }
q16777
ViewTrack.add_tracks
train
def add_tracks(self, subtracks):
    """
    Add one or more tracks to this view.

    subtracks : Track or iterable of Tracks
        A single Track instance or an iterable of them.
    """
    if isinstance(subtracks, Track):
        subtracks = [subtracks]
    for subtrack in subtracks:
        # every subtrack is implicitly assigned to this view's subgroup
        subtrack.subgroups['view'] = self.view
        self.add_child(subtrack)
        self.subtracks.append(subtrack)
python
{ "resource": "" }
q16778
SuperTrack.add_tracks
train
def add_tracks(self, subtracks):
    """
    Add one or more tracks.

    subtrack : Track or iterable of Tracks
    """
    if isinstance(subtracks, BaseTrack):
        subtracks = [subtracks]
    for subtrack in subtracks:
        self.add_child(subtrack)
        self.subtracks.append(subtrack)
python
{ "resource": "" }
q16779
load_scalars
train
def load_scalars(filename, strict_type_checks=True):
    """Parses a YAML file containing the scalar definition.

    :param filename: the YAML file containing the scalars definition.
    :param strict_type_checks: forwarded to ScalarType validation.
    :return: list of ScalarType objects.
    :raises ParserError: if the scalar file cannot be opened or parsed.
    """
    # Parse the scalar definitions from the YAML file.
    try:
        with open(filename, 'r') as f:
            scalars = yaml.safe_load(f)
    except IOError as e:
        # str(e): exception objects have no .message attribute on Python 3
        raise ParserError('Error opening ' + filename + ': ' + str(e))
    except ValueError as e:
        raise ParserError('Error parsing scalars in {}: {}'
                          '.\nSee: {}'.format(filename, str(e), BASE_DOC_URL))

    scalar_list = []

    # Scalars are defined in a fixed two-level hierarchy within the definition
    # file. The first level contains the category name, while the second level
    # contains the probe name (e.g. "category.name: probe: ...").
    for category_name in scalars:
        category = scalars[category_name]

        # Make sure that the category has at least one probe in it.
        if not category or len(category) == 0:
            # format the whole message at once; previously .format() was
            # applied only to the second literal, leaving "{}" unfilled and
            # printing the category name after "See:"
            raise ParserError('Category "{}" must have at least one probe in it'
                              '.\nSee: {}'.format(category_name, BASE_DOC_URL))

        for probe_name in category:
            # We found a scalar type. Go ahead and parse it.
            scalar_info = category[probe_name]
            scalar_list.append(
                ScalarType(category_name, probe_name, scalar_info,
                           strict_type_checks))

    return scalar_list
python
{ "resource": "" }
q16780
ScalarType.validate_values
train
def validate_values(self, definition):
    """This function checks that the fields have the correct values.

    :param definition: the dictionary containing the scalar properties.
    :raises ParserError: if a scalar definition field contains an
        unexpected value.
    """
    # All checks are skipped in non-strict mode (used for historical files).
    if not self._strict_type_checks:
        return

    # Validate the scalar kind.
    scalar_kind = definition.get('kind')
    if scalar_kind not in SCALAR_TYPES_MAP.keys():
        raise ParserError(self._name + ' - unknown scalar kind: ' +
                          scalar_kind + '.\nSee: {}'.format(BASE_DOC_URL))

    # Validate the collection policy.
    collection_policy = definition.get('release_channel_collection', None)
    if collection_policy and collection_policy not in ['opt-in', 'opt-out']:
        raise ParserError(self._name + ' - unknown collection policy: ' +
                          collection_policy +
                          '.\nSee: {}#optional-fields'.format(BASE_DOC_URL))

    # Validate the cpp_guard (must not contain non-word characters).
    cpp_guard = definition.get('cpp_guard')
    if cpp_guard and re.match(r'\W', cpp_guard):
        raise ParserError(self._name + ' - invalid cpp_guard: ' +
                          cpp_guard +
                          '.\nSee: {}#optional-fields'.format(BASE_DOC_URL))

    # Validate record_in_processes.
    record_in_processes = definition.get('record_in_processes', [])
    for proc in record_in_processes:
        if not utils.is_valid_process_name(proc):
            raise ParserError(self._name +
                              ' - unknown value in record_in_processes: ' +
                              proc + '.\nSee: {}'.format(BASE_DOC_URL))

    # Validate the expiration version.
    # Historical versions of Scalars.json may contain expiration versions
    # using the deprecated format 'N.Na1'. Those scripts set
    # self._strict_type_checks to false.
    expires = definition.get('expires')
    if not utils.validate_expiration_version(expires) and self._strict_type_checks:
        raise ParserError('{} - invalid expires: {}.\nSee: {}#required-fields'
                          .format(self._name, expires, BASE_DOC_URL))
python
{ "resource": "" }
q16781
HubComponent.add_child
train
def add_child(self, child):
    """
    Attach *child* to this component, set its parent pointer to self,
    and return the child.
    """
    self.children.append(child)
    child.parent = self
    return child
python
{ "resource": "" }
q16782
HubComponent.add_parent
train
def add_parent(self, parent):
    """
    Register this component as a child of *parent* and return the parent.
    """
    self.parent = parent
    # add_child() re-assigns self.parent to the same object, so the
    # explicit assignment above is order-independent
    parent.add_child(self)
    return parent
python
{ "resource": "" }
q16783
HubComponent.root
train
def root(self, cls=None, level=0):
    """
    Returns the top-most HubComponent in the hierarchy.

    If `cls` is not None, then return the top-most attribute HubComponent
    that is an instance of class `cls`.

    Returns a ``(component, level)`` tuple; ``(None, None)`` when `cls` is
    given and no component on the path to the root matches.

    For a fully-constructed track hub (and `cls=None`), this should return
    a Hub object for every component in the hierarchy.
    """
    if cls is None:
        if self.parent is None:
            return self, level
    else:
        if isinstance(self, cls):
            # stop at the outermost run of `cls` instances: self matches
            # but its parent does not
            if not isinstance(self.parent, cls):
                return self, level
    if self.parent is None:
        return None, None
    # NOTE(review): level is decremented while walking *up*, so returned
    # levels are <= the starting level -- confirm this is intended.
    return self.parent.root(cls, level - 1)
python
{ "resource": "" }
q16784
HubComponent.leaves
train
def leaves(self, cls, level=0, intermediate=False):
    """
    Yield ``(leaf, level)`` pairs for HubComponent leaves of class `cls`.

    If `intermediate` is True, also yield matching intermediate
    (non-leaf) components.
    """
    if intermediate:
        if isinstance(self, cls):
            yield self, level
    elif len(self.children) == 0:
        if isinstance(self, cls):
            yield self, level
        else:
            # PEP 479: `raise StopIteration` inside a generator becomes a
            # RuntimeError on Python 3.7+; a plain return ends iteration.
            return
    for child in self.children:
        for leaf, _level in child.leaves(cls, level + 1,
                                         intermediate=intermediate):
            yield leaf, _level
python
{ "resource": "" }
q16785
HubComponent.render
train
def render(self, staging=None):
    """
    Renders the object to file, returning an OrderedDict mapping each
    component's repr to the file(s) it created.

    Calls validation code, and, as long as each child is also a subclass
    of :class:`HubComponent`, the rendering is recursive.

    :param staging: directory to render into; a fresh temporary
        directory is created when None.
    """
    self.validate()
    created_files = OrderedDict()
    if staging is None:
        staging = tempfile.mkdtemp()
    this = self._render(staging)
    if this:
        created_files[repr(self)] = this
    for child in self.children:
        created_files[repr(child)] = child.render(staging)
    return created_files
python
{ "resource": "" }
q16786
send_request_email
train
def send_request_email(
        authorised_text, authorised_role, authorised_persons,
        application, link, is_secret):
    """Sends an email to admin asking to approve user application."""
    context = CONTEXT.copy()
    context['requester'] = application.applicant
    context['link'] = link
    context['is_secret'] = is_secret
    context['application'] = application
    context['authorised_text'] = authorised_text
    _send_request_email(
        context, authorised_role, authorised_persons, "common_request")
python
{ "resource": "" }
q16787
send_invite_email
train
def send_invite_email(application, link, is_secret):
    """Sends an email inviting someone to create an account.

    Silently does nothing when the applicant has no email address.
    """
    if not application.applicant.email:
        return

    context = CONTEXT.copy()
    context['receiver'] = application.applicant
    context['application'] = application
    context['link'] = link
    context['is_secret'] = is_secret

    to_email = application.applicant.email
    subject, body = render_email('common_invite', context)
    send_mail(subject, body, settings.ACCOUNTS_EMAIL, [to_email])
python
{ "resource": "" }
q16788
send_approved_email
train
def send_approved_email(
        application, created_person, created_account, link, is_secret):
    """Sends an email informing person application is approved.

    Silently does nothing when the applicant has no email address.
    """
    if not application.applicant.email:
        return

    context = CONTEXT.copy()
    context['receiver'] = application.applicant
    context['application'] = application
    context['created_person'] = created_person
    context['created_account'] = created_account
    context['link'] = link
    context['is_secret'] = is_secret

    subject, body = render_email('common_approved', context)
    to_email = application.applicant.email
    send_mail(subject, body, settings.ACCOUNTS_EMAIL, [to_email])
python
{ "resource": "" }
q16789
_add_person_to_group
train
def _add_person_to_group(person, group):
    """Call datastores after adding a person to a group.

    Membership is propagated to every project and institute backed by
    the group.
    """
    # imported lazily -- presumably to avoid circular imports at module
    # load time; confirm against karaage.datastores
    from karaage.datastores import add_accounts_to_group
    from karaage.datastores import add_accounts_to_project
    from karaage.datastores import add_accounts_to_institute

    a_list = person.account_set
    add_accounts_to_group(a_list, group)
    for project in group.project_set.all():
        add_accounts_to_project(a_list, project)
    for institute in group.institute_set.all():
        add_accounts_to_institute(a_list, institute)
python
{ "resource": "" }
q16790
_remove_person_from_group
train
def _remove_person_from_group(person, group):
    """Notify datastores after a person was removed from a group.

    Mirrors ``_add_person_to_group``: the person's accounts are removed
    from the group, then from every project and institute backed by it.
    """
    # Imports kept function-local — presumably to avoid an import cycle
    # with karaage.datastores; confirm before hoisting.
    from karaage.datastores import remove_accounts_from_group
    from karaage.datastores import remove_accounts_from_project
    from karaage.datastores import remove_accounts_from_institute

    accounts = person.account_set
    remove_accounts_from_group(accounts, group)
    for project in group.project_set.all():
        remove_accounts_from_project(accounts, project)
    for institute in group.institute_set.all():
        remove_accounts_from_institute(accounts, institute)
python
{ "resource": "" }
q16791
dimensions_from_subgroups
train
def dimensions_from_subgroups(s):
    """
    Build the ``dimensions`` string for a composite track.

    Pairs each subgroup with a dimension letter (X and Y first, then
    A, B, C, ...) and returns e.g. ``'dimX=cell dimY=ab'``.

    Parameters
    ----------
    s : list of SubGroup objects (or anything with a `name` attribute)
    """
    pairs = []
    for letter, subgroup in zip('XYABCDEFGHIJKLMNOPQRSTUVWZ', s):
        pairs.append('dim{0}={1}'.format(letter, subgroup.name))
    return ' '.join(pairs)
python
{ "resource": "" }
q16792
filter_composite_from_subgroups
train
def filter_composite_from_subgroups(s):
    """
    Build the ``filterComposite`` string for a composite track.

    The first two subgroups (dimX/dimY) are skipped; each remaining
    subgroup contributes one ``dimA``, ``dimB``, ... entry.  Returns
    None when there are two or fewer subgroups.

    >>> import trackhub
    >>> trackhub.helpers.filter_composite_from_subgroups(['cell', 'ab', 'lab', 'knockdown'])
    'dimA dimB'

    Parameters
    ----------
    s : list
        A list representing the ordered subgroups, ideally the same list
        provided to `dimensions_from_subgroups`.  Only its length is used.
    """
    dims = [
        'dim{0}'.format(letter)
        for letter, _ in zip('ABCDEFGHIJKLMNOPQRSTUVWZ', s[2:])
    ]
    if dims:
        return ' '.join(dims)
python
{ "resource": "" }
q16793
hex2rgb
train
def hex2rgb(h):
    """
    Convert a ``#rrggbb`` hex colour to an ``"R,G,B"`` string.

    Parameters
    ----------
    h : str
        String hex color value

    Raises
    ------
    ValueError
        If `h` is not 7 characters starting with ``#`` (or contains
        non-hex digits).

    >>> hex2rgb("#ff0033")
    '255,0,51'
    """
    if len(h) != 7 or not h.startswith('#'):
        raise ValueError("Does not look like a hex color: '{0}'".format(h))
    # Decode each two-character channel: red, green, blue.
    channels = [int(h[i:i + 2], 16) for i in (1, 3, 5)]
    return ','.join(str(c) for c in channels)
python
{ "resource": "" }
q16794
sanitize
train
def sanitize(s, strict=True):
    """
    Sanitize a string.

    Spaces are converted to underscore; if strict=True they are then
    removed.

    Parameters
    ----------
    s : str
        String to sanitize

    strict : bool
        If True, only alphanumeric characters are allowed. If False, a
        limited set of additional characters (-._) will be allowed.
    """
    # Set membership is O(1) per character; contents match the original
    # allowed-character string exactly.
    keep = set(
        'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        'abcdefghijklmnopqrstuvwxyz'
        '0123456789'
    )
    if not strict:
        keep.update('-_.')
    return ''.join(ch for ch in str(s).replace(' ', '_') if ch in keep)
python
{ "resource": "" }
q16795
auto_track_url
train
def auto_track_url(track):
    """
    Automatically sets the bigDataUrl for `track`.

    Requirements:

    * the track must be fully connected, such that its root is a Hub object
    * the root Hub object must have the Hub.url attribute set
    * the track must have the `source` attribute set

    Raises ValueError when any of these preconditions is not met.

    NOTE(review): despite the name and summary, the body visible here only
    validates the preconditions and never assigns bigDataUrl — confirm
    whether the assignment happens elsewhere or is missing.
    """
    # Walk up the component tree; None means the track is detached.
    hub = track.root(cls=Hub)
    if hub is None:
        raise ValueError(
            "track is not fully connected because the root is %s" % repr(hub))
    if hub.url is None:
        raise ValueError("hub.url is not set")
    if track.source is None:
        raise ValueError("track.source is not set")
python
{ "resource": "" }
q16796
print_rendered_results
train
def print_rendered_results(results_dict):
    """
    Pretty-prints the rendered results dictionary.

    Rendered results can be multiply-nested dictionaries; this uses JSON
    serialization to print a nice representation.  HubComponent values,
    which are not JSON-serializable, are rendered via repr().
    """
    class _Encoder(json.JSONEncoder):
        def default(self, o):
            if isinstance(o, base.HubComponent):
                return repr(o)
            return json.JSONEncoder.default(self, o)

    formatted = json.dumps(results_dict, indent=4, cls=_Encoder)
    # json.dumps emits trailing spaces on some lines, which breaks
    # doctests — strip each line before printing.
    print('\n'.join(line.rstrip() for line in formatted.splitlines()))
python
{ "resource": "" }
q16797
example_bigbeds
train
def example_bigbeds():
    """Return absolute paths of the example bigBed files in the data dir."""
    directory = data_dir()
    return [
        os.path.abspath(os.path.join(directory, name))
        for name in os.listdir(directory)
        if os.path.splitext(name)[-1] == '.bigBed'
    ]
python
{ "resource": "" }
q16798
get_colour
train
def get_colour(index):
    """Return the colour name for *index*, or purple when out of range.

    Indexes past the end of the palette fall back to the default; a
    negative index wraps around the palette, matching Python indexing.
    """
    palette = (
        'red',
        'blue',
        'green',
        'pink',
        'yellow',
        'magenta',
        'orange',
        'cyan',
    )
    return palette[index] if index < len(palette) else 'purple'
python
{ "resource": "" }
q16799
get_project_trend_graph_url
train
def get_project_trend_graph_url(project, start, end):
    """Return the PNG and CSV URLs of a project's trend bar graph.

    The shared base filename comes from
    ``get_project_trend_graph_filename``; both URLs are resolved against
    GRAPH_URL.
    """
    base_name = get_project_trend_graph_filename(project, start, end)
    return {
        'graph_url': urlparse.urljoin(GRAPH_URL, base_name + ".png"),
        'data_url': urlparse.urljoin(GRAPH_URL, base_name + ".csv"),
    }
python
{ "resource": "" }