code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
assert(percentile >= 0 and percentile <= 100)
assert(self.kind in ["exponential", "linear", "enumerated", "boolean"])
fraction = percentile / 100
to_count = fraction * self.buckets.sum()
percentile_bucket = 0
for percentile_bucket in range(len(self.buckets)):
freq = self.buckets.values[percentile_bucket]
if to_count - freq <= 0:
break
to_count -= freq
percentile_lower_boundary = self.buckets.index[percentile_bucket]
percentile_frequency = self.buckets.values[percentile_bucket]
if percentile_bucket == len(self.buckets) - 1 or percentile_frequency == 0:
return percentile_lower_boundary
width = self.buckets.index[percentile_bucket + 1] - self.buckets.index[percentile_bucket]
return percentile_lower_boundary + width * to_count / percentile_frequency | def percentile(self, percentile) | Returns the nth percentile of the histogram. | 2.895603 | 2.82505 | 1.024974 |
self._tracktype = tracktype
if tracktype is not None:
if 'bed' in tracktype.lower():
tracktype = 'bigBed'
elif 'wig' in tracktype.lower():
tracktype = 'bigWig'
self.params.update(constants.track_typespecific_fields[tracktype]) | def tracktype(self, tracktype) | When setting the track type, the valid parameters for this track type
need to be set as well. | 4.022697 | 3.906842 | 1.029654 |
for k, v in kw.items():
if (k not in self.params) and (k not in self.specific_params):
raise ParameterError('"%s" is not a valid parameter for %s'
% (k, self.__class__.__name__))
try:
self.params[k].validate(v)
except KeyError:
self.specific_params[k].validate(v)
self._orig_kwargs.update(kw)
self.kwargs = self._orig_kwargs.copy() | def add_params(self, **kw) | Add [possibly many] parameters to the track.
Parameters will be checked against known UCSC parameters and their
supported formats.
E.g.::
add_params(color='128,0,0', visibility='dense') | 2.745919 | 3.187603 | 0.861437 |
if subgroups is None:
subgroups = {}
assert isinstance(subgroups, dict)
self.subgroups.update(subgroups) | def add_subgroups(self, subgroups) | Update the subgroups for this track.
Note that in contrast to :meth:`CompositeTrack`, which takes a list of
:class:`SubGroupDefinition` objects representing the allowed subgroups,
this method takes a single dictionary indicating the particular
subgroups for this track.
Parameters
----------
subgroups : dict
Dictionary of subgroups, e.g., {'celltype': 'K562', 'treatment':
'a'}. Each key must match a SubGroupDefinition name in the
composite's subgroups list. Each value must match a key in that
SubGroupDefinition.mapping dictionary. | 2.835946 | 4.423713 | 0.641078 |
if not self.subgroups:
return ""
return ['subGroups %s'
% ' '.join(['%s=%s' % (k, v) for (k, v) in
self.subgroups.items()])] | def _str_subgroups(self) | helper function to render subgroups as a string | 4.922533 | 4.284947 | 1.148797 |
if subgroups is None:
subgroups = {}
_subgroups = {}
for sg in subgroups:
assert isinstance(sg, SubGroupDefinition)
_subgroups[sg.name] = sg
self.subgroups = _subgroups | def add_subgroups(self, subgroups) | Add a list of SubGroupDefinition objects to this composite.
Note that in contrast to :meth:`BaseTrack`, which takes a single
dictionary indicating the particular subgroups for the track, this
method takes a list of :class:`SubGroupDefinition` objects representing
the allowed subgroups for the composite.
:param subgroups:
List of SubGroupDefinition objects. | 2.963112 | 3.127321 | 0.947492 |
self.add_child(subtrack)
self.subtracks.append(subtrack) | def add_subtrack(self, subtrack) | Add a child :class:`Track`. | 3.455261 | 2.727696 | 1.266732 |
self.add_child(view)
self.views.append(view) | def add_view(self, view) | Add a ViewTrack object to this composite.
:param view:
A ViewTrack object. | 5.704218 | 6.619938 | 0.861672 |
s = []
i = 0
# if there are any views, there must be a subGroup1 view View tag=val
# as the first one. So create it automatically here
if len(self.views) > 0:
mapping = dict((i.view, i.view) for i in self.views)
view_subgroup = SubGroupDefinition(
name='view',
label='Views',
mapping=mapping)
i += 1
s.append('subGroup%s %s' % (i, view_subgroup))
for subgroup in self.subgroups.values():
i += 1
s.append('subGroup%s %s' % (i, subgroup))
return s | def _str_subgroups(self) | renders subgroups to a list of strings | 5.527036 | 5.199174 | 1.06306 |
if isinstance(subtracks, Track):
subtracks = [subtracks]
for subtrack in subtracks:
subtrack.subgroups['view'] = self.view
self.add_child(subtrack)
self.subtracks.append(subtrack) | def add_tracks(self, subtracks) | Add one or more tracks to this view.
subtracks : Track or iterable of Tracks
A single Track instance or an iterable of them. | 3.417533 | 3.553802 | 0.961655 |
if isinstance(subtracks, BaseTrack):
subtracks = [subtracks]
for subtrack in subtracks:
self.add_child(subtrack)
self.subtracks.append(subtrack) | def add_tracks(self, subtracks) | Add one or more tracks.
subtrack : Track or iterable of Tracks | 2.482996 | 2.815024 | 0.882052 |
scalar_dict = {}
# Scalars are defined in a fixed two-level hierarchy within the definition file.
# The first level contains the category name, while the second level contains the
# probe name (e.g. "category.name: probe: ...").
for category_name in scalars:
category = scalars[category_name]
for probe_name in category:
# We found a scalar type. Go ahead and parse it.
scalar_definition = category[probe_name]
# We pass |strict_type_checks=False| as we don't want to do any check
# server side. This includes skipping the checks for the required keys.
scalar_info = ScalarType(category_name, probe_name, scalar_definition,
strict_type_checks=False)
scalar_dict[scalar_info.label] = scalar_info
return scalar_dict | def _parse_scalars(scalars) | Parse the scalars from the YAML file content to a dictionary of ScalarType(s).
:return: A dictionary { 'full.scalar.label': ScalarType } | 6.508398 | 5.88831 | 1.105308 |
# Parse the scalar definitions from the YAML file.
scalars = None
try:
with open(filename, 'r') as f:
scalars = yaml.safe_load(f)
except IOError as e:
raise ParserError('Error opening ' + filename + ': ' + e.message)
except ValueError as e:
raise ParserError('Error parsing scalars in {}: {}'
'.\nSee: {}'.format(filename, e.message, BASE_DOC_URL))
scalar_list = []
# Scalars are defined in a fixed two-level hierarchy within the definition file.
# The first level contains the category name, while the second level contains the
# probe name (e.g. "category.name: probe: ...").
for category_name in scalars:
category = scalars[category_name]
# Make sure that the category has at least one probe in it.
if not category or len(category) == 0:
raise ParserError('Category "{}" must have at least one probe in it' +
'.\nSee: {}'.format(category_name, BASE_DOC_URL))
for probe_name in category:
# We found a scalar type. Go ahead and parse it.
scalar_info = category[probe_name]
scalar_list.append(
ScalarType(category_name, probe_name, scalar_info, strict_type_checks))
return scalar_list | def load_scalars(filename, strict_type_checks=True) | Parses a YAML file containing the scalar definition.
:param filename: the YAML file containing the scalars definition.
:raises ParserError: if the scalar file cannot be opened or parsed. | 3.361959 | 3.271274 | 1.027721 |
# Enforce a maximum length on category and probe names.
MAX_NAME_LENGTH = 40
for n in [category_name, probe_name]:
if len(n) > MAX_NAME_LENGTH:
raise ParserError(("Name '{}' exceeds maximum name length of {} characters.\n"
"See: {}#the-yaml-definition-file")
.format(n, MAX_NAME_LENGTH, BASE_DOC_URL))
def check_name(name, error_msg_prefix, allowed_char_regexp):
# Check if we only have the allowed characters.
chars_regxp = r'^[a-zA-Z0-9' + allowed_char_regexp + r']+$'
if not re.search(chars_regxp, name):
raise ParserError((error_msg_prefix + " name must be alpha-numeric. Got: '{}'.\n"
"See: {}#the-yaml-definition-file").format(name, BASE_DOC_URL))
# Don't allow leading/trailing digits, '.' or '_'.
if re.search(r'(^[\d\._])|([\d\._])$', name):
raise ParserError((error_msg_prefix + " name must not have a leading/trailing "
"digit, a dot or underscore. Got: '{}'.\n"
" See: {}#the-yaml-definition-file").format(name, BASE_DOC_URL))
check_name(category_name, 'Category', r'\.')
check_name(probe_name, 'Probe', r'_') | def validate_names(self, category_name, probe_name) | Validate the category and probe name:
- Category name must be alpha-numeric + '.', no leading/trailing digit or '.'.
- Probe name must be alpha-numeric + '_', no leading/trailing digit or '_'.
:param category_name: the name of the category the probe is in.
:param probe_name: the name of the scalar probe.
:raises ParserError: if the length of the names exceeds the limit or they don't
conform our name specification. | 3.090199 | 2.826063 | 1.093464 |
if not self._strict_type_checks:
return
# The required and optional fields in a scalar type definition.
REQUIRED_FIELDS = {
'bug_numbers': list, # This contains ints. See LIST_FIELDS_CONTENT.
'description': string_types,
'expires': string_types,
'kind': string_types,
'notification_emails': list, # This contains strings. See LIST_FIELDS_CONTENT.
'record_in_processes': list,
}
OPTIONAL_FIELDS = {
'cpp_guard': string_types,
'release_channel_collection': string_types,
'keyed': bool,
}
# The types for the data within the fields that hold lists.
LIST_FIELDS_CONTENT = {
'bug_numbers': int,
'notification_emails': string_types,
'record_in_processes': string_types,
}
# Concatenate the required and optional field definitions.
ALL_FIELDS = REQUIRED_FIELDS.copy()
ALL_FIELDS.update(OPTIONAL_FIELDS)
# Checks that all the required fields are available.
missing_fields = [f for f in REQUIRED_FIELDS.keys() if f not in definition]
if len(missing_fields) > 0:
raise ParserError(self._name + ' - missing required fields: ' +
', '.join(missing_fields) +
'.\nSee: {}#required-fields'.format(BASE_DOC_URL))
# Do we have any unknown field?
unknown_fields = [f for f in definition.keys() if f not in ALL_FIELDS]
if len(unknown_fields) > 0:
raise ParserError(self._name + ' - unknown fields: ' + ', '.join(unknown_fields) +
'.\nSee: {}#required-fields'.format(BASE_DOC_URL))
# Checks the type for all the fields.
wrong_type_names = ['{} must be {}'.format(f, utils.nice_type_name(ALL_FIELDS[f]))
for f in definition.keys()
if not isinstance(definition[f], ALL_FIELDS[f])]
if len(wrong_type_names) > 0:
raise ParserError(self._name + ' - ' + ', '.join(wrong_type_names) +
'.\nSee: {}#required-fields'.format(BASE_DOC_URL))
# Check that the lists are not empty and that data in the lists
# have the correct types.
list_fields = [f for f in definition if isinstance(definition[f], list)]
for field in list_fields:
# Check for empty lists.
if len(definition[field]) == 0:
raise ParserError(("Field '{}' for probe '{}' must not be empty" +
".\nSee: {}#required-fields)")
.format(field, self._name, BASE_DOC_URL))
# Check the type of the list content.
broken_types =\
[not isinstance(v, LIST_FIELDS_CONTENT[field]) for v in definition[field]]
if any(broken_types):
raise ParserError(("Field '{}' for probe '{}' must only contain values of type {}"
".\nSee: {}#the-yaml-definition-file)")
.format(field, self._name, utils.nice_type_name(LIST_FIELDS_CONTENT[field]),
BASE_DOC_URL)) | def validate_types(self, definition) | This function performs some basic sanity checks on the scalar definition:
- Checks that all the required fields are available.
- Checks that all the fields have the expected types.
:param definition: the dictionary containing the scalar properties.
:raises ParserError: if a scalar definition field is of the wrong type.
:raises ParserError: if a required field is missing or unknown fields are present. | 2.918466 | 2.791978 | 1.045304 |
if not self._strict_type_checks:
return
# Validate the scalar kind.
scalar_kind = definition.get('kind')
if scalar_kind not in SCALAR_TYPES_MAP.keys():
raise ParserError(self._name + ' - unknown scalar kind: ' + scalar_kind +
'.\nSee: {}'.format(BASE_DOC_URL))
# Validate the collection policy.
collection_policy = definition.get('release_channel_collection', None)
if collection_policy and collection_policy not in ['opt-in', 'opt-out']:
raise ParserError(self._name + ' - unknown collection policy: ' + collection_policy +
'.\nSee: {}#optional-fields'.format(BASE_DOC_URL))
# Validate the cpp_guard.
cpp_guard = definition.get('cpp_guard')
if cpp_guard and re.match(r'\W', cpp_guard):
raise ParserError(self._name + ' - invalid cpp_guard: ' + cpp_guard +
'.\nSee: {}#optional-fields'.format(BASE_DOC_URL))
# Validate record_in_processes.
record_in_processes = definition.get('record_in_processes', [])
for proc in record_in_processes:
if not utils.is_valid_process_name(proc):
raise ParserError(self._name + ' - unknown value in record_in_processes: ' + proc +
'.\nSee: {}'.format(BASE_DOC_URL))
# Validate the expiration version.
# Historical versions of Scalars.json may contain expiration versions
# using the deprecated format 'N.Na1'. Those scripts set
# self._strict_type_checks to false.
expires = definition.get('expires')
if not utils.validate_expiration_version(expires) and self._strict_type_checks:
raise ParserError('{} - invalid expires: {}.\nSee: {}#required-fields'
.format(self._name, expires, BASE_DOC_URL)) | def validate_values(self, definition) | This function checks that the fields have the correct values.
:param definition: the dictionary containing the scalar properties.
:raises ParserError: if a scalar definition field contains an unexpected value. | 3.418436 | 3.247717 | 1.052566 |
child.parent = self
self.children.append(child)
return child | def add_child(self, child) | Adds self as parent to child, and then adds child. | 3.52337 | 2.59813 | 1.356117 |
parent.add_child(self)
self.parent = parent
return parent | def add_parent(self, parent) | Adds self as child of parent, then adds parent. | 4.577309 | 3.492245 | 1.310707 |
if cls is None:
if self.parent is None:
return self, level
else:
if isinstance(self, cls):
if not isinstance(self.parent, cls):
return self, level
if self.parent is None:
return None, None
return self.parent.root(cls, level - 1) | def root(self, cls=None, level=0) | Returns the top-most HubComponent in the hierarchy.
If `cls` is not None, then return the top-most attribute HubComponent
that is an instance of class `cls`.
For a fully-constructed track hub (and `cls=None`), this should return
a a Hub object for every component in the hierarchy. | 2.788849 | 3.186331 | 0.875254 |
if intermediate:
if isinstance(self, cls):
yield self, level
elif len(self.children) == 0:
if isinstance(self, cls):
yield self, level
else:
raise StopIteration
for child in self.children:
for leaf, _level in child.leaves(cls, level + 1, intermediate=intermediate):
yield leaf, _level | def leaves(self, cls, level=0, intermediate=False) | Returns an iterator of the HubComponent leaves that are of class `cls`.
If `intermediate` is True, then return any intermediate classes as
well. | 2.665122 | 3.132372 | 0.850832 |
self.validate()
created_files = OrderedDict()
if staging is None:
staging = tempfile.mkdtemp()
this = self._render(staging)
if this:
created_files[repr(self)] = this
for child in self.children:
created_files[repr(child)] = child.render(staging)
return created_files | def render(self, staging=None) | Renders the object to file, returning a list of created files.
Calls validation code, and, as long as each child is also a subclass of
:class:`HubComponent`, the rendering is recursive. | 3.639585 | 3.066925 | 1.186721 |
context = CONTEXT.copy()
context['requester'] = application.applicant
context['link'] = link
context['is_secret'] = is_secret
context['application'] = application
context['authorised_text'] = authorised_text
_send_request_email(
context,
authorised_role, authorised_persons,
"common_request") | def send_request_email(
authorised_text, authorised_role, authorised_persons, application,
link, is_secret) | Sends an email to admin asking to approve user application | 3.613535 | 3.660892 | 0.987064 |
if not application.applicant.email:
return
context = CONTEXT.copy()
context['receiver'] = application.applicant
context['application'] = application
context['link'] = link
context['is_secret'] = is_secret
to_email = application.applicant.email
subject, body = render_email('common_invite', context)
send_mail(subject, body, settings.ACCOUNTS_EMAIL, [to_email]) | def send_invite_email(application, link, is_secret) | Sends an email inviting someone to create an account | 3.206872 | 3.200604 | 1.001958 |
if not application.applicant.email:
return
context = CONTEXT.copy()
context['receiver'] = application.applicant
context['application'] = application
context['created_person'] = created_person
context['created_account'] = created_account
context['link'] = link
context['is_secret'] = is_secret
subject, body = render_email('common_approved', context)
to_email = application.applicant.email
send_mail(subject, body, settings.ACCOUNTS_EMAIL, [to_email]) | def send_approved_email(
application, created_person, created_account, link, is_secret) | Sends an email informing person application is approved | 2.603115 | 2.546531 | 1.02222 |
from karaage.datastores import add_accounts_to_group
from karaage.datastores import add_accounts_to_project
from karaage.datastores import add_accounts_to_institute
a_list = person.account_set
add_accounts_to_group(a_list, group)
for project in group.project_set.all():
add_accounts_to_project(a_list, project)
for institute in group.institute_set.all():
add_accounts_to_institute(a_list, institute) | def _add_person_to_group(person, group) | Call datastores after adding a person to a group. | 2.92033 | 2.675404 | 1.091547 |
from karaage.datastores import remove_accounts_from_group
from karaage.datastores import remove_accounts_from_project
from karaage.datastores import remove_accounts_from_institute
a_list = person.account_set
remove_accounts_from_group(a_list, group)
for project in group.project_set.all():
remove_accounts_from_project(a_list, project)
for institute in group.institute_set.all():
remove_accounts_from_institute(a_list, institute) | def _remove_person_from_group(person, group) | Call datastores after removing a person from a group. | 2.906999 | 2.673586 | 1.087303 |
if action == "post_add":
if not reverse:
group = instance
for person in model.objects.filter(pk__in=pk_set):
log.change(person, "Added person to group %s" % group)
log.change(group, "Added person %s to group" % person)
_add_person_to_group(person, group)
else:
person = instance
for group in model.objects.filter(pk__in=pk_set):
log.change(person, "Added person to group %s" % group)
log.change(group, "Added person %s to group" % person)
_add_person_to_group(person, group)
elif action == "post_remove":
if not reverse:
group = instance
for person in model.objects.filter(pk__in=pk_set):
log.change(person, "Removed person from group %s" % group)
log.change(group, "Removed person %s from group" % person)
_remove_person_from_group(person, group)
else:
person = instance
for group in model.objects.filter(pk__in=pk_set):
log.change(person, "Removed person from group %s" % group)
log.change(group, "Removed person %s from group" % person)
_remove_person_from_group(person, group)
elif action == "pre_clear":
# This has to occur in pre_clear, not post_clear, as otherwise
# we won't see what groups need to be removed.
if not reverse:
group = instance
log.change(group, "Removed all people from group")
for person in group.members.all():
log.change(group, "Removed person %s from group" % person)
_remove_person_from_group(person, group)
else:
person = instance
log.change(person, "Removed person from all groups")
for group in person.groups.all():
log.change(group, "Removed person %s from group" % person)
_remove_person_from_group(person, group) | def _members_changed(
sender, instance, action, reverse, model, pk_set, **kwargs) | Hook that executes whenever the group members are changed. | 1.563333 | 1.575027 | 0.992575 |
letters = 'XYABCDEFGHIJKLMNOPQRSTUVWZ'
return ' '.join(['dim{0}={1}'.format(dim, sg.name) for dim, sg in zip(letters, s)]) | def dimensions_from_subgroups(s) | Given a sorted list of subgroups, return a string appropriate to provide as
a composite track's `dimensions` arg.
Parameters
----------
s : list of SubGroup objects (or anything with a `name` attribute) | 7.627546 | 7.91964 | 0.963118 |
dims = []
for letter, sg in zip('ABCDEFGHIJKLMNOPQRSTUVWZ', s[2:]):
dims.append('dim{0}'.format(letter))
if dims:
return ' '.join(dims) | def filter_composite_from_subgroups(s) | Given a sorted list of subgroups, return a string appropriate to provide as
the a composite track's `filterComposite` argument
>>> import trackhub
>>> trackhub.helpers.filter_composite_from_subgroups(['cell', 'ab', 'lab', 'knockdown'])
'dimA dimB'
Parameters
----------
s : list
A list representing the ordered subgroups, ideally the same list
provided to `dimensions_from_subgroups`. The values are not actually
used, just the number of items. | 6.721076 | 8.190844 | 0.82056 |
if not h.startswith('#') or len(h) != 7:
raise ValueError("Does not look like a hex color: '{0}'".format(h))
return ','.join(map(str, (
int(h[1:3], 16),
int(h[3:5], 16),
int(h[5:7], 16),
))) | def hex2rgb(h) | Convert hex colors to RGB tuples
Parameters
----------
h : str
String hex color value
>>> hex2rgb("#ff0033")
'255,0,51' | 2.163939 | 2.256719 | 0.958887 |
allowed = ''.join(
[
'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'abcdefghijklmnopqrstuvwxyz',
'0123456789',
]
)
if not strict:
allowed += '-_.'
s = str(s).replace(' ', '_')
return ''.join([i for i in s if i in allowed]) | def sanitize(s, strict=True) | Sanitize a string.
Spaces are converted to underscore; if strict=True they are then removed.
Parameters
----------
s : str
String to sanitize
strict : bool
If True, only alphanumeric characters are allowed. If False, a limited
set of additional characters (-._) will be allowed. | 2.497802 | 2.783097 | 0.89749 |
hub = track.root(cls=Hub)
if hub is None:
raise ValueError(
"track is not fully connected because the root is %s" % repr(hub))
if hub.url is None:
raise ValueError("hub.url is not set")
if track.source is None:
raise ValueError("track.source is not set") | def auto_track_url(track) | Automatically sets the bigDataUrl for `track`.
Requirements:
* the track must be fully connected, such that its root is a Hub object
* the root Hub object must have the Hub.url attribute set
* the track must have the `source` attribute set | 5.237621 | 3.854578 | 1.358805 |
for k, v in results_dict.items():
if isinstance(v, string_types):
print("rendered file: %s (created by: %s)" % (v, k))
else:
show_rendered_files(v)
return | def show_rendered_files(results_dict) | Parses a nested dictionary returned from :meth:`Hub.render` and just prints
the resulting files. | 3.199244 | 3.229419 | 0.990656 |
class _HubComponentEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, base.HubComponent):
return repr(o)
return json.JSONEncoder.default(self, o)
formatted = json.dumps(results_dict, indent=4, cls=_HubComponentEncoder)
# the returned string contains lines with trailing spaces, which causes
# doctests to fail. So fix that here.
for s in formatted.splitlines():
print(s.rstrip()) | def print_rendered_results(results_dict) | Pretty-prints the rendered results dictionary.
Rendered results can be multiply-nested dictionaries; this uses JSON
serialization to print a nice representation. | 4.27057 | 4.294788 | 0.994361 |
hits = []
d = data_dir()
for fn in os.listdir(d):
fn = os.path.join(d, fn)
if os.path.splitext(fn)[-1] == '.bigBed':
hits.append(os.path.abspath(fn))
return hits | def example_bigbeds() | Returns list of example bigBed files | 2.849397 | 2.550842 | 1.117042 |
colours = [
'red', 'blue', 'green', 'pink',
'yellow', 'magenta', 'orange', 'cyan',
]
default_colour = 'purple'
if index < len(colours):
return colours[index]
else:
return default_colour | def get_colour(index) | get color number index. | 2.723917 | 2.661563 | 1.023428 |
filename = get_project_trend_graph_filename(project, start, end)
urls = {
'graph_url': urlparse.urljoin(GRAPH_URL, filename + ".png"),
'data_url': urlparse.urljoin(GRAPH_URL, filename + ".csv"),
}
return urls | def get_project_trend_graph_url(project, start, end) | Generates a bar graph for a project. | 3.231344 | 3.058454 | 1.056529 |
filename = get_institute_graph_filename(start, end)
urls = {
'graph_url': urlparse.urljoin(GRAPH_URL, filename + ".png"),
'data_url': urlparse.urljoin(GRAPH_URL, filename + ".csv"),
}
return urls | def get_institute_graph_url(start, end) | Pie chart comparing institutes usage. | 3.527207 | 3.290842 | 1.071825 |
filename = get_machine_graph_filename(start, end)
urls = {
'graph_url': urlparse.urljoin(GRAPH_URL, filename + ".png"),
'data_url': urlparse.urljoin(GRAPH_URL, filename + ".csv"),
}
return urls | def get_machine_graph_url(start, end) | Pie chart comparing machines usage. | 3.425092 | 3.21421 | 1.065609 |
filename = get_trend_graph_filename(start, end)
urls = {
'graph_url': urlparse.urljoin(GRAPH_URL, filename + ".png"),
'data_url': urlparse.urljoin(GRAPH_URL, filename + ".csv"),
}
return urls | def get_trend_graph_url(start, end) | Total trend graph for machine category. | 3.533314 | 3.403363 | 1.038183 |
filename = get_institute_trend_graph_filename(institute, start, end)
urls = {
'graph_url': urlparse.urljoin(GRAPH_URL, filename + ".png"),
'data_url': urlparse.urljoin(GRAPH_URL, filename + ".csv"),
}
return urls | def get_institute_trend_graph_url(institute, start, end) | Institute trend graph for machine category. | 3.222596 | 3.12775 | 1.030324 |
graph_list = []
for institute in Institute.objects.all():
urls = get_institute_trend_graph_url(institute, start, end)
urls['institute'] = institute
graph_list.append(urls)
return graph_list | def get_institutes_trend_graph_urls(start, end) | Get all institute trend graphs. | 2.829821 | 2.620616 | 1.079831 |
ctx = {
'SHIB_SUPPORTED': settings.SHIB_SUPPORTED,
'org_name': settings.ACCOUNTS_ORG_NAME,
'accounts_email': settings.ACCOUNTS_EMAIL,
'is_admin': is_admin(request),
'kgversion': __version__,
'BUILD_DATE': settings.BUILD_DATE,
'VCS_REF': settings.VCS_REF,
'SLURM_VER': settings.SLURM_VER,
}
return ctx | def common(request) | Set context with common variables. | 4.691065 | 4.438294 | 1.056952 |
from .models import Application
ctx = {}
if request.user.is_authenticated:
person = request.user
my_applications = Application.objects.get_for_applicant(person)
requires_attention = Application.objects.requires_attention(request)
ctx['pending_applications'] = (
my_applications.count() + requires_attention.count()
)
return ctx | def context_processor(request) | Set context with common variables. | 4.758374 | 4.698734 | 1.012693 |
# Create Person
person = self.model(
username=username, email=email,
short_name=short_name, full_name=full_name,
is_admin=is_admin,
institute=institute,
**extra_fields
)
person.set_password(password)
person.save()
return person | def _create_user(
self, username, email, short_name, full_name,
institute, password, is_admin, **extra_fields) | Creates a new active person. | 1.981514 | 1.894441 | 1.045962 |
return self._create_user(
username=username, email=email,
short_name=short_name, full_name=full_name,
institute=institute, password=password,
is_admin=False, **extra_fields) | def create_user(
self, username, email, short_name, full_name,
institute, password=None, **extra_fields) | Creates a new ordinary person. | 1.775559 | 1.808979 | 0.981525 |
return self._create_user(
username=username, email=email,
institute=institute, password=password,
short_name=short_name, full_name=full_name,
is_admin=True, **extra_fields) | def create_superuser(
self, username, email, short_name, full_name,
institute, password, **extra_fields) | Creates a new person with super powers. | 1.817162 | 1.870949 | 0.971252 |
new_context = {
'roles': context['roles'],
'org_name': context['org_name'],
'application': application,
}
nodelist = template.loader.get_template(
'kgapplications/%s_common_state.html' % application.type)
output = nodelist.render(new_context)
return output | def application_state(context, application) | Render current state of application, verbose. | 5.652802 | 5.638431 | 1.002549 |
state_machine = get_state_machine(application)
state = state_machine.get_state(application)
return state.name | def application_simple_state(context, application) | Render current state of application, verbose. | 4.107012 | 3.764896 | 1.09087 |
nodelist = parser.parse(('end_application_actions',))
parser.delete_first_token()
return ApplicationActionsPlus(nodelist) | def do_application_actions_plus(parser, token) | Render actions available with extra text. | 2.272736 | 2.17997 | 1.042554 |
proc = sp.Popen(cmds, bufsize=-1, stdout=sp.PIPE, stderr=sp.STDOUT,
close_fds=sys.platform != 'win32')
for line in proc.stdout:
print(line[:-1].decode())
retcode = proc.wait()
if retcode:
raise sp.CalledProcessError(retcode, cmds) | def run(cmds, **kwargs) | Wrapper around subprocess.run, with unicode decoding of output.
Additional kwargs are passed to subprocess.run. | 2.26328 | 2.570484 | 0.880488 |
target = os.path.abspath(target)
linkname = os.path.abspath(linkname)
if not os.path.exists(target):
raise ValueError("target {} not found".format(target))
link_dir = os.path.dirname(linkname)
if not os.path.exists(link_dir):
os.makedirs(link_dir)
run(['ln', '-s', '-f', target, linkname])
return linkname | def symlink(target, linkname) | Create a symlink to `target` called `linkname`.
Converts `target` and `linkname` to absolute paths; creates
`dirname(linkname)` if needed. | 2.159912 | 2.365109 | 0.91324 |
if user is None:
user = ""
else:
user = user + "@"
if host is None or host == 'localhost':
host = ""
else:
host = host + ":"
if not local_dir.endswith('/'):
local_dir = local_dir + '/'
if not remote_dir.endswith('/'):
remote_dir = remote_dir + '/'
remote_string = '{user}{host}{remote_dir}'.format(**locals())
cmds = ['rsync']
cmds += shlex.split(rsync_options)
cmds += [local_dir, remote_string]
run(cmds)
return [remote_string] | def upload(host, user, local_dir, remote_dir, rsync_options=RSYNC_OPTIONS) | Upload a file or directory via rsync.
Parameters
----------
host : str or None
If None, omit the host part and just transfer locally
user : str or None
If None, omit the user part
local_dir : str
If a directory, a trailing "/" will be added.
remote_dir : str
If a directory, a trailing "/" will be added. | 2.43913 | 2.493726 | 0.978107 |
linkname = os.path.join(staging, remote_fn.lstrip(os.path.sep))
return symlink(local_fn, linkname) | def local_link(local_fn, remote_fn, staging) | Creates a symlink to a local staging area.
The link name is built from `remote_fn`, but the absolute path is put
inside the staging directory.
Example
-------
If we have the following initial setup::
cwd="/home/user"
local="data/sample1.bw"
remote="/hubs/hg19/a.bw"
staging="__staging__"
Then this function does the equivalent of::
mkdir -p __staging__/hubs/hg19
ln -sf \\
/home/user/data/sample1.bw \\
/home/user/__staging__/hubs/hg19/a.bw | 3.512736 | 6.690618 | 0.525024 |
linknames = []
# Objects that don't represent a file shouldn't be staged
non_file_objects = (
track.ViewTrack,
track.CompositeTrack,
track.AggregateTrack,
track.SuperTrack,
genome.Genome,
)
if isinstance(x, non_file_objects):
return linknames
# If it's an object representing a file, then render it.
#
# Track objects don't represent files, but their documentation does
linknames.append(x.render(staging))
if hasattr(x, 'source') and hasattr(x, 'filename'):
def _stg(x, ext=''):
# A remote track hosted elsewhere does not need staging. This is
# defined by a track with a url, but no source or filename.
if (
x.source is None
and x.filename is None
and getattr(x, 'url', None) is not None
):
return
linknames.append(
local_link(x.source + ext, x.filename + ext, staging)
)
_stg(x)
if isinstance(x, track.Track):
if x.tracktype == 'bam':
_stg(x, ext='.bai')
if x.tracktype == 'vcfTabix':
_stg(x, ext='.tbi')
if isinstance(x, track.CompositeTrack):
if x._html:
_stg(x._html)
return linknames | def stage(x, staging) | Stage an object to the `staging` directory.
If the object is a Track and is one of the types that needs an index file
(bam, vcfTabix), then the index file will be staged as well.
Returns a list of the linknames created. | 4.834046 | 4.090907 | 1.181656 |
linknames = []
if staging is None:
staging = tempfile.mkdtemp()
for obj, level in hub.leaves(base.HubComponent, intermediate=True):
linknames.extend(stage(obj, staging))
return staging, linknames | def stage_hub(hub, staging=None) | Stage a hub by symlinking all its connected files to a local directory. | 10.499821 | 8.697293 | 1.207252 |
hub.render()
if staging is None:
staging = tempfile.mkdtemp()
staging, linknames = stage_hub(hub, staging=staging)
local_dir = os.path.join(staging)
upload(host, user, local_dir=local_dir, remote_dir=remote_dir, rsync_options=rsync_options)
return linknames | def upload_hub(hub, host, remote_dir, user=None, port=22, rsync_options=RSYNC_OPTIONS, staging=None) | Renders, stages, and uploads a hub. | 3.69876 | 3.37689 | 1.095316 |
try:
project = Project.objects.get(pid=project_id)
except Project.DoesNotExist:
return 'Project not found'
return [x.username for x in project.group.members.all()] | def get_project_members(machine, project_id) | Returns list of usernames given a project id | 3.676452 | 3.266106 | 1.125638 |
query = Project.active.all()
return [x.pid for x in query] | def get_projects(machine) | Returns list of project ids | 14.945255 | 11.620693 | 1.28609 |
try:
account = Account.objects.get(
username=username,
date_deleted__isnull=True)
except Account.DoesNotExist:
return "Account '%s' not found" % username
if project is None:
project = account.default_project
else:
try:
project = Project.objects.get(pid=project)
except Project.DoesNotExist:
project = account.default_project
if project is None:
return "None"
if account.person not in project.group.members.all():
project = account.default_project
if project is None:
return "None"
if account.person not in project.group.members.all():
return "None"
return project.pid | def get_project(username, project, machine_name=None) | Used in the submit filter to make sure user is in project | 2.724924 | 2.706202 | 1.006918 |
person = user
projects = person.projects.filter(is_active=True)
return 0, [x.pid for x in projects] | def get_users_projects(user) | List projects a user is part of | 7.976152 | 7.638174 | 1.044249 |
if extra_context is None:
extra_context = {}
dictionary = {'params': kwargs}
for key, value in extra_context.items():
if callable(value):
dictionary[key] = value()
else:
dictionary[key] = value
t = loader.get_template(template)
return HttpResponse(
t.render(context=dictionary, request=request),
content_type=mimetype) | def direct_to_template(
request, template, extra_context=None, mimetype=None, **kwargs) | Render a given template with any extra URL parameters in the context as
``{{ params }}``. | 2.112411 | 2.050363 | 1.030262 |
r
args = request.META.get('QUERY_STRING', '')
if url is not None:
if kwargs:
url = url % kwargs
if args and query_string:
url = "%s?%s" % (url, args)
klass = (permanent and HttpResponsePermanentRedirect
or HttpResponseRedirect)
return klass(url)
else:
logger.warning(
'Gone: %s',
request.path,
extra={
'status_code': 410,
'request': request
})
return HttpResponseGone() | def redirect_to(request, url, permanent=True, query_string=False, **kwargs) | r"""
Redirect to a given URL.
The given url may contain dict-style string formatting, which will be
interpolated against the params in the URL. For example, to redirect from
``/foo/<id>/`` to ``/bar/<id>/``, you could use the following URLconf::
urlpatterns = patterns('',
(r'^foo/(?P<id>\d+)/$',
'django.views.generic.simple.redirect_to',
{'url' : '/bar/%(id)s/'}),
)
If the given url is ``None``, a HttpResponseGone (410) will be issued.
If the ``permanent`` argument is False, then the response will have a 302
HTTP status code. Otherwise, the status code will be 301.
If the ``query_string`` argument is True, then the GET query string
from the request is appended to the URL. | 3.219824 | 3.424092 | 0.940344 |
# test the length
try:
minlength = settings.MIN_PASSWORD_LENGTH
except AttributeError:
minlength = 12
if len(password) < minlength:
raise ValueError(
"Password must be at least %s characters long" % minlength)
if username is not None and username in password:
raise ValueError("Password contains username")
return _assert_password(password, old_password) | def assert_strong_password(username, password, old_password=None) | Raises ValueError if the password isn't strong.
Returns the password otherwise. | 3.557908 | 3.494324 | 1.018196 |
if isinstance(cls, str):
module_name, _, name = cls.rpartition(".")
module = importlib.import_module(module_name)
try:
cls = getattr(module, name)
except AttributeError:
raise AttributeError("%s reference cannot be found" % cls)
return cls | def _lookup(cls: str) -> LdapObjectClass | Lookup module.class. | 3.203452 | 2.750644 | 1.164619 |
person = account.person
if self._primary_group == 'institute':
lgroup = self._get_group(person.institute.group.name)
elif self._primary_group == 'default_project':
if account.default_project is None:
lgroup = self._get_group(self._default_primary_group)
else:
lgroup = self._get_group(account.default_project.group.name)
else:
raise RuntimeError("Unknown value of PRIMARY_GROUP.")
if account.default_project is None:
default_project = "none"
else:
default_project = account.default_project.pid
try:
luser = self._get_account(account.username)
changes = changeset(luser, {})
new_user = False
except ObjectDoesNotExist:
new_user = True
luser = self._account_class()
changes = changeset(luser, {
'uid': account.username
})
changes = changes.merge({
'gidNumber': lgroup['gidNumber'],
'givenName': person.first_name,
'sn': person.last_name,
'telephoneNumber': _str_or_none(person.telephone),
'mail': _str_or_none(person.email),
'title': _str_or_none(person.title),
'o': person.institute.name,
'cn': person.full_name,
'default_project': default_project,
'loginShell': account.shell,
'locked': account.is_locked()
})
save(changes, database=self._database)
if new_user:
# add all groups
for group in account.person.groups.all():
self.add_account_to_group(account, group) | def save_account(self, account: Account) -> None | Account was saved. | 3.173975 | 3.120262 | 1.017214 |
try:
luser = self._get_account(account.username)
groups = luser['groups'].load(database=self._database)
for group in groups:
changes = changeset(group, {})
changes = group.remove_member(changes, luser)
save(changes, database=self._database)
delete(luser, database=self._database)
except ObjectDoesNotExist:
# it doesn't matter if it doesn't exist
pass | def delete_account(self, account) | Account was deleted. | 5.965492 | 5.826005 | 1.023942 |
luser = self._get_account(account.username)
changes = changeset(luser, {
'password': raw_password,
})
save(changes, database=self._database) | def set_account_password(self, account, raw_password) | Account's password was changed. | 9.287655 | 8.128586 | 1.142592 |
luser = self._get_account(old_username)
rename(luser, database=self._database, uid=new_username) | def set_account_username(self, account, old_username, new_username) | Account's username was changed. | 10.869371 | 10.201729 | 1.065444 |
lgroup: OpenldapGroup = self._get_group(group.name)
person: OpenldapAccount = self._get_account(account.username)
changes = changeset(lgroup, {})
changes = lgroup.add_member(changes, person)
save(changes, database=self._database) | def add_account_to_group(self, account, group) | Add account to group. | 8.090375 | 7.704902 | 1.05003 |
result = {}
try:
luser = self._get_account(account.username)
luser = preload(luser, database=self._database)
except ObjectDoesNotExist:
return result
for i, j in luser.items():
if i != 'userPassword' and j is not None:
result[i] = j
return result | def get_account_details(self, account) | Get the account details. | 5.782801 | 5.450855 | 1.060898 |
# If group already exists, take over existing group rather then error.
try:
lgroup = self._get_group(group.name)
changes = changeset(lgroup, {})
except ObjectDoesNotExist:
lgroup = self._group_class()
changes = changeset(lgroup, {
'cn': group.name,
})
changes = changes.merge({
'description': group.description
})
save(changes, database=self._database) | def save_group(self, group) | Group was saved. | 7.168376 | 6.81839 | 1.05133 |
try:
lgroup = self._get_group(group.name)
delete(lgroup, database=self._database)
except ObjectDoesNotExist:
# it doesn't matter if it doesn't exist
pass | def delete_group(self, group) | Group was deleted. | 6.133532 | 5.926954 | 1.034854 |
lgroup = self._get_group(old_name)
rename(lgroup, database=self._database, cn=new_name) | def set_group_name(self, group, old_name, new_name) | Group was renamed. | 11.54955 | 10.589594 | 1.090651 |
result = {}
try:
lgroup = self._get_group(group.name)
lgroup = preload(lgroup, database=self._database)
except ObjectDoesNotExist:
return result
for i, j in lgroup.items():
if j is not None:
result[i] = j
return result | def get_group_details(self, group) | Get the group details. | 5.249902 | 4.917168 | 1.067668 |
rank = 1
ranks = {}
for k in sorted(sample.keys()):
n = sample[k]
ranks[k] = rank + (n - 1) / 2
rank += n
return ranks | def _rank(sample) | Assign numeric ranks to all values in the sample.
The ranks begin with 1 for the smallest value. When there are groups of
tied values, assign a rank equal to the midpoint of unadjusted rankings.
E.g.::
>>> rank({3: 1, 5: 4, 9: 1})
{3: 1.0, 5: 3.5, 9: 6.0} | 3.825444 | 6.630712 | 0.576928 |
tc = 0
n = sum(sample.values())
if n < 2:
return 1.0 # Avoid a ``ZeroDivisionError``.
for k in sorted(sample.keys()):
tc += math.pow(sample[k], 3) - sample[k]
tc = 1 - tc / (math.pow(n, 3) - n)
return tc | def _tie_correct(sample) | Returns the tie correction value for U.
See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.tiecorrect.html | 4.719052 | 4.361015 | 1.082099 |
sqrth = math.sqrt(2) / 2
x = float(a) * sqrth
z = abs(x)
if z < sqrth:
y = 0.5 + 0.5 * math.erf(x)
else:
y = 0.5 * math.erfc(z)
if x > 0:
y = 1 - y
return y | def ndtr(a) | Returns the area under the Gaussian probability density function,
integrated from minus infinity to x.
See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ndtr.html#scipy.special.ndtr | 2.665898 | 2.748863 | 0.969819 |
# Merge dictionaries, adding values if keys match.
sample = sample1.copy()
for k, v in sample2.items():
sample[k] = sample.get(k, 0) + v
# Create a ranking dictionary using same keys for lookups.
ranks = _rank(sample)
sum_of_ranks = sum([sample1[k] * ranks[k] for k, v in sample1.items()])
n1 = sum(sample1.values())
n2 = sum(sample2.values())
# Calculate Mann-Whitney U for both samples.
u1 = sum_of_ranks - (n1 * (n1 + 1)) / 2
u2 = n1 * n2 - u1
tie_correction = _tie_correct(sample)
if tie_correction == 0:
raise ValueError('All provided sample values are identical.')
sd_u = math.sqrt(tie_correction * n1 * n2 * (n1 + n2 + 1) / 12.0)
mean_rank = n1 * n2 / 2.0 + 0.5 * use_continuity
z = abs((max(u1, u2) - mean_rank) / sd_u)
return mwu_result(min(u1, u2), ndtr(-z)) | def mann_whitney_u(sample1, sample2, use_continuity=True) | Computes the Mann-Whitney rank test on both samples.
Each sample is expected to be of the form::
{1: 5, 2: 20, 3: 12, ...}
Returns a named tuple with:
``u`` equal to min(U for sample1, U for sample2), and
``p`` equal to the p-value. | 3.991965 | 3.983455 | 1.002136 |
def newFunc(*args, **kwargs):
print("Call to deprecated function %s." % func.__name__)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc | def deprecated(func) | This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used. | 1.520344 | 1.543781 | 0.984819 |
if schema:
print("The 'schema' parameter is deprecated. "
"Version 4 is now the only schema supported.")
if schema != "v4":
raise ValueError("Invalid schema version")
dataset = Dataset.from_source('telemetry')
filters = (
('docType', doc_type),
('sourceName', source_name),
('sourceVersion', source_version),
('appName', app),
('appUpdateChannel', channel),
('appVersion', version),
)
for key, condition in filters:
if condition and condition != '*':
dataset = dataset.where(**{key: condition})
# build_id and submission_date can be either strings or tuples or lists,
# so they deserve a special treatment
special_cases = dict(appBuildId=build_id, submissionDate=submission_date)
def range_compare(min_val, max_val, val):
return min_val <= val <= max_val
for key, value in iteritems(special_cases):
if value is not None and value != '*':
if isinstance(value, string_types):
condition = value
elif isinstance(value, (list, tuple)) and len(value) == 2:
start, end = value
condition = partial(range_compare, start, end)
else:
raise ValueError(('{} must be either a string or a 2 elements '
'list/tuple'. format(key)))
dataset = dataset.where(**{key: condition})
return dataset.records(sc, sample=fraction) | def get_pings(sc, app=None, build_id=None, channel=None, doc_type='saved_session',
fraction=1.0, schema=None, source_name='telemetry', source_version='4',
submission_date=None, version=None) | Returns a RDD of Telemetry submissions for a given filtering criteria.
:param sc: an instance of SparkContext
:param app: an application name, e.g.: "Firefox"
:param channel: a channel name, e.g.: "nightly"
:param version: the application version, e.g.: "40.0a1"
:param build_id: a build_id or a range of build_ids, e.g.:
"20150601000000" or ("20150601000000", "20150610999999")
:param submission_date: a submission date or a range of submission dates, e.g:
"20150601" or ("20150601", "20150610")
:param source_name: source name, set to "telemetry" by default
:param source_version: source version, set to "4" by default
:param doc_type: ping type, set to "saved_session" by default
:param schema: (deprecated) version of the schema to use
:param fraction: the fraction of pings to return, set to 1.0 by default | 3.815867 | 3.784085 | 1.008399 |
if isinstance(pings.first(), binary_type):
pings = pings.map(lambda p: json.loads(p.decode('utf-8')))
if isinstance(paths, str):
paths = [paths]
# Use '/' as dots can appear in keyed histograms
if isinstance(paths, dict):
paths = [(prop_name, path.split("/")) for prop_name, path in iteritems(paths)]
else:
paths = [(path, path.split("/")) for path in paths]
return pings.map(lambda p: _get_ping_properties(p, paths, only_median,
with_processes,
histograms_url,
additional_histograms)) \
.filter(lambda p: p) | def get_pings_properties(pings, paths, only_median=False, with_processes=False,
histograms_url=None, additional_histograms=None) | Returns a RDD of a subset of properties of pings. Child histograms are
automatically merged with the parent histogram.
If one of the paths points to a keyedHistogram name without supplying the
actual key, returns a dict of all available subhistograms for that property.
:param with_processes: should separate parent and child histograms be
included as well?
:param paths: paths to properties in the payload, with levels separated by "/".
These can be supplied either as a list, eg.
["application/channel", "payload/info/subsessionStartDate"],
or as the values of a dict keyed by custom identifiers, eg.
{"channel": "application/channel", "ssd": "payload/info/subsessionStartDate"}.
:param histograms_url: see histogram.Histogram constructor
:param additional_histograms: see histogram.Histogram constructor
The returned RDD contains a dict for each ping with the required properties as values,
keyed by the original paths (if 'paths' is a list) or the custom identifier keys
(if 'paths' is a dict). | 3.600894 | 3.207872 | 1.122518 |
if isinstance(pings.first(), binary_type):
pings = pings.map(lambda p: json.loads(p.decode('utf-8')))
filtered = pings.filter(lambda p: "clientID" in p or "clientId" in p)
if not filtered:
raise ValueError("Missing clientID/clientId attribute.")
if "clientID" in filtered.first():
client_id = "clientID" # v2
else:
client_id = "clientId" # v4
return filtered.map(lambda p: (p[client_id], p)) \
.reduceByKey(lambda p1, p2: p1) \
.map(lambda p: p[1]) | def get_one_ping_per_client(pings) | Returns a single ping for each client in the RDD.
THIS METHOD IS NOT RECOMMENDED: The ping to be returned is essentially
selected at random. It is also expensive as it requires data to be
shuffled around. It should be run only after extracting a subset with
get_pings_properties. | 3.102463 | 3.007334 | 1.031632 |
def check_perms(user):
# if user not logged in, show login form
if not user.is_authenticated:
return False
# if this site doesn't allow admin access, fail
if settings.ADMIN_IGNORED:
raise PermissionDenied
# check if the user has admin rights
if not user.is_admin:
raise PermissionDenied
return True
actual_decorator = user_passes_test(check_perms, login_url=_login_url)
if function:
return actual_decorator(function)
return actual_decorator | def admin_required(function=None) | Decorator for views that checks that the user is an administrator,
redirecting to the log-in page if necessary. | 3.430019 | 3.709832 | 0.924575 |
def check_perms(user):
# if user not logged in, show login form
if not user.is_authenticated:
return False
# if this is the admin site only admin access
if settings.ADMIN_REQUIRED and not user.is_admin:
raise PermissionDenied
return True
actual_decorator = user_passes_test(check_perms, login_url=_login_url)
if function:
return actual_decorator(function)
return actual_decorator | def login_required(function=None) | Decorator for views that checks that the user is logged in, redirecting
to the log-in page if necessary. | 3.821983 | 4.153958 | 0.920082 |
def actual_decorator(func):
def wrapper(machine_name, password, *args):
from django_xmlrpc.decorators import AuthenticationFailedException
from karaage.machines.models import Machine
machine = Machine.objects.authenticate(machine_name, password)
if machine is None:
raise AuthenticationFailedException
return func(machine, *args)
if hasattr(func, '_xmlrpc_signature'):
sig = func._xmlrpc_signature
sig['args'] = (['string'] * 2) + sig['args']
wrapper._xmlrpc_signature = sig
if func.__doc__:
wrapper.__doc__ = func.__doc__ + \
"\nNote: Machine authentication is required."
return wrapper
if function:
return actual_decorator(function)
return actual_decorator | def xmlrpc_machine_required(function=None) | Decorator for views that checks that the user is logged in, redirecting
to the log-in page if necessary. | 3.702413 | 3.888843 | 0.95206 |
if ignore_errors is None:
ignore_errors = []
cmd = []
cmd.extend(self._prefix)
cmd.extend([self._path, "-iP"])
cmd.extend(command)
command = cmd
logger.debug("Cmd %s" % command)
null = open('/dev/null', 'w')
retcode = subprocess.call(command, stdout=null, stderr=null)
null.close()
if retcode in ignore_errors:
logger.debug(
"<-- Cmd %s returned %d (ignored)" % (command, retcode))
return
if retcode:
logger.error(
"<-- Cmd %s returned: %d (error)" % (command, retcode))
raise subprocess.CalledProcessError(retcode, command)
logger.debug("<-- Returned %d (good)" % retcode)
return | def _call(self, command, ignore_errors=None) | Call remote command with logging. | 2.841244 | 2.795471 | 1.016374 |
cmd = []
cmd.extend(self._prefix)
cmd.extend([self._path, "-iP"])
cmd.extend(command)
command = cmd
logger.debug("Cmd %s" % command)
null = open('/dev/null', 'w')
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=null)
null.close()
results = []
reader = csv.reader(_input_csv(process.stdout), delimiter=str("|"))
try:
headers = next(reader)
logger.debug("<-- headers %s" % headers)
except StopIteration:
logger.debug("Cmd %s headers not found" % command)
headers = []
for row in reader:
_output_csv(row)
logger.debug("<-- row %s" % row)
this_row = {}
i = 0
for i in range(0, len(headers)):
key = headers[i]
value = row[i]
this_row[key] = value
results.append(this_row)
process.stdout.close()
retcode = process.wait()
if retcode != 0:
logger.error("<-- Cmd %s returned %d (error)" % (command, retcode))
raise subprocess.CalledProcessError(retcode, command)
if len(headers) == 0:
logger.error("Cmd %s didn't return any headers." % command)
raise RuntimeError("Cmd %s didn't return any headers." % command)
logger.debug("<-- Returned: %d (good)" % retcode)
return results | def _read_output(self, command) | Read CSV delimited input from Slurm. | 2.848175 | 2.776909 | 1.025664 |
cmd = ["list", "accounts", "where", "name=%s" % projectname]
results = self._read_output(cmd)
if len(results) == 0:
return None
elif len(results) > 1:
logger.error(
"Command returned multiple results for '%s'." % projectname)
raise RuntimeError(
"Command returned multiple results for '%s'." % projectname)
the_result = results[0]
the_project = the_result["Account"]
if projectname.lower() != the_project.lower():
logger.error(
"We expected projectname '%s' "
"but got projectname '%s'." % (projectname, the_project))
raise RuntimeError(
"We expected projectname '%s' "
"but got projectname '%s'." % (projectname, the_project))
return the_result | def get_project(self, projectname) | Get the project details from Slurm. | 2.585198 | 2.534723 | 1.019914 |
cmd = ["list", "assoc", "where", "account=%s" % projectname]
results = self._read_output(cmd)
user_list = []
for result in results:
if result["User"] != "":
user_list.append(result["User"])
return user_list | def get_users_in_project(self, projectname) | Get list of users in project from Slurm. | 4.918846 | 4.995193 | 0.984716 |
cmd = ["list", "assoc", "where", "user=%s" % username]
results = self._read_output(cmd)
project_list = []
for result in results:
project_list.append(result["Account"])
return project_list | def get_projects_in_user(self, username) | Get list of projects in user from Slurm. | 6.003532 | 5.606136 | 1.070886 |
# retrieve default project, or use null project if none
default_project_name = self._null_project
if account.default_project is not None:
default_project_name = account.default_project.pid
# account created
# account updated
ds_user = self.get_user(username)
if account.date_deleted is None:
# date_deleted is not set, user should exist
logger.debug("account is active")
if ds_user is None:
# create user if doesn't exist
self._call([
"add", "user",
"accounts=%s" % default_project_name,
"defaultaccount=%s" % default_project_name,
"name=%s" % username])
else:
# or just set default project
self._call([
"modify", "user",
"set", "defaultaccount=%s" % default_project_name,
"where", "name=%s" % username])
# update user meta information
# add rest of projects user belongs to
slurm_projects = self.get_projects_in_user(username)
slurm_projects = [project.lower() for project in slurm_projects]
slurm_projects = set(slurm_projects)
for project in account.person.projects.all():
if project.pid.lower() not in slurm_projects:
self._call([
"add", "user",
"name=%s" % username,
"accounts=%s" % project.pid])
else:
# date_deleted is not set, user should not exist
logger.debug("account is not active")
self._delete_account(username)
return | def _save_account(self, account, username) | Called when account is created/updated. With username override. | 3.777846 | 3.747473 | 1.008105 |
# account deleted
ds_user = self.get_user(username)
if ds_user is not None:
self._call(["delete", "user", "name=%s" % username])
return | def _delete_account(self, username) | Called when account is deleted. With username override. | 7.618697 | 7.081269 | 1.075894 |
pid = project.pid
# project created
# project updated
if project.is_active:
# project is not deleted
logger.debug("project is active")
ds_project = self.get_project(pid)
if ds_project is None:
self._call(["add", "account", "name=%s" % pid] + self._add_account_extra)
# update project meta information
name = self._truncate(project.name, 40)
self._call([
"modify", "account",
"set", "Description=%s" % self._filter_string(name),
"where", "name=%s" % pid])
self._call([
"modify", "account",
"set", "Organization=%s"
% self._filter_string(project.institute.name),
"where", "name=%s" % pid])
else:
# project is deleted
logger.debug("project is not active")
ds_project = self.get_project(pid)
if ds_project is not None:
self._call(["delete", "account", "name=%s" % pid])
return | def save_project(self, project) | Called when project is saved/updated. | 3.616378 | 3.530387 | 1.024357 |
pid = project.pid
# project deleted
ds_project = self.get_project(pid)
if ds_project is not None:
self._call(["delete", "account", "name=%s" % pid])
return | def delete_project(self, project) | Called when project is deleted. | 10.052236 | 9.059747 | 1.109549 |
return Project.objects.filter(
Q(pid__icontains=q)
| Q(name__icontains=q)
) | def get_query(self, q, request) | return a query set searching for the query string q
either implement this method yourself or set the search_field
in the LookupChannel class definition | 4.181808 | 4.080651 | 1.024789 |
from trackhub import BaseTrack
if isinstance(track, BaseTrack):
self.add_child(track)
self._tracks.append(track)
else:
for t in track:
self.add_child(t)
self._tracks.append(t) | def add_tracks(self, track) | Add a track or iterable of tracks.
Parameters
----------
track : iterable or Track
Iterable of :class:`Track` objects, or a single :class:`Track`
object. | 2.729342 | 3.337012 | 0.8179 |
import string
import random
vowels = ['a', 'e', 'i', 'o', 'u']
consonants = [a for a in string.ascii_lowercase if a not in vowels]
digits = string.digits
# utility functions
def a_part(slen):
ret = ''
for i in range(slen):
if i % 2 == 0:
randid = random.randint(0, 20) # number of consonants
ret += consonants[randid]
else:
randid = random.randint(0, 4) # number of vowels
ret += vowels[randid]
return ret
def n_part(slen):
ret = ''
for i in range(slen):
randid = random.randint(0, 9) # number of digits
ret += digits[randid]
return ret
####
fpl = alpha / 2
if alpha % 2:
fpl = int(alpha / 2) + 1
lpl = alpha - fpl
start = a_part(fpl)
mid = n_part(numeric)
end = a_part(lpl)
return "%s%s%s" % (start, mid, end) | def nicepass(alpha=8, numeric=4) | returns a human-readble password (say rol86din instead of
a difficult to remember K8Yn9muL ) | 2.294474 | 2.235549 | 1.026358 |
return Person.objects.filter(
Q(username__icontains=q)
| Q(short_name__icontains=q)
| Q(full_name__icontains=q)
) | def get_query(self, q, request) | return a query set searching for the query string q
either implement this method yourself or set the search_field
in the LookupChannel class definition | 2.898719 | 2.731675 | 1.061151 |
return Group.objects.filter(
Q(name__icontains=q)
| Q(description__icontains=q)
) | def get_query(self, q, request) | return a query set searching for the query string q
either implement this method yourself or set the search_field
in the LookupChannel class definition | 3.092833 | 2.869653 | 1.077772 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.