| body (string) | body_hash (int64) | docstring (string) | path (string) | name (string) | repository_name (string) | lang (string) | body_without_docstring (string) |
|---|---|---|---|---|---|---|---|
def _pretty_print_target(self, target):
'\n Make target printouts more user-friendly.\n 1) builtins will be printed as `builtins.xyz`\n 2) operators will be printed as `operator.xyz`\n 3) other callables will be printed with qualified name, e.g. torch.add\n '
if isinstance(targ... | -4,326,301,714,006,745,600 | Make target printouts more user-friendly.
1) builtins will be printed as `builtins.xyz`
2) operators will be printed as `operator.xyz`
3) other callables will be printed with qualified name, e.g. torch.add | venv/Lib/site-packages/torch/fx/node.py | _pretty_print_target | Westlanderz/AI-Plat1 | python | def _pretty_print_target(self, target):
'\n Make target printouts more user-friendly.\n 1) builtins will be printed as `builtins.xyz`\n 2) operators will be printed as `operator.xyz`\n 3) other callables will be printed with qualified name, e.g. torch.add\n '
if isinstance(targ... |
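The three printing rules in this docstring can be sketched as follows. This is an illustrative reconstruction, not the actual `torch.fx` implementation; the function name and detection logic are assumptions:

```python
import builtins
import operator

def pretty_print_target_sketch(target):
    name = getattr(target, '__name__', str(target))
    # Rule 1: builtins are printed as `builtins.xyz`
    if getattr(builtins, name, None) is target:
        return f'builtins.{name}'
    # Rule 2: members of the operator module are printed as `operator.xyz`
    if getattr(operator, name, None) is target:
        return f'operator.{name}'
    # Rule 3: other callables get a qualified name, e.g. torch.add
    return f'{getattr(target, "__module__", "?")}.{name}'
```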
@compatibility(is_backward_compatible=True)
def format_node(self, placeholder_names: List[str]=None, maybe_return_typename: List[str]=None) -> Optional[str]:
"\n Return a descriptive string representation of ``self``.\n\n This method can be used with no arguments as a debugging\n utility.\n\n ... | 2,380,465,824,610,337,000 | Return a descriptive string representation of ``self``.
This method can be used with no arguments as a debugging
utility.
This function is also used internally in the ``__str__`` method
of ``Graph``. Together, the strings in ``placeholder_names``
and ``maybe_return_typename`` make up the signature of the
autogenerate... | venv/Lib/site-packages/torch/fx/node.py | format_node | Westlanderz/AI-Plat1 | python | @compatibility(is_backward_compatible=True)
def format_node(self, placeholder_names: List[str]=None, maybe_return_typename: List[str]=None) -> Optional[str]:
"\n Return a descriptive string representation of ``self``.\n\n This method can be used with no arguments as a debugging\n utility.\n\n ... |
@compatibility(is_backward_compatible=True)
def replace_all_uses_with(self, replace_with: 'Node') -> List['Node']:
'\n Replace all uses of ``self`` in the Graph with the Node ``replace_with``.\n\n Args:\n\n replace_with (Node): The node to replace all uses of ``self`` with.\n\n Retur... | -1,574,887,777,613,111,800 | Replace all uses of ``self`` in the Graph with the Node ``replace_with``.
Args:
replace_with (Node): The node to replace all uses of ``self`` with.
Returns:
The list of Nodes on which this change was made. | venv/Lib/site-packages/torch/fx/node.py | replace_all_uses_with | Westlanderz/AI-Plat1 | python | @compatibility(is_backward_compatible=True)
def replace_all_uses_with(self, replace_with: 'Node') -> List['Node']:
'\n Replace all uses of ``self`` in the Graph with the Node ``replace_with``.\n\n Args:\n\n replace_with (Node): The node to replace all uses of ``self`` with.\n\n Retur... |
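A minimal usage sketch for `replace_all_uses_with`, swapping one call for another in a traced graph (the toy module and the relu-to-neg rewrite are illustrative):

```python
import torch
from torch.fx import symbolic_trace

class Toy(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x) + x

gm = symbolic_trace(Toy())
relu = next(n for n in gm.graph.nodes if n.target is torch.relu)
with gm.graph.inserting_after(relu):
    neg = gm.graph.call_function(torch.neg, relu.args)
changed = relu.replace_all_uses_with(neg)  # list of nodes whose inputs were updated
gm.graph.erase_node(relu)                  # safe now: relu has no remaining users
gm.recompile()
```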
@compatibility(is_backward_compatible=False)
def is_impure(self):
'\n Returns whether this op is impure, i.e. if its op is a placeholder or\n output, or a call_function or call_module that is impure.\n\n Returns:\n\n bool: Whether the op is impure.\n '
if (self.op in... | -7,435,228,998,158,217,000 | Returns whether this op is impure, i.e. if its op is a placeholder or
output, or a call_function or call_module that is impure.
Returns:
bool: Whether the op is impure. | venv/Lib/site-packages/torch/fx/node.py | is_impure | Westlanderz/AI-Plat1 | python | @compatibility(is_backward_compatible=False)
def is_impure(self):
'\n Returns whether this op is impure, i.e. if its op is a placeholder or\n output, or a call_function or call_module that is impure.\n\n Returns:\n\n bool: Whether the op is impure.\n '
if (self.op in... |
@compatibility(is_backward_compatible=False)
def normalized_arguments(self, root: torch.nn.Module, arg_types: Optional[Tuple[Any]]=None, kwarg_types: Optional[Dict[(str, Any)]]=None, normalize_to_only_use_kwargs: bool=False) -> Optional[ArgsKwargsPair]:
"\n Returns normalized arguments to Python targets. Thi... | 8,207,796,060,024,680,000 | Returns normalized arguments to Python targets. This means that
`args/kwargs` will be matched up to the module/functional's
signature and return exclusively kwargs in positional order
if `normalize_to_only_use_kwargs` is true.
Also populates default values. Does not support positional-only
parameters or varargs paramet... | venv/Lib/site-packages/torch/fx/node.py | normalized_arguments | Westlanderz/AI-Plat1 | python | @compatibility(is_backward_compatible=False)
def normalized_arguments(self, root: torch.nn.Module, arg_types: Optional[Tuple[Any]]=None, kwarg_types: Optional[Dict[(str, Any)]]=None, normalize_to_only_use_kwargs: bool=False) -> Optional[ArgsKwargsPair]:
"\n Returns normalized arguments to Python targets. Thi... |
@compatibility(is_backward_compatible=True)
def replace_input_with(self, old_input: 'Node', new_input: 'Node'):
'\n Loop through input nodes of ``self``, and replace all instances of\n ``old_input`` with ``new_input``.\n\n Args:\n\n old_input (Node): The old input node to be replaced... | -5,413,264,850,184,298,000 | Loop through input nodes of ``self``, and replace all instances of
``old_input`` with ``new_input``.
Args:
old_input (Node): The old input node to be replaced.
new_input (Node): The new input node to replace ``old_input``. | venv/Lib/site-packages/torch/fx/node.py | replace_input_with | Westlanderz/AI-Plat1 | python | @compatibility(is_backward_compatible=True)
def replace_input_with(self, old_input: 'Node', new_input: 'Node'):
'\n Loop through input nodes of ``self``, and replace all instances of\n ``old_input`` with ``new_input``.\n\n Args:\n\n old_input (Node): The old input node to be replaced... |
def execute(argv):
'Generate genetic sequences for each benchmark'
del argv
FLAGS = flags.FLAGS
benchmarks = IO.load_yaml(FLAGS.benchmarks_filename)
if (not benchmarks):
logging.error('There are no benchmarks to process')
sys.exit(1)
if (not os.path.isdir(FLAGS.benchmarks_directo... | 5,197,675,214,564,177,000 | Generate genetic sequences for each benchmark | examples/algorithms/sga.py | execute | ComputerSystemsLab/OptimizationCache | python | def execute(argv):
del argv
FLAGS = flags.FLAGS
benchmarks = IO.load_yaml(FLAGS.benchmarks_filename)
if (not benchmarks):
logging.error('There are no benchmarks to process')
sys.exit(1)
if (not os.path.isdir(FLAGS.benchmarks_directory)):
logging.error('Benchmarks directo... |
def main():
'Run administrative tasks.'
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apibox.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError("Couldn't import Django. Are you sure it's installed and available on ... | 4,307,895,462,495,865,300 | Run administrative tasks. | manage.py | main | woodonggyu/apibox | python | def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apibox.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError("Couldn't import Django. Are you sure it's installed and available on your PYTHONPATH environment... |
def object_to_dict(obj, list_depth=1):
'Convert Suds object into serializable format.\n\n The calling function can limit the amount of list entries that\n are converted.\n '
d = {}
for (k, v) in suds.sudsobject.asdict(obj).iteritems():
if hasattr(v, '__keylist__'):
d[k] = object... | -3,044,999,984,033,977,300 | Convert Suds object into serializable format.
The calling function can limit the amount of list entries that
are converted. | fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py | object_to_dict | Hybrid-Cloud/badam | python | def object_to_dict(obj, list_depth=1):
'Convert Suds object into serializable format.\n\n The calling function can limit the amount of list entries that\n are converted.\n '
d = {}
for (k, v) in suds.sudsobject.asdict(obj).iteritems():
if hasattr(v, '__keylist__'):
d[k] = object... |
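A self-contained Python 3 sketch of the recursion this docstring describes (the original body is truncated and uses Python 2's `iteritems()`; converting only the first `list_depth` entries of each list is one plausible reading of the docstring):

```python
import suds.sudsobject

def object_to_dict(obj, list_depth=1):
    """Convert a Suds object into plain dicts and lists (sketch)."""
    d = {}
    for k, v in suds.sudsobject.asdict(obj).items():
        if hasattr(v, '__keylist__'):              # nested Suds object
            d[k] = object_to_dict(v, list_depth)
        elif isinstance(v, list):                  # assumed: cap converted entries
            d[k] = [object_to_dict(x, list_depth) if hasattr(x, '__keylist__') else x
                    for x in v[:list_depth]]
        else:
            d[k] = v
    return d
```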
def get_object_properties(vim, collector, mobj, type, properties):
'Gets the properties of the Managed object specified.'
client_factory = vim.client.factory
if (mobj is None):
return None
usecoll = collector
if (usecoll is None):
usecoll = vim.service_content.propertyCollector
p... | -1,476,953,285,040,833,300 | Gets the properties of the Managed object specified. | fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py | get_object_properties | Hybrid-Cloud/badam | python | def get_object_properties(vim, collector, mobj, type, properties):
client_factory = vim.client.factory
if (mobj is None):
return None
usecoll = collector
if (usecoll is None):
usecoll = vim.service_content.propertyCollector
property_filter_spec = client_factory.create('ns0:Prope... |
def get_dynamic_property(vim, mobj, type, property_name):
'Gets a particular property of the Managed Object.'
property_dict = get_dynamic_properties(vim, mobj, type, [property_name])
return property_dict.get(property_name) | 1,675,623,835,722,089,500 | Gets a particular property of the Managed Object. | fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py | get_dynamic_property | Hybrid-Cloud/badam | python | def get_dynamic_property(vim, mobj, type, property_name):
property_dict = get_dynamic_properties(vim, mobj, type, [property_name])
return property_dict.get(property_name) |
def get_dynamic_properties(vim, mobj, type, property_names):
'Gets the specified properties of the Managed Object.'
obj_content = get_object_properties(vim, None, mobj, type, property_names)
if (obj_content is None):
return {}
if hasattr(obj_content, 'token'):
cancel_retrieve(vim, obj_co... | -5,690,402,733,005,168,000 | Gets the specified properties of the Managed Object. | fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py | get_dynamic_properties | Hybrid-Cloud/badam | python | def get_dynamic_properties(vim, mobj, type, property_names):
obj_content = get_object_properties(vim, None, mobj, type, property_names)
if (obj_content is None):
return {}
if hasattr(obj_content, 'token'):
cancel_retrieve(vim, obj_content.token)
property_dict = {}
if obj_content... |
def get_objects(vim, type, properties_to_collect=None, all=False):
'Gets the list of objects of the type specified.'
return vutil.get_objects(vim, type, CONF.vmware.maximum_objects, properties_to_collect, all) | 8,382,414,298,554,375,000 | Gets the list of objects of the type specified. | fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py | get_objects | Hybrid-Cloud/badam | python | def get_objects(vim, type, properties_to_collect=None, all=False):
return vutil.get_objects(vim, type, CONF.vmware.maximum_objects, properties_to_collect, all) |
def get_inner_objects(vim, base_obj, path, inner_type, properties_to_collect=None, all=False):
'Gets the list of inner objects of the type specified.'
client_factory = vim.client.factory
base_type = base_obj._type
traversal_spec = vutil.build_traversal_spec(client_factory, 'inner', base_type, path, Fals... | 3,725,338,895,588,128,000 | Gets the list of inner objects of the type specified. | fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py | get_inner_objects | Hybrid-Cloud/badam | python | def get_inner_objects(vim, base_obj, path, inner_type, properties_to_collect=None, all=False):
client_factory = vim.client.factory
base_type = base_obj._type
traversal_spec = vutil.build_traversal_spec(client_factory, 'inner', base_type, path, False, [])
object_spec = vutil.build_object_spec(client... |
def cancel_retrieve(vim, token):
'Cancels the retrieve operation.'
return vim.CancelRetrievePropertiesEx(vim.service_content.propertyCollector, token=token) | -7,733,806,174,974,513,000 | Cancels the retrieve operation. | fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py | cancel_retrieve | Hybrid-Cloud/badam | python | def cancel_retrieve(vim, token):
return vim.CancelRetrievePropertiesEx(vim.service_content.propertyCollector, token=token) |
def continue_to_get_objects(vim, token):
'Continues to get the list of objects of the type specified.'
return vim.ContinueRetrievePropertiesEx(vim.service_content.propertyCollector, token=token) | -4,253,159,307,787,871,000 | Continues to get the list of objects of the type specified. | fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py | continue_to_get_objects | Hybrid-Cloud/badam | python | def continue_to_get_objects(vim, token):
return vim.ContinueRetrievePropertiesEx(vim.service_content.propertyCollector, token=token) |
def get_prop_spec(client_factory, spec_type, properties):
'Builds the Property Spec Object.'
prop_spec = client_factory.create('ns0:PropertySpec')
prop_spec.type = spec_type
prop_spec.pathSet = properties
return prop_spec | -2,410,048,619,987,145,000 | Builds the Property Spec Object. | fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py | get_prop_spec | Hybrid-Cloud/badam | python | def get_prop_spec(client_factory, spec_type, properties):
prop_spec = client_factory.create('ns0:PropertySpec')
prop_spec.type = spec_type
prop_spec.pathSet = properties
return prop_spec |
def get_obj_spec(client_factory, obj, select_set=None):
'Builds the Object Spec object.'
obj_spec = client_factory.create('ns0:ObjectSpec')
obj_spec.obj = obj
obj_spec.skip = False
if (select_set is not None):
obj_spec.selectSet = select_set
return obj_spec | 3,381,832,873,053,010,000 | Builds the Object Spec object. | fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py | get_obj_spec | Hybrid-Cloud/badam | python | def get_obj_spec(client_factory, obj, select_set=None):
obj_spec = client_factory.create('ns0:ObjectSpec')
obj_spec.obj = obj
obj_spec.skip = False
if (select_set is not None):
obj_spec.selectSet = select_set
return obj_spec |
def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
'Builds the Property Filter Spec Object.'
prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
prop_filter_spec.propSet = prop_spec
prop_filter_spec.objectSet = obj_spec
return prop_filter_spec | 2,964,159,198,052,121,600 | Builds the Property Filter Spec Object. | fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py | get_prop_filter_spec | Hybrid-Cloud/badam | python | def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
prop_filter_spec.propSet = prop_spec
prop_filter_spec.objectSet = obj_spec
return prop_filter_spec |
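Taken together, `get_prop_spec`, `get_obj_spec`, and `get_prop_filter_spec` assemble a property-collector query. A hedged composition sketch (`vim` and `vm_ref` are assumed to be in scope; the retrieve call mirrors the usual vSphere pattern rather than this module's exact code):

```python
def fetch_vm_props(vim, vm_ref):
    """Sketch: retrieve two properties of a single VM using the builders above."""
    client_factory = vim.client.factory
    prop_spec = get_prop_spec(client_factory, 'VirtualMachine',
                              ['name', 'runtime.powerState'])
    obj_spec = get_obj_spec(client_factory, vm_ref)
    filter_spec = get_prop_filter_spec(client_factory, [obj_spec], [prop_spec])
    options = client_factory.create('ns0:RetrieveOptions')
    options.maxObjects = 1
    return vim.RetrievePropertiesEx(vim.service_content.propertyCollector,
                                    specSet=[filter_spec], options=options)
```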
def get_properties_for_a_collection_of_objects(vim, type, obj_list, properties):
'Gets the list of properties for the collection of\n objects of the type specified.\n '
client_factory = vim.client.factory
if (len(obj_list) == 0):
return []
prop_spec = get_prop_spec(client_factory, type, pr... | -3,855,717,937,075,148,000 | Gets the list of properties for the collection of
objects of the type specified. | fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py | get_properties_for_a_collection_of_objects | Hybrid-Cloud/badam | python | def get_properties_for_a_collection_of_objects(vim, type, obj_list, properties):
'Gets the list of properties for the collection of\n objects of the type specified.\n '
client_factory = vim.client.factory
if (len(obj_list) == 0):
return []
prop_spec = get_prop_spec(client_factory, type, pr... |
def get_about_info(vim):
'Get the About Info from the service content.'
return vim.service_content.about | -3,403,545,440,397,797,000 | Get the About Info from the service content. | fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vim_util.py | get_about_info | Hybrid-Cloud/badam | python | def get_about_info(vim):
return vim.service_content.about |
def validate_view_arguments(project_name=None, ontology_key=None, document_type=None):
'\n extends the "validate_view_arguments" fn in "views_base"\n by adding a check that there is a default customization associated w/ this project/ontology/proxy\n :param project_name:\n :param ontology_key:\n :para... | 7,049,328,265,247,111,000 | extends the "validate_view_arguments" fn in "views_base"
by adding a check that there is a default customization associated w/ this project/ontology/proxy
:param project_name:
:param ontology_key:
:param document_type:
:return: | Q/questionnaire/views/views_realizations.py | validate_view_arguments | ES-DOC/esdoc-questionnaire | python | def validate_view_arguments(project_name=None, ontology_key=None, document_type=None):
'\n extends the "validate_view_arguments" fn in "views_base"\n by adding a check that there is a default customization associated w/ this project/ontology/proxy\n :param project_name:\n :param ontology_key:\n :para... |
@redirect_legacy_projects
def q_view_new(request, project_name=None, ontology_key=None, document_type=None):
'\n this is never exposed by templates\n but a user might still try to navigate explicitly to this URL\n just return an error telling them not to try that\n :param request:\n :param project_na... | -2,959,830,592,501,942,300 | this is never exposed by templates
but a user might still try to navigate explicitly to this URL
just return an error telling them not to try that
:param request:
:param project_name:
:param ontology_key:
:param document_type:
:return: | Q/questionnaire/views/views_realizations.py | q_view_new | ES-DOC/esdoc-questionnaire | python | @redirect_legacy_projects
def q_view_new(request, project_name=None, ontology_key=None, document_type=None):
'\n this is never exposed by templates\n but a user might still try to navigate explicitly to this URL\n just return an error telling them not to try that\n :param request:\n :param project_na... |
@redirect_legacy_projects
def q_view_existing(request, project_name=None, ontology_key=None, document_type=None, realization_pk=None):
'\n this is exactly the same as "q_edit_existing" except:\n there are no authentication checks,\n the template_context & template are different.\n :param request:\n :... | 7,441,352,269,681,091,000 | this is exactly the same as "q_edit_existing" except:
there are no authentication checks,
the template_context & template are different.
:param request:
:param project_name:
:param ontology_key:
:param document_type:
:param realization_pk:
:return: | Q/questionnaire/views/views_realizations.py | q_view_existing | ES-DOC/esdoc-questionnaire | python | @redirect_legacy_projects
def q_view_existing(request, project_name=None, ontology_key=None, document_type=None, realization_pk=None):
'\n this is exactly the same as "q_edit_existing" except:\n there are no authentication checks,\n the template_context & template are different.\n :param request:\n :... |
@redirect_legacy_projects
def q_get_existing(request, project_name=None, ontology_key=None, document_type=None):
'\n this is meant to be used from external requests (ie: further_info_url)\n where uniquely identifying model fields (including pk) are passed\n if a unique realization cannot be found then an e... | -1,087,697,355,295,826,700 | this is meant to be used from external requests (ie: further_info_url)
where uniquely identifying model fields (including pk) are passed
if a unique realization cannot be found then an error is returned
otherwise the response is routed to "q_edit_existing"
:param request:
:param project_name:
:param ontology_key:
:para... | Q/questionnaire/views/views_realizations.py | q_get_existing | ES-DOC/esdoc-questionnaire | python | @redirect_legacy_projects
def q_get_existing(request, project_name=None, ontology_key=None, document_type=None):
'\n this is meant to be used from external requests (ie: further_info_url)\n where uniquely identifying model fields (including pk) are passed\n if a unique realization cannot be found then an e... |
def combineCommandLineOptionsDictIntoShellCommand(commandOptions):
'\n Write out the compas input parameters into a shell string.\n Ensure the Compas executable is first, and not repeated.\n Options are non-ordered.\n '
shellCommand = commandOptions['compas_executable']
del commandOptions['compa... | -6,617,363,741,634,693,000 | Write out the compas input parameters into a shell string.
Ensure the Compas executable is first, and not repeated.
Options are non-ordered. | utils/example_plots/methods_paper_plots/fig_5_HR_diagram/pythonSubmit.py | combineCommandLineOptionsDictIntoShellCommand | IsobelMarguarethe/COMPAS | python | def combineCommandLineOptionsDictIntoShellCommand(commandOptions):
'\n Write out the compas input parameters into a shell string.\n Ensure the Compas executable is first, and not repeated.\n Options are non-ordered.\n '
shellCommand = commandOptions['compas_executable']
del commandOptions['compa... |
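A hedged sketch of the assembly described here. The original body is cut off right after removing the executable from the dict; the flag formatting below is an assumption, not the COMPAS code:

```python
def combine_options_sketch(command_options):
    """Executable first, exactly once; remaining options in arbitrary order."""
    shell_command = command_options.pop('compas_executable')
    for flag, value in command_options.items():
        shell_command += ' --{} {}'.format(flag, value)  # assumed flag syntax
    return shell_command
```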
def cleanStringParameter(str_param):
' clean up string parameters to avoid confusing Boost '
if (str_param is not None):
str_param = str_param.strip('\'"')
escapes = [' ', "'", '"']
for escape in escapes:
str_param = re.sub('(?<!\\\\){}'.format(escape), '\\{}'.format(escape),... | -4,032,103,620,565,185,500 | clean up string parameters to avoid confusing Boost | utils/example_plots/methods_paper_plots/fig_5_HR_diagram/pythonSubmit.py | cleanStringParameter | IsobelMarguarethe/COMPAS | python | def cleanStringParameter(str_param):
' '
if (str_param is not None):
str_param = str_param.strip('\'"')
escapes = [' ', "'", '"']
for escape in escapes:
str_param = re.sub('(?<!\\\\){}'.format(escape), '\\{}'.format(escape), str_param)
return str_param |
def generateCommandLineOptionsDict(self):
'\n This function generates a dictionary mapping COMPAS options to their specified \n values (or empty strings for boolean options). These can be combined into a string\n and run directly as a terminal command, or passed to the stroopwafel interface\n ... | 400,721,765,994,395,800 | This function generates a dictionary mapping COMPAS options to their specified
values (or empty strings for boolean options). These can be combined into a string
and run directly as a terminal command, or passed to the stroopwafel interface
where some of them may be overwritten. Options not to be included in the comma... | utils/example_plots/methods_paper_plots/fig_5_HR_diagram/pythonSubmit.py | generateCommandLineOptionsDict | IsobelMarguarethe/COMPAS | python | def generateCommandLineOptionsDict(self):
'\n This function generates a dictionary mapping COMPAS options to their specified \n values (or empty strings for boolean options). These can be combined into a string\n and run directly as a terminal command, or passed to the stroopwafel interface\n ... |
def get_header(self, request):
'\n Extracts the header containing the JSON web token from the given\n request.\n '
header = request.META.get(api_settings.AUTH_HEADER_NAME)
if isinstance(header, str):
header = header.encode(HTTP_HEADER_ENCODING)
return header | 1,066,136,120,163,030,000 | Extracts the header containing the JSON web token from the given
request. | webpersonal/env/Lib/site-packages/rest_framework_simplejwt/authentication.py | get_header | BrianMarquez3/Python-Django | python | def get_header(self, request):
'\n Extracts the header containing the JSON web token from the given\n request.\n '
header = request.META.get(api_settings.AUTH_HEADER_NAME)
if isinstance(header, str):
header = header.encode(HTTP_HEADER_ENCODING)
return header |
def get_raw_token(self, header):
'\n Extracts an unvalidated JSON web token from the given "Authorization"\n header value.\n '
parts = header.split()
if (len(parts) == 0):
return None
if (parts[0] not in AUTH_HEADER_TYPE_BYTES):
return None
if (len(parts) != 2):
... | 4,438,926,674,893,976,000 | Extracts an unvalidated JSON web token from the given "Authorization"
header value. | webpersonal/env/Lib/site-packages/rest_framework_simplejwt/authentication.py | get_raw_token | BrianMarquez3/Python-Django | python | def get_raw_token(self, header):
'\n Extracts an unvalidated JSON web token from the given "Authorization"\n header value.\n '
parts = header.split()
if (len(parts) == 0):
return None
if (parts[0] not in AUTH_HEADER_TYPE_BYTES):
return None
if (len(parts) != 2):
... |
def get_validated_token(self, raw_token):
'\n Validates an encoded JSON web token and returns a validated token\n wrapper object.\n '
messages = []
for AuthToken in api_settings.AUTH_TOKEN_CLASSES:
try:
return AuthToken(raw_token)
except TokenError as e:
... | -2,122,270,758,756,231,000 | Validates an encoded JSON web token and returns a validated token
wrapper object. | webpersonal/env/Lib/site-packages/rest_framework_simplejwt/authentication.py | get_validated_token | BrianMarquez3/Python-Django | python | def get_validated_token(self, raw_token):
'\n Validates an encoded JSON web token and returns a validated token\n wrapper object.\n '
messages = []
for AuthToken in api_settings.AUTH_TOKEN_CLASSES:
try:
return AuthToken(raw_token)
except TokenError as e:
... |
def get_user(self, validated_token):
'\n Attempts to find and return a user using the given validated token.\n '
try:
user_id = validated_token[api_settings.USER_ID_CLAIM]
except KeyError:
raise InvalidToken(_('Token contained no recognizable user identification'))
try:
... | 8,316,481,509,880,417,000 | Attempts to find and return a user using the given validated token. | webpersonal/env/Lib/site-packages/rest_framework_simplejwt/authentication.py | get_user | BrianMarquez3/Python-Django | python | def get_user(self, validated_token):
'\n \n '
try:
user_id = validated_token[api_settings.USER_ID_CLAIM]
except KeyError:
raise InvalidToken(_('Token contained no recognizable user identification'))
try:
user = self.user_model.objects.get(**{api_settings.USER_ID_FIE... |
def get_user(self, validated_token):
'\n Returns a stateless user object which is backed by the given validated\n token.\n '
if (api_settings.USER_ID_CLAIM not in validated_token):
raise InvalidToken(_('Token contained no recognizable user identification'))
return api_settings.T... | 1,161,550,880,086,617,600 | Returns a stateless user object which is backed by the given validated
token. | webpersonal/env/Lib/site-packages/rest_framework_simplejwt/authentication.py | get_user | BrianMarquez3/Python-Django | python | def get_user(self, validated_token):
'\n Returns a stateless user object which is backed by the given validated\n token.\n '
if (api_settings.USER_ID_CLAIM not in validated_token):
raise InvalidToken(_('Token contained no recognizable user identification'))
return api_settings.T... |
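The four hooks above (`get_header`, `get_raw_token`, `get_validated_token`, `get_user`) chain together inside the authentication class; a sketch of the typical `authenticate()` flow in `rest_framework_simplejwt`-style code:

```python
def authenticate(self, request):
    """Sketch of how the helper methods above are typically chained."""
    header = self.get_header(request)
    if header is None:
        return None                      # no credentials supplied
    raw_token = self.get_raw_token(header)
    if raw_token is None:
        return None                      # header present but not a bearer token
    validated_token = self.get_validated_token(raw_token)
    return self.get_user(validated_token), validated_token
```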
def _init_state(self, encoder_hidden):
' Initialize the encoder hidden state. '
if (encoder_hidden is None):
return None
if isinstance(encoder_hidden, tuple):
encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])
else:
encoder_hidden = self._cat_directions(enc... | -888,232,364,086,648,400 | Initialize the encoder hidden state. | seq2seq/models/DecoderRNN.py | _init_state | junyongk/pytorch-seq2seq | python | def _init_state(self, encoder_hidden):
' '
if (encoder_hidden is None):
return None
if isinstance(encoder_hidden, tuple):
encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])
else:
encoder_hidden = self._cat_directions(encoder_hidden)
return encoder_hidd... |
def _cat_directions(self, h):
' If the encoder is bidirectional, do the following transformation.\n (#directions * #layers, #batch, hidden_size) -> (#layers, #batch, #directions * hidden_size)\n '
if self.bidirectional_encoder:
h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)
... | 1,430,779,244,302,292,700 | If the encoder is bidirectional, do the following transformation.
(#directions * #layers, #batch, hidden_size) -> (#layers, #batch, #directions * hidden_size) | seq2seq/models/DecoderRNN.py | _cat_directions | junyongk/pytorch-seq2seq | python | def _cat_directions(self, h):
' If the encoder is bidirectional, do the following transformation.\n (#directions * #layers, #batch, hidden_size) -> (#layers, #batch, #directions * hidden_size)\n '
if self.bidirectional_encoder:
h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)
... |
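The shape transformation documented in `_cat_directions` can be verified directly with a small runnable check:

```python
import torch

num_layers, batch, hidden = 2, 3, 4
h = torch.randn(2 * num_layers, batch, hidden)             # 2 directions interleaved
h_cat = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)
assert h_cat.shape == (num_layers, batch, 2 * hidden)      # (#layers, #batch, 2*hidden)
```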
def parse(self) -> object:
' reads in the file, then parses record tables'
self.contents = self.f.read()
self.header = self.parseHeader()
self.records = self.parseRecordInfoList()
self.readRecord0() | 3,804,246,371,784,762,400 | reads in the file, then parses record tables | dbookbee/mobi/__init__.py | parse | cloudylan/dbooklib | python | def parse(self) -> object:
' '
self.contents = self.f.read()
self.header = self.parseHeader()
self.records = self.parseRecordInfoList()
self.readRecord0() |
def author(self):
'Returns the author of the book'
return self.config['exth']['records'][100] | 5,083,589,889,736,570,000 | Returns the author of the book | dbookbee/mobi/__init__.py | author | cloudylan/dbooklib | python | def author(self):
return self.config['exth']['records'][100] |
def title(self):
'Returns the title of the book'
return self.config['mobi']['Full Name'] | 943,860,609,546,996,200 | Returns the title of the book | dbookbee/mobi/__init__.py | title | cloudylan/dbooklib | python | def title(self):
return self.config['mobi']['Full Name'] |
async def begin_delete(self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, **kwargs) -> AsyncLROPoller[None]:
'Deletes the specified network security rule.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :... | -2,860,460,665,875,554,300 | Deletes the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type secu... | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_security_rules_operations.py | begin_delete | Co0olboi/azure-sdk-for-python | python | async def begin_delete(self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, **kwargs) -> AsyncLROPoller[None]:
'Deletes the specified network security rule.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :... |
async def get(self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, **kwargs) -> 'models.SecurityRule':
'Get the specified network security rule.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param networ... | -431,310,436,554,868,800 | Get the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security... | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_security_rules_operations.py | get | Co0olboi/azure-sdk-for-python | python | async def get(self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, **kwargs) -> 'models.SecurityRule':
'Get the specified network security rule.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param networ... |
async def begin_create_or_update(self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, security_rule_parameters: 'models.SecurityRule', **kwargs) -> AsyncLROPoller['models.SecurityRule']:
'Creates or updates a security rule in the specified network security group.\n\n :pa... | -7,602,824,364,702,028,000 | Creates or updates a security rule in the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name o... | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_security_rules_operations.py | begin_create_or_update | Co0olboi/azure-sdk-for-python | python | async def begin_create_or_update(self, resource_group_name: str, network_security_group_name: str, security_rule_name: str, security_rule_parameters: 'models.SecurityRule', **kwargs) -> AsyncLROPoller['models.SecurityRule']:
'Creates or updates a security rule in the specified network security group.\n\n :pa... |
def list(self, resource_group_name: str, network_security_group_name: str, **kwargs) -> AsyncIterable['models.SecurityRuleListResult']:
'Gets all security rules in a network security group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param... | -2,965,344,165,562,121,700 | Gets all security rules in a network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will b... | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_security_rules_operations.py | list | Co0olboi/azure-sdk-for-python | python | def list(self, resource_group_name: str, network_security_group_name: str, **kwargs) -> AsyncIterable['models.SecurityRuleListResult']:
'Gets all security rules in a network security group.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param... |
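A hedged usage sketch for the async operations above (assumes an already-authenticated async `NetworkManagementClient` named `client`; the resource names are placeholders):

```python
async def dump_security_rules(client):
    # `list` pages through rules asynchronously, one SecurityRule at a time
    async for rule in client.security_rules.list(
            resource_group_name='my-rg',
            network_security_group_name='my-nsg'):
        print(rule.name, rule.priority, rule.direction)
```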
@classmethod
def file_to_delete(cls, all_files=False):
'\n Return the list of files to delete.\n '
directory = '{0}{1}'.format(PyFunceble.OUTPUT_DIRECTORY, PyFunceble.OUTPUTS.parent_directory)
if (not directory.endswith(directory_separator)):
directory += directory_separator
result ... | -7,448,717,146,557,944,000 | Return the list of files to delete. | PyFunceble/output/clean.py | file_to_delete | NeolithEra/PyFunceble | python | @classmethod
def file_to_delete(cls, all_files=False):
'\n \n '
directory = '{0}{1}'.format(PyFunceble.OUTPUT_DIRECTORY, PyFunceble.OUTPUTS.parent_directory)
if (not directory.endswith(directory_separator)):
directory += directory_separator
result = []
for (root, _, files) in w... |
@classmethod
def databases_to_delete(cls):
'\n Set the database files to delete.\n '
result = []
if (PyFunceble.CONFIGURATION.db_type == 'json'):
directory = PyFunceble.CONFIG_DIRECTORY
result.append('{0}{1}'.format(directory, PyFunceble.OUTPUTS.default_files.dir_structure))
... | 2,857,564,988,682,521,600 | Set the database files to delete. | PyFunceble/output/clean.py | databases_to_delete | NeolithEra/PyFunceble | python | @classmethod
def databases_to_delete(cls):
'\n \n '
result = []
if (PyFunceble.CONFIGURATION.db_type == 'json'):
directory = PyFunceble.CONFIG_DIRECTORY
result.append('{0}{1}'.format(directory, PyFunceble.OUTPUTS.default_files.dir_structure))
result.append('{0}{1}'.form... |
def almost_everything(self, clean_all=False, file_path=False):
'\n Delete almost all discovered files.\n\n :param bool clean_all:\n Tell the subsystem if we have to clean everything instesd\n of almost everything.\n '
if (('do_not_clean' not in PyFunceble.INTERN) or (n... | -2,799,624,771,840,950,000 | Delete almost all discovered files.
:param bool clean_all:
Tell the subsystem if we have to clean everything instesd
of almost everything. | PyFunceble/output/clean.py | almost_everything | NeolithEra/PyFunceble | python | def almost_everything(self, clean_all=False, file_path=False):
'\n Delete almost all discovered files.\n\n :param bool clean_all:\n Tell the subsystem if we have to clean everything instesd\n of almost everything.\n '
if (('do_not_clean' not in PyFunceble.INTERN) or (n... |
async def get_thumb(self, message):
'\n A naive algorithm\n that recursively searches from the end for a "thumb" field\n and returns it if found\n '
if isinstance(message, list):
values = list(enumerate(message))
elif isinstance(message, dict):
values = list(message.items(... | 1,209,851,858,328,211,700 | A naive algorithm
that recursively searches from the end for a "thumb" field
and returns it if found | tgquote/filegetters/base.py | get_thumb | Forevka/tgquote | python | async def get_thumb(self, message):
'\n A naive algorithm\n that recursively searches from the end for a "thumb" field\n and returns it if found\n '
if isinstance(message, list):
values = list(enumerate(message))
elif isinstance(message, dict):
values = list(message.items(... |
async def get_document(self, message):
'\n A naive algorithm\n that recursively searches from the end for a "file_id" field\n and returns its parent if found\n '
if isinstance(message, list):
values = list(enumerate(message))
elif isinstance(message, dict):
values = list(... | 1,139,342,667,563,544,700 | A naive algorithm
that recursively searches from the end for a "file_id" field
and returns its parent if found | tgquote/filegetters/base.py | get_document | Forevka/tgquote | python | async def get_document(self, message):
'\n A naive algorithm\n that recursively searches from the end for a "file_id" field\n and returns its parent if found\n '
if isinstance(message, list):
values = list(enumerate(message))
elif isinstance(message, dict):
values = list(... |
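A generic, synchronous sketch of the "search from the end for a key" recursion both docstrings describe; the real async methods differ in whether they return the field itself (`get_thumb`) or the dict containing it (`get_document`):

```python
def find_key_from_end(message, key):
    """Return the dict that contains `key`, scanning nested data from the end."""
    if isinstance(message, dict):
        if key in message:
            return message
        values = list(message.values())
    elif isinstance(message, list):
        values = message
    else:
        return None
    for v in reversed(values):           # scan from the end, as the docstrings say
        found = find_key_from_end(v, key)
        if found is not None:
            return found
    return None
```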
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
'\n Computes gradient penalty based on prediction and weighted real / fake samples\n '
gradients = K.gradients(y_pred, averaged_samples)[0]
gradients_sqr = K.square(gradients)
gradients_sqr_sum = K.sum(gradients_sqr, axis=n... | 2,655,758,970,492,790,300 | Computes gradient penalty based on prediction and weighted real / fake samples | wgan_gp/wgan_gp.py | gradient_penalty_loss | 311nguyenbaohuy/Keras-GAN | python | def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
'\n \n '
gradients = K.gradients(y_pred, averaged_samples)[0]
gradients_sqr = K.square(gradients)
gradients_sqr_sum = K.sum(gradients_sqr, axis=np.arange(1, len(gradients_sqr.shape)))
gradient_l2_norm = K.sqrt(gradients... |
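The cell is truncated right after the L2 norm; the standard WGAN-GP penalty then squares the deviation of that norm from 1 and averages over the batch. A hedged completion consistent with the visible lines (the last two statements are the usual formulation, assumed here since the cell is cut off):

```python
import numpy as np
from keras import backend as K

def gradient_penalty_loss(y_true, y_pred, averaged_samples):
    """WGAN-GP penalty: mean((||grad D(x_hat)||_2 - 1)^2)."""
    gradients = K.gradients(y_pred, averaged_samples)[0]
    gradients_sqr = K.square(gradients)
    gradients_sqr_sum = K.sum(gradients_sqr,
                              axis=np.arange(1, len(gradients_sqr.shape)))
    gradient_l2_norm = K.sqrt(gradients_sqr_sum)
    gradient_penalty = K.square(1 - gradient_l2_norm)  # assumed continuation
    return K.mean(gradient_penalty)
```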
def find_subsections(section: Element) -> List[nodes.section]:
'Return a list of subsections for the given ``section``.'
result = []
for child in section:
if isinstance(child, nodes.section):
result.append(child)
continue
elif isinstance(child, nodes.Element):
... | 7,239,203,639,831,353,000 | Return a list of subsections for the given ``section``. | sphinx/writers/texinfo.py | find_subsections | Bibo-Joshi/sphinx | python | def find_subsections(section: Element) -> List[nodes.section]:
result = []
for child in section:
if isinstance(child, nodes.section):
result.append(child)
continue
elif isinstance(child, nodes.Element):
result.extend(find_subsections(child))
return re... |
def smart_capwords(s: str, sep: str=None) -> str:
'Like string.capwords() but does not capitalize words that already\n contain a capital letter.'
words = s.split(sep)
for (i, word) in enumerate(words):
if all((x.islower() for x in word)):
words[i] = word.capitalize()
return (sep o... | 8,820,783,055,573,011,000 | Like string.capwords() but does not capitalize words that already
contain a capital letter. | sphinx/writers/texinfo.py | smart_capwords | Bibo-Joshi/sphinx | python | def smart_capwords(s: str, sep: str=None) -> str:
'Like string.capwords() but does not capitalize words that already\n contain a capital letter.'
words = s.split(sep)
for (i, word) in enumerate(words):
if all((x.islower() for x in word)):
words[i] = word.capitalize()
return (sep o... |
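Example behavior of `smart_capwords` (words that already contain a capital, such as acronyms, are left untouched):

```python
>>> smart_capwords('the GNU texinfo manual')
'The GNU Texinfo Manual'
```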
def collect_node_names(self) -> None:
'Generates a unique id for each section.\n\n Assigns the attribute ``node_name`` to each section.'
def add_node_name(name: str) -> str:
node_id = self.escape_id(name)
(nth, suffix) = (1, '')
while (((node_id + suffix) in self.written_ids) or ... | 3,862,376,790,812,649,000 | Generates a unique id for each section.
Assigns the attribute ``node_name`` to each section. | sphinx/writers/texinfo.py | collect_node_names | Bibo-Joshi/sphinx | python | def collect_node_names(self) -> None:
'Generates a unique id for each section.\n\n Assigns the attribute ``node_name`` to each section.'
def add_node_name(name: str) -> str:
node_id = self.escape_id(name)
(nth, suffix) = (1, '')
while (((node_id + suffix) in self.written_ids) or ((... |
def collect_node_menus(self) -> None:
'Collect the menu entries for each "node" section.'
node_menus = self.node_menus
targets: List[Element] = [self.document]
targets.extend(self.document.findall(nodes.section))
for node in targets:
assert (('node_name' in node) and node['node_name'])
... | -9,153,002,201,023,103,000 | Collect the menu entries for each "node" section. | sphinx/writers/texinfo.py | collect_node_menus | Bibo-Joshi/sphinx | python | def collect_node_menus(self) -> None:
node_menus = self.node_menus
targets: List[Element] = [self.document]
targets.extend(self.document.findall(nodes.section))
for node in targets:
assert (('node_name' in node) and node['node_name'])
entries = [s['node_name'] for s in find_subsecti... |
def collect_rellinks(self) -> None:
'Collect the relative links (next, previous, up) for each "node".'
rellinks = self.rellinks
node_menus = self.node_menus
for id in node_menus:
rellinks[id] = ['', '', '']
for (id, entries) in node_menus.items():
for e in entries:
rellin... | 9,184,016,339,000,884,000 | Collect the relative links (next, previous, up) for each "node". | sphinx/writers/texinfo.py | collect_rellinks | Bibo-Joshi/sphinx | python | def collect_rellinks(self) -> None:
rellinks = self.rellinks
node_menus = self.node_menus
for id in node_menus:
rellinks[id] = ['', '', '']
for (id, entries) in node_menus.items():
for e in entries:
rellinks[e][2] = id
for (id, entries) in node_menus.items():
for (... |
def escape(self, s: str) -> str:
'Return a string with Texinfo command characters escaped.'
s = s.replace('@', '@@')
s = s.replace('{', '@{')
s = s.replace('}', '@}')
s = s.replace('``', '`@w{`}')
s = s.replace("''", "'@w{'}")
return s | 9,155,230,778,005,700,000 | Return a string with Texinfo command characters escaped. | sphinx/writers/texinfo.py | escape | Bibo-Joshi/sphinx | python | def escape(self, s: str) -> str:
s = s.replace('@', '@@')
s = s.replace('{', '@{')
s = s.replace('}', '@}')
s = s.replace('``', '`@w{`}')
s = s.replace("''", "'@w{'}")
return s |
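For reference, the substitutions in `escape` applied to a sample input (illustrative):

```python
>>> escape("use @code{x} and ``quoted''")
"use @@code@{x@} and `@w{`}quoted'@w{'}"
```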
def escape_arg(self, s: str) -> str:
'Return an escaped string suitable for use as an argument\n to a Texinfo command.'
s = self.escape(s)
s = s.replace(',', '@comma{}')
s = ' '.join(s.split()).strip()
return s | 6,421,078,286,981,946,000 | Return an escaped string suitable for use as an argument
to a Texinfo command. | sphinx/writers/texinfo.py | escape_arg | Bibo-Joshi/sphinx | python | def escape_arg(self, s: str) -> str:
'Return an escaped string suitable for use as an argument\n to a Texinfo command.'
s = self.escape(s)
s = s.replace(',', '@comma{}')
s = ' '.join(s.split()).strip()
return s |
def escape_id(self, s: str) -> str:
'Return an escaped string suitable for node names and anchors.'
bad_chars = ',:()'
for bc in bad_chars:
s = s.replace(bc, ' ')
if re.search('[^ .]', s):
s = s.replace('.', ' ')
s = ' '.join(s.split()).strip()
return self.escape(s) | 2,745,504,349,594,490,400 | Return an escaped string suitable for node names and anchors. | sphinx/writers/texinfo.py | escape_id | Bibo-Joshi/sphinx | python | def escape_id(self, s: str) -> str:
bad_chars = ',:()'
for bc in bad_chars:
s = s.replace(bc, ' ')
if re.search('[^ .]', s):
s = s.replace('.', ' ')
s = ' '.join(s.split()).strip()
return self.escape(s) |
def escape_menu(self, s: str) -> str:
'Return an escaped string suitable for menu entries.'
s = self.escape_arg(s)
s = s.replace(':', ';')
s = ' '.join(s.split()).strip()
return s | -2,540,185,727,114,880,500 | Return an escaped string suitable for menu entries. | sphinx/writers/texinfo.py | escape_menu | Bibo-Joshi/sphinx | python | def escape_menu(self, s: str) -> str:
s = self.escape_arg(s)
s = s.replace(':', ';')
s = ' '.join(s.split()).strip()
return s |
def ensure_eol(self) -> None:
'Ensure the last line in body is terminated by new line.'
if (self.body and (self.body[(- 1)][(- 1):] != '\n')):
self.body.append('\n') | -8,811,531,292,864,465,000 | Ensure the last line in body is terminated by new line. | sphinx/writers/texinfo.py | ensure_eol | Bibo-Joshi/sphinx | python | def ensure_eol(self) -> None:
if (self.body and (self.body[(- 1)][(- 1):] != '\n')):
self.body.append('\n') |
def get_short_id(self, id: str) -> str:
"Return a shorter 'id' associated with ``id``."
try:
sid = self.short_ids[id]
except KeyError:
sid = hex(len(self.short_ids))[2:]
self.short_ids[id] = sid
return sid | -8,816,042,497,587,074,000 | Return a shorter 'id' associated with ``id``. | sphinx/writers/texinfo.py | get_short_id | Bibo-Joshi/sphinx | python | def get_short_id(self, id: str) -> str:
try:
sid = self.short_ids[id]
except KeyError:
sid = hex(len(self.short_ids))[2:]
self.short_ids[id] = sid
return sid |
def load_data(coh, thresh=False):
'Load in the hg38 and hg19 gistic thresholded data. Assume GISTIC runs \n for each tumor type live in a parent directory (hg38_gistic or hg19_gistic)\n one level up from this script.'
if thresh:
hg38 = (('../hg38_gistic/' + coh) + '/all_thresholded.by_genes.txt')
... | 8,639,613,017,847,819,000 | Load in the hg38 and hg19 gistic thresholded data. Assume GISTIC runs
for each tumor type live in a parent directory (hg38_gistic or hg19_gistic)
one level up from this script. | scripts/AnalysisCode.py | load_data | gaog94/GDAN_QC_CopyNumber | python | def load_data(coh, thresh=False):
'Load in the hg38 and hg19 gistic thresholded data. Assume GISTIC runs \n for each tumor type live in a parent directory (hg38_gistic or hg19_gistic)\n one level up from this script.'
if thresh:
hg38 = (('../hg38_gistic/' + coh) + '/all_thresholded.by_genes.txt')
... |
def raw_value_comparison(coh, plot=False):
"Return the average differences in raw copy number values between the\n gene-level calls in hg19 and hg38 for each gene for a given tumor type \n 'coh.' If plot=True, plot the genes' differences in a histogram."
(df_38, df_19) = load_data(coh, thresh=False)
d... | 7,161,945,418,783,088,000 | Return the average differences in raw copy number values between the
gene-level calls in hg19 and hg38 for each gene for a given tumor type
'coh.' If plot=True, plot the genes' differences in a histogram. | scripts/AnalysisCode.py | raw_value_comparison | gaog94/GDAN_QC_CopyNumber | python | def raw_value_comparison(coh, plot=False):
"Return the average differences in raw copy number values between the\n gene-level calls in hg19 and hg38 for each gene for a given tumor type \n 'coh.' If plot=True, plot the genes' differences in a histogram."
(df_38, df_19) = load_data(coh, thresh=False)
d... |
def sequential_cohort_test_raw_values(cohs, plot=False):
'Sequentially compare raw gene-level calls for the given tumor types.'
c_results = []
for coh in cohs:
c_results += raw_value_comparison(coh, plot=plot)
df_r = pd.DataFrame(c_results, columns=['Cohort', 'Direction', 'Gene', 'Difference'])
... | 3,576,139,177,949,071,400 | Sequentially compare raw gene-level calls for the given tumor types. | scripts/AnalysisCode.py | sequential_cohort_test_raw_values | gaog94/GDAN_QC_CopyNumber | python | def sequential_cohort_test_raw_values(cohs, plot=False):
c_results = []
for coh in cohs:
c_results += raw_value_comparison(coh, plot=plot)
df_r = pd.DataFrame(c_results, columns=['Cohort', 'Direction', 'Gene', 'Difference'])
gcount = Counter(df_r['Gene'])
pos_gcount = Counter(df_r[(df_r... |
def thresholded_value_comparison(df_hg38, df_hg19, metric='hamming'):
"Compare -2,-1,0,1,2 gene-level thresholded calls. metric can be either\n hamming (number of discrepancies in each gene) or manhattan (sum of \n 'distances' between each gene so a 1 to -1 change is 2). Returns a vector\n of each gene's m... | 8,923,828,903,387,390,000 | Compare -2,-1,0,1,2 gene-level thresholded calls. metric can be either
hamming (number of discrepancies in each gene) or manhattan (sum of
'distances' between each gene so a 1 to -1 change is 2). Returns a vector
of each gene's metric. | scripts/AnalysisCode.py | thresholded_value_comparison | gaog94/GDAN_QC_CopyNumber | python | def thresholded_value_comparison(df_hg38, df_hg19, metric='hamming'):
"Compare -2,-1,0,1,2 gene-level thresholded calls. metric can be either\n hamming (number of discrepancies in each gene) or manhattan (sum of \n 'distances' between each gene so a 1 to -1 change is 2). Returns a vector\n of each gene's m... |
def sequential_cohort_test_thresholded_values(cohs):
'Compare thresholded gene-level calls for input tumor types.'
df_out = pd.DataFrame([])
for coh in cohs:
(df_hg38, df_hg19) = load_data(coh, thresh=True)
df_results = thresholded_value_comparison(df_hg38, df_hg19, metric='hamming')
... | 2,946,206,052,754,461,000 | Compare thresholded gene-level calls for input tumor types. | scripts/AnalysisCode.py | sequential_cohort_test_thresholded_values | gaog94/GDAN_QC_CopyNumber | python | def sequential_cohort_test_thresholded_values(cohs):
df_out = pd.DataFrame([])
for coh in cohs:
(df_hg38, df_hg19) = load_data(coh, thresh=True)
df_results = thresholded_value_comparison(df_hg38, df_hg19, metric='hamming')
df_results.columns = [coh]
df_out = df_out.join(df_r... |
def plot_fractionDisagreements_perCohort(cohs):
'Visualize fraction of samples with disagreements in thresholded copy \n number for each gene. Run sequential_cohort_test_thresholded_values()\n before this function.'
df = sequential_cohort_test_thresholded_values(cohs)
df_box = pd.melt(df.reset_index()... | -8,287,393,572,364,439,000 | Visualize fraction of samples with disagreements in thresholded copy
number for each gene. Run sequential_cohort_test_thresholded_values()
before this function. | scripts/AnalysisCode.py | plot_fractionDisagreements_perCohort | gaog94/GDAN_QC_CopyNumber | python | def plot_fractionDisagreements_perCohort(cohs):
'Visualize fraction of samples with disagreements in thresholded copy \n number for each gene. Run sequential_cohort_test_thresholded_values()\n before this function.'
df = sequential_cohort_test_thresholded_values(cohs)
df_box = pd.melt(df.reset_index()... |
def peakgene_overlaps(combos, same_genes, normalize=False):
"Count the number of genes that overlap when examing the hg19 & hg38 \n GISTIC runs' focal peaks."
(venn_numbers, gsu, gsi) = ([], [], [])
for (coh, ad) in combos:
print(coh)
fnames = [((('../hg19_gistic/' + coh) + ad) + 'genes.c... | 2,170,390,030,271,310,600 | Count the number of genes that overlap when examining the hg19 & hg38
GISTIC runs' focal peaks. | scripts/AnalysisCode.py | peakgene_overlaps | gaog94/GDAN_QC_CopyNumber | python | def peakgene_overlaps(combos, same_genes, normalize=False):
"Count the number of genes that overlap when examing the hg19 & hg38 \n GISTIC runs' focal peaks."
(venn_numbers, gsu, gsi) = ([], [], [])
for (coh, ad) in combos:
print(coh)
fnames = [((('../hg19_gistic/' + coh) + ad) + 'genes.c... |
def plot_peakgene_overlaps(combos, same_genes, write=False):
'Visualize the results of peakgene_overlaps function in bargraph form.'
df_out = peakgene_overlaps(combos, same_genes, normalize=False)
(df_d, df_a) = (df_out[(df_out.index.str.split('_').str[(- 1)] == 'del')], df_out[(df_out.index.str.split('_').... | 6,568,916,132,747,667,000 | Visualize the results of peakgene_overlaps function in bargraph form. | scripts/AnalysisCode.py | plot_peakgene_overlaps | gaog94/GDAN_QC_CopyNumber | python | def plot_peakgene_overlaps(combos, same_genes, write=False):
df_out = peakgene_overlaps(combos, same_genes, normalize=False)
(df_d, df_a) = (df_out[(df_out.index.str.split('_').str[(- 1)] == 'del')], df_out[(df_out.index.str.split('_').str[(- 1)] == 'amp')])
for x in zip((df_d, df_a), ('Deletion Peak M... |
def documented_driver_differences():
'Scan and analyze manually curated DocumentedDriverDifferences.txt file.\n Returns: 1) Number of driver genes called in both hg19 & hg38 GISTIC peaks\n 2) Number of drivers missing in hg38 peaks that appeared in hg19 peaks and\n 3) Number of drivers present in hg38 pe... | 3,854,181,415,097,760,000 | Scan and analyze manually curated DocumentedDriverDifferences.txt file.
Returns: 1) Number of driver genes called in both hg19 & hg38 GISTIC peaks
2) Number of drivers missing in hg38 peaks that appeared in hg19 peaks and
3) Number of drivers present in hg38 peaks but absent from hg19 peaks. | scripts/AnalysisCode.py | documented_driver_differences | gaog94/GDAN_QC_CopyNumber | python | def documented_driver_differences():
'Scan and analyze manually curated DocumentedDriverDifferences.txt file.\n Returns: ...'
def read_raw_artemis123(input_fname, preload=False, verbose=None):
'Read Artemis123 data as raw object.\n\n Parameters\n ----------\n input_fname : str\n Path to the data file (extension ``.bin``). The header file with the\n same file name stem and an extension ``.txt`` is expected to be foun... | 1,388,420,436,905,467,000 | Read Artemis123 data as raw object.
Parameters
----------
input_fname : str
Path to the data file (extension ``.bin``). The header file with the
same file name stem and an extension ``.txt`` is expected to be found
in the same directory.
preload : bool or str (default False)
Preload data into memory fo... | mne/io/artemis123/artemis123.py | read_raw_artemis123 | mvdoc/mne-python | python | def read_raw_artemis123(input_fname, preload=False, verbose=None):
'Read Artemis123 data as raw object.\n\n Parameters\n ----------\n input_fname : str\n Path to the data file (extension ``.bin``). The header file with the\n same file name stem and an extension ``.txt`` is expected to be foun... |
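Typical usage of the reader described above (the file name is a placeholder):

```python
import mne

raw = mne.io.read_raw_artemis123('recording.bin', preload=True)
print(raw.info['sfreq'], len(raw.ch_names))  # sampling rate and channel count
```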
def _get_artemis123_info(fname):
'Function for extracting info from artemis123 header files.'
fname = op.splitext(op.abspath(fname))[0]
header = (fname + '.txt')
logger.info('Reading header...')
chan_keys = ['name', 'scaling', 'FLL_Gain', 'FLL_Mode', 'FLL_HighPass', 'FLL_AutoReset', 'FLL_ResetLock']... | 6,327,057,148,829,524,000 | Function for extracting info from artemis123 header files. | mne/io/artemis123/artemis123.py | _get_artemis123_info | mvdoc/mne-python | python | def _get_artemis123_info(fname):
fname = op.splitext(op.abspath(fname))[0]
header = (fname + '.txt')
logger.info('Reading header...')
chan_keys = ['name', 'scaling', 'FLL_Gain', 'FLL_Mode', 'FLL_HighPass', 'FLL_AutoReset', 'FLL_ResetLock']
header_info = dict()
header_info['filter_hist'] = [... |
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
'Read a chunk of raw data.'
_read_segments_file(self, data, idx, fi, start, stop, cals, mult, dtype='>f4') | 6,048,899,956,598,921,000 | Read a chunk of raw data. | mne/io/artemis123/artemis123.py | _read_segment_file | mvdoc/mne-python | python | def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
_read_segments_file(self, data, idx, fi, start, stop, cals, mult, dtype='>f4') |
@singledispatch
def das(var, level=0):
'Single dispatcher that generates the DAS response.'
raise StopIteration | 4,591,123,262,229,556,000 | Single dispatcher that generates the DAS response. | src/pydap/responses/das.py | das | JohnMLarkin/pydap | python | @singledispatch
def das(var, level=0):
raise StopIteration |
def build_attributes(attr, values, level=0):
'Recursive function to build the DAS.'
if isinstance(values, dict):
(yield '{indent}{attr} {{\n'.format(indent=(level * INDENT), attr=attr))
for (k, v) in values.items():
for line in build_attributes(k, v, (level + 1)):
(yi... | -7,251,863,667,847,830,000 | Recursive function to build the DAS. | src/pydap/responses/das.py | build_attributes | JohnMLarkin/pydap | python | def build_attributes(attr, values, level=0):
if isinstance(values, dict):
(yield '{indent}{attr} {{\n'.format(indent=(level * INDENT), attr=attr))
for (k, v) in values.items():
for line in build_attributes(k, v, (level + 1)):
(yield line)
(yield '{indent}}}\n... |
def get_type(values):
'Extract the type of a variable.\n\n This function tries to determine the DAP type of a Python variable using\n several methods. Returns the DAP type as a string.\n\n '
if hasattr(values, 'dtype'):
return NUMPY_TO_DAP2_TYPEMAP[values.dtype.char]
elif (isinstance(values... | 5,853,865,236,788,224,000 | Extract the type of a variable.
This function tries to determine the DAP type of a Python variable using
several methods. Returns the DAP type as a string. | src/pydap/responses/das.py | get_type | JohnMLarkin/pydap | python | def get_type(values):
'Extract the type of a variable.\n\n This function tries to determine the DAP type of a Python variable using\n several methods. Returns the DAP type as a string.\n\n '
if hasattr(values, 'dtype'):
return NUMPY_TO_DAP2_TYPEMAP[values.dtype.char]
elif (isinstance(values... |
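The `elif` is truncated mid-expression. A hedged reconstruction of the likely overall shape: numpy values go through the dtype table, sequences defer to the type of an element, and everything else falls through to `type_convert` (the next record). The stand-in type map and the inlined `type_convert` below are illustrative, not pydap's real tables:

# illustrative subset of pydap's NUMPY_TO_DAP2_TYPEMAP
NUMPY_TO_DAP2_TYPEMAP = {'d': 'Float64', 'f': 'Float32',
                         'l': 'Int32', 'i': 'Int32', 'B': 'Byte'}

def type_convert(obj):
    # same mapping as the type_convert record below
    if isinstance(obj, float):
        return 'Float64'
    if isinstance(obj, int):
        return 'Int32'
    return 'String'

def get_type_sketch(values):
    """Best-effort DAP2 type name for a Python or numpy value (sketch)."""
    if hasattr(values, 'dtype'):
        return NUMPY_TO_DAP2_TYPEMAP[values.dtype.char]
    elif isinstance(values, (list, tuple)) and values:
        # homogeneous sequence: type of the first element stands for all
        return type_convert(values[0])
    else:
        return type_convert(values)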
def type_convert(obj):
'Map Python objects to the corresponding Opendap types.\n\n Returns the DAP representation of the type as a string.\n\n '
if isinstance(obj, float):
return 'Float64'
elif isinstance(obj, integer_types):
return 'Int32'
else:
return 'String' | -4,285,503,868,923,275,000 | Map Python objects to the corresponding Opendap types.
Returns the DAP representation of the type as a string. | src/pydap/responses/das.py | type_convert | JohnMLarkin/pydap | python | def type_convert(obj):
'Map Python objects to the corresponding Opendap types.\n\n Returns the DAP representation of the type as a string.\n\n '
if isinstance(obj, float):
return 'Float64'
elif isinstance(obj, integer_types):
return 'Int32'
else:
return 'String' |
def _parse__event_type_ids(self):
"turns the request's `event_type=operations__update_recents__global` into an id."
event_type_id = None
event_type = self.request.params.get('event_type', None)
if event_type:
try:
event_type_id = model_utils.OperationsEventType.from_string(event_type... | 7,138,887,671,699,360,000 | turns the request's `event_type=operations__update_recents__global` into an id. | src/peter_sslers/web/views_admin/operation.py | _parse__event_type_ids | jvanasco/peter_sslers | python | def _parse__event_type_ids(self):
event_type_id = None
event_type = self.request.params.get('event_type', None)
if event_type:
try:
event_type_id = model_utils.OperationsEventType.from_string(event_type)
except AttributeError:
event_type = None
event_... |
def get_queryset(self, request):
    '\n    read the full queryset if superuser\n    or only their own objects\n    '
qs = super().get_queryset(request)
if request.user.is_superuser:
return qs
    return qs.filter(created_by=request.user) | -5,192,438,078,885,395,000 | read the full queryset if superuser
or only their own objects | schedules/mixins.py | get_queryset | dvek/scyp | python | def get_queryset(self, request):
    '\n    read the full queryset if superuser\n    or only their own objects\n    '
qs = super().get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(created_by=request.user) |
def response_change(self, request, obj):
"\n get from response change some custom action from post\n ej: '_custom_action' in request.POST:\n "
if ('_custom_action' in request.POST):
pass
    return super().response_change(request, obj) | 5,401,525,779,719,651,000 | handle a custom action posted with the change form,
e.g. '_custom_action' in request.POST | schedules/mixins.py | response_change | dvek/scyp | python | def response_change(self, request, obj):
    "\n    handle a custom action posted with the change form,\n    e.g. '_custom_action' in request.POST\n    "
if ('_custom_action' in request.POST):
pass
return super().response_change(request, obj) |
def response_add(self, request, obj):
"\n get from response change some custom action from post\n ej: '_custom_action' in request.POST:\n "
if ('_custom_action' in request.POST):
pass
    return super().response_add(request, obj) | -777,167,301,962,328,600 | handle a custom action posted with the add form,
e.g. '_custom_action' in request.POST | schedules/mixins.py | response_add | dvek/scyp | python | def response_add(self, request, obj):
    "\n    handle a custom action posted with the add form,\n    e.g. '_custom_action' in request.POST\n    "
if ('_custom_action' in request.POST):
pass
return super().response_add(request, obj) |
def _split_generators(self, dl_manager: nlp.DownloadManager):
'Returns SplitGenerators.'
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, 'ijcnlp_dailydialog')
for name in ('train', 'validation', 'test'):
zip_fpath = os.path.join(data_dir, f'{name}.zip')
wit... | -3,751,968,599,593,737,700 | Returns SplitGenerators. | datasets/daily_dialog/daily_dialog.py | _split_generators | vinayya/nlp | python | def _split_generators(self, dl_manager: nlp.DownloadManager):
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, 'ijcnlp_dailydialog')
for name in ('train', 'validation', 'test'):
zip_fpath = os.path.join(data_dir, f'{name}.zip')
with ZipFile(zip_fpath) as zi... |
def _generate_examples(self, file_path, act_path, emotion_path, split):
' Yields examples. '
with open(file_path, 'r', encoding='utf-8') as f, open(act_path, 'r', encoding='utf-8') as act, open(emotion_path, 'r', encoding='utf-8') as emotion:
for (i, (line_f, line_act, line_emotion)) in enumerate(zip(f,... | 3,076,678,954,509,529,600 | Yields examples. | datasets/daily_dialog/daily_dialog.py | _generate_examples | vinayya/nlp | python | def _generate_examples(self, file_path, act_path, emotion_path, split):
' '
with open(file_path, 'r', encoding='utf-8') as f, open(act_path, 'r', encoding='utf-8') as act, open(emotion_path, 'r', encoding='utf-8') as emotion:
for (i, (line_f, line_act, line_emotion)) in enumerate(zip(f, act, emotion)):... |
def access_log_decorate(func):
    '\n    Decorator that records which URLs a logged-in user visits\n    :param func:\n    :return:\n    '
@wraps(func)
def wrapper(*args, **kwargs):
access_user = request.headers.get('X-Real-IP ', request.remote_addr)
access_method = request.method
access_path = request.path
        access_t... | -7,364,709,911,023,220,000 | Decorator that records which URLs a logged-in user visits
:param func:
:return: | sfo_server/decorate.py | access_log_decorate | SF-Technology/SFO | python | def access_log_decorate(func):
    '\n    Decorator that records which URLs a logged-in user visits\n    :param func:\n    :return:\n    '
@wraps(func)
def wrapper(*args, **kwargs):
access_user = request.headers.get('X-Real-IP ', request.remote_addr)
access_method = request.method
access_path = request.path
access_t... |
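The wrapper is truncated just as it starts collecting request attributes. A minimal, self-contained sketch of such a Flask access-log decorator; persistence is reduced to a `logging` call, since the storage model is not shown. Note that the original looks up `'X-Real-IP '` with a trailing space, which can never match a real header name, so the lookup always falls back to `remote_addr`; the sketch drops the space:

import logging
from functools import wraps
from flask import request

access_logger = logging.getLogger('sfo.access')  # name is illustrative

def access_log(func):
    """Record who hit which path with which method, then run the view."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        # prefer the proxy-provided client IP, fall back to the socket peer
        user = request.headers.get('X-Real-IP', request.remote_addr)
        access_logger.info('%s %s %s', user, request.method, request.path)
        return func(*args, **kwargs)
    return wrapper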
def login_required(func):
    '\n    Check whether the user is logged in\n    :param func:\n    :return:\n    '
@wraps(func)
def wrapper(*args, **kwargs):
user_account = session.get('username', '')
if user_account:
login_user = SfoServerUser.query_user_by_account(user_account)
g.user = login_user
            ... | 2,283,296,594,586,760,200 | Check whether the user is logged in
:param func:
:return: | sfo_server/decorate.py | login_required | SF-Technology/SFO | python | def login_required(func):
    '\n    Check whether the user is logged in\n    :param func:\n    :return:\n    '
@wraps(func)
def wrapper(*args, **kwargs):
        user_account = session.get('username', '')
if user_account:
login_user = SfoServerUser.query_user_by_account(user_account)
g.user = login_user
... |
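The not-logged-in branch of the wrapper is cut off. A sketch of the usual session-based pattern; the 401 payload shape is an assumption, and the real code loads a `SfoServerUser` model rather than storing the raw account string:

from functools import wraps
from flask import g, jsonify, session

def login_required(func):
    """Reject the request unless a username is present in the session."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        user_account = session.get('username', '')
        if not user_account:
            # hypothetical error shape; the original handler is not shown
            return jsonify({'status': 401, 'message': 'login required'}), 401
        g.user = user_account
        return func(*args, **kwargs)
    return wrapper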
def permission_required(*resources):
    '\n    Permission checking assumes the user is already logged in\n    Verify the permissions\n    :param resources: the resource objects being controlled\n    '
def decorate(func):
@wraps(func)
def wrapper(*args, **kwargs):
method = func.__name__
resource_names = [resource.__tablename__ for resource in resources]
            ... | 2,981,514,981,444,468,700 | Permission checking assumes the user is already logged in
Verify the permissions
:param resources: the resource objects being controlled | sfo_server/decorate.py | permission_required | SF-Technology/SFO | python | def permission_required(*resources):
    '\n    Permission checking assumes the user is already logged in\n    Verify the permissions\n    :param resources: the resource objects being controlled\n    '
def decorate(func):
@wraps(func)
def wrapper(*args, **kwargs):
method = func.__name__
resource_names = [resource.__tablename__ for resource in resources]
... |
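This is the three-level decorator-factory pattern: the outer call captures the resources, `decorate` receives the view, and `wrapper` runs the check per request. A self-contained sketch with the permission lookup reduced to a set comparison; the real code matches `__tablename__` values against the logged-in user's grants:

from functools import wraps

def permission_required(*resource_names):
    """Allow the call only if the user holds every named permission."""
    def decorate(func):
        @wraps(func)
        def wrapper(user, *args, **kwargs):
            missing = set(resource_names) - set(user.permissions)
            if missing:
                raise PermissionError(
                    'no access to: %s' % ', '.join(sorted(missing)))
            return func(user, *args, **kwargs)
        return wrapper
    return decorate

# usage sketch
class DemoUser:
    permissions = {'sfo_cluster'}

@permission_required('sfo_cluster')
def list_clusters(user):
    return ['cluster-1']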
@when('@:2.4.99')
def patch(self):
' see https://github.com/spack/spack/issues/13559 '
filter_file('import sys', 'import sys; return "{0}"'.format(self.prefix.include), 'pybind11/__init__.py', string=True) | -4,811,059,582,925,944,000 | see https://github.com/spack/spack/issues/13559 | var/spack/repos/builtin/packages/py-pybind11/package.py | patch | ikitayama/spack | python | @when('@:2.4.99')
def patch(self):
' '
filter_file('import sys', 'import sys; return "{0}"'.format(self.prefix.include), 'pybind11/__init__.py', string=True) |
def _DashboardJsonToRawRows(dash_json_dict):
"Formats a Dashboard JSON dict as a list of row dicts.\n\n For the dashboard to begin accepting the Telemetry Dashboard JSON format\n as per go/telemetry-json, this function chunks a Dashboard JSON literal\n into rows and passes the resulting list to _AddTasks.\n\n A... | 2,263,905,534,619,643,600 | Formats a Dashboard JSON dict as a list of row dicts.
For the dashboard to begin accepting the Telemetry Dashboard JSON format
as per go/telemetry-json, this function chunks a Dashboard JSON literal
into rows and passes the resulting list to _AddTasks.
Args:
dash_json_dict: A dashboard JSON v1.0 dict.
Returns:
A... | dashboard/dashboard/add_point.py | _DashboardJsonToRawRows | bopopescu/catapult-2 | python | def _DashboardJsonToRawRows(dash_json_dict):
"Formats a Dashboard JSON dict as a list of row dicts.\n\n For the dashboard to begin accepting the Telemetry Dashboard JSON format\n as per go/telemetry-json, this function chunks a Dashboard JSON literal\n into rows and passes the resulting list to _AddTasks.\n\n A... |
def _TestSuiteName(dash_json_dict):
'Extracts a test suite name from Dashboard JSON.\n\n The dashboard JSON may contain a field "test_suite_name". If this is not\n present or it is None, the dashboard will fall back to using "benchmark_name"\n in the "chart_data" dict.\n '
if dash_json_dict.get('test_suite_... | -8,499,947,017,492,508,000 | Extracts a test suite name from Dashboard JSON.
The dashboard JSON may contain a field "test_suite_name". If this is not
present or it is None, the dashboard will fall back to using "benchmark_name"
in the "chart_data" dict. | dashboard/dashboard/add_point.py | _TestSuiteName | bopopescu/catapult-2 | python | def _TestSuiteName(dash_json_dict):
'Extracts a test suite name from Dashboard JSON.\n\n The dashboard JSON may contain a field "test_suite_name". If this is not\n present or it is None, the dashboard will fall back to using "benchmark_name"\n in the "chart_data" dict.\n '
if dash_json_dict.get('test_suite_... |
def _AddTasks(data):
'Puts tasks on queue for adding data.\n\n Args:\n data: A list of dictionaries, each of which represents one point.\n '
task_list = []
for data_sublist in _Chunk(data, _TASK_QUEUE_SIZE):
task_list.append(taskqueue.Task(url='/add_point_queue', params={'data': json.dumps(data... | -2,097,049,842,708,797,200 | Puts tasks on queue for adding data.
Args:
data: A list of dictionaries, each of which represents one point. | dashboard/dashboard/add_point.py | _AddTasks | bopopescu/catapult-2 | python | def _AddTasks(data):
'Puts tasks on queue for adding data.\n\n Args:\n data: A list of dictionaries, each of which represents one point.\n '
task_list = []
for data_sublist in _Chunk(data, _TASK_QUEUE_SIZE):
task_list.append(taskqueue.Task(url='/add_point_queue', params={'data': json.dumps(data... |
def _Chunk(items, chunk_size):
'Breaks a long list into sub-lists of a particular size.'
chunks = []
for i in range(0, len(items), chunk_size):
chunks.append(items[i:(i + chunk_size)])
return chunks | 3,657,317,635,363,090,400 | Breaks a long list into sub-lists of a particular size. | dashboard/dashboard/add_point.py | _Chunk | bopopescu/catapult-2 | python | def _Chunk(items, chunk_size):
chunks = []
for i in range(0, len(items), chunk_size):
chunks.append(items[i:(i + chunk_size)])
return chunks |
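`_Chunk` is shown in full; a quick check of the slicing behavior it guarantees:

assert _Chunk(list(range(7)), 3) == [[0, 1, 2], [3, 4, 5], [6]]
assert _Chunk([], 3) == []  # no items, no chunks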
def _MakeRowTemplate(dash_json_dict):
'Produces a template for rows created from a Dashboard JSON v1.0 dict.\n\n _DashboardJsonToRawRows adds metadata fields to every row that it creates.\n These include things like master, bot, point ID, versions, and other\n supplementary data. This method produces a dict cont... | 4,264,561,190,868,986,000 | Produces a template for rows created from a Dashboard JSON v1.0 dict.
_DashboardJsonToRawRows adds metadata fields to every row that it creates.
These include things like master, bot, point ID, versions, and other
supplementary data. This method produces a dict containing this metadata
to which row-specific informatio... | dashboard/dashboard/add_point.py | _MakeRowTemplate | bopopescu/catapult-2 | python | def _MakeRowTemplate(dash_json_dict):
'Produces a template for rows created from a Dashboard JSON v1.0 dict.\n\n _DashboardJsonToRawRows adds metadata fields to every row that it creates.\n These include things like master, bot, point ID, versions, and other\n supplementary data. This method produces a dict cont... |
def _FlattenTrace(test_suite_name, chart_name, trace_name, trace, is_ref=False, tracing_links=None, benchmark_description=''):
"Takes a trace dict from dashboard JSON and readies it for display.\n\n Traces can be either scalars or lists; if scalar we take the value directly;\n if list we average the values and co... | -3,075,484,346,601,531,000 | Takes a trace dict from dashboard JSON and readies it for display.
Traces can be either scalars or lists; if scalar we take the value directly;
if list we average the values and compute their standard deviation. We also
extract fields that are normally part of v0 row dicts that are uploaded
using add_point but are act... | dashboard/dashboard/add_point.py | _FlattenTrace | bopopescu/catapult-2 | python | def _FlattenTrace(test_suite_name, chart_name, trace_name, trace, is_ref=False, tracing_links=None, benchmark_description=):
"Takes a trace dict from dashboard JSON and readies it for display.\n\n Traces can be either scalars or lists; if scalar we take the value directly;\n if list we average the values and comp... |
def _ExtractValueAndError(trace):
'Returns the value and measure of error from a chartjson trace dict.\n\n Args:\n trace: A dict that has one "result" from a performance test, e.g. one\n "value" in a Telemetry test, with the keys "trace_type", "value", etc.\n\n Returns:\n A pair (value, error) where ... | -1,784,660,418,561,223,700 | Returns the value and measure of error from a chartjson trace dict.
Args:
trace: A dict that has one "result" from a performance test, e.g. one
"value" in a Telemetry test, with the keys "trace_type", "value", etc.
Returns:
A pair (value, error) where |value| is a float and |error| is some measure
of vari... | dashboard/dashboard/add_point.py | _ExtractValueAndError | bopopescu/catapult-2 | python | def _ExtractValueAndError(trace):
'Returns the value and measure of error from a chartjson trace dict.\n\n Args:\n trace: A dict that has one "result" from a performance test, e.g. one\n "value" in a Telemetry test, with the keys "trace_type", "value", etc.\n\n Returns:\n A pair (value, error) where ... |
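The body is truncated before any trace handling. Going by the `_FlattenTrace` docstring above (scalars pass through; lists are averaged with a standard deviation), a hedged sketch covering just those two cases; the real function also deals with histogram trace types, and the `'type'` key is an assumption about the chartjson shape:

import math

def extract_value_and_error_sketch(trace):
    """Return (value, error) for a chartjson-style trace dict (sketch)."""
    if trace.get('type') == 'scalar':
        return float(trace['value']), 0.0       # single value, no spread
    values = [v for v in trace.get('values', []) if v is not None]
    if not values:
        return float('nan'), 0.0
    mean = sum(values) / len(values)
    variance = sum((v - mean) ** 2 for v in values) / len(values)
    return mean, math.sqrt(variance)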
def _EscapeName(name):
'Escapes a trace name so it can be stored in a row.\n\n Args:\n name: A string representing a name.\n\n Returns:\n An escaped version of the name.\n '
return re.sub('[\\:|=/#&,]', '_', name) | 4,860,297,647,989,263,000 | Escapes a trace name so it can be stored in a row.
Args:
name: A string representing a name.
Returns:
An escaped version of the name. | dashboard/dashboard/add_point.py | _EscapeName | bopopescu/catapult-2 | python | def _EscapeName(name):
'Escapes a trace name so it can be stored in a row.\n\n Args:\n name: A string representing a name.\n\n Returns:\n An escaped version of the name.\n '
return re.sub('[\\:|=/#&,]', '_', name) |
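The character class covers everything that is structurally meaningful in a test path or query string. A quick demonstration of the substitution:

import re

def escape_name(name):
    return re.sub(r'[\:|=/#&,]', '_', name)

assert escape_name('page_load/http://a.com,warm') == 'page_load_http___a.com_warm'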
def _GeomMeanAndStdDevFromHistogram(histogram):
"Generates the geom. mean and std. dev. for a histogram.\n\n A histogram is a collection of numerical buckets with associated\n counts; a bucket can either represent a number of instances of a single\n value ('low'), or from within a range of values (in which case ... | 7,263,711,009,653,982,000 | Generates the geom. mean and std. dev. for a histogram.
A histogram is a collection of numerical buckets with associated
counts; a bucket can either represent a number of instances of a single
value ('low'), or from within a range of values (in which case 'high' will
specify the upper bound). We compute the statistics... | dashboard/dashboard/add_point.py | _GeomMeanAndStdDevFromHistogram | bopopescu/catapult-2 | python | def _GeomMeanAndStdDevFromHistogram(histogram):
"Generates the geom. mean and std. dev. for a histogram.\n\n A histogram is a collection of numerical buckets with associated\n counts; a bucket can either represent a number of instances of a single\n value ('low'), or from within a range of values (in which case ... |
def _ImprovementDirectionToHigherIsBetter(improvement_direction_str):
"Converts an improvement direction string to a higher_is_better boolean.\n\n Args:\n improvement_direction_str: a string, either 'up' or 'down'.\n\n Returns:\n A boolean expressing the appropriate higher_is_better value.\n\n Raises:\n ... | 5,229,265,490,068,555,000 | Converts an improvement direction string to a higher_is_better boolean.
Args:
improvement_direction_str: a string, either 'up' or 'down'.
Returns:
A boolean expressing the appropriate higher_is_better value.
Raises:
BadRequestError: if improvement_direction_str is invalid. | dashboard/dashboard/add_point.py | _ImprovementDirectionToHigherIsBetter | bopopescu/catapult-2 | python | def _ImprovementDirectionToHigherIsBetter(improvement_direction_str):
"Converts an improvement direction string to a higher_is_better boolean.\n\n Args:\n improvement_direction_str: a string, either 'up' or 'down'.\n\n Returns:\n A boolean expressing the appropriate higher_is_better value.\n\n Raises:\n ... |
def _ConstructTestPathMap(row_dicts):
'Makes a mapping from test paths to last added revision.'
last_added_revision_keys = []
for row in row_dicts:
if (not (('master' in row) and ('bot' in row) and ('test' in row))):
continue
path = ('%s/%s/%s' % (row['master'], row['bot'], row['... | -3,003,235,259,617,169,400 | Makes a mapping from test paths to last added revision. | dashboard/dashboard/add_point.py | _ConstructTestPathMap | bopopescu/catapult-2 | python | def _ConstructTestPathMap(row_dicts):
last_added_revision_keys = []
for row in row_dicts:
if (not (('master' in row) and ('bot' in row) and ('test' in row))):
continue
path = ('%s/%s/%s' % (row['master'], row['bot'], row['test'].strip('/')))
if (len(path) > _MAX_TEST_PAT... |
def _ValidateRowDict(row, test_map):
'Checks all fields in the input dictionary.\n\n Args:\n row: A dictionary which represents one point.\n test_map: A dictionary mapping test paths to last added revision.\n\n Raises:\n BadRequestError: The input was not valid.\n '
required_fields = ['master', 'bot... | 789,831,051,503,380,000 | Checks all fields in the input dictionary.
Args:
row: A dictionary which represents one point.
test_map: A dictionary mapping test paths to last added revision.
Raises:
BadRequestError: The input was not valid. | dashboard/dashboard/add_point.py | _ValidateRowDict | bopopescu/catapult-2 | python | def _ValidateRowDict(row, test_map):
'Checks all fields in the input dictionary.\n\n Args:\n row: A dictionary which represents one point.\n test_map: A dictionary mapping test paths to last added revision.\n\n Raises:\n BadRequestError: The input was not valid.\n '
required_fields = ['master', 'bot... |
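The required-field list is cut off after `'master', 'bot...`. A sketch of the validation shape; the fields past `bot` are assumptions based on what a row needs elsewhere in this file (`test`, a revision, a value):

def validate_row_sketch(row):
    """Raise on the first missing required field (sketch)."""
    required_fields = ['master', 'bot', 'test', 'revision', 'value']  # tail assumed
    missing = [f for f in required_fields if f not in row]
    if missing:
        raise ValueError('Row is missing fields: %s' % ', '.join(missing))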
def _ValidateMasterBotTest(master, bot, test):
'Validates the master, bot, and test properties of a row dict.'
test = test.strip('/')
if ('/' not in test):
raise BadRequestError('Test name must have more than one part.')
if (len(test.split('/')) > graph_data.MAX_TEST_ANCESTORS):
raise Ba... | 4,406,785,683,133,111,300 | Validates the master, bot, and test properties of a row dict. | dashboard/dashboard/add_point.py | _ValidateMasterBotTest | bopopescu/catapult-2 | python | def _ValidateMasterBotTest(master, bot, test):
test = test.strip('/')
if ('/' not in test):
raise BadRequestError('Test name must have more than one part.')
if (len(test.split('/')) > graph_data.MAX_TEST_ANCESTORS):
raise BadRequestError(('Invalid test name: %s' % test))
if (('/' in... |
def _ValidateTestPath(test_path):
'Checks whether all the parts of the test path are valid.'
if (len(test_path) > _MAX_TEST_PATH_LENGTH):
raise BadRequestError(('Test path too long: %s' % test_path))
if ('*' in test_path):
raise BadRequestError('Illegal asterisk in test name.')
for name ... | -8,827,850,601,999,613,000 | Checks whether all the parts of the test path are valid. | dashboard/dashboard/add_point.py | _ValidateTestPath | bopopescu/catapult-2 | python | def _ValidateTestPath(test_path):
if (len(test_path) > _MAX_TEST_PATH_LENGTH):
raise BadRequestError(('Test path too long: %s' % test_path))
if ('*' in test_path):
raise BadRequestError('Illegal asterisk in test name.')
for name in test_path.split('/'):
_ValidateTestPathPartName... |