content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def gwei_to_ether(wei):
    """Convert an amount in gwei to ether (1 ether == 10**9 gwei).

    NOTE(review): the parameter is named *wei* but per the docstring the
    value is interpreted as gwei — confirm with callers.
    """
    return (1.0 * wei) / 10 ** 9
from typing import Dict
from typing import Any
def update_epoch_dict(
    obj_ds_to_epoch: Dict[str, Any],
    objective_name: str,
    dataset_name: str,
    split_name: str,
):
    """Increment the iteration counter for (objective, dataset, split).

    Missing intermediate keys are created with a count of 1 (the counter is
    initialized after the first run has already happened).

    Returns the (mutated) mapping for caller convenience.
    """
    try:
        obj_ds_to_epoch[objective_name][dataset_name][split_name] += 1
    except KeyError:
        # Bug fix: the original rebuilt the whole objective subtree with
        # {**old, **tmp}, which discarded any sibling datasets/splits already
        # recorded under the same objective. setdefault preserves them.
        obj_ds_to_epoch.setdefault(objective_name, {}) \
                       .setdefault(dataset_name, {})[split_name] = 1
    return obj_ds_to_epoch
import argparse
def create_cli():
    """Build the command line interface and parse sys.argv.

    Returns:
        dict: parsed CLI arguments (via vars() on the argparse namespace).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--play', type=str, required=True, help='File path to WAV file to play.')
    parser.add_argument('--record', type=str, required=True,
                        help='File path to write the recording. This must have ".wav" extension and be either'
                             '"headphones.wav" or any combination of supported speaker names separated by commas '
                             'eg. FL,FC,FR.wav to be recognized by Impulcifer as a recording file. It\'s '
                             'convenient to point the file path directly to the recording directory such as '
                             '"data\\my_hrir\\FL,FR.wav".')
    # argparse.SUPPRESS keeps the key out of the namespace when not given.
    parser.add_argument('--input_device', type=str, default=argparse.SUPPRESS,
                        help='Name or number of the input device. Use "python -m sounddevice to '
                             'find out which devices are available. It\'s possible to add host API at the end of '
                             'the input device name separated by space to specify which host API to use. For '
                             'example: "Zoom H1n DirectSound".')
    parser.add_argument('--output_device', type=str, default=argparse.SUPPRESS,
                        help='Name or number of the output device. Use "python -m sounddevice to '
                             'find out which devices are available. It\'s possible to add host API at the end of '
                             'the output device name separated by space to specify which host API to use. For '
                             'example: "Zoom H1n WASAPI"')
    parser.add_argument('--host_api', type=str, default=argparse.SUPPRESS,
                        help='Host API name to prefer for input and output devices. Supported options on Windows '
                             'are: "MME", "DirectSound" and "WASAPI". This is used when input and '
                             'output devices have not been specified (using system defaults) or if they have no '
                             'host API specified.')
    parser.add_argument('--channels', type=int, default=2, help='Number of output channels.')
    parser.add_argument('--append', action='store_true',
                        help='Add track(s) to existing file? Silence will be added to the end of all tracks to '
                             'make the equal in length.')
    return vars(parser.parse_args())
import pandas
def get_cell_value(xls, row_index, column_index):
    """Return the value at (row_index, column_index) in *xls*.

    :param xls: target xls (a pandas DataFrame-like object)
    :param row_index: row index in xls
    :param column_index: column index in xls
    :return: value at the coordinate, or None when the cell is empty (NaN)
             or the coordinates are out of range
    """
    try:
        cell = xls.iloc[row_index, column_index]
    except IndexError:
        return None
    return None if pandas.isnull(cell) else cell
def postprocess(simdata):
    """Apply an arbitrary marker edit to *simdata*.

    Overwrites row 4 of column 'B' with a sentinel value so that downstream
    checks can verify the postprocessing hook was actually invoked.
    """
    column_b = simdata.cols['B']
    simdata.data[4, column_b] = 123456.
    return simdata
from typing import List
def concat_level(list_: List, d: int) -> str:
    """Concatenate the scalars found exactly *d* levels deep in *list_*.

    Helper for concatenate_flat: non-list values at depth 0 are stringified;
    values at any other depth contribute nothing.
    """
    if isinstance(list_, list):
        return ''.join(concat_level(item, d - 1) for item in list_)
    return str(list_) if d == 0 else ''
import yaml
def load_yaml_file(file_path):
    """Load a yaml file and return its contents as a dictionary.

    Args:
        file_path (str):
            Path to the yaml file
    Returns:
        dict: contents of the yaml file
    """
    with open(file_path, "r") as stream:
        return yaml.safe_load(stream)
def families_skew_correctors():
    """Return the list of skew corrector family names."""
    families = ['QS']
    return families
import os
def listFiles(path, tup_ext):
    """List the entries of *path* whose names end with *tup_ext*.

    :param path: directory to scan (non-recursive; folders are included
        if their names happen to match).
    :param tup_ext: extension or tuple of extensions, e.g. ('.wav', '.mp3').
    :rtype: list of matching entry names
    """
    # str.endswith accepts a tuple of suffixes directly; the manual
    # append-loop is replaced by the equivalent comprehension.
    return [name for name in os.listdir(path) if name.endswith(tup_ext)]
import os
import ntpath
def get_file_shortname(file_path):
    """Return the bare file name, without directory or extension.

    ntpath handles both '/' and '\\' separators on any platform.

    Returns:
        str
    """
    base = ntpath.basename(file_path)
    root, _ext = os.path.splitext(base)
    return root
import logging
def get_handler_filename(logger):
    """Return the file name of the logger's first FileHandler.

    Parameters:
        * logger (object): logger whose handlers are inspected
    Returns:
        * str: log file name if a FileHandler is attached, None otherwise
    """
    file_handlers = (h for h in logger.handlers
                     if isinstance(h, logging.FileHandler))
    return next((h.baseFilename for h in file_handlers), None)
import sys
import pickle
def loadVocabulary(vocabularyFilePath):
    """Load a pickled vocabulary.

    On failure to open the file, prints the error and exits the process
    (preserving the original CLI-oriented behavior).

    Returns:
        vocabulary: the unpickled sequence of (word, ...) entries
        dict: word -> index mapping derived from the vocabulary order
    """
    try:
        # 'with' guarantees the handle is closed even if pickle.load raises.
        with open(vocabularyFilePath, 'rb') as file:
            vocabulary = pickle.load(file)
    except OSError as e:
        print(e)
        sys.exit()
    indexToWord = [v[0] for v in vocabulary]
    return vocabulary, {w: i for i, w in enumerate(indexToWord)}
def ros_subscribe_cmd(topic, _id=None, _type=None):
    """Build a rosbridge 'subscribe' command object.

    Messages on subscribed topics are later delivered as:
    outgoing_msg = {"op": "publish", "topic": topic, "msg": message}
    (see rosbridge_library capabilities/subscribe.py).

    :param topic: string name of the topic to subscribe to
    :param _id: optional id identifying the subscription for later
        unsubscribe calls
    :param _type: ROS msg type as string
    Not passed on: optional "throttle_rate", "queue_length",
    "fragment_size", "compression" fields.
    """
    cmd = {"op": "subscribe", "topic": topic}
    if _id:
        cmd["id"] = _id
    if _type:
        cmd["type"] = _type
    return cmd
import os
import sys
def _format_filename(filename):
"""Format the filename in a nicer manner than given.
Try to make the filename shorter when it makes sense to, without losing the
clarity of what it means.
"""
if filename is None:
return "<unknown-file>"
# A tiny helper
def in_dir(dirpath, abspath):
return dirpath == os.path.commonpath([dirpath, abspath])
abspath = os.path.abspath(filename)
cwd = os.getcwd()
# If it's in the current directory, return the path, with current directory
# removed.
if in_dir(cwd, abspath):
return abspath[len(cwd) + 1:]
# If it's importable, we show the path to it.
for location in sys.path:
if in_dir(location, abspath):
fpath = abspath[len(location) + 1:]
if fpath.endswith(".py"):
fpath = fpath[:-3]
return "<installed> " + fpath.replace(os.path.sep, ".")
return abspath | 2df6a4c13888a8583a9d4355c4a0f92ca849b7ea | 37,733 |
def set_mode(mode_input):
    """Normalize the mapping-mode flag.

    Parameters:
        mode_input: candidate mode value of any type.

    Returns:
        (bool | None): True when mode_input is exactly the boolean True;
        None for every other value (False, non-bools, None, ...).
    """
    # The original's 'is not None and isinstance(..., bool) and == True'
    # chain reduces to an identity check against True.
    return True if mode_input is True else None
def operator_same_class(method):
    """
    Intended to wrap operator methods, this decorator ensures the `other`
    parameter is of the same type as the `self` parameter.

    :param method: The method being decorated.
    :return: The wrapper to replace the method with.
    :raises TypeError: when `other` is not an instance of `self`'s class.
    """
    from functools import wraps

    # Bug fix: without functools.wraps the decorated operator lost its
    # __name__/__doc__, which breaks introspection and documentation tools.
    @wraps(method)
    def wrapper(self, other):
        if not isinstance(other, self.__class__):
            raise TypeError(
                'unsupported operand types: \'{0}\' and \'{1}\''.format(
                    self.__class__.__name__, other.__class__.__name__))
        return method(self, other)
    return wrapper
def convert_to_tokenized_ground_truth(original_ground_truth, original_document, tokenizer):
    """Expand a per-word ground-truth mask to a per-token mask under *tokenizer*.

    Walks the word sequence and the token sequence in lockstep: each new
    token inherits the GT value of the word currently being reassembled;
    both windows advance once the decoded token span equals the word span.

    Assumption (as in the original): the tokenizer produces NO unknown
    tokens, so decoded spans can be compared with '=='.

    Inputs:
        original_ground_truth: boolean array aligned with original_document
        original_document: pre-tokenized list of words
        tokenizer: tokenizer used to encode/decode the document
    Output:
        new_ground_truth: boolean list aligned with the token sequence
    """
    token_ids = tokenizer.encode(' '.join(original_document))
    new_ground_truth = []
    word_lo, word_hi = 0, 1
    tok_lo, tok_hi = 0, 1
    while tok_hi <= len(token_ids):
        word_span = ' '.join(original_document[word_lo:word_hi])
        token_span = tokenizer.decode(token_ids[tok_lo:tok_hi]).strip()
        new_ground_truth.append(original_ground_truth[word_hi - 1])
        if token_span == word_span:
            # Finished reconstructing the current word: advance both windows.
            word_lo += 1
            word_hi += 1
            tok_lo = tok_hi
        tok_hi += 1
    return new_ground_truth
import sys
def combine_hash(a: int, b: int) -> int:
    """Combine two hash values into one.

    Uses the boost::hash_combine mixing constant (golden-ratio based
    0x9e3779b9) and masks the result to the platform word size via
    sys.maxsize.

    Parameter
    ---------
    a : int
        the first hash value.
    b : int
        the second hash value.

    Returns
    -------
    hash : int
        the combined hash value.
    """
    mixed = b + 0x9e3779b9 + (a << 6) + (a >> 2)
    return (a ^ mixed) & sys.maxsize
def cal_SINR(sp, ip, noise):
    """Compute SINR = signal / (interference + noise).

    Args:
        sp (float or numpy array): signal power
        ip (float or numpy array): interference power
        noise (float or numpy array): noise power
    """
    denominator = ip + noise
    return sp / denominator
def same_list_of_list_of_bboxes(l_1, l_2):
    """Assert two list-of-list-of-bboxes structures are element-wise equal.

    Raises AssertionError on the first mismatch; returns True otherwise.
    """
    assert len(l_1) == len(l_2)
    for inner_1, inner_2 in zip(l_1, l_2):
        assert len(inner_1) == len(inner_2)
        for box_1, box_2 in zip(inner_1, inner_2):
            assert box_1 == box_2
    return True
def selected(data, select):
    """
    Take data and remove any values/columns not in the SELECT parameter.

    :param data: List of data entries (dicts) to be SELECT'ed. ex:
        [
          { 'stb': 'stb1',
            'title': 'the matrix',
            'rev': '6.00',
            'date': '2017-05-01',
            'provider': 'Warner Brothers',
            'time': '12:30' },
          { ... }
        ]
    :param select: List of SELECT parameters. ex: ['stb', 'rev', 'title']
    :return: List of dicts containing only the keys in *select*
    :raises KeyError: if a selected key is missing from an entry
    """
    # Idiom: single comprehension instead of the append loop.
    return [{key: entry[key] for key in select} for entry in data]
def _compute_total_node_capacity(graph, node):
"""
Sum the capacities in all edges touching a given node
"""
neighbours = graph.adj[node]._atlas
return sum([neighbours[adj_node_id][channel_id]['capacity']
for adj_node_id in neighbours for channel_id in neighbours[adj_node_id]]) | f240e6417bb527fb73e4a182567bb4b8c2db2370 | 37,745 |
def remove_non_ascii(s):
    """
    Replace every non-ASCII character in *s* with a space.

    Returns None unchanged (dirty hack preserved from the original).
    """
    if s is None:
        return None
    cleaned = [ch if ord(ch) < 128 else ' ' for ch in s]
    return ''.join(cleaned)
import requests
import json
def newtend_get_tender(tender_internal_id):
    """
    Fetch a tender from the prozorro staging API and return it as a dict.

    URL shape:
    https://lb-api-staging.prozorro.gov.ua/api/0/tenders/<tender_internal_id>
    """
    base_url = "https://lb-api-staging.prozorro.gov.ua/api/0/tenders/"
    response = requests.get(base_url + tender_internal_id)
    return json.loads(response.text)
def _masked_array_repr(values, mask):
"""Returns a string representation for a masked numpy array."""
assert len(values) == len(mask)
if len(values.shape) == 1:
items = [repr(v) if m else '_' for (v, m) in zip(values, mask)]
else:
items = [_masked_array_repr(v, m) for (v, m) in zip(values, mask)]
return '[%s]' % ', '.join(items) | 9324e3343ceefeda6c9676b0303988faa5a35474 | 37,748 |
def sym_lookup(obj, key, default=None):
    """
    Dict lookup that also accepts ruby-symbol-like keys.

    Tries *key* first, then ':key'; returns *default* when neither exists.
    """
    if key in obj:
        return obj[key]
    sym_key = ':{}'.format(key)
    if sym_key in obj:
        return obj[sym_key]
    return default
def concat(list_a: list, list_b: list) -> list:
    """
    Concatenate two lists into a new list.

    Example:
        >>> concat([1, 2, 3], [4, 5, 6])
        [1, 2, 3, 4, 5, 6]

    :param list_a: First list to concatenate
    :param list_b: Second list to concatenate
    :return: New concatenated list (inputs are not modified)
    """
    combined = [*list_a, *list_b]
    return combined
from typing import Optional
import sys
import subprocess
def generator_settings_for_compiler(cmake_path: str, compiler_path: Optional[str]):
    """Make settings to give the CMake generator for a specific compiler.

    On Windows with no explicit compiler, the default Visual Studio
    generator is discovered by parsing `cmake --help` (its line is marked
    with '*'); on other platforms 'Unix Makefiles' is used.

    :param cmake_path: path to the cmake executable.
    :param compiler_path: optional C compiler path; when given it is passed
        via -DCMAKE_C_COMPILER.
    :return: list of cmake command-line settings.
    """
    settings = []
    if compiler_path is not None:
        settings = [f'-DCMAKE_C_COMPILER={compiler_path}']
    # https://cmake.org/cmake/help/latest/generator/Visual%20Studio%2015%202017.html
    if sys.platform == 'win32':
        if compiler_path is None:
            arch = 'x64'
            help_output = subprocess.check_output([cmake_path, '--help'])
            generator = None
            for line in help_output.decode().splitlines():
                if line.startswith('*'):
                    # Bug fix: removed leftover debug print() calls that
                    # polluted stdout on every Windows invocation.
                    generator = line[1:line.index('=')]
                    if '[arch]' in generator:
                        generator = generator.replace('[arch]', '')
                    generator = generator.strip()
                    break
            if generator is not None:
                settings.extend(['-G', generator, '-A', arch])
    else:
        settings.extend(['-G', 'Unix Makefiles'])
    return settings
def get_reading_level_from_flesch(flesch_score):
    """
    Map a Flesch reading-ease score to a difficulty description.

    Thresholds taken from
    https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests

    :param flesch_score: Flesch reading-ease score.
    :return: A reading level / difficulty label for the given score.
    """
    thresholds = (
        (30, "Very difficult to read"),
        (50, "Difficult to read"),
        (60, "Fairly difficult to read"),
        (70, "Plain English"),
        (80, "Fairly easy to read"),
        (90, "Easy to read"),
    )
    for upper_bound, label in thresholds:
        if flesch_score < upper_bound:
            return label
    return "Very easy to read"
import getpass
def getCurrentUserName():
    """
    Return the login name for the UID of the current process.

    getpass consults environment variables first, then the password
    database.
    """
    return getpass.getuser()
import json
import requests
import hashlib
def get_wikidata_image(wikidata_id):
    """Return a 64px commons thumbnail URL for the P18 image of the
    Wikidata item *wikidata_id*, or None (after printing a notice) when
    the item has no image claim."""
    entity_url = ("https://www.wikidata.org/wiki/Special:EntityData/%s.json"
                  % wikidata_id)
    payload = json.loads(requests.get(entity_url).text)
    claims = payload["entities"][wikidata_id]["claims"]
    try:
        image_name = claims["P18"][0]["mainsnak"]["datavalue"]["value"].replace(" ", "_")
    except KeyError:
        print("No image on Wikidata.")
        return None
    # Commons thumbnails are sharded by the md5 of the file name.
    digest = hashlib.md5(image_name.encode('utf-8')).hexdigest()
    return ("https://upload.wikimedia.org/wikipedia/commons/thumb/%s/%s/%s/64px-%s"
            % (digest[0], digest[:2], image_name, image_name))
import torch
def create_iterator(args, trainer, task, adv_split):
    """Set up the data iterator and reset progress meters for one pass of
    adversarial attack."""
    # Seed torch for reproducibility of the pass.
    torch.manual_seed(args.seed)
    # Reset throughput / batch-size training meters before the pass.
    for meter_name in ("wps", "ups", "wpb", "bsz"):
        meter = trainer.get_meter(meter_name)
        if meter is not None:
            meter.reset()
    batch_iterator = task.get_batch_iterator(
        dataset=task.dataset(adv_split),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=trainer.get_model().max_positions(),
        ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=8,
        num_shards=args.num_shards,
        shard_id=args.shard_id,
        num_workers=args.num_workers,
    )
    return batch_iterator.next_epoch_itr(shuffle=False)
import codecs
def normalized_compression_distance(x, y, n=25_270_000_000):
    """Compute the Normalized Compression Distance (NCD) between two byte
    strings.

    NCD(x, y) = (C(xy) - min(C(x), C(y))) / max(C(x), C(y)), where C is the
    zlib-compressed length (via the 'zip' codec).

    Parameters:
        x (bytes)
        y (bytes)
        n (int): UNUSED — presumably the indexed-page count for the related
            Normalized Google Distance (ref. 2); kept only for interface
            compatibility.

    References:
        1. https://www.wikiwand.com/en/Normalized_compression_distance
        2. https://www.wikiwand.com/en/Normalized_Google_distance
    """
    x_code = codecs.encode(x, encoding='zip')
    y_code = codecs.encode(y, encoding='zip')
    x_y_code = codecs.encode(x + y, encoding='zip')
    return (len(x_y_code) - min(len(x_code), len(y_code))) / max(len(x_code), len(y_code))
def in_types():
    """Return the list of value types supported inside an IN-statement."""
    supported = ["VerbSelect", "ParameterString"]
    return supported
import random
def rand(limit):
    """
    Return a generator producing uniform random numbers below *limit*.

    Parameters
    ----------
    limit : int | float
        Sets the exclusive upper bound for random selection. If limit
        is an integer then integer values are returned otherwise float
        values are returned.

    Raises
    ------
    TypeError
        If limit is neither an int nor a float.
    """
    if isinstance(limit, int):
        def irand():
            while True:
                yield random.randrange(limit)
        return irand()
    elif isinstance(limit, float):
        def frand():
            while True:
                yield random.random() * limit
        return frand()
    # Bug fix: the message was missing its f-prefix, so the literal text
    # '{limit}' was raised instead of the offending value.
    raise TypeError(f"limit not an int or float: {limit}.")
def entity_emiss_o(x, n_lbs, tp, exp_term=2):
    """
    Emission prior of entity labels toward the non-entity label 'O',
    computed from the diagonal values of the emission prior.

    Piecewise: a polynomial segment below the turning point, a linear
    segment (hitting zero at x == 1) above it; the two pieces meet at tp.
    Works element-wise for numpy arrays and for plain floats, since the
    boolean masks multiply through.

    Parameters
    ----------
    x: diagonal values
    n_lbs: number of entity labels (2e+1)
    tp: turning point
    exp_term: exponent controlling the slope of the first segment

    Returns
    -------
    non-diagonal emission priors
    """
    below = x < tp
    above = x >= tp
    # Coefficients of the polynomial piece a*x^e + b*x + 1.
    coef_a = (2 - n_lbs) / ((exp_term - 1) * tp ** exp_term - exp_term * tp ** (exp_term - 1))
    coef_b = 1 - n_lbs
    # Value at the turning point anchors the linear piece c*(x - 1).
    f_tp = coef_a * tp ** exp_term + coef_b * tp + 1
    coef_c = f_tp / (tp - 1)
    return below * (coef_a * x ** exp_term + coef_b * x + 1) + above * (coef_c * x - coef_c)
def mock_response(*args, **kwargs):
    """Mock an HTTP response for a small set of Timesketch API endpoints.

    args[0] is the requested URL; unknown URLs yield a response whose
    text is 404 (note: status_code stays at its default of 200, matching
    the original implementation).
    """
    class MockResponse(object):
        """Mock HTTP response object."""

        def __init__(self, json_data=None, text_data=None, status_code=200):
            """Initializes mock object."""
            self.json_data = json_data
            self.text = text_data
            self.status_code = status_code

        def json(self):
            """Mock JSON response."""
            return self.json_data

    auth_text_data = '<input id="csrf_token" name="csrf_token" value="test">'

    def timeline(tl_id):
        """Build one timeline entry with the given id."""
        return {
            'id': tl_id,
            'name': 'test',
            'searchindex': {'index_name': 'test'},
        }

    sketch_data = {
        'meta': {
            'views': [
                {'id': 1, 'name': 'test'},
                {'id': 2, 'name': 'test'},
            ]
        },
        'objects': [
            {
                'id': 1,
                'name': 'test',
                'description': 'test',
                'timelines': [timeline(1), timeline(2)],
            }
        ],
    }
    sketch_list_data = {'objects': [sketch_data['objects']]}
    timeline_data = {'objects': [timeline(1)]}

    # Register API endpoints against the corresponding mock response data.
    routes = {
        'http://127.0.0.1': MockResponse(text_data=auth_text_data),
        'http://127.0.0.1/api/v1/sketches/': MockResponse(
            json_data=sketch_list_data),
        'http://127.0.0.1/api/v1/sketches/1': MockResponse(
            json_data=sketch_data),
        'http://127.0.0.1/api/v1/sketches/1/timelines/1': MockResponse(
            json_data=timeline_data),
    }
    return routes.get(args[0], MockResponse(None, 404))
from io import StringIO
import requests
def load_header_url(url, max_bytes=1024):
    """Fetch up to *max_bytes* bytes of *url* (via an HTTP Range header)
    and return its lines with surrounding whitespace stripped.

    NOTE(review): servers that ignore Range will return the full body,
    which is decoded and split in its entirety.
    """
    response = requests.get(
        url,
        allow_redirects=True,
        headers={'Range': f'bytes=0-{max_bytes}'},
    )
    with StringIO(response.content.decode("utf8")) as data_in:
        raw_lines = data_in.readlines()
    return [ln.strip() for ln in raw_lines]
def flatten_tree_structure(root_list):
    """Flatten a tree into a depth-first list of nodes.

    Each node is annotated in place with:
      - indent_levels: list of (index, sibling_count) pairs per ancestor level
      - children_count: number of direct children
      - indent_change_before / indent_change_after: depth deltas around the
        node in the flattened order.
    """
    flat = []

    def walk(nodes, inherited_levels):
        """Depth-first collection of *nodes* into *flat*."""
        for position, node in enumerate(nodes):
            flat.append(node)
            node.indent_levels = inherited_levels + [(position, len(nodes))]
            children = node.get_children()
            node.children_count = len(children)
            walk(children, node.indent_levels)

    walk(root_list, [])

    # Annotate each node with the indentation delta before/after it.
    depth = 0
    previous = None
    for node in flat:
        new_depth = len(node.indent_levels)
        delta = new_depth - depth
        if previous is not None:
            previous.indent_change_after = delta
        node.indent_change_before = delta
        depth = new_depth
        previous = node
    # The final node closes back out to depth zero.
    if previous is not None:
        previous.indent_change_after = -depth
    return flat
def mssql_sql_utcnow(element, compiler, **kw):
    """Compile hook: MS SQL Server's builtin expression for the current
    UTC datetime (element/compiler are unused)."""
    return 'GETUTCDATE()'
import subprocess
def git_timestamp():
    """Get the last commit timestamp from Git (if possible).

    Returns:
        str | None: the commit date string, or None on any failure
        (e.g. git missing, or not inside a Git repository).
    """
    try:
        result = subprocess.run(
            ['git', 'log', '-1', '--format=%cd'],
            stdout=subprocess.PIPE,
            check=False,
        )
        return result.stdout.decode("utf-8").strip(" \n")
    # Bug fix: the bare 'except:' also swallowed KeyboardInterrupt and
    # SystemExit; Exception is broad enough for the best-effort intent.
    except Exception:
        # Any failure, return None. We may not be in a Git repo at all.
        return None
def construct_rate_units(dimensionality, desired_units):
    """
    Reassemble a rate-unit string from the provided dimensionality powers
    and desired unit names.

    A 1-entry dimensionality must be pure time (e.g. 's^-1'); a 3-entry
    dimensionality must combine length (positive power), mol and time
    (non-positive powers) into 'length/(mol*time)' form.

    Raises:
        ValueError: when the dimensionality cannot describe a valid rate.
    """
    ok = True
    units = ''
    if len(dimensionality) == 1:
        # Pure time dimensionality, e.g. {'time': -1} -> 's^-1'.
        units = '{0}^{1}'.format(desired_units['time'], dimensionality['time'])
    elif len(dimensionality) == 3:
        length_part = mol_part = time_part = ''
        for dim, power in dimensionality.items():
            exp = '' if abs(power) == 1 else '^{0}'.format(abs(power))
            if dim == 'length':
                # Length units should carry a positive exponent.
                ok = ok and power >= 1
                length_part = desired_units[dim] + exp
            elif dim == 'mol':
                # Mol units should carry a negative exponent.
                ok = ok and power <= 1
                mol_part = desired_units[dim] + exp
            elif dim == 'time':
                # Time units should carry a negative exponent.
                ok = ok and power <= 1
                time_part = desired_units[dim] + exp
        if length_part and mol_part and time_part:
            units = '{0}/({1}*{2})'.format(length_part, mol_part, time_part)
        else:
            ok = False
    else:
        ok = False
    if not ok:
        raise ValueError('Unable to construct rate units from the provided dimensionality: {0!r}'.format(dimensionality))
    return units
import re
def validate_fs_path(path):
    """Check whether *path* is a valid file system path.

    Arguments:
        path {string} -- the file system path to check

    Returns:
        True when the whole string is an optional drive letter followed by
        slash- or backslash-separated components; False otherwise.
    """
    # Bug fix: the original used re.search with a fully-optional pattern,
    # which matches the empty string at position 0 of ANY input — the
    # function unconditionally returned True. fullmatch requires the entire
    # path to conform; digits were also added to the component charset.
    is_valid_path = re.fullmatch(r"(\w:)?([\\\/][a-zA-Z0-9-_ ]*)*", path)
    return bool(is_valid_path)
import copy
def get_added_dicts(a, b):
    """Add two dictionaries together and return a new dictionary.

    If a key in *b* already exists in *a*, it is NOT overridden.
    Non-destructive: *a* is deep-copied and neither input is modified.

    Args:
        a (dict): dictionary you want to ADD to
        b (dict): dict you want to add from, the new keys
    Returns:
        dict: deep copy of a, with b's missing keys added
    """
    merged = copy.deepcopy(a)
    # Bug fix: dict.iteritems() is Python 2 only — it raises
    # AttributeError on Python 3; items() is the correct call.
    for key, val in b.items():
        if key not in merged:
            merged[key] = val
    return merged
def init_node_table(n_tr, n_oob, indices_oob):
    """Initialise the node table for the first leaf.

    Parameters
    ----------
    n_tr : INT. Number of observations in the training subsample.
    n_oob : INT. Number of observations in the OOB subsample.
    indices_oob : OOB index container stored verbatim in slot 16.

    Returns
    -------
    list of lists: the initial node table (a single node record).
    """
    node_record = [
        0,                    # 0: id_node
        None,                 # 1: id_parent
        None,                 # 2: id_child_left
        None,                 # 3: id_child_right
        2,                    # 4: active
        n_tr,                 # 5: leaf_size_tr
        n_oob,                # 6: leaf_size_oob
        None,                 # 7: objective_fct_value_oob
        None,                 # 8: next_split_i
        None,                 # 9: cut_off_prime_l
        None,                 # 10: x_type
        list(range(n_tr)),    # 11: data_tr_indi
        list(range(n_oob)),   # 12: data_oob_indi
        None,                 # 13: pot_outcomes
        None,                 # 14: pot_variables_used_indi
        None,                 # 15: leaf_size_pot
        indices_oob,          # 16: indices_oob
    ]
    return [node_record]
import re
def justify(text, cols):
    """
    Justify *text* so each non-blank line is stretched toward *cols*
    columns by widening inter-word gaps left-to-right.

    Lines without word characters (and fully blank text) pass through
    unchanged.
    """
    if not re.match(r".*\w.*", text):
        return text
    rows = text.split('\n')
    for idx, row in enumerate(rows):
        if not re.match(r".*\w.*", row):
            continue
        gap_count = sum(1 for ch in row if ch == ' ')
        width = 1
        gap_old = " "
        gap_new = "  "
        while len(row) < cols:
            # Find how many gaps to widen so the line hits cols exactly,
            # or exhaust all gaps at the current gap width.
            while width <= gap_count:
                candidate = row.replace(gap_old, gap_new, width)
                if len(candidate) == cols:
                    break
                width += 1
            row = row.replace(gap_old, gap_new, width)
            # Next pass widens the (already widened) gaps one more space.
            gap_old += " "
            gap_new += " "
            width = 1
        rows[idx] = row
    return '\n'.join(rows)
def _failsafe_values_atom(schema, values, errors, source, kw):
"""Map all erroneous inputs to a single value."""
for key in errors:
kw[key] = values
return kw | 4444bad9737ce4399e8c3cfaa04c3cd745aedde6 | 37,777 |
def driver_info():
    """Connection settings used by test cases that integrate with a
    server."""
    credentials = ("test", "test")
    return {
        "uri_bolt": "bolt://localhost:9001",
        "uri_neo4j": "neo4j://localhost:9001",
        "user": credentials[0],
        "password": credentials[1],
        "auth_token": credentials,
    }
def sent2lm(sent, word_dict):
    """
    Transform a sentence into (input, target) language-model id sequences.

    The sentence is lower-cased, split on whitespace, mapped through
    *word_dict* (unknown words -> '<unk>') and terminated with '<e>';
    the returned pair is the sequence shifted by one position.

    :param sent: raw sentence string
    :param word_dict: word -> id mapping containing '<unk>' and '<e>'
    :return: (ids[:-1], ids[1:])
    """
    unk_id = word_dict['<unk>']
    tokens = sent.strip().lower().split()
    ids = [word_dict.get(tok, unk_id) for tok in tokens]
    ids.append(word_dict['<e>'])
    return ids[:-1], ids[1:]
def format_uuid(uuid: str):
    """
    Return the UUID formatted per RFC 4122 section 3 (8-4-4-4-12),
    https://tools.ietf.org/html/rfc4122#section-3.

    Parameters
    ----------
    uuid : str
        Unformatted 32-character hexadecimal UUID string.
    """
    # Fix: the docstring documented a nonexistent 'module_name' parameter,
    # and the f-string used no-op '{...:s}' format specs; '-'.join of the
    # five slices is the direct equivalent.
    return '-'.join((uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32]))
def linear(x, weight, bias=None):
    """
    Affine transform f(x) = x @ weight (+ bias), using tensor .mm/.add.
    """
    product = x.mm(weight)
    return product if bias is None else product.add(bias)
import os
def valid_obd_file(obd_file):
    """
    Check whether the given OBD file has valid content: a header line
    followed by data lines whose first comma-separated column parses as a
    float (e.g. a timestamp).

    Parameter
    ---------
    obd_file : str
        The path of the OBD file

    Return
    ------
    True if the content of the file is valid; False otherwise.
    """
    if not os.path.isfile(obd_file):
        return False
    with open(obd_file, 'r') as fp:
        _ = fp.readline()  # skip the header
        lines = fp.readlines()
    try:
        line = lines[0]
        line = line.replace('"', '').rstrip().split(',')
        # Assume the first column is time (or similar) castable to float.
        _ = float(line[0])
    # Bug fix: the bare 'except:' swallowed everything, including
    # KeyboardInterrupt; only a missing data line (IndexError) or an
    # unparsable first column (ValueError) mean "invalid file".
    except (IndexError, ValueError):
        return False
    return True
def convertir_negativo(imagen: list) -> list:
    """Transform an image to its negative, in place.

    Each of the 3 channel values v (expected in [0, 1]) of every pixel
    becomes |v - 1|.

    Parameters:
        imagen (list): matrix (list of rows of [r, g, b] pixels)
    Returns:
        list: the same matrix, converted to negative
    """
    filas = len(imagen)
    columnas = len(imagen[0])
    for f in range(filas):
        for c in range(columnas):
            for canal in range(3):
                imagen[f][c][canal] = abs(imagen[f][c][canal] - 1)
    return imagen
def mult2_pas3(nb):
    """Return True iff *nb* is a multiple of 2 but NOT a multiple of 3.

    Input: nb - the number to test for divisibility by 2 and not by 3.
    Result: a boolean, True when nb is a multiple of 2 but not of 3.
    """
    # Bug fix: the original compared the integer remainders to the STRING
    # '0' ("reste2 == '0'"), which is never true in Python 3, so the
    # function always returned False.
    return nb % 2 == 0 and nb % 3 != 0
from typing import List
import re
def extract_cve_references(text: str) -> List[str]:
    """
    Extract all CVE identifiers (CVE-YYYY-NNNN..., 4-8 digit suffix)
    appearing in *text*, in order of occurrence.
    """
    # findall is equivalent to collecting group(0) from finditer when the
    # pattern contains no capture groups.
    return re.findall(r"CVE-\d{4}-\d{4,8}", text)
import math
def ordinal(n):
    """Return *n* with its English ordinal suffix ('1st', '2nd', '3rd',
    '4th', '11th', ...)."""
    # Teens (tens digit == 1) always take 'th'; otherwise 1/2/3 endings
    # take st/nd/rd.
    if math.floor(n / 10) % 10 == 1:
        suffix = "th"
    elif n % 10 in (1, 2, 3):
        suffix = {1: "st", 2: "nd", 3: "rd"}[n % 10]
    else:
        suffix = "th"
    return f'{n}{suffix}'
def parse_string(string, *, remove):
    """
    Return *string* with every substring listed in *remove* deleted.

    Args:
        :string: (str) string to parse
        :remove: (list) substrings/characters to remove
    Returns:
        :parsed_string: (str) parsed string
    """
    result = string
    for token in remove:
        result = result.replace(token, '')
    return result
import sys
import os
def win32_longpath(path):
    """Add the Windows long-path prefix so shutil.copytree won't fail on
    paths with 255+ chars; return *path* unchanged on other platforms.

    Vendored in from pytest-datadir (q.v. https://git.io/fjMWl supra).
    """
    if sys.platform != 'win32':
        return path
    # normpath is required: the long-path prefix "tells the Windows APIs
    # to disable all string parsing and to send the string that follows
    # it straight to the file system", so the remainder must already be
    # normalized.
    # (See https://docs.microsoft.com/pt-br/windows/desktop/FileIO/naming-a-file)
    return '\\\\?\\' + os.path.normpath(path)
import json
def process(json_mos_path, json_dmos_path, json_train_path, json_val_path):
    """
    Convert externally-formatted MOS/DMOS annotations into the JSON input
    format expected by censeo.

    :param json_mos_path: input JSON with raw MOS scores per split
    :param json_dmos_path: input JSON with DMOS scores and reference names
    :param json_train_path: output path for the training-split JSON
    :param json_val_path: output path for the validation-split JSON
    :return: merged dict of train and val records (train updated with val)
    """
    with open(json_mos_path, 'r') as fin:
        data_mos = json.load(fin)
    with open(json_dmos_path, 'r') as fin:
        data_dmos = json.load(fin)

    def build_split(split):
        """Build {dis_video.mp4: record} for one split ('train'/'test')."""
        records = {}
        dis_names = data_dmos[split]["dis"]
        ref_names = data_dmos[split]["ref"]
        for idx, name in enumerate(dis_names):
            dis_mp4 = name.split(".")[0] + ".mp4"
            ref_mp4 = ref_names[idx].split(".")[0] + ".mp4"
            # MOS entries are looked up by name, as the two files may
            # order their videos differently.
            mos_idx = data_mos[split]["dis"].index(name)
            records[dis_mp4] = {
                'ref_video': ref_mp4,
                'ref_dir': '',
                'dis_dir': '',
                'mos': float(data_mos[split]["mos"][mos_idx]),
                'mos_dv': float(data_dmos[split]["mos"][idx]),
                'std': None,
                "mos_list": None,
            }
        return records

    results_train = build_split("train")
    results_val = build_split("test")
    print('total train video:%d' % len(results_train))
    with open(json_train_path, 'w') as fin:
        json.dump(results_train, fin)
    print('total val video:%d' % len(results_val))
    with open(json_val_path, 'w') as fin:
        json.dump(results_val, fin)
    results_train.update(results_val)
    return results_train
def build_message(history_record, game):
    """Assemble the endpoint status message for the current guess.

    Combines the outcome of the latest guess, the revealed word so far,
    the remaining strikes, and a win/lose/continue tail.
    """
    outcome = ("Good guess! | " if history_record[0].result == 'Good guess'
               else "Wrong guess... | ")
    pieces = [
        outcome,
        game.current_game + " | ",
        "Strike(s) left: " + str(game.strikes_left) + " | ",
    ]
    # `is` comparisons kept from the original: only genuine booleans
    # trigger these branches.
    if game.game_over is False:
        pieces.append("Carry on!")
    if game.game_over is True:
        pieces.append("YOU WON!" if game.game_won is True else "YOU LOST!")
    return "".join(pieces)
def decompose_two(p):
    """Decompose p - 1 as q * 2^k with q odd.

    This is the standard preprocessing step of Miller-Rabin style
    primality tests.

    Args:
        p: an integer representing a prime number
    Results:
        q: an integer such that p - 1 == q * 2**k and q is odd
        k: an integer representing the exponent of 2
    """
    # Bug fix: the original factored twos out of p itself, so for any odd
    # prime it returned (p, 0) instead of decomposing p - 1 as documented.
    q = p - 1
    k = 0
    while q % 2 == 0:
        k += 1
        q //= 2
    return q, k
def successor_postorder(bst, node):
"""
Cases:
1. If node is the right child of its parent, then successor is the parent
2. If node is the left child of its parent, successor would be in right subtree of the parent. It is reahced
by traversing left childs successively and if there is no left taking right and repeating.
If there is no right child, parent is the successor.
"""
if node.parent is None:
return None
y = node.parent
if y.right == None or y.right == node:
return y
y = y.right
while y is not None:
node = y
y = y.left if y.left is not None else y.right
return node | 92f4101eb971f177796df0816ff729b2bfda4700 | 37,797 |
import pickle
def adapter_func(obj):
    """Convert from in-memory to storage representation.

    The incoming object is logged, then serialized with pickle so it can
    be stored as a binary blob.
    """
    print('adapter_func({})\n'.format(obj))
    serialized = pickle.dumps(obj)
    return serialized
def bijection(x, ensemble_entree, ensemble_sortie):
    """Map ``x`` from the input interval onto the output interval.

    The value keeps the same relative position between the bounds.
    Example: with input segment [0, 10] and output segment [0, 100],
    x = 3 maps to f(x) = 30.
    """
    lo_in, hi_in = ensemble_entree
    lo_out, hi_out = ensemble_sortie
    ratio = (x - lo_in) / (hi_in - lo_in)
    return ratio * (hi_out - lo_out) + lo_out
def newTitle(tf):
    """asks the user about the title and returns the string

    Prompt rules:
      * empty input (just Enter) keeps the existing title;
      * a single space clears the title (returns None);
      * any other text becomes the new title.

    :param tf: audio-file object exposing ``tf.tag.title``; if the tag or
        title attribute is missing, a generic "(None)" prompt is shown.
    :return: the title string to use, or None to clear it.
    """
    try:
        t = input("(" + str(tf.tag.title) + ") New Title?> ")
    except AttributeError:
        # tf.tag (or .title) is missing; fall back to a generic prompt.
        t = input("(None) New Title?> ")
    if t == None or t == "":
        # NOTE(review): if the AttributeError branch above was taken,
        # accessing tf.tag.title here raises again, uncaught — confirm
        # callers always have a tag before relying on this keep-existing path.
        if tf.tag.title != None:
            return tf.tag.title
        else:
            return None
    elif t == " ":
        # A single space is the sentinel meaning "clear the title".
        return None
    else:
        return t
def create_node(id, shape, size, label):
    """Auxiliary function that creates a Node in Vis.js format.

    :return: Dict with Node attributes (fixed yellow background color)
    """
    return {
        "id": id,
        "shape": shape,
        "size": size,
        "label": label,
        "color": {"background": "#FBD20B"},
    }
from typing import Union
import torch
from typing import Tuple
from typing import Any
from typing import Optional
from typing import Dict
from typing import List
def split_non_tensors(
    mixed: Union[torch.Tensor, Tuple[Any, ...]]
) -> Tuple[Tuple[torch.Tensor, ...], Optional[Dict[str, List[Any]]]]:
    """
    Split a tuple into a list of tensors and the rest with information
    for later reconstruction.
    Usage::
        x = torch.Tensor([1])
        y = torch.Tensor([2])
        tensors, packed_non_tensors = split_non_tensors((x, y, None, 3))
        assert tensors == (x, y)
        assert packed_non_tensors == {
            "is_tensor": [True, True, False, False],
            "objects": [None, 3],
        }
        recon = unpack_non_tensors(tensors, packed_non_tensors)
        assert recon == (x, y, None, 3)
    """
    if isinstance(mixed, torch.Tensor):
        # A lone tensor needs no bookkeeping for reconstruction.
        return (mixed,), None
    tensors: List[torch.Tensor] = []
    tensor_flags: List[bool] = []
    non_tensor_objects: List[Any] = []
    for item in mixed:
        is_tensor = isinstance(item, torch.Tensor)
        tensor_flags.append(is_tensor)
        if is_tensor:
            tensors.append(item)
        else:
            non_tensor_objects.append(item)
    return tuple(tensors), {"is_tensor": tensor_flags, "objects": non_tensor_objects}
def _is_bright(rgb):
"""Return whether a RGB color is bright or not.
see https://stackoverflow.com/a/3943023/1595060
"""
L = 0
for c, coeff in zip(rgb, (0.2126, 0.7152, 0.0722)):
if c <= 0.03928:
c = c / 12.92
else:
c = ((c + 0.055) / 1.055) ** 2.4
L += c * coeff
if (L + 0.05) / (0.0 + 0.05) > (1.0 + 0.05) / (L + 0.05):
return True | 1e88f719c4fc28dc07de5f380a62f568bb4fec01 | 37,805 |
import inspect
def _update_configurable_argspec(
argspec: inspect.FullArgSpec, cfg_param: str) -> inspect.FullArgSpec:
"""Return an updated :class:`FullArgSpec` for a configurable function."""
return argspec._replace(
kwonlyargs=argspec.kwonlyargs and [arg for arg in argspec.kwonlyargs if arg != cfg_param],
kwonlydefaults=argspec.kwonlydefaults and {k: v for k, v in argspec.kwonlydefaults.items()
if k != cfg_param},
annotations=argspec.annotations and {k: v for k, v in argspec.annotations.items()
if k != cfg_param}) | e1bff1eae9f23b88621086f9a82aaf9a4e17c020 | 37,807 |
def split_url(url):
    """Splits the given URL into a tuple of (protocol, host, uri)"""
    proto, remainder = url.split(':', 1)
    remainder = remainder[2:]  # drop the leading "//"
    host, _, uri = remainder.partition('/')
    return (proto, host, uri)
def underscorify(name):
    """Replace ``-`` and ``/`` with ``_``."""
    return name.translate(str.maketrans("-/", "__"))
import re
def is_date_index(index):
    """
    Checks whether the index is of the agreed upon date format.
    In this case YYYY.MM.DD. This is a very 'EU' centric date.
    Would have preferred YYYY-MM-DD which is more ISO, however
    there are dates which exist in the 'EU' format already (topbeat).
    Note that match is very fast. It is faster than doing search and
    pulling groups.
    :param index: index name to test.
    :return: True if the name contains a -YYYY.MM.DD style date suffix.
    """
    # Raw string: '\d' inside a plain literal is an invalid escape
    # sequence (DeprecationWarning since 3.6, an error in the future).
    date_pattern = r'.*?-\d{4}[-/:.]\d{2}[-/:.]\d{2}'
    return re.match(date_pattern, index) is not None
import os
def fix_join(path, *paths):
    """Fix joined path.

    Thin wrapper around :func:`os.path.join`. This workaround function is
    used in pipelines like DWIPreprocessing* or PETVolume: in the
    workflow.connect part, functions are passed as strings, which can
    cause an import error, so the join is wrapped here.
    """
    joined = os.path.join(path, *paths)
    return joined
from typing import TextIO
from typing import Dict
def parse_percepta_txt_output(
    result_file: TextIO, offset: int = 0
) -> Dict[str, Dict[str, str]]:
    """
    Parses text output file from perceptabat_cv.
    Returns a nested dictionary {compound ID: {property name: value}}.

    :param result_file: open text handle over the perceptabat_cv output.
    :param offset: integer added to every compound ID found in the file.
    """
    parsed_output: Dict[str, Dict[str, str]] = {}
    for line in result_file:
        # Split once instead of re-splitting the line for every field.
        tokens = line.split()
        # Skip blank lines (the original raised IndexError on them) and
        # any line that is not "<number> <name>: <value>".
        if len(tokens) < 2 or not tokens[0].isdigit() or tokens[1] == "ID:":
            continue
        cp_id = str(int(tokens[0]) + offset)
        col_name = tokens[1].rstrip(":").lower()
        value = line.split(": ")[1].rstrip("\n")
        parsed_output.setdefault(cp_id, {})[col_name] = value
    return parsed_output
def mapValue(value, minValue, maxValue, minResultValue, maxResultValue):
    """
    Maps value from a given source range, i.e., (minValue, maxValue),
    to a new destination range, i.e., (minResultValue, maxResultValue).
    The result will be converted to the result data type (int, or float).
    """
    # Reject values outside the source range.
    if not (minValue <= value <= maxValue):
        raise ValueError("value, " + str(value) + ", is outside the specified range, " \
                         + str(minValue) + " to " + str(maxValue) + ".")
    # Normalize into [0, 1] using float arithmetic for accuracy.
    normal = (float(value) - minValue) / (maxValue - minValue)
    mapped = normal * (maxResultValue - minResultValue) + minResultValue
    # Cast to the same type as the destination bounds (int or float).
    return type(minResultValue)(mapped)
def opt_bool(opt):
    """Convert bool ini strings to actual boolean values."""
    truthy = {'yes', 'y', 'true', '1'}
    return opt.lower() in truthy
import sys
import platform
def suffix():
    """
    add a suffix to a shortcut name = python version + architecture
    @return string, e.g. "64bit.3.11"
    """
    major_minor = "{}.{}".format(*sys.version_info[:2])
    bits = platform.architecture()[0]
    return "{0}.{1}".format(bits, major_minor)
def star(cplx, face):
    """Compute the star over a given face. Works with toplexes (it will return toplexes only, then) or full complexes."""
    # Build the face set once instead of per-simplex.
    face_set = set(face)
    return [simplex for simplex in cplx if face_set.issubset(simplex)]
import torch
def compose_transformations(trans_01, trans_12):
    """
    Functions that composes two homogeneous transformations.

    Both inputs must be 4x4 homogeneous matrices, optionally batched as
    Nx4x4; the result has the same shape and satisfies T_02 = T_01 @ T_12.

    :raises ValueError: if either input is not (N)x4x4, or their number
        of dimensions differs.
    """
    # Bug fix: the original guards read `not a.dim() in (2, 3) and
    # a.shape[-2:] == (4, 4)`, which parses as `(not dim-check) and
    # shape-check` and almost never raised; validate rank AND shape.
    if not (trans_01.dim() in (2, 3) and trans_01.shape[-2:] == (4, 4)):
        raise ValueError("Input trans_01 must be a of the shape Nx4x4 or 4x4."
                         " Got {}".format(trans_01.shape))
    if not (trans_12.dim() in (2, 3) and trans_12.shape[-2:] == (4, 4)):
        raise ValueError("Input trans_12 must be a of the shape Nx4x4 or 4x4."
                         " Got {}".format(trans_12.shape))
    if not trans_01.dim() == trans_12.dim():
        raise ValueError("Input number of dims must match. Got {} and {}"
                         .format(trans_01.dim(), trans_12.dim()))
    # unpack input data
    rmat_01 = trans_01[..., :3, :3]  # Nx3x3
    rmat_12 = trans_12[..., :3, :3]  # Nx3x3
    tvec_01 = trans_01[..., :3, -1:]  # Nx3x1
    tvec_12 = trans_12[..., :3, -1:]  # Nx3x1
    # compose: R_02 = R_01 @ R_12, t_02 = R_01 @ t_12 + t_01
    rmat_02 = torch.matmul(rmat_01, rmat_12)
    tvec_02 = torch.matmul(rmat_01, tvec_12) + tvec_01
    # pack output tensor
    trans_02 = torch.zeros_like(trans_01)
    trans_02[..., :3, 0:3] += rmat_02
    trans_02[..., :3, -1:] += tvec_02
    trans_02[..., -1, -1:] += 1.0
    return trans_02
def FormatEssentialDeviceInfoInExcelFormat(device, includeBatteryInfo):
    """
    Formats given device information into one string for copying.\n
    It contains device info as follows:\n
    Device manufacturer + market name\tOS version\tCPU SoC\tGPU renderer\tGLES version\tdevice id\n
    'Samsung Galaxy S5'\t '6.0.1'\t 'Snapdragon 801 MSM8974AC'\t 'Adreno 420'\t 'GLES 3.0'\t '12345deviceId'
    """
    deviceInfo = device.GetFullDeviceData()
    fields = [
        deviceInfo["manufa"] + " " + deviceInfo["market_name"],
        deviceInfo["os"],
        deviceInfo["cpu_soc"],
        deviceInfo["gpu_renderer"],
        deviceInfo["gpu_gles"],
        deviceInfo["serial"],
    ]
    if includeBatteryInfo:
        fields.append(deviceInfo["battery_level"])
        fields.append(deviceInfo["battery_temp"])
    # Tab-separated so it pastes directly into spreadsheet columns.
    return "\t".join(fields) + "\n"
import sys
import os
def get_soname_path(libpath, lib_dir):
    """
    Shorten a shared-library file name to its soname and join it with
    ``lib_dir``.

    If the name contains more than one number after .so (.so.25.0.0)
    it is shortened to the major version (.so.25).  On Windows the
    path is returned unchanged (no soname concept).
    """
    if sys.platform in ("win32", "cygwin"):
        return libpath
    so_index = libpath.find(".so")
    # Bug fix: when there is no dot after the major version (e.g.
    # "libfoo.so.25" or a bare "libfoo.so"), find() returned -1 and
    # libpath[: -1] silently chopped off the last character.
    cut = libpath.find(".", so_index + 4)
    if cut != -1:
        libpath = libpath[:cut]
    return os.path.join(lib_dir, libpath)
def read_length(data):
    """Read length from a list of bytes, starting at the first byte.

    Returns the length, plus the number of bytes read from the list.
    EMV 4.3 Book 3 Annex B2
    """
    first = data[0]
    consumed = 1
    if not (first & 0x80):
        # Short form: the byte itself is the length.
        return first, consumed
    # Long form: the low 7 bits give how many subsequent bytes encode the
    # length, big-endian.
    num_bytes = first & 0x7F
    length = 0
    for b in data[consumed:consumed + num_bytes]:
        length = (length << 8) + b
    consumed += num_bytes
    return length, consumed
def negative_exist(arr: list) -> int:
    """Return the largest element if every element is negative, else 0.

    An empty (or falsy) input is treated as ``[0]`` and returns 0.

    >>> negative_exist([-2,-8,-9])
    -2
    >>> negative_exist([3, -1])
    0
    """
    values = arr or [0]
    best = values[0]
    for value in values:
        if value >= 0:
            # Any non-negative element means "no all-negative maximum".
            return 0
        if best <= value:
            best = value
    return best
def getcenter(f, a):
    """ Get the center (in relative coords) of the axes).

    :param f: parent figure object; ``f.position[2]``/``[3]`` are read as
        the figure's width/height when normalizing absolute entries.
    :param a: axes object; ``a.position`` supports ``Copy()``,
        ``_GetFractionals()`` and index access to (x, y, w, h).
    :return: tuple (cx, cy), the axes center in relative coordinates.
    """
    pos = a.position.Copy()
    # make relative
    # _GetFractionals flags which of the four entries are already fractional.
    relative = pos._GetFractionals()
    for i in range(4):
        if not relative[i]:
            # Absolute entry: normalize by figure width (i even) or height (i odd).
            pos[i] = pos[i] / f.position[i%2+2]
        if pos[i]<0:
            # NOTE(review): a negative entry becomes 1 - pos[i], which is > 1;
            # presumably "measured from the far edge" semantics — confirm
            # against the Position class before relying on this.
            pos[i] = 1.0 - pos[i]
    return pos.x + pos.w/2.0, pos.y + pos.h/2.0
def parse_range(s):
    """Parse a string "a-b" describing a range of integers a <= x <= b, returning the bounds a, b."""
    # One int per dash-separated part; normally exactly two.
    return tuple(int(part) for part in s.split("-"))
def get_page_id(title, query_results):
    """
    Extracts the title's pageid from the query results.
    Assumes queries of the form query:pages:id,
    and properly handle the normalized method.
    Returns -1 (as a string) if it cannot find the page id.
    """
    query = query_results['query']
    # Resolve title normalization first (e.g. capitalization fixes).
    for entry in query.get('normalized', []):
        if entry['from'] == title:
            title = entry['to']
    pages = query['pages']
    for page_key in pages:
        if pages[page_key]['title'] == title:
            return str(pages[page_key]['pageid'])
    return str(-1)
def get_gt(variant_call):
    """Returns the genotypes of the VariantCall.

    Args:
      variant_call: VariantCall proto. The VariantCall for which to return GTs.

    Returns:
      A list of ints representing the genotype indices of this call.
    """
    genotypes = variant_call.genotype
    return genotypes
def as_single(apply_fun):
    """
    Turn (N,DX) -> (N,1) fun into a (DX,) -> () so that we can take grads and
    vmap later.
    """
    def _single(params, rng, x):
        # Add a leading batch axis, apply, then strip batch and output dims.
        return apply_fun(params, rng, x[None])[0, 0]
    return _single
def read_date(head_data):
    """Return the timestamp fields of the current data block.

    :param head_data: indexable header whose first six entries are the
        date/time components.
    :return: list with the six time fields, or None when the header is
        missing (the TypeError is swallowed and a message is printed).
    """
    try:
        return [head_data[i] for i in range(6)]
    except TypeError:
        print("没有找到数据头")
def price_to_earnings(price, eps):
    """Computes price to earnings ratio.

    Parameters
    ----------
    price : int or float
        Price per share
    eps : int or float
        Earnings per share, calculated by "net income / number of shares"

    Returns
    -------
    out : int or float
        Returns price to earnings ratio
    """
    ratio = price / eps
    return ratio
def stone_to_tex(stone, point):
    """Convert stone at the given point to psgo TeX code."""
    col, row = point
    # Go board coordinates skip the letter 'i': jump straight to 'j'.
    if col >= ord('i') - ord('a'):
        col += 1
    pieces = [' ' * 8, r"\stone"]
    if stone.label:
        pieces.append(r"[\marklb{" + stone.label + r"}]")
    pieces.append(r"{" + stone.colour + r"}{")
    pieces.append(chr(ord('a') + col))
    pieces.append(r"}{")
    pieces.append(str(row + 1))
    pieces.append(r"}")
    return "".join(pieces)
def celsius_to_fahrenheit(temp):
    """Convert a temperature from Celsius (ºC) to Fahrenheit (ºF)."""
    fahrenheit = temp * 9 / 5 + 32
    return fahrenheit
def find_video_attachments(document_attachments):
    """This function identifies any attached videos in a collection of document attachments.

    :param document_attachments: Attachments associated with a document
    :type document_attachments: list, dict
    :returns: A list of dictionaries containing info on any video attachments
    """
    if isinstance(document_attachments, dict):
        # Normalize a single attachment into a one-element list.
        document_attachments = [document_attachments]
    videos = []
    for attachment in document_attachments:
        if "video" not in attachment['contentType']:
            continue
        # Convert bytes to megabytes, rounded to two decimals.
        size_mb = round(attachment['size'] / 1048576, 2)
        videos.append({"download_url": attachment['url'], "size": size_mb})
    return videos
def getBytesSize(bytesIn=0, suffix="B"):
    """
    Scale bytes to its proper format
    e.g:
        1253656 => '1.20MB'
        1253656678 => '1.17GB'

    :param bytesIn: byte count; None, non-numeric or non-positive input
        yields the int 0.
    :param suffix: unit suffix appended after the scale prefix.
    :return: formatted string for positive numbers, otherwise int 0.
    """
    # Bug fix: the original guard read `bytesValue is None or 0`, which
    # parses as `(bytesValue is None) or 0` and only ever caught None;
    # zero fell through to the trailing `return int(0)` by accident.
    bytesValue = bytesIn
    if bytesValue is None:
        return int(0)
    if isinstance(bytesValue, (int, float)) and int(bytesValue) > 0:
        factor = 1024
        for unit in ["", "K", "M", "G", "T", "P"]:
            if bytesValue < factor:
                return f"{bytesValue:.2f}{unit}{suffix}"
            bytesValue /= factor
        # Beyond petabytes: return the fully scaled value, matching the
        # original fall-through behavior.
        return bytesValue
    return int(0)
def _CreateChartStats(loss_stats):
"""Creates the Chart object to interface with Google Charts drawChart method.
https://developers.google.com/chart/interactive/docs/reference#google.visualization.drawchart
Args:
loss_stats: A dictionary of years paired to square meters.
Returns:
A Python dictionary with all of the parameters required to draw a Chart.
"""
chart_data = {
'type': 'area',
'options': {},
}
columns = [
{'id': 'name', 'label': 'Year', 'type': 'string'},
{'id': 'year', 'label': 'Loss (sq. km)', 'type': 'number'}]
rows = []
for loss_year in sorted(loss_stats.keys()):
entry = {
# The loss stats are in m2; convert them to km2.
'c': [{'v': loss_year}, {'v': loss_stats[loss_year]/1000/1000}]
}
rows.append(entry)
chart_data['data'] = {'cols': columns, 'rows': rows}
return chart_data | 384623ad5cd3c3ee81cf25c00402bd8d65396f59 | 37,842 |
def stat_parente(v_alea, d_obs):
    """Probability of observing a mean distance like ``d_obs`` under chance.

    Given ``v_alea``, the minimal inversion counts obtained for random
    sequences of the same size as the gene sequence of interest, return
    the fraction of those random counts that fall strictly below
    ``d_obs``, the minimal number of inversions needed to order the
    observed sequence.

    Parameters
    ----------
    v_alea : list
        `v_alea` is a list of integers (random inversion counts).
    d_obs : int
        `d_obs` is the minimal number of inversions needed to order the
        gene sequence.

    Returns
    -------
    prob : float
        Proportion of random counts below the observation.

    Notes
    -----
    The returned statistic is the proportion

    .. math::
        P = \\frac{X}{N}

    where X is the number of random inversion counts below the
    observation and N is ``len(v_alea)``, the total number of
    simulations performed.

    Examples
    --------
    >>> v = [1,2,3,4,5]
    >>> d = 4
    >>> stat_parente(v,d)
    0.6
    """
    below = sum(1 for res_alea in v_alea if res_alea < d_obs)
    return below / len(v_alea)
def extract_channel_platform(url):
    """Returns last two elements in URL: (channel/platform-arch)

    :param url: URL whose path ends with .../channel/platform-arch
    :return: "channel/platform-arch" string
    """
    # The original copied the split result through an identity list
    # comprehension; slice and join directly instead.
    return '/'.join(url.split('/')[-2:])
def is_same_class(obj, a_class):
    """return true if obj is the exact class a_class, otherwise false"""
    # `is` is the idiomatic exact-type comparison (flake8 E721); for
    # ordinary classes it behaves identically to `==`.
    return type(obj) is a_class
def first_part(txt):
    """First logical part for password."""
    initial = txt[0]
    return initial.upper()
def simplify(s):
    """Simplify a split square:"""
    # Base case: a filled square (plain int) cannot be simplified.
    if type(s) is int:
        return s
    # It's a split square: recursively simplify each quadrant first, so
    # simplifications can bubble up (this catches cases such as
    # ``[1, 1, 1, [1, 1, 1, 1]]`` => ``1`` that need "simplifying twice").
    quadrants = [simplify(quadrant) for quadrant in s]
    first = quadrants[0]
    # Collapse the split square when all four quadrants hold the same fill.
    if type(first) is int and quadrants[0] == quadrants[1] == quadrants[2] == quadrants[3]:
        return first
    return quadrants
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.