content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def SumCoeffsOverSet(summing_set, A):
    """Sum the coefficients of A selected by the 1-based indices in summing_set."""
    total = 0
    for index in summing_set:
        total += A[index - 1]
    return total
|
2fa61493d4abd07cf24670a56d4a56d08b5a602b
| 14,479
|
def camel_split(string):
    # (str) -> tuple of str
    """
    Split a camel-case string into a tuple of its component words.

    >>> print('(%s)' % ', '.join("'%s'" % s for s in camel_split('theBirdsAndTheBees')))
    ('the', 'Birds', 'And', 'The', 'Bees')
    >>> print('(%s)' % ', '.join("'%s'" % s for s in camel_split('theBirdsAndTheBees123')))
    ('the', 'Birds', 'And', 'The', 'Bees', '123')
    >>> print('(%s)' % ', '.join("'%s'" % s for s in camel_split('theBirdsAndTheBeesABC123')))
    ('the', 'Birds', 'And', 'The', 'Bees', 'ABC', '123')
    >>> print('(%s)' % ', '.join("'%s'" % s for s in camel_split('the-Birds-And-The-Bees-ABC--123')))
    ('the', '-', 'Birds', '-', 'And', '-', 'The', '-', 'Bees', '-', 'ABC', '--', '123')
    >>> print('(%s)' % ', '.join("'%s'" % s for s in camel_split('THEBirdsAndTheBees')))
    ('THE', 'Birds', 'And', 'The', 'Bees')
    """
    # Each word is accumulated as a list of characters.  character_type is the
    # class of the previous character: 0 = digit, 1 = lowercase letter,
    # 2 = uppercase letter, 3 = anything else; None before the first character.
    words = []
    character_type = None
    # acronym is True while inside a run of two or more consecutive capitals.
    acronym = False
    for s in string:
        if s in '0123456789':
            # Digits extend a digit run; otherwise they start a new word.
            if character_type == 0:
                words[-1].append(s)
            else:
                words.append([s])
            character_type = 0
            acronym = False
        elif s in 'abcdefghijklmnopqrstuvwxyz':
            if character_type == 1:
                words[-1].append(s)
            elif character_type == 2:
                if acronym:
                    # 'ABCdef' splits as 'AB' + 'Cdef': the acronym's last
                    # capital becomes the first letter of this new word.
                    words.append([words[-1].pop()] + [s])
                else:
                    # A lone capital followed by lowercase stays one word.
                    words[-1].append(s)
            else:
                words.append([s])
            character_type = 1
            acronym = False
        elif s in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
            if character_type == 2:
                # Second consecutive capital: we are inside an acronym.
                words[-1].append(s)
                acronym = True
            else:
                words.append([s])
                acronym = False
            character_type = 2
        else:
            # Any other character (punctuation etc.) groups into runs of
            # its own, e.g. '--' stays a single word.
            if character_type == 3:
                words[-1].append(s)
            else:
                words.append([s])
            character_type = 3
    return tuple(
        ''.join(w) for w in words
    )
|
f7d0d27e25a26b6e4135bb6d83dc35f5d1cd2374
| 14,480
|
def snake_to_camel(snake_case_string: str) -> str:
    """Convert a ``snake_case`` string to ``camelCase``.

    The first underscore-separated segment is lowercased and every
    following segment is title-cased before concatenation.

    :param str snake_case_string: string to convert
    :returns: the camelCase equivalent
    :rtype: str
    """
    head, *rest = snake_case_string.split("_")
    parts = [head.lower()]
    for segment in rest:
        parts.append(segment.title())
    return "".join(parts)
|
4ef8fa72580739dbedfbac5bf9f95247f5ea69c3
| 14,481
|
def formatsGenres(genres_list):
    """Format a list of genre objects for the Web app template.

    Joins each item's ``genre_type`` as ``"A, B & C"`` (a single genre
    yields just ``"A"``, two yield ``"A & B"``).

    :param genres_list: iterable of objects exposing ``genre_type``
    :return: formatted string; "Unknown" when the input is not iterable
        or its items lack ``genre_type``; "" for an empty list
    """
    # Narrow exception handling replaces the old bare except; this also
    # removes the debug prints and fixes a bug where genres before the
    # last two were dropped for lists of three or more.
    try:
        names = [item.genre_type for item in genres_list]
    except (AttributeError, TypeError):
        return "Unknown"
    if not names:
        return ""
    if len(names) == 1:
        return names[0]
    return ", ".join(names[:-1]) + " & " + names[-1]
|
e9ba9a9388ba7b19c97f1f9959c3c828d1a15219
| 14,484
|
def parse_int_list(text):
    """Parse a comma-separated string into a list of integers.

    For example, ``"1,2,3,4"`` parses to ``[1, 2, 3, 4]``.

    Parameters
    ----------
    text : str
        String to parse.

    Returns
    -------
    List[int]
        The parsed integers, in order.
    """
    return list(map(int, text.split(',')))
|
f83b21c9038e5ea8eb2c0d53ad94479cab9258f3
| 14,485
|
def to_python(value):
    """Convert a Whispy Lispy value to the corresponding Python object.

    The Python object is the first element of the value's ``values``.
    """
    wrapped = value.values
    return wrapped[0]
|
1ffd36443e04b8aeb823e8eb932e4abf888975a2
| 14,487
|
def call_api(query, service):
    """Execute a Google Analytics API query and return the response.

    *query* is expanded as keyword arguments to ``service.data().ga().get()``.
    """
    request = service.data().ga().get(**query)
    return request.execute()
|
9816a2e4a0e38d25a376eb43325bf51982a921ce
| 14,488
|
def build_popularity_dict(game):
    """Map each supported player count to its proportional popularity.

    For every suggestion whose count lies within
    ``[game.min_players, game.max_players]``, popularity is
    ``(best + recommended) / (best + recommended + not_recommended)``,
    or ``1.0`` when there are no votes at all.
    """
    popularity = {}
    for suggestion in game.player_suggestions:
        count = suggestion.numeric_player_count
        if not (game.min_players <= count <= game.max_players):
            continue
        votes = suggestion.data()
        favourable = votes['best'] + votes['recommended']
        overall = favourable + votes['not_recommended']
        popularity[count] = 1.0 if overall == 0 else favourable / overall
    return popularity
|
9718fca6f6ccb76f378e088c2aa2e16387f2383a
| 14,489
|
def to_python(path):
    """Translates a `Path` object to a string of the form `MontyCarlo.module1`.
    """
    # Build the dotted path by prepending each ancestor's name, walking up
    # until the filesystem root (whose parent is itself).
    res = path.name
    while path != path.parent:
        path = path.parent
        res = path.name + "." + res
    # Drop the first 12 characters — presumably a leading separator plus the
    # 11-character "MontyCarlo." package prefix.  NOTE(review): this offset
    # is hard-coded; confirm it matches the actual project layout.
    res = res[1:][11:]
    # Wildcard import pattern; a bare "*" when nothing remains after stripping.
    return res + ".*" if res != "" else "*"
|
24b7a669e0fc1e40113044f2da79e29d42f04c81
| 14,491
|
def cal_error(array, typeb = 0):
    """Calculate all errors of the input array.

    Args:
        array (numpy.array): The data measured directly for a single
            physical datum. The mean value is used as the reliable
            measurement, and the type-A error is derived from the spread.
        typeb (float): The type-B error collected directly from the
            instruments. When 0 (the default), no merging is done.

    Returns:
        mean (float): The mean value of the array.
        delta_a (float): The type-A error of the array.
        error (float): The merged (quadrature) error of type-A and
            type-B. Only returned when ``typeb`` is non-zero.

    Raises:
        KeyError: If ``array.size`` is outside 3..10 (no t-factor known).
    """
    size = array.size
    mean = array.mean()
    std = array.std(ddof = 1)
    # t-distribution coverage factors for small sample sizes (3..10).
    params = {
        3: 2.48,
        4: 1.59,
        5: 1.204,
        6: 1.05,
        7: 0.926,
        8: 0.834,
        9: 0.770,
        10: 0.715
    }
    delta_a = std * params[size]
    if typeb == 0:
        return mean, delta_a
    # Merge type-A and type-B contributions in quadrature.  The original
    # code computed this but mistakenly returned the raw type-B value.
    error = (delta_a**2 + typeb**2) ** 0.5
    return mean, delta_a, error
|
f0fc8832f730189e4f2a0597e1342f94ed2d8717
| 14,492
|
def navigation(obj):
    """
    Goes through json file and shows its structure.

    Interactively walks a parsed-JSON object: for dicts and lists the user
    picks the next element to descend into (recursive call); for scalar
    leaves the value is optionally printed and the walk ends, returning 0.
    """
    if isinstance(obj,dict):
        # Dict: show the keys and recurse into the chosen value.
        print()
        print("This object is dictionary")
        keys = list(obj.keys())
        print()
        print(keys)
        print()
        user_choice = input("Here are keys of this dictionary. \
Please enter name of key you want to see: ")
        next_element = obj[user_choice]
    elif isinstance(obj,list):
        # List: recurse into the element at the chosen index.
        print()
        print("This object is list.")
        print()
        user_choice = input('This list consists of '+str(len(obj))+' elements. \
Please enter number from 0 to '+str(len(obj)-1)+' \
to choose number of element you want to display: ')
        next_element = obj[int(user_choice)]
    else:
        # Scalar leaf: ask whether to display it, then stop the walk.
        # NOTE: bool is checked before int because bool is a subclass of int.
        print()
        user_choice = ''
        if isinstance(obj,str):
            user_choice = input('This object is a string. Do you want to display it?\
(Enter yes or no): ')
        elif isinstance(obj,bool):
            user_choice = input('This object is a boolean. Do you want to display it?\
(Enter yes or no): ')
        elif isinstance(obj,int):
            user_choice = input('This object is a integer. Do you want to display it?\
(Enter yes or no): ')
        elif isinstance(obj,float):
            user_choice = input('This object is a float number. Do you want to display it?\
(Enter yes or no): ')
        else:
            # Unrecognized leaf type (e.g. None): print it unconditionally.
            print(obj)
        if user_choice == 'yes':
            print(obj)
        print()
        print('This is the end of the file.')
        return 0
    return navigation(next_element)
|
a2b3b556b38082ba06e6f3d1b3ce79d9f0c4de3e
| 14,493
|
def device_mapper(os_type: str, proto: str = "netmiko"):
    """Map an OS type to a device_type for the given protocol library.

    :params os_type: type str
    :params proto: type str, default "netmiko"
    :returns: the mapped device_type string; for "netmiko" an unknown
        os_type is returned unchanged, for "netconf" it maps to
        "default", and any other proto echoes os_type back.
    """
    if proto == "netmiko":
        mapping = {
            "ios": "cisco_ios",
            "iosxr": "cisco_xr",
            "iosxe": "cisco_xe",
            "nxos": "cisco_nxos",
            "eos": "arista_eos",
        }
        return mapping.get(os_type, os_type)
    if proto == "netconf":
        mapping = {
            "csr": "csr",
            "iosxr": "iosxr",
            "iosxe": "iosxe",
            "nxos": "nexus",
            "junos": "junos",
        }
        return mapping.get(os_type, "default")
    return os_type
|
81ad4c4dd86c7e6930cf0fb070681872783a5fb8
| 14,495
|
def strip_to_category(category):
    """Strip wiki prefix/postfix from a category link.

    Parameters
    ----------
    category : str
        Raw category link, e.g. ``'[[Kategori:Foo|bar]]'`` or ``'Kategori:Foo'``.

    Returns
    -------
    stripped_category : str
        The bare category name (any ``|``-suffix removed).
    """
    if category.startswith('[[Kategori:'):
        # Drop the leading '[[Kategori:' (11 chars) and trailing ']]'.
        category = category[11:-2]
    elif category.startswith('Kategori:'):
        category = category[9:]
    stripped_category, _, _ = category.partition('|')
    return stripped_category
|
d4a274757aed9b3fbe2e9c5c187949a19b53e3ad
| 14,496
|
import struct
def read_rom_address_list(rom, offset, count):
    """Read a list of addresses stored in Wolf3D's variable-width format.

    Each entry is a size byte (1..3) followed by the significant bytes of a
    little-endian 32-bit value; e.g. ``01 35 03 CC 00`` -> ``0xc0335``.
    The fixed ``0xc00000`` ROM base is subtracted from every address.
    """
    rom.seek(offset)
    addresses = []
    for _ in range(count):
        zb = rom.read_ubyte()
        assert 1 <= zb <= 3, 'zb is {}'.format(zb)
        pad = zb - 1
        # Left-pad with zero bytes so the value is always 4 bytes wide.
        raw = b'\00' * pad + rom.read(4 - pad)
        addresses.append(struct.unpack('<I', raw)[0] - 0xc00000)
    return addresses
|
4e9b0fff0d64a03901d4731c8165bb34fd98e8b5
| 14,497
|
import contextlib
import os
from unittest.mock import patch
@contextlib.contextmanager
def no_file(filename):
    """Context manager that makes ``os.path.isfile`` report *filename* missing.

    While active, ``os.path.isfile`` is patched to return False for the
    given path and defer to the real implementation for everything else.
    The decorator was missing before, so ``with no_file(...)`` failed;
    the patch is also now undone even if the body raises.
    """
    # Capture the real function before patching so the side effect can
    # delegate to it.
    isfile_original = os.path.isfile

    def side_effect(filename_):
        """Return False for the blocked file; real check otherwise."""
        if filename_ == filename:
            return False
        return isfile_original(filename_)

    patcher = patch('os.path.isfile', side_effect=side_effect)
    patcher.start()
    try:
        yield
    finally:
        patcher.stop()
|
029d087266d08a5aaa72006a80b841339168596c
| 14,499
|
def clean_for_doc(nb):
    """
    Normalize a notebook's cells for inclusion in the docs.

    Drops '%pylab inline' input cells, pads every cell source with a
    leading and trailing blank line, and removes outputs produced by
    obspy stream/trace method chaining.
    """
    kept = []
    for cell in nb.worksheets[0].cells:
        # Skip the pylab setup cells entirely.
        if "input" in cell and cell["input"].strip().startswith("%pylab inline"):
            continue
        if "source" in cell:
            cell["source"] = "\n" + cell["source"].strip() + "\n\n"
        if "outputs" in cell:
            cell["outputs"] = [
                out for out in cell["outputs"]
                if "text" not in out or not out["text"].startswith("<obspy.core")
            ]
        kept.append(cell)
    nb.worksheets[0].cells = kept
    return nb
|
2cec552f8c2d2ae4437f7460db4f8bd4919c9f65
| 14,500
|
import argparse
def parse_arguments():
    """Parse the command line: required -i/--input and -o/--output GTF paths."""
    parser = argparse.ArgumentParser(description="Make a custom GTF file.")
    for short_flag, long_flag, help_text in (
        ("-i", "--input", "Input GTF file."),
        ("-o", "--output", "Output GTF file."),
    ):
        parser.add_argument(short_flag, long_flag, help=help_text, required=True)
    return parser.parse_args()
|
8d6603bc932b66df485fa2c3af6a35f267a41b6a
| 14,502
|
from typing import Union
from typing import Dict
def _resource_dict(mem: Union[str, int], cpu: Union[str, int]) -> Dict[str, str]:
"""
Private helper function to create a resource dictionary for deployments. Currently only supports the creation
of the requests/limits directory that is needed for a V1ResoruceRequirements object.
@param mem: Memory Request/Limit for a Container's ResoruceRequirement
@type mem: str
@param cpu: CPU Request/Limit for a Container's ResourceRequirement.
@type cpu: int
@return:
@rtype:
"""
return {'memory': f'{mem}', 'cpu': f'{cpu}'}
|
dc00bcba7442f655e0f56e6390b1675078ca9e4f
| 14,503
|
def is_hexstring(string):
    """
    Determine whether *string* is a hexstring.

    :param Union[ByteString, str] string: A string.
    :return: True when the length is even and every character is in
        [0-9a-fA-F]; the empty string counts as a hexstring.
    """
    if isinstance(string, str):
        string = string.encode()
    if len(string) % 2:
        return False
    lowered = string.lower()
    return all(c in b'0123456789abcdef' for c in lowered)
|
7263cbbb464d805b6e5f0142a2ff6772894c4837
| 14,504
|
def min_ind_of_anch(anchor_info):
    """Return the position (in key order) of the smallest anchor ID."""
    ids = list(anchor_info)
    return ids.index(min(ids))
|
89e77679a21f36016174cf8a52ec4b210e1ad295
| 14,506
|
def pkcs5_pad(data, block_size):
    """Pad *data* using the PKCS #5 scheme described in RFC 2898.

    Many symmetric ciphers require the message length to be a multiple of
    the block size (e.g. 8 bytes).  This appends ``pad_len`` copies of the
    byte ``pad_len``; a message that is already aligned gains a full extra
    block, so the padding is always removable.

    Args:
        data (bytes): Message to pad.
        block_size (int): Cipher block size in bytes (must be < 256).

    Returns:
        bytes: The padded message.

    Raises:
        ValueError: If *data* is not ``bytes``.
    """
    if not isinstance(data, bytes):
        raise ValueError('first argument should be bytes')
    pad_len = block_size - len(data) % block_size  # length of padding
    # bytes([n]) works for any 0 < n < 256, unlike chr(n).encode('ascii')
    # which raised for pad lengths above 127.
    return data + bytes([pad_len]) * pad_len
|
3d4e941e844bf4be2bcc9b9318a3472c4b8bbe8e
| 14,507
|
def name_from_id(id):
    """Hash the id into a run name.

    The name is built from the ``key:value`` pairs of *id*, sorted by key
    and joined with commas, so the result is deterministic.

    Args:
        id (dict): id associated to the run

    Returns:
        str: name of the run associated to the dictionary ``id``
    """
    return ','.join(k + ':' + str(id[k]) for k in sorted(id))
|
ad8eeed94e7f22e96c197753ed59652dcbfcda8e
| 14,508
|
def calculate_clinker(annual_production, clinker_cement_ratio):
    """
    Calculate annual clinker production from the cement/clinker ratio.

    :param annual_production: Reported annual cement production
    :param clinker_cement_ratio: Amount of clinker produced per cement output
    :return: Clinker per year
    """
    clinker_per_year = annual_production * clinker_cement_ratio
    return clinker_per_year
|
f08f4757ee27c2f4e54fd7acbbdaea398bee1a6e
| 14,509
|
def make_generator_instance(int_model_def):
    """Instantiate the model definition's RunClass with the definition itself."""
    run_class = int_model_def.RunClass
    return run_class(int_model_def)
|
7acd0def22a92b6ea5a997a5f0d90e1ab06218af
| 14,510
|
def accuracy(task_preds, task_targets):
    """Fraction of predictions that match the targets.

    :param task_preds: Predicted labels (non-empty array).
    :param task_targets: Ground-truth targets (same size as predictions).
    :return: a float metric between 0 and 1.
    """
    assert task_preds.size > 0
    assert task_targets.size > 0
    assert task_targets.size == task_preds.size, f"{task_targets.size} vs {task_preds.size}"
    score = (task_preds == task_targets).mean()
    assert 0. <= score <= 1.0, score
    return score
|
ed8bdef02253c952213b87ee39a86505315e4077
| 14,511
|
import struct
def b2f(b):
    """
    Reinterpret a 32-bit unsigned int's bit pattern as an IEEE-754 float.
    """
    packed = struct.pack('I', b)
    return struct.unpack('f', packed)[0]
|
d14dbc6182edb9947e4a5416114906a9c09a5751
| 14,514
|
def strip(table, col):
    """Remove column *col* from *table* in place and return the table.

    Preconditions: table is a (non-ragged) 2d List,
    col valid column.
    """
    assert col < len(table[0]), repr(col) + "要删除的列大于总列数!"
    for r, row in enumerate(table):
        table[r] = row[:col] + row[col+1:]
    return table
|
09171295b7ed46d12188eb8c882a60f5fce80647
| 14,516
|
def _parse_aggregation_feat(aggregating_in, features):
    """Parse aggregation information and format in a correct standard way.
    Parameters
    ----------
    aggregating_in: tuple
        the information for aggregating features
    features: pst.BaseFeatures
        the features we want to aggregate.
    Returns
    -------
    agg_f_ret: function
        the aggregation function for the retriever part.
    desc_in: pst.BaseDescriptorModel
        the descriptormodel to compute the aggregated features.
    pars_feat_in: dict
        the parameters of the featuresmanager to compute the aggregate
        features.
    pars_feats: dict
        the parameters in order to instantiate the new aggregated features.
    desc_out: pst.BaseDescriptorModel
        the descriptormodel to use in the new aggregated features.
    """
    # The tuple length (and, for length 3, the type of the second element)
    # encodes which pieces were supplied; anything omitted falls back to
    # the features' own descriptormodel or an empty parameter dict.
    assert(type(aggregating_in) == tuple)
    if len(aggregating_in) == 5:
        # Fully specified: take everything as given.
        agg_f_ret, desc_in, pars_feat_in, pars_feats, desc_out = aggregating_in
    elif len(aggregating_in) == 4:
        # Output descriptormodel omitted: reuse the input features' one.
        agg_f_ret, desc_in, pars_feat_in, pars_feats = aggregating_in
        desc_out = features.descriptormodel
    elif len(aggregating_in) == 3 and type(aggregating_in[1]) == dict:
        # (function, pars_feat_in, pars_feats): both descriptormodels default.
        agg_f_ret, pars_feat_in, pars_feats = aggregating_in
        desc_in = features.descriptormodel
        desc_out = features.descriptormodel
    elif len(aggregating_in) == 3 and type(aggregating_in[1]) != dict:
        # (function, desc_in, desc_out): both parameter dicts default to {}.
        agg_f_ret, desc_in, desc_out = aggregating_in
        pars_feat_in, pars_feats = {}, {}
    else:
        # Only the aggregation function given: default everything else.
        agg_f_ret = aggregating_in[0]
        pars_feat_in, pars_feats = {}, {}
        desc_in = features.descriptormodel
        desc_out = features.descriptormodel
    return agg_f_ret, desc_in, pars_feat_in, pars_feats, desc_out
|
e909cb032d97f5598ceb4321b44a8ecb4f0463fd
| 14,517
|
def get_description():
    """Return a dict describing how to call this plotter."""
    description = """This chart shows the number of VTEC phenomena and
significance combinations issued by a NWS Forecast Office for a given year.
Please note that not all current-day VTEC products were started in 2005,
some came a few years later. So numbers in 2005 are not directly
comparable to 2015. Here is a
<a href="http://www.nws.noaa.gov/os/vtec/pdfs/VTEC_explanation6.pdf">handy
chart</a> with more details on VTEC and codes used in this graphic.
    """
    return {
        'data': True,
        'cache': 86400,
        'description': description,
        'arguments': [
            dict(type='networkselect', name='station', network='WFO',
                 default='DMX', label='Select WFO:', all=True)
        ],
    }
|
0aa6fe9db51641479006e2bd31a5df926bb43d06
| 14,518
|
def reinterpret_latin1_as_utf8(wrongtext):
    """Re-decode text that was mistakenly decoded as latin-1 instead of UTF-8.

    :see:recode_unicode
    """
    raw_bytes = wrongtext.encode('latin-1', 'replace')
    return raw_bytes.decode('utf-8', 'replace')
|
e17e709dba7dacd4ae38c0787dafaef278610074
| 14,520
|
import os
def dir_size(path):
    """Sum the sizes of the regular files directly inside a directory.

    Returns
    -------
    int
        The total size in bytes (subdirectories are not descended into).
    """
    total = 0
    for entry in os.scandir(path):
        if entry.is_file():
            total += entry.stat().st_size
    return total
|
c82ba8990cb82eb8004f14b518beb6f752796811
| 14,521
|
def _get_updated_display_names(attr_name, new_val, old_val):
"""Get difference between old and new display names data"""
new_links = set()
old_links = set()
for val in new_val:
new_links.add(val.get("display_name", ""))
for val in old_val:
old_links.add(val.get("display_name", ""))
return (
attr_name,
list(new_links - old_links),
list(old_links - new_links),
)
|
4dc2c6502758822f4ff9d416c13fcf9f925dfd23
| 14,522
|
def exc_isinstance(exc_info, expected_exception, raise_not_implemented=False):
    """
    Check an exception (or pytest ExceptionInfo) against an expected type,
    following the ``__cause__`` chain of causing exceptions.

    Parameters
    ----------
    exc_info : `pytest.ExceptionInfo` or `Exception`
        The exception info as returned by `pytest.raises`, or a bare
        exception instance; None yields False.
    expected_exception : `type`
        The expected exception class.
    raise_not_implemented : bool, optional
        Whether to re-raise a `NotImplementedError` encountered in the
        chain. Defaults to ``False``.

    Returns
    -------
    correct_exception : bool
        Whether the exception itself or one of its causes matches.
    """
    if exc_info is None:
        return False
    # pytest.ExceptionInfo wraps the real exception in .value.
    exc = getattr(exc_info, 'value', exc_info)
    if isinstance(exc, expected_exception):
        return True
    if raise_not_implemented and isinstance(exc, NotImplementedError):
        raise exc
    return exc_isinstance(exc.__cause__, expected_exception,
                          raise_not_implemented=raise_not_implemented)
|
7e53dd94b7326faea1fe5accdc60b1b3b003f0af
| 14,524
|
def g2c(tensor):
    """
    Collapse the channel and group axes of a 5-D tensor into one.

    A ``(b, c, g, h, w)`` tensor becomes ``(b, c*g, h, w)``; any other
    rank is returned unchanged.
    """
    if len(tensor.shape) != 5:
        return tensor
    b, c, g, h, w = tensor.shape
    return tensor.reshape(b, c * g, h, w)
|
18517b066dfad91c9e8708fb94058b2780ddab9c
| 14,525
|
def max_column_width(column):
    """
    Return the width of the longest string representation in the column.
    """
    lengths = column.apply(lambda value: len(str(value)))
    return lengths.max()
|
30dadff91586ed8c270a78aa44ab3fc875ef706e
| 14,526
|
from typing import List
def readfile(filename: str) -> List[str]:
    """Wrote by: Nicholas Garth 101227727

    Read *filename* (UTF-8) and return every line with surrounding
    whitespace stripped.

    >>> readfile('hi.txt')
    ['Hi']
    """
    with open(filename, "r", encoding="UTF-8") as infile:
        return [line.strip() for line in infile]
|
5e71dd5b23221e509ee7d7feeff0241e0177298e
| 14,527
|
import torch
def collate_dataset(batch, test=False):
    """
    Collate (drug, protein[, label]) samples into batched tensors.

    Intended for ``DataLoader(collate_fn=collate_dataset)``.

    Args:
        batch: sequence of samples; each is (drug, protein) indices plus a
            label when ``test`` is False.
        test: when True, labels are absent and omitted from the output.

    Returns:
        List of tensors: [LongTensor drugs, LongTensor proteins] plus a
        FloatTensor of labels in training mode.
    """
    drugs = [sample[0] for sample in batch]
    proteins = [sample[1] for sample in batch]
    if test:
        return [torch.LongTensor(drugs), torch.LongTensor(proteins)]
    labels = [sample[2] for sample in batch]
    return [torch.LongTensor(drugs), torch.LongTensor(proteins),
            torch.FloatTensor(labels)]
|
9eec71d5fe994bd6bb6742d2eb667ad0dda1bae1
| 14,528
|
def CalculatePlatt(mol):
    """
    Calculate the Platt number of a molecule.

    Parameters:
        mol: RDKit molecule object
    Returns:
        Platt: sum over all bonds of (degree(begin) + degree(end) - 2)
    """
    total = 0
    for bond in mol.GetBonds():
        total += bond.GetBeginAtom().GetDegree() + bond.GetEndAtom().GetDegree() - 2
    return total
|
4a960308487fea60857a3ed2268efcd5342099d2
| 14,529
|
def remove_trailing_hyphens(s):
    """Remove a single trailing hyphen from the part of *s* before the first dot.

    E.g. ``'abc-.txt'`` -> ``'abc.txt'``.  Only the segment before the
    first ``'.'`` is touched, and only one hyphen is removed.

    :param s: input string (typically a filename)
    :return: the cleaned string, or *s* unchanged when there is nothing
        to remove
    """
    parts = s.split('.')
    # split('.') always yields at least one element, and endswith('-') is
    # simply False for an empty segment — no bare except needed.
    if parts[0].endswith('-'):
        parts[0] = parts[0][:-1]
        return '.'.join(parts)
    return s
|
412387e6d916322fdcf3a5fb6e61636cc93dba4b
| 14,531
|
def get_cli_kwargs(**kwargs):
    """
    Render Python keyword arguments as CLI flags.

    Underscores in names become hyphens, commas in values become spaces,
    and falsy values are skipped entirely.

    :param kwargs: Keyword arguments
    :return: CLI keyword arguments as a single space-joined string
    """
    pieces = []
    for key, value in kwargs.items():
        if not value:
            continue
        flag = key.replace("_", "-")
        pieces.append(f'--{flag} {str(value).replace(",", " ")}')
    return ' '.join(pieces)
|
7357516968952e13d8fe2edf07dc51c3a29e7673
| 14,532
|
def calc_tf_idf(tf_matrix, idf_matrix):
    """
    Combine term-frequency and inverse-document-frequency tables.

    :param tf_matrix: returned by calc_tf (doc -> {word: tf})
    :param idf_matrix: returned by calc_idf (doc -> {word: idf})
    :return: dict of dicts mapping each doc's words to tf*idf scores
    """
    tf_idf_matrix = {}
    for (doc, tf_table), (_, idf_table) in zip(tf_matrix.items(), idf_matrix.items()):
        tf_idf_matrix[doc] = {
            word: float(tf * idf)
            for (word, tf), (_, idf) in zip(tf_table.items(), idf_table.items())
        }
    return tf_idf_matrix
|
0e69d66499121f0aad8f2991cabaa6fe643e841a
| 14,533
|
def find_active_sites(activeSites, targetName):
    """
    Find active sites matching a name.

    Input: list of active sites, name of target
    Output: list of indices of matched active sites
    """
    matches = []
    for index, site in enumerate(activeSites):
        if site.name == targetName:
            matches.append(index)
    return matches
|
544cc215f378eccc5aaa655482c0b3c03fed8974
| 14,534
|
import math
def sinc(x):
    """Normalized sinc: sin(pi*x)/(pi*x), with sinc(0) defined as 1."""
    if x == 0:
        return 1
    scaled = math.pi * x
    return math.sin(scaled) / scaled
|
9f5983fb37ea9c049cc8a77375dc59a9bb78474a
| 14,537
|
def friends():
    """
    Returns friendships for given player.
    Parameters: `key`, `uuid`
    Example Response:
    ```json
    {
        'success': True,
        'records': [
            {
                '_id': '************************',
                'uuidSender': '********************************',
                'uuidReceiver': '********************************',
                'started': 1595912821091
            },
            ...
        ]
    }
    ```
    https://github.com/HypixelDev/PublicAPI/blob/master/Documentation/methods/friends.md
    """
    # Stub: currently returns only the literal endpoint name; the documented
    # JSON response above describes the eventual Hypixel API behavior.
    return "friends"
|
00873cc71183008be186665c306d625a03709176
| 14,538
|
from typing import Dict
def get_email_to_names_table(email_to_names_filename: str) -> Dict[str, str]:
    """Load a mapping of emails to names from the input file.

    Args:
        email_to_names_filename: Path to a file where each line is an
            email-name pair of the form::

                email@example.com,Name

    Returns:
        A dict mapping emails to names. Duplicate emails trip an assert.
    """
    table: Dict[str, str] = {}
    with open(email_to_names_filename, "r") as handle:
        for raw_line in handle:
            email, name = raw_line.rstrip().split(sep=",")
            assert email not in table
            table[email] = name
    return table
|
856ce53a0abee1721b60603c468e5b66a484f7c8
| 14,539
|
def print_tb(traceback, limit=0, file=None):
    """Print up to *limit* stack trace entries from *traceback*. If *limit* is omitted
    or ``None``, all entries are printed. If *file* is omitted or ``None``, the
    output goes to ``sys.stderr``; otherwise it should be an open file or file-like
    object to receive the output."""
    # Stub: documentation-only signature mirroring the stdlib ``traceback``
    # module; the body intentionally does nothing.  NOTE(review): the default
    # ``limit=0`` differs from the docstring's "omitted or None" wording —
    # confirm which default is intended.
    return None
|
5d7fb403676aa7836430c1f6518b081d13c80e2c
| 14,540
|
def create_actions_file_msg(second_run: bool):
    """Build the prompt asking whether to create an(other) action file."""
    if second_run:
        article = "another"
    else:
        article = "an"
    return "Do you want to create %s action file?" % article
|
b3029285ac076049ea150155932044942ea3f5f7
| 14,542
|
import subprocess
def _run_git(cmd):
    """Run ``git`` with the given argument list and return its stdout bytes.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    process = subprocess.Popen(['git'] + cmd, stdout=subprocess.PIPE)
    output, _ = process.communicate()
    status = process.poll()
    if status:
        raise subprocess.CalledProcessError(status, 'git')
    return output
|
b33854f361bbe4e0be7c223e620d6eccdc7e85eb
| 14,545
|
def get_list_of(key, ch_groups):
    """
    Collect the distinct values of *key* across ch_groups' group dicts.

    key (str): a key of the ch_groups[groups] dict
    ch_groups (dict): a group-name-indexed dict whose values are
        dictionaries of group information (see expand_ch_groups)

    Example: get_list_of('system', ch_groups) returns e.g.
    [ 'geds', 'spms', 'auxs' ] — first-seen order, no duplicates.
    """
    seen = []
    for info in ch_groups.values():
        if key not in info:
            continue
        value = info[key]
        if value not in seen:
            seen.append(value)
    return seen
|
f5bbc32c836d0de5e9aa2c055d97bacdfc794d0e
| 14,546
|
def _middle(values):
"""Lower bound of median, without using numpy (heavy reqs)"""
n = len(values)
is_odd = n % 2
middle_idx = int((n + is_odd) / 2) - 1
return sorted(values)[middle_idx]
|
2b3f342c700ecfde20eb7ce41d0d071b8be6e281
| 14,548
|
def multiindex_col_ix(df, col):
    """Position in ``df.index.levels`` of the multiindex level named *col*."""
    level_names = df.index.names
    return level_names.index(col)
|
b3c8df1e9d0b36e871fbfaece537704ad13ab7f1
| 14,549
|
def get_be_time(encoded_date):
    """
    Byte-swap each 16-bit unit of the encoded time into hex strings.

    @param encoded_date: NetworkList encoded time value (byte sequence).
    @return: list of eight 4-hex-digit strings, bytes swapped per unit.
    """
    # Cut the first 16 bytes into eight little-endian 2-byte chunks.
    chunks = [encoded_date[i:i + 2] for i in range(0, 16, 2)]
    swapped = []
    for chunk in chunks:
        hex_le = chunk.hex()
        # Swap the two bytes within the unit (little- to big-endian).
        swapped.append(hex_le[-2:] + hex_le[:2])
    return swapped
|
fe5aa25adb4590a606a5671557ac9876b7b07c5b
| 14,551
|
def error(Y, X):
    """
    Element-wise squared error of reconstructed data relative to the original.

    In theory this should decrease as the order of the markov process
    and/or the number of projection components increases.

    Parameters
    ----------
    Y : array, shape (N, M)
        The original data.
    X : array, shape (N, M)
        The reconstructed data (output from "test").

    Returns
    -------
    MSE : array, shape (N, M)
        Squared error per element, same shape as the inputs.
    """
    residual = Y - X
    return residual ** 2
|
16e392f3ee4bece24ff736e47dcb7cae242a1997
| 14,552
|
def normalize_keys(data):
    """Return a copy of *data* with every key lowercased."""
    normalized = {}
    for key, value in data.items():
        normalized[key.lower()] = value
    return normalized
|
b23e2aff374d9413a5c9a63db1fdd955ae7f24a6
| 14,553
|
def accuracy(y_pred, y, tags):
    """
    Accuracy of a classifier, ignoring correctly predicted "O" tags.

    Correct "O" predictions are excluded from both numerator and
    denominator so the score reflects performance on real entity tags.
    """
    o_id = tags.index("O") if "O" in tags else None
    correct = 0
    ignore = 0
    for i, tag_id in enumerate(y):
        if y_pred[i] != y[i]:
            continue
        if tag_id == o_id:
            ignore += 1
        else:
            correct += 1
    return float(correct) / (len(y) - ignore)
|
950108d475220dc7e16e62b873c4a5d7dbff0e97
| 14,554
|
def frac_year(hour, leap_year=False):
    """
    Calculate the fraction of the year, in degrees.

    Parameters
    ----------
    hour : integer, float
        The hour of the year; may be a decimal value.
    leap_year : boolean
        Whether the year is a leap year. Default is False.

    Returns
    -------
    B : float
        The fraction of the year.
    """
    n_days = 366 if leap_year else 365
    return (hour - 1944) / 24 * 360 / n_days
|
fe66678278a2257e5b8fc34af042b5a72b29596f
| 14,555
|
def int_wrapper(string):
    """Parse an integer in assembler ('#$FF', '$FF') or Python ('0x10', '10') notation."""
    if string.startswith('#$'):
        return int(string[2:], 16)
    if string.startswith('$'):
        return int(string[1:], 16)
    # base 0 lets Python infer 0x/0o/0b prefixes or plain decimal.
    return int(string, 0)
|
909ed1ab0d3a2f6909ae5fb819e63a4658e7a521
| 14,557
|
def box_calc(size_xyz, pallet_xyz):
    """Compute the center points for storing parts in a pallet grid.

    Parts of dimensions *size_xyz* are laid out in an
    x-fastest, then y, then z order across the integer pallet grid.
    """
    size_x, size_y, size_z = size_xyz
    nx, ny, nz = (int(v) for v in pallet_xyz)
    return [
        [(i + 0.5) * size_x, (j + 0.5) * size_y, (h + 0.5) * size_z]
        for h in range(nz)
        for j in range(ny)
        for i in range(nx)
    ]
|
0ed2b14e117b1f66be67579136f0f25432367284
| 14,558
|
import argparse
def add_fp16_config_args(parser: argparse.ArgumentParser):
    """Register the mixed-precision (fp16) options on *parser* and return it."""
    fp16_group = parser.add_argument_group("fp16", "fp16 configurations")
    fp16_group.add_argument("--fp16", action="store_true",
                            help="Run model in fp16 mode.")
    return parser
|
5827702ac159f8689551328f7282fff6989018da
| 14,559
|
def coverage_test(hce_list, nhce_list, conservative=True):
    """
    Run the coverage test on employees.

    Note: This is assuming we calculate based on 1.410(b)(1)(b).
    Source: https://www.law.cornell.edu/cfr/text/26/1.410(b)-2
    """
    def benefiting_fraction(employees, attr):
        # Fraction of employees whose given percentage attribute is positive.
        benefiting = [e for e in employees if getattr(e, attr) > 0]
        return len(benefiting) / len(employees)

    if conservative:
        # Conservative mode: best case for HCEs, worst case for NHCEs.
        hce_benefit_percentage = benefiting_fraction(hce_list, 'percentage_max')
        nhce_benefit_percentage = benefiting_fraction(nhce_list, 'percentage_min')
    else:
        hce_benefit_percentage = benefiting_fraction(hce_list, 'percentage_avg')
        nhce_benefit_percentage = benefiting_fraction(nhce_list, 'percentage_avg')
    participation_ratio = nhce_benefit_percentage / hce_benefit_percentage
    passed = participation_ratio >= 0.7
    return (
        f'Coverage test {"passed" if passed else "failed"}. NHCE to HCE participation ratio is '
        f'{participation_ratio:.2%} (must be >= 70%).'
    )
|
d9ee09fdd5232cc280a1a7a1670633eb43235347
| 14,561
|
def function_intercept(intercepted_func, intercepting_func):
    """
    Wrap *intercepted_func* so that *intercepting_func* sees its result.

    Example:
        def get_event(result_of_real_event_get, *args, **kwargs):
            # do work
            return result_of_real_event_get
        pygame.event.get = function_intercept(pygame.event.get, get_event)

    :param intercepted_func: the function being intercepted
    :param intercepting_func: called after the intercepted function, with
        its return value as the first argument plus the original args and
        kwargs; its own return value is what callers receive
    :return: the combined function, normally assigned back over the
        intercepted function's location
    """
    def wrapper(*args, **kwargs):
        # Run the real function first, then let the interceptor transform
        # (or simply observe) its result.
        original_result = intercepted_func(*args, **kwargs)
        return intercepting_func(original_result, *args, **kwargs)
    return wrapper
|
28cfa1e873500cc9ca87a9c07275683cf41a33ae
| 14,562
|
def is_ref(prop):
    """
    Return True when *prop* is a JSON reference, i.e. its only key is '$ref'.
    """
    return list(prop) == ['$ref']
|
3c52ba784d3d490cf44a60d5d35b2251b640eeff
| 14,563
|
def get_models(ctx_cmip5, var, table, expts, nens):
    """
    Get a list of all models for a given variable for each experiment and store them in a dictionary
    :param ctx_cmip5: a esgf-pyclient context of all cmip5
    :param var: variable to test
    :param table: corresponding cmor_table for variable
    :param expts: list of experiments
    :param nens: number of ensemble members as minimum requirement
    :return: A dictionary of experiments and models which have at least nens ensemble members
    """
    result = {}
    for expt in expts:
        # Narrow the search context to this variable/table/experiment combo.
        ctx = ctx_cmip5.constrain(variable=var, cmor_table=table, experiment=expt)
        mods = []
        for mod in ctx.facet_counts['model'].keys():
            this_ctx = ctx.constrain(model=mod)
            # NOTE(review): this uses a strict '>' although the docstring says
            # "at least nens" — confirm whether '>=' was intended.
            if len(this_ctx.facet_counts['ensemble']) > nens:
                # mods.append(mod) returns None, so assigning its result to
                # result[expt] here would store None — append first, assign after.
                mods.append(mod)
        result[expt] = mods
    return result
|
34f04c1d65c4c64e4f945a45c2f956477f6a40c5
| 14,564
|
from typing import List
import collections
def molecular_formula_from_symbols(symbols: List[str], order: str = "alphabetical") -> str:
    """
    Returns the molecular formula for a list of symbols.

    Parameters
    ----------
    symbols: List[str]
        List of chemical symbols
    order: str, optional
        Sorting order of the formula. Valid choices are "alphabetical" and "hill".

    Returns
    -------
    str
        The molecular formula.

    Raises
    ------
    ValueError
        If *order* is not one of the supported orderings.
    """
    supported_orders = ["alphabetical", "hill"]
    order = order.lower()
    if order not in supported_orders:
        # Fixed a stray literal 'f' that used to appear before the list in
        # this message ("... orders are f[...]").
        raise ValueError(f"Unsupported molecular formula order: {order}. Supported orders are {supported_orders}.")
    # Case-normalize so 'NA' and 'Na' count as the same element.
    count = collections.Counter(x.title() for x in symbols)
    element_order = sorted(count.keys())
    # Hill convention: carbon first, then hydrogen, then alphabetical.
    if order == "hill" and "C" in element_order:
        if "H" in element_order:
            element_order.insert(0, element_order.pop(element_order.index("H")))
        element_order.insert(0, element_order.pop(element_order.index("C")))
    ret = []
    for k in element_order:
        c = count[k]
        ret.append(k)
        if c > 1:
            ret.append(str(c))
    return "".join(ret)
|
82142bcae734f89c46e9fce854fdf6de080d8fd7
| 14,565
|
def dump(widget, *a, **kw):
    """Dump a TTWidgets WIDGET, showing info about the parent Frame and all
    child Label widgets used to represent the compound TTWidgets WIDGET.

    Delegates to the widget's own ``dump`` method; objects without one
    (or whose dump raises AttributeError) yield ``None``.
    """
    try:
        result = widget.dump(*a, **kw)
    except AttributeError:
        result = None
    return result
|
cc5c316890317e80c737e98383a7c686b828d435
| 14,566
|
def emission_counts(tokenlists, taglists):
    """Count (token, tag) emission pairs across a tagged corpus.

    Pairs each token list with its tag list (Brown-corpus style parallel
    lists) and tallies every (token, tag) combination.

    :param tokenlists: iterable of token sequences (one per sentence)
    :param taglists: iterable of tag sequences, parallel to *tokenlists*
    :return: list of ((token, tag), count) pairs sorted by count,
        highest first (ties keep first-seen order, as before)
    """
    # Local import: this file's snippets each carry their own imports.
    from collections import Counter
    emissions = Counter()
    for tokenlist, taglist in zip(tokenlists, taglists):
        emissions.update(zip(tokenlist, taglist))
    # Counter.most_common() == sorted(items, key=count, reverse=True),
    # matching the original hand-rolled sort exactly.
    return emissions.most_common()
|
cc1de55a14a906557f8d873bc7def47dcdc52732
| 14,567
|
def is_anonymous_argument(c):
    """Return True when an argument cursor carries no name.

    In the declaration ``void f(int x, int);`` the second argument is unnamed.
    """
    return c.spelling in (None, '')
|
1b262e4539b89c81dc21eebc585af8a4dfc9d342
| 14,569
|
def unsplit(t):
    """
    Reassemble an RI string from its split 5-tuple
    (scheme, authority, path, query, fragment); None parts are skipped.
    """
    parts = []
    if t[0] is not None:
        parts.append(t[0])
    parts.append('://')
    if t[1] is not None:
        parts.append(t[1])
    if t[2] is not None:
        parts.append('/')
        parts.append(t[2])
    if t[3] is not None:
        parts.append('?')
        parts.append(t[3])
    if t[4] is not None:
        parts.append('#')
        parts.append(t[4])
    return ''.join(parts)
|
4ab5e703f98920522b626dd84d3a41805b46248a
| 14,570
|
def find_error_file(view, error_line):
    """Return the filename region that comes just before the given error line.

    NOTE(review): when the error line precedes the first filename region,
    ``regions[0 - 1]`` wraps around and returns the *last* region — that
    quirk of the original is preserved here; confirm it is intended.
    """
    regions = view.find_by_selector("entity.name.filename.error")
    if not regions:
        return None
    line_end = view.line(error_line).end()
    for idx, region in enumerate(regions):
        # First filename past the error line: its predecessor is ours.
        if region.end() > line_end:
            return regions[idx - 1]
    return regions[-1]
|
30c4781d6f04e090bafa4222c28579c3364c835c
| 14,571
|
def divide_round_up(a, b):
    """Return a / b rounded up to the nearest integer (ceiling division).

    The previous implementation rounded to the *nearest* integer
    (e.g. divide_round_up(5, 4) gave 1), contradicting the name and
    docstring; this always rounds up.

    :param a: dividend (int)
    :param b: divisor (non-zero int)
    :return: ceil(a / b)
    """
    # Negated floor division is exact integer ceiling, no float involved.
    return -(-a // b)
|
7ec28dfbe05c006e4e2cad340002a39c9b23f4b9
| 14,572
|
def univcrest():
    """Return the University College crest encoded as a giant integer.

    The crest is a bitmap of '1' (background) and '8' (foreground) digits:
    24 rows of 50 columns plus a final 49-column row, i.e. 1249 digits in
    total — the year of founding.

    :return: tuple of (crest as int, (rows, cols) == (25, 50))
    """
    crest = """11111111111111111111111188111111111111111111111111
11111111111111111111881888818811111111111111111111
11111111111111111111118888881111111111111111111111
11111888811111111111118888881118888111111111111111
11111188881111111111118888881111888811111111111111
11111188888888111111118888881111888888811111111111
11111118811188881111118888881111881118888111111111
11111111188888888881118888881111118888888888111111
11111111111888111881118888881111111188811188111111
11111111111111111111118888881111111111111111111111
11188111111111111111118888881111111111111111188111
11118888888888888888888888888888888888888888881111
11888888888888888888888888888888888888888888888811
11118888888888888888888888888888888888888888881111
11188111111111111111118888881111111111111111188111
11111111111111111111118888881111111111111111111111
11111188881111111111118888881118888111111111111111
11111118888111111111118888881111888811111111111111
11111118888888111111118888881111888888811111111111
11111118811188881111118888881111881118888111111111
11111111188888888881118888881111118888888888111111
11111111111888111881118888881111111188811188111111
11111111111111111111118888881111111111111111111111
11111111111111111111881888818811111111111111111111
1111111111111111111111118811111111111111111111111"""
    # Strip the layout whitespace so only the digits remain, then parse
    # the whole bitmap as one integer.
    crest = crest.replace('\n','')
    crest = crest.replace(' ','')
    return (int(crest),(25,50))
|
9c397c666b211fcfd741bc1df49cc9c0726da2e9
| 14,574
|
def get_open_descriptions(get_open, initial_state, current_state):
    """
    Build all 'open' descriptions satisfied by the current state (if any).

    Parameters
    ----------
    get_open: function
        Callable returning the drawers/doors opened between the two states.
    initial_state: nd.array
        Initial state of the environment.
    current_state: nd.array
        Current state of the environment.

    Returns
    -------
    list of str
        One 'Open the <thing>' description per opened thing.
    """
    opened_things = get_open(initial_state, current_state)
    return ['Open the {}'.format(thing) for thing in opened_things]
|
cc4abab0cbfc8cb38db7cb46003ec6e80b1df634
| 14,575
|
def parse_extended_entities(extended_entities_dict):
    """Extract media file URLs from a tweet's extended entities.

    Collects each media item's static image URL and, when video variants
    exist, the highest-bitrate mp4 URL as well.

    :param extended_entities_dict: "extended_entities" mapping of a tweet
    :returns: list of media file urls
    """
    urls = []
    for media in extended_entities_dict.get("media", []):
        # static image first
        urls.append(media["media_url_https"])
        if "video_info" not in media:
            continue
        # pick the best-quality mp4 variant
        best_url = None
        best_bitrate = -1  # -1 so twitter's occasional bitrate=0 still wins
        for variant in media["video_info"]["variants"]:
            if "bitrate" not in variant or "content_type" not in variant:
                continue
            if variant["content_type"] != "video/mp4":
                continue
            bitrate = int(variant["bitrate"])
            if bitrate > best_bitrate:
                best_bitrate = bitrate
                best_url = variant["url"]
        if not best_url:
            print("Error: No bitrate / content_type")
            print(media["video_info"])
        else:
            urls.append(best_url)
    return urls
|
9fc1ccdf57ac236f6fbca219bb74c96026dac0c2
| 14,576
|
def get_cycle_stats(data_list):
    """
    Calculate cycle statistics for a test run.

    Each entry's first element is its cycle count.
    Returns (min, max, avg) over all entries.
    """
    cycle_counts = [entry[0] for entry in data_list]
    average = sum(cycle_counts) / len(cycle_counts)
    return min(cycle_counts), max(cycle_counts), average
|
e8fc1a7b3619ed0f9b63995ded217a4037bdf618
| 14,577
|
def LBtoKG(mlb):
    """
    Convert a mass from pounds to kilograms.

    note: 1 kg = 2.20462 lb
    :param mlb: mass [lb]
    :return: mass [kg]
    """
    POUNDS_PER_KG = 2.20462
    return mlb / POUNDS_PER_KG
|
41c4a4a38f4f10b8b1a006ef6da8f9d5f76934fc
| 14,578
|
import itertools
def expand_alt_section(alt_section, max_expansions=None):
    """Expand <ALT_BEGIN>/<ALT>/<ALT_END> alternative groups in a CTM section.

    Every combination of the alternatives (cartesian product across groups)
    becomes one expansion; expansions are joined by the section's <ALT>
    separator line. Helper function for refilter_ctm.
    """
    spans = [['']]
    separator_line = ''
    for line in alt_section.strip().split('\n'):
        if '<ALT_BEGIN>' in line:
            spans.append([''])          # open a new alternatives group
        elif '<ALT_END>' in line:
            separator_line = line.replace('<ALT_END>', '<ALT>').strip() + '\n'
            spans.append([''])          # back to plain text after the group
        elif '<ALT>' in line:
            spans[-1].append('')        # next alternative within the group
        else:
            spans[-1][-1] = spans[-1][-1] + line + '\n'
    combos = list(itertools.product(*spans))
    if max_expansions and len(combos) > max_expansions:
        combos = combos[:max_expansions]
    return separator_line.join(''.join(combo) for combo in combos)
|
77ec3e274d5b435994cce2e9e9c289a09c336cc8
| 14,579
|
def get_value_from_aligned_membuf(buf, valtype):
    """Returns the value held in a __gnu_cxx::__aligned_membuf.

    :param buf: gdb.Value wrapping the __aligned_membuf object
    :param valtype: gdb.Type of the value stored in the buffer
    :return: gdb.Value of type *valtype* read from the buffer's storage
    """
    # Reinterpret the raw _M_storage bytes as a pointer-to-valtype and
    # dereference it (mirrors __aligned_membuf::_M_ptr in libstdc++).
    return buf['_M_storage'].address.cast(valtype.pointer()).dereference()
|
09ff1861e1c6c0a5b484be59572ff12cea01878b
| 14,580
|
def compute_mask(cpu):
    """
    Return the affinity bitmask with bit *cpu* set, suitable for writing
    to /proc/irq to pin an interrupt to that processor.
    """
    mask = 1 << cpu
    return mask
|
a393c663e5426ecde752aa107c97c7c429aa589a
| 14,581
|
def _prefix_linear(s, op, false, true):
"""Apply associative binary operator linearly.
@param s: container
@param op: operator
@param false, true: values if treating `op` as disjunction
"""
if not s:
return false
u = s[0]
for v in s[1:]:
# controlling value ?
if u == true:
break
if v == true:
u = true
break
u = op + ' ' + u + ' ' + v
return u
|
85f799e826297c8478bb03c2d4dd26643247a9e1
| 14,582
|
def decode_string(val: bytes) -> str:
    """Decode a possibly NUL-terminated ASCII byte sequence and strip whitespace."""
    terminated, _, _ = val.partition(b'\x00')
    return terminated.decode('ascii').strip()
|
1e963040a41ae16ba1bb7750ecf17b5e5cdb680f
| 14,584
|
def product_id_attrs(obj):
    """A ``choice_attrs`` helper labelling each item with its product ID."""
    product = obj.product
    return {"data-product-id": product.id}
|
89a710104bdc907fcfecfc6359469986a7649989
| 14,585
|
import re
def bqtk_default_context(request) -> str:
    """Derive a BigQuery-safe unique id from the pytest node id.

    Args:
        request: pytest fixture which provides test context info.

    Returns:
        str: node id with every run of characters outside [a-zA-Z0-9_]
        collapsed to a single underscore.
    """
    node_id = request.node.nodeid
    return re.sub('([^a-zA-Z0-9_]+)', '_', node_id)
|
5f486f625ae0ff2670dd457a25043ad61193af90
| 14,586
|
def sort_list(this_list):
    """Return a new list of (entropy, FQDN) pairs sorted ascending by the
    first element of each pair (the Shannon entropy)."""
    ordered = list(this_list)
    ordered.sort(key=lambda pair: pair[0])
    return ordered
|
75f4def632b330c392f7a4f4db416262585c432b
| 14,587
|
import os
import subprocess
def find_repos(path):
    """
    Find leapp repositories (directories containing ``.leapp``) under *path*.

    :param path: Path to search for repositories.
    :return: List of absolute repository paths; empty list when the search
        fails (previously an empty tuple was returned, which made the
        return type inconsistent with the success path).
    """
    try:
        # -L follows symlinks; stderr is silenced (DEVNULL replaces the
        # manual open(os.devnull) dance). OSError is also caught so a
        # missing find binary degrades gracefully instead of crashing.
        output = subprocess.check_output(
            ['/usr/bin/find', '-L', path, '-name', '.leapp'],
            stderr=subprocess.DEVNULL).decode('utf-8')
    except (subprocess.CalledProcessError, OSError):
        return []
    return [os.path.abspath(os.path.dirname(rpath))
            for rpath in output.split('\n') if rpath.strip()]
|
268b19747fe86ee3c9f49d17f8c8178acb0d0fba
| 14,588
|
from os import system
from typing import List
def type_checks(paths_to_check: List[str]) -> bool:
    """
    Run strict mypy type checks over the supplied files.

    Args:
        paths_to_check (List[str]): paths to hand to mypy.

    Returns:
        bool: True if the checks failed (or no files were given), False otherwise.
    """
    if not paths_to_check:
        print("No files found to check")
        return True
    joined_paths = " ".join(paths_to_check)
    command = "mypy --strict {paths}".format(paths=joined_paths)
    print(f"Checking types with the following command: {command}")
    exit_status = system(command)
    return bool(exit_status)
|
36db43367ac89af5172c36ac6732ae10bcbad8ae
| 14,589
|
def ascii(str):
    """Return a string with all non-ascii characters hex-encoded.

    Tab, newline and carriage return pass through, as does printable
    ASCII (space..0x7e); everything else becomes ``\\xNN``. For non-string
    input, each element is converted and a list is returned.

    Note: the old Python-2 code returned ``map(ascii, str)`` for non-string
    input, which on Python 3 is a lazy iterator, not a list — fixed here.
    The per-character ``rv + ...`` concatenation (quadratic) is replaced
    with a join. (The parameter name shadows the builtin ``str``; kept for
    backward compatibility.)
    """
    if not isinstance(str, type('')):
        return [ascii(item) for item in str]
    out = []
    for c in str:
        if c in ('\t', '\n', '\r') or ' ' <= c < chr(0x7f):
            out.append(c)
        else:
            out.append('\\x%02.2x' % ord(c))
    return ''.join(out)
|
7224579507cc9e0efb0bb1700e2b8b0096422532
| 14,590
|
def right_shift(number, n):
    """
    Right shift on a base-10 number: keep only the last *n* decimal digits
    (i.e. ``number mod 10**n``).

    Parameters
    ----------
    number : integer
        the number to be shifted
    n : integer
        the number of digits to keep

    Returns
    -------
    shifted number : integer
        the low-order *n* digits of *number*

    Examples
    --------
    >>> right_shift(123, 1)
    3
    >>> right_shift(0, 1)
    0
    >>> right_shift(1234, 2)
    34
    """
    modulus = 10 ** n
    return number % modulus
|
e6d23b5bd630449aba54cb38c2c6d9be174386c0
| 14,591
|
def get_interconnector_index(data) -> list:
    """Return the interconnector IDs for the trading period."""
    # Walk the nested NEMSPD case-file structure (kept as chained .get()
    # calls, matching the original's behavior on missing keys).
    node = data
    for key in ('NEMSPDCaseFile', 'NemSpdInputs', 'PeriodCollection',
                'Period', 'InterconnectorPeriodCollection',
                'InterconnectorPeriod'):
        node = node.get(key)
    return [entry['@InterconnectorID'] for entry in node]
|
9cb2a8a09e98ad308b23d670283d2b358abc9856
| 14,592
|
def check_sched(sched):
    """Validate the YAML result of a scheduling algorithm.

    .. literalinclude:: ../../wikipedia-sched.yaml
       :language: yaml
       :linenos:

    :param sched: dict with a 'sched' list of task descriptors; each task
        needs a 'name' and a 'jobs' list of [start, end] int pairs.
    :return: True for success, False otherwise.
    :rtype: bool
    """
    # must have at least 1 task
    if len(sched['sched']) < 1:
        print ("ERROR: the sched list must have at least 1 task. Found", len(sched['sched']))
        return False
    # check that every task carries the mandatory fields and sane job times
    print ('checking the scheduling list ... ', end='')
    for task in sched['sched']:
        if 'name' not in task:
            print ("\nERROR: field 'name' not found in task")
            return False
        if 'jobs' not in task:
            print ("\nERROR: field 'jobs' not found in task")
            return False
        if len(task['jobs']) <= 0:
            # an empty jobs list is tolerated, with a warning
            print ("\nWARNING: task %s has no job. Got" % task['name'], len(task['jobs']))
        for job in task['jobs']:
            if type(job[0]) is not int or type(job[1]) is not int:
                print ("\nERROR: jobs must be int initial and final times. Got", type(job[0]), type(job[1]))
                return False
            if job[0] > job[1]:
                # fixed: message previously read "the the final time"
                print ("\nERROR: the initial job time must be lower than the final time. Got", job[0], job[1])
                return False
            # negative times are not supported by the plotting function
            if job[0] < 0:
                print ("\nERROR: the initial job time must be greater than 0. Got", job[0])
                return False
            if job[1] < 0:
                # fixed: this message previously said "initial" for the final time
                print ("\nERROR: the final job time must be greater than 0. Got", job[1])
                return False
    print ('passed !')
    return True
|
7eb2e00813d38993072ae326da16069b607faea2
| 14,593
|
def binarygap(u):
    """maximal sequence of consecutive zeros
    >>> binarygap(6)
    1
    >>> binarygap(9)
    2
    >>> binarygap(129)
    6
    """
    # Splitting the binary form on '1' leaves only runs of '0's.
    # NOTE(review): quirks preserved — trailing zero runs count (6 -> 1),
    # but with fewer than two 1-bits the result is always 0 (8 -> 0).
    zero_runs = format(u, 'b').split('1')
    if len(zero_runs) < 3:
        return 0
    return max(len(run) for run in zero_runs)
|
3909779446dd500f6d3897c9fabd2628371d8f08
| 14,594
|
import torch
import math
def log_lerp(x: torch.Tensor, b: float):
    """
    Elementwise log(x), linearly extrapolated for x < b.

    Below the threshold the tangent line of log at b is used:
    log(b) + (x - b) / b. Requires b > 0.
    """
    assert b > 0
    tangent = math.log(b) + (x - b) / b
    return torch.where(x >= b, x.log(), tangent)
|
c09eef11de46a3bf6905627f619df35e9de6e37f
| 14,595
|
import argparse
def parse_args():
    """Parse command line arguments.

    Returns the argparse.Namespace with attributes ``inout`` (str),
    ``season`` (str) and ``nactivities`` (int).
    """
    parser = argparse.ArgumentParser()
    # NOTE(review): default=True on required string options is dead code
    # (required args can never fall back to a default) — kept as-is.
    parser.add_argument('--in', dest='inout', action='store', type=str,
                        required=True, default=True,
                        help="Stay in? (True/False)")
    parser.add_argument('--s', dest='season', action='store', type=str,
                        required=True, default=True,
                        help="Season? (Spring/Summer/Fall/Winter/Any)")
    parser.add_argument('--n', dest='nactivities', action='store', type=int,
                        required=False, default=1,
                        help="Number of activities? (Default of 1)")
    return parser.parse_args()
|
3d91e030efe98093632d03f6f2c4447364ac5a89
| 14,596
|
def fast_multiply(matrix, value):
    """
    Multiply a NumPy array by a scalar in place and return it.

    :param matrix: numpy.ndarray to scale (modified in place)
    :param value: scalar multiplier
    :return: the same array, scaled
    """
    # One vectorized in-place op replaces the per-row Python loop —
    # identical result for ndarrays, without the interpreter overhead.
    matrix *= value
    return matrix
|
8a9eb29b1c6c0cc56b35f43f128e2595b45e1ff6
| 14,598
|
def freeze(x):
    """Freeze a dictionary into a hashable frozenset of its items.

    List values are converted to tuples so every item is hashable.
    """
    return frozenset(
        (key, tuple(value) if isinstance(value, list) else value)
        for key, value in x.items()
    )
|
fc3bf21419057563f2389ab7e26279bb1f37436b
| 14,599
|
def humanize(seconds):
    """
    Convert a duration in whole seconds to a human-readable form,
    using the largest fitting unit (days, hours, minutes or seconds).
    """
    assert type(seconds) is int  # ints only, as before
    for unit_seconds, unit_name in ((86400, 'day'), (3600, 'hour'), (60, 'minute')):
        if seconds >= unit_seconds:
            count = seconds // unit_seconds
            return '%s %s%s' % (count, unit_name, '' if count == 1 else 's')
    return '%s second%s' % (seconds, '' if seconds == 1 else 's')
|
2a32c7b54b1be58ce571910edbf8d5383db66aa2
| 14,600
|
import requests
import json
def getNLP(url="http://nlp.ailab.lv/api/nlp", data=None, headers=None):
    """POST *data* as JSON to the NLP API and return the decoded response.

    :param url: endpoint URL
    :param data: request payload; defaults to a sample request running the
        tokenizer/morpho/parser/ner pipeline on placeholder text
    :param headers: HTTP headers; defaults to a JSON content type
    :return: Python object decoded from the JSON response, or None on a
        non-200 status
    """
    # Defaults are built per call: the old signature used mutable dict
    # defaults, which callers could accidentally mutate across invocations.
    if data is None:
        data = {"data": {"text": "change me"},
                "steps": ["tokenizer", "morpho", "parser", "ner"],
                "model": "default", "config": None}
    if headers is None:
        headers = {'content-type': 'application/json'}
    response = requests.post(url, json.dumps(data), headers=headers)
    if (response.status_code != 200):
        print(f"Bad response code: {response.status_code}")
        return None
    return response.json()
|
cb63deaa4a0a98cf6641eae56ad5fb9fd81173ca
| 14,601
|
def schema_input_type(schema):
    """Classify a schema's input type.

    :param schema: schema object
    :return: 'list' when the schema is a list, otherwise 'simple'
    """
    return 'list' if isinstance(schema, list) else 'simple'
|
cdcc9b724005083995f26a767d9b2ab95645ad79
| 14,602
|
def integrator_RK(x, step_size, function_system):
    """
    Advance the state one step with classic 4th-order Runge-Kutta.

    Parameters
    x : state
    step_size : integration step
    function_system : callable returning the state derivative
    """
    half_step = step_size / 2
    k1 = function_system(x)
    k2 = function_system(x + half_step * k1)
    k3 = function_system(x + half_step * k2)
    k4 = function_system(x + step_size * k3)
    # weighted average of the four slope estimates
    return x + (step_size / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
|
97380328a9bf1c3e91f161a3a39d7a06c479690c
| 14,603
|
def _jwt_decode_handler_with_defaults(token): # pylint: disable=unused-argument
"""
Accepts anything as a token and returns a fake JWT payload with defaults.
"""
return {
'scopes': ['fake:scope'],
'is_restricted': True,
'filters': ['fake:filter'],
}
|
9374f03065a8592448ae3984e56bb9cae962059f
| 14,604
|
def seconds_to_time(seconds):
    """Format a duration in seconds as 'D days, HH hours, MM minutes, SS seconds'."""
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return "%d days, %02d hours, %02d minutes, %02d seconds" % (
        days, hours, minutes, secs)
|
bf87da51527af08f60b3425169af1f696ecc9020
| 14,605
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.