content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import json
def load_cat_to_name(path):
    """Read a JSON file that maps category labels to flower names.

    Args:
        path: Location of the JSON file (``pathlib.Path`` or str).

    Returns:
        dict: Category labels as keys, flower names as values.
    """
    with open(path, 'r') as handle:
        mapping = json.load(handle)
    return mapping
import re
def naive_error_classifier(error_string):
    """Attempts to guess the cause of an error in R.

    Parameters
    ----------
    error_string : string
        error output

    Returns
    -------
    str
        A human-readable hint for the likely error type (library, setwd,
        or missing/unreadable file), or the empty string when nothing
        matched.
    """
    # Raw strings for the regexes: '\s' in a plain literal is an invalid
    # escape sequence (SyntaxWarning on modern Python). Patterns unchanged.
    if re.search(r'library\s*\(', error_string):
        return '(This is likely an error with a call to the \"library\" function. ' + \
            'Please ensure that you\'re not specifying a particular location for the package you\'re trying to load. ' + \
            'To try and automatically correct this error, select the automatic error fixing option in the Build Image form.)'
    if re.search(r'setwd\s*\(', error_string):
        return '(This is likely an error with a call to the \"setwd\" function. ' + \
            'Please ensure that you\'re not specifying an absolute path for your working directory. ' + \
            'To try and automatically correct this error, select the automatic error fixing option in the Build Image form.)'
    if (re.search(r'file\s*\(', error_string) or re.search('cannot open the connection', error_string)
            or re.search('No such file', error_string)):
        return '(This is likely an error importing data from a file. ' + \
            'Please ensure that you\'re specifying the correct path to your data, and that your data is ' + \
            'included in the file you uploaded. ' + \
            'To try and automatically correct this error, select the automatic error fixing option in the Build Image form.)'
    return ''
def test_model(model, test_review_values):
"""
Get predicted values from test set
:param model: classifier model
:param test_review_values: values to test with
:return: predicted values (binary), predicted values (probability)
"""
predicted = model.predict(test_review_values)
predicted_prob = model.predict_proba(test_review_values)
return predicted, predicted_prob | 0f1309b7d79f2120b35ff54f76f40334dde135ba | 95,545 |
from typing import Any
from pathlib import Path
def r(obj: Any, ignoreintkey: bool = True) -> str:
    """Convert a python object into R repr
    Examples:
        >>> True -> "TRUE"
        >>> None -> "NULL"
        >>> [1, 2] -> c(1, 2)
        >>> {"a": 1, "b": 2} -> list(a = 1, b = 2)
    Args:
        obj: Object to translate. Strings get special handling: the
            case-insensitive literals 'TRUE'/'FALSE'/'NA'/'NULL'/'Inf'/'-Inf'
            map to the corresponding R literals, and an 'r:'/'R:' prefix
            marks the rest of the string as raw R code emitted verbatim.
        ignoreintkey: When keys of a dict are integers, whether we should
            ignore them. For example, when `True`, `{1: 1, 2: 2}` will be
            translated into `"list(1, 2)"`, but `"list(`1` = 1, `2` = 2)"`
            when `False`
    Returns:
        The converted string representation of the object
    """
    # Booleans/None must be checked before other types (bool is an int).
    if obj is True:
        return 'TRUE'
    if obj is False:
        return 'FALSE'
    if obj is None:
        return 'NULL'
    if isinstance(obj, str):
        # Case-insensitive R literals.
        if obj.upper() in ['+INF', 'INF']:
            return 'Inf'
        if obj.upper() == '-INF':
            return '-Inf'
        if obj.upper() == 'TRUE':
            return 'TRUE'
        if obj.upper() == 'FALSE':
            return 'FALSE'
        if obj.upper() == 'NA' or obj.upper() == 'NULL':
            return obj.upper()
        # 'r:'/'R:' prefix: pass through as raw R code.
        if obj.startswith('r:') or obj.startswith('R:'):
            return str(obj)[2:]
        # Any other string becomes a quoted R string.
        return repr(str(obj))
    if isinstance(obj, Path):
        return repr(str(obj))
    # Sequences/sets become R vectors (elements converted recursively).
    if isinstance(obj, (list, tuple, set)):
        return 'c({})'.format(','.join([r(i) for i in obj]))
    if isinstance(obj, dict):
        # list allow repeated names
        # Integer keys are dropped when ignoreintkey, otherwise backtick-quoted;
        # string keys are truncated at the first '#' before quoting.
        # NOTE(review): sorted(obj.items()) fails for unorderable mixed key
        # types — presumably keys are homogeneous; confirm with callers.
        return 'list({})'.format(','.join([
            '`{0}`={1}'.format(
                k,
                r(v)) if isinstance(k, int) and not ignoreintkey else \
            r(v) if isinstance(k, int) and ignoreintkey else \
            '`{0}`={1}'.format(str(k).split('#')[0], r(v))
            for k, v in sorted(obj.items())]))
    # Fallback: numbers and anything else via Python repr.
    return repr(obj) | 5c907ee0725ac49958f01ee0fe59f2b9deb7a4fd | 95,546 |
import typing
import itertools
def islice_limit(iterable,
                 *, limit: typing.Optional[int] = None,
                 offset: typing.Optional[int] = 0):
    """Return a slice from iterable applying limit and offset.
    >>> list(islice_limit('spam', limit=3))
    ['s', 'p', 'a']
    >>> list(islice_limit('spam', offset=3))
    ['m']
    >>> list(islice_limit('spam', offset=1, limit=2))
    ['p', 'a']
    >>> list(islice_limit('spam'))
    ['s', 'p', 'a', 'm']
    """
    has_limit = limit is not None
    if has_limit and offset:
        return itertools.islice(iterable, offset, offset + limit)
    if has_limit:
        return itertools.islice(iterable, limit)
    if offset:
        return itertools.islice(iterable, offset, None)
    # No constraints: hand the iterable back untouched.
    return iterable
def compare_version_numbers(version1, version2):
    """
    Compare two 4-component version numbers (k, l, m, n) of ints.

    Returns -1 if version1 < version2, 0 if equal, +1 if version1 > version2.
    ``None`` sorts before any concrete version.
    """
    if version1 is None and version2 is None:
        return 0
    if version1 is None:
        return -1
    if version2 is None:
        return +1
    for idx in range(4):
        if version1[idx] > version2[idx]:
            return +1
        if version1[idx] < version2[idx]:
            return -1
    return 0
def MakeUrl(host, port=80, location=''):
    """
    Create a Tasmota host url.

    @param host: hostname or IP of Tasmota host
    @param port: port number to use for http connection
    @param location: http url location
    @return: Tasmota http url
    """
    # Port 80 is implicit in HTTP URLs, so it is omitted entirely.
    if port == 80:
        return "http://{0}/{1}".format(host, location)
    return "http://{0}:{1}/{2}".format(host, port, location)
def hex_to_string(val):
    """Convert a hex string to a utf-8 string.

    Accepts padded or unpadded values and an optional '0x' prefix.
    Trailing '0' characters are treated as padding and stripped; the
    string is re-padded to even length before decoding.

    Note: the previous implementation used ``val.strip('0x')``, which
    strips any run of '0'/'x' characters from BOTH ends — corrupting
    values whose first data nibble is 0 (e.g. '0x0a').
    """
    if val is None:
        return ""
    # Remove only a literal '0x' prefix, never significant leading zeros.
    s = val[2:] if val.startswith('0x') else val
    s = s.rstrip('0')
    if len(s) % 2 == 1:
        s += "0"
    return bytes.fromhex(s).decode('utf-8')
def sign(val):
    """
    Mimics MATLAB's sign function.

    :param val: Input value
    :type val: float
    :return: 1.0 for positive, -1.0 for negative, 0.0 for zero
    :rtype: float
    """
    if val == 0.0:
        return 0.0
    return 1.0 if val > 0.0 else -1.0
def get_folded_phase(t, best_period):
    """Fold observation times into phase: the fractional part of t / period."""
    cycles = t / best_period
    return cycles % 1
def poisson(t, y, u):
    """Poisson's equation rewritten as a system of two 1st-order ODEs."""
    _, slope = y
    # y1' = y2, y2' = -u(t)^2 / t
    return [slope, -u(t) ** 2 / t]
def compute_overview_levels(band):
    """Return the list of power-of-two overview levels for a raster band.

    Levels double (2, 4, 8, ...) until the larger dimension, halved at
    each step, no longer exceeds 256 pixels.
    """
    levels = []
    size = max(band.XSize, band.YSize)
    factor = 1
    while size > 256:
        factor *= 2
        levels.append(factor)
        size /= 2
    return levels
def calc_SEFD(A, Tsys, eff=1.0):
    """ Calculate SEFD (system equivalent flux density): 2 * k * Tsys / Ae.
    Tsys = system temperature
    A = collecting area
    Ae = effective collecting area
    eff = aperture efficency (0.0 to 1.0)
    """
    # NOTE(review): this value is Boltzmann's constant (1.380649e-23 J/K)
    # scaled by 1e26 — presumably so the result comes out in Jansky
    # (1 Jy = 1e-26 W m^-2 Hz^-1) when A is in m^2. Confirm the intended
    # units before "correcting" the exponent.
    kb = 1.3806488e3  # 1.38064852e-23 Boltzmann constant
    Ae = A*eff
    return 2 * Tsys * kb / Ae | ac53e3925c8665a4e1ee524f84c49eb244f3adf1 | 95,565 |
import time
def convert_millisecond_date(x: int, date_format: str = "%Y-%m-%d") -> str:
    """
    Format a UTC timestamp given in milliseconds as a date string.

    :param x: time in milliseconds since the epoch
    :param date_format: strftime format to return
    :return: formatted date string
    """
    seconds = x / 1000.0
    return time.strftime(date_format, time.gmtime(seconds))
import itertools
def peek(iterable):
    """
    Return the next value from an iterable together with an iterable that
    still yields that value followed by the rest of the original.
    >>> l = iter([1,2,3])
    >>> val, l = peek(l)
    >>> val
    1
    >>> list(l)
    [1, 2, 3]
    """
    # tee() gives two independent cursors; advancing one leaves the other
    # positioned at the start.
    lookahead, passthrough = itertools.tee(iterable)
    first = next(lookahead)
    return first, passthrough
def div_stand_devs(df, inputs):
    """
    Divide each column of *df* by that column's standard deviation,
    modifying *df* in place and returning it.

    Note: ``inputs`` is accepted for interface compatibility but unused.
    """
    deviations = df.std(axis=0)
    for column in df:
        df[column] /= deviations[column]
    return df
import torch
def init_model(model_path: str):
    """
    Loads PyTorch neural net located at <model_path>, and sets the model to
    evaluation mode (disables dropout and freezes batch-norm statistics).
    :param model_path: Global path to PyTorch model.
        NOTE(review): torch.load unpickles arbitrary objects — only load
        trusted files.
    :return: Module, in eval mode.
    """
    # map_location forces all tensors onto the CPU so the checkpoint loads
    # even on machines without the GPU it was saved from.
    model = torch.load(model_path, map_location=torch.device('cpu'))
    model.eval()
    return model | fc3c5d35099dd306c675b323094dc5d5660f045e | 95,573 |
import json
def dump_jsonl(obj):
    """Convert a list of JSON-compatible objects into a 'JSON Lines' string.
    See http://jsonlines.org/ for the detail of the format.
    Parameters
    ----------
    obj : list of JSON compatible objects
        Objects to be dumped.
    Returns
    -------
    str
        Resulting string, one JSON document per line.
    Raises
    ------
    TypeError
        If *obj* is not a list.
    """
    if not isinstance(obj, list):
        raise TypeError('The most outer structure must be list.')
    lines = [json.dumps(item, ensure_ascii=True) for item in obj]
    return '\n'.join(lines)
def jvq_to_list(jvq):
    """Collect joints 1..7 of a JointValueQuantity into a list.
    Parameters:
        jvq  JointValueQuantity
    Return:
        list with 7 elements (attributes joint_1 .. joint_7)
    """
    values = []
    for num in range(1, 8):
        values.append(getattr(jvq, 'joint_' + str(num)))
    return values
def is_supported_key_type(key):
    """
    Check whether *key* has a supported type.

    Supported Types:
        - int
        - float
        - str
        - tuple
        - NoneType
    """
    return key is None or isinstance(key, (int, float, str, tuple))
def fvfm(fm, f0):
    """Calculate Fv/Fm = (Fm - F0) / Fm.
    :param fm: Fm
    :param f0: F0
    :returns: Fv/Fm (float)
    """
    fv = fm - f0
    return fv / fm
from typing import Dict
def _get_export_config(
multi_value_delimiter: str = "|", limit_records: int = 0, columns_exclude_regex: str = "",
) -> Dict:
"""Packages relevant pieces of JdbcExportInfo object into an exportDataConfig for jdbc export
in form of json dictionary
Args:
multi_value_delimiter: value with which to delimit multivalues. default is |
limit_records: number of records to stream. default is 0 (export all records)
columns_exclude_regex: override config file for columnsExcludeRegex, default is empty
string
Returns:
A dictionary suitable for usage in all df_connect API calls around jdbc export
"""
# build json object
export_config = {
"mergedArrayValuesDelimiter": multi_value_delimiter,
"limitRecords": limit_records,
"columnsExcludeRegex": columns_exclude_regex,
}
return export_config | d9934680b86c87782884608f8ce5a2a1b96187d4 | 95,587 |
def create_cagr(equity, periods=252):
    """
    Compute the compound annual growth rate (CAGR) of a portfolio,
    deriving the number of years from the series length and compounding
    the total return over that span.

    Parameters:
        equity - A pandas Series representing the equity curve
            (assumed normalised so the first value is 1.0 — TODO confirm).
        periods - bars per year: daily (252), hourly (252 * 6.5),
            minute (252 * 6.5 * 60), etc.
    """
    years = len(equity) / float(periods)
    # Positional access via .iloc: plain equity[-1] is a *label* lookup on
    # modern pandas and raises KeyError for a default integer index.
    return (equity.iloc[-1] ** (1.0 / years)) - 1.0
def output_ext(key: int, ext: str):
    """
    Concatenate a file extension and a key:
    key=6, ext='de' --> 'de6'
    """
    return '{}{}'.format(ext, key)
def bubblesort(numbs: list) -> list:
    """
    Sort *numbs* in place with bubble sort and return it.

    Repeatedly sweep left to right, swapping adjacent out-of-order pairs.
    As the docstring of the original promised (but the code did not do),
    the scan stops early once a sweep performs zero swaps, i.e. the array
    is already sorted.

    :param numbs: The array to be sorted (mutated in place)
    :return: The sorted array (same list object)
    """
    n = len(numbs)
    for i in range(n - 1):
        swapped = False
        for j in range(n - i - 1):
            if numbs[j] > numbs[j + 1]:
                numbs[j], numbs[j + 1] = numbs[j + 1], numbs[j]
                swapped = True
        if not swapped:
            break  # no swaps: already sorted, stop early
    return numbs
from typing import Any
def add_to(collection: Any, elem: Any) -> Any:
    """Add element to collection; return new collection.

    Strings are concatenated; otherwise sequence concatenation is tried
    first, then set union. Raises ValueError when neither applies.
    """
    if isinstance(collection, str):
        return collection + elem
    # Try '+' (lists, tuples, ...) then '|' (sets); construction of the
    # single-element wrapper stays inside the try so its TypeError is
    # swallowed too, exactly like the original.
    for combine in (lambda c, w: c + w, lambda c, w: c | w):
        try:
            return combine(collection, type(collection)([elem]))
        except TypeError:
            pass
    raise ValueError("Cannot add element to collection")
def process_ports_list(ports_list: list):
    """
    Concatenate the 'port name' of every port into one string.

    :param ports_list: list of ports, e.g.
        [{'port name': 'RAW_L0', 'port size': 'L0_SIZE_RGB', 'port type': 'B'}]
    :return: the port names joined as 'RAW_L0, ' (each name followed by
        ', '), or the string 'None' when ports_list is None
    """
    if ports_list is None:
        return 'None'
    # join() replaces the quadratic += loop over range(len(...)).
    return ''.join(port['port name'] + ", " for port in ports_list)
def checkArrSqr(arr):
    """Return True if *arr* has a square shape (first two dims equal)."""
    # Return the comparison directly instead of if/else returning True/False.
    return arr.shape[0] == arr.shape[1]
def append_suffix(name: str, suffix: str):
    """
    Append a suffix (e.g. numeric or layer name) to a file-like name; preserve
    the original non-numeric file extension (e.g. .fits); return the unmodified
    name if it already contains this suffix
    :param name: data file name, group name, etc.
    :param suffix: suffix to append, including separator
    :return: `name` with `suffix` appended in the appropriate place
    """
    # Fast path: suffix already sits at the very end.
    if name.endswith(suffix):
        return name
    try:
        # rsplit raises ValueError when there is no '.' at all.
        base, ext = name.rsplit('.', 1)
        if ext:
            # noinspection PyBroadException
            try:
                int(ext)
            except Exception:
                # Non-numeric extension (e.g. 'fits'): keep it, with its dot,
                # so it can be re-attached after the suffix below.
                ext = '.' + ext
            else:
                # Numeric suffix; treat as no suffix
                # (the except below then keeps the whole name as the base).
                raise ValueError('Numeric suffix')
    except ValueError:
        base, ext = name, None
    # NOTE(review): a name ending in a bare '.' loses that dot
    # ('a.' -> 'a<suffix>') because ext is '' — confirm this is acceptable.
    if base.endswith(suffix):
        return name
    name = base + suffix
    if ext:
        name += ext
    return name | 102bd67acb8552963a9ece0dc9e94776732746cb | 95,603 |
def num_months_between(start, end):
    """
    Return the whole-month difference between two dates.
    The day of month is ignored; the result is negative when end < start.
    """
    def _month_index(d):
        # Months since year 0, zero-based.
        return d.year * 12 + d.month - 1
    return _month_index(end) - _month_index(start)
import re
def GetBackgroundColorTypeTagForTableValue(Color, ColorType):
    """Choose the attribute used to color a table value: 'class' when
    ColorType starts with 'colorclass' (case-insensitive), else 'bgcolor'."""
    if re.match("^colorclass", ColorType, re.I):
        return "class"
    return "bgcolor"
import hashlib
def _path_safe(text):
"""
Return a filesystem-safe version of a string ``text``
返回文件系统安全版本的字符串``text``
>>> _path_safe('simple.org').startswith('simple.org')
True
>>> _path_safe('dash-underscore_.org').startswith('dash-underscore_.org')
True
>>> _path_safe('some@symbol?').startswith('some_symbol_')
True
"""
pathable_slot = "".join([c if c.isalnum() or c in '-._' else '_' for c in text])
# as we replace some letters we can get collision for different slots
# add we add unique part
unique_slot = hashlib.md5(text.encode('utf8')).hexdigest()
return '-'.join([pathable_slot, unique_slot]) | 57d5fd527fc5515a84c3aae1b35f91a5da804d0b | 95,607 |
def grange(a, b, step=1.2):
    """
    Return the geometric progression starting at *a* (inclusive),
    multiplying by *step*, up to but excluding *b*.
    """
    values = []
    current = a
    while current < b:
        values.append(current)
        current *= step
    return values
def unpackRangeBits(ur1, ur2, ur3, ur4):
    """Given the ulUnicodeRange1..ulUnicodeRange4 values from the OS/2
    table, return the set of bit numbers (0-127) that are switched on.
    >>> unpackRangeBits(0x0, 0x0, 0x0, 0x0)
    set()
    >>> unpackRangeBits(0x1, 0x0, 0x0, 0x0)
    {0}
    >>> unpackRangeBits(0x1, 0x1, 0x1, 0x1)
    {0, 32, 64, 96}
    """
    bits = set()
    for word_index, field in enumerate((ur1, ur2, ur3, ur4)):
        base = 32 * word_index  # each field contributes 32 consecutive bits
        for offset in range(32):
            if field >> offset & 1:
                bits.add(base + offset)
    return bits
def round_to_closest(x, y):
    """
    Round x up to the nearest value divisible by y
    (any positive remainder bumps the quotient by one).
    """
    quotient, remainder = divmod(x, y)
    if remainder > 0:
        quotient += 1
    return int(quotient * y)
def _casing_permutations(noun_chunk):
"""Generates casing permutations before wiki2vec entity lookup
Case matters during lookup into the pretrained entity embeddings, so
generate some permuatations to avoid misses.
Parameters
----------
noun_chunk : spacy.tokens.Span
Returns
-------
list of str
Different permutations of casing of the input.
"""
return [noun_chunk.text.capitalize(), noun_chunk.text.title()] | 95ba5af822f7e7ddbb6adcf97c0e3e6ff079131f | 95,614 |
def image_to_text(image, threshold):
    """Render an image as ASCII art: '^^' for pixels brighter than
    *threshold*, two spaces otherwise; trailing whitespace is stripped."""
    width, height = image.size
    rows = []
    for y in range(height):
        row = ''.join('^^' if image.getpixel((x, y)) > threshold else '  '
                      for x in range(width))
        rows.append(row)
    return '\n'.join(rows).rstrip()
def vertex_group_data(mesh, index):
    """
    Return the weight of vertex group *index* for each vertex of *mesh*.
    Vertices not assigned to the group contribute 0.0.

    :param mesh: object with a ``vertices`` sequence
    :param index: vertex group index to look up
    """
    weights = []
    for vertex in mesh.vertices:
        weight = 0.0
        for group in vertex.groups:
            if group.group == index:
                weight = group.weight
        weights.append(weight)
    return weights
def _get_tf_batch_norm_parameter_name(bn_counter):
"""Returns the name of the batch_norm layer for the given batch norm index.
Args:
bn_counter: Integer number denoting the batch norm op index.
Returns:
A String denoting the name of the batch norm layer.
"""
if bn_counter != 0:
return 'batch_normalization_%d' % bn_counter
return 'batch_normalization' | 2259cb01188354eaaeab082b6d00071326b9d9fa | 95,618 |
def _pluralize(wordlist):
"""Take a list of words and return a list of their plurals"""
return [i + 's' for i in wordlist] | f3656f4ba4516dd7a8cc3f1a5de2caa0894e9a8e | 95,620 |
def make_edge(u, v):
    """
    Create a canonical tuple for an undirected edge.
    @param u, v endpoints of the edge
    @return tuple (u, v) with the endpoints ordered so that u <= v.
    """
    return (u, v) if u <= v else (v, u)
import json
def prettify_json_message(json_message):
    """
    Pretty-print a json/dict message so it is more readable for humans
    when logged (2-space indentation, keys sorted).
    :param json_message (dict): message to prettify before logging.
    Returns:
        - (str): prettified json message string.
    """
    return json.dumps(json_message, sort_keys=True, indent=2)
def is_multiline(s):
    """Return True if the string contains more than one line.

    Equivalent to the original find()-loop: the answer is True exactly
    when the string contains at least one newline character (so a single
    trailing newline also counts as "multiline", matching the original).

    :param s: string to check
    :return: Bool
    """
    return '\n' in s
def row_split(A, *indices):
    """
    Return a list of the rows of *A* selected by *indices*
    (all rows when no indices are given).
    """
    selected = indices if indices else range(A.shape[0])
    return [A[idx, :] for idx in selected]
def mol_file_basename(tag):
    """Convert a tag into a molecular data file basename.
    A quoted tag starting with '0' (e.g. '032504') is used verbatim;
    a molecule number (int, e.g. 32504) is zero-padded to six digits."""
    if str(tag).startswith('0'):
        return 'c' + tag + '.'
    return 'c{:06d}.'.format(tag)
from typing import Dict
import json
def load_params_file(params_file: str) -> Dict:
    """
    Load a JSON file of training parameters.
    :param params_file: The input file.
    :return: A dictionary of training parameters.
    """
    with open(params_file, 'r') as handle:
        params = json.load(handle)
    return params
def mock_object(**params):
    """Create an ad-hoc object whose attributes are the keyword arguments.
    >>> option = mock_object(verbose=False, index=range(5))
    >>> option.verbose
    False
    """
    # Build a throwaway class carrying the attributes, then instantiate it.
    mock_cls = type('Mock', (), params)
    return mock_cls()
def extract_active_ids(group_status):
    """Extract all server IDs from a scaling group's status report.
    :param dict group_status: The successful result from
        ``get_scaling_group_state``.
    :return: A list of server IDs known to the scaling group.
    """
    active_servers = group_status['group']['active']
    return [server['id'] for server in active_servers]
def calc_check_digit(number):
    """Calculate the check digit: weighted sum (weights 7..2) of the
    first six digits, modulo 10, returned as a string."""
    total = 0
    for position, digit in enumerate(number[:6]):
        total += int(digit) * (7 - position)
    return str(total % 10)
def filter_black(word_list: list[str], black_list: str):
    """Filter out words containing any of the given letters.

    Single pass with a set, instead of rebuilding the whole list once
    per blacklisted letter.

    :param word_list: candidate words
    :param black_list: string of forbidden letters
    :return: words from word_list containing none of the letters
    """
    banned = set(black_list)
    return [word for word in word_list if banned.isdisjoint(word)]
def proxy_url(value, socks_scheme=None):
    """
    Parse a single proxy config value from FindProxyForURL() into a more usable element.
    :param str value: Value to parse, e.g.: ``DIRECT``, ``PROXY example.local:8080``, or ``SOCKS example.local:8080``.
    :param str socks_scheme: Scheme to assume for SOCKS proxies. ``socks5`` by default.
    :returns: Parsed value, e.g.: ``DIRECT``, ``http://example.local:8080``, or ``socks5://example.local:8080``.
    :rtype: str
    :raises ValueError: If input value is invalid.
    """
    if value.upper() == 'DIRECT':
        return 'DIRECT'
    parts = value.split()
    if len(parts) == 2:
        keyword = parts[0].upper()
        proxy = parts[1]
        if keyword == 'HTTPS':
            return 'https://' + proxy
        if keyword == 'PROXY':
            return 'http://' + proxy
        if keyword == 'SOCKS':
            scheme = socks_scheme or 'socks5'
            return '{0}://{1}'.format(scheme, proxy)
    raise ValueError("Unrecognized proxy config value '{}'".format(value))
def list_results(query_results):
    """
    Split a SPARQL JSON response into its keys and result bindings.

    keys :: [key]  — all possible keys (not all results contain all keys).
    results :: [dict]  — list of result bindings.
    """
    head = query_results['head']
    body = query_results['results']
    return head['vars'], body['bindings']
from typing import Optional
import re
def parse_to_n_digit(url: str) -> Optional[str]:
    """Parse an n-hentai url to its digits: the first run of digits not
    starting with 0, or None when absent."""
    match = re.search(r'[1-9][0-9]*', url)
    if match is None:
        return None
    return match.group(0)
def _embed_initial_state(initial_state, embedding, qubits):
"""Embed the states provided by the initial_state parameter used for reverse annealing.
Args:
initial_state (list of lists): Logical initial state as it would be passed to SAPI for reverse annealing.
embedding (dict): The embedding used to embed the initial state. Maps logical indices to chains.
qubits (list): A list of qubits on the target topology.
Returns (list of lists):
The initial_state, embedded according to the provided embedding.
"""
# Initialize by setting all qubits to 1 (these will be overwritten for active qubits).
embedded_state = {q: 1 for q in qubits}
for logical_idx, logical_value in initial_state: # Iterate through the logical qubit, state pairs.
for embedded_idx in embedding[logical_idx]: # For each embedded qubit in the corresponding chain...
embedded_state[embedded_idx] = int(logical_value) # make the embedded state equal to the logical state.
# Convert dictionary to a list of lists.
embedded_state_list_of_lists = [[q_emb, embedded_state[q_emb]] for q_emb in sorted(embedded_state.keys())]
return embedded_state_list_of_lists | 0c5b3722421288674efceba743a55c45d90ac7d4 | 95,666 |
def maxL(*it):
    """
    Calculate maximum length of provided items.
    Parameters
    ----------
    *it : objects
        Items of various lengths. Items without a length are ignored.
    Returns
    -------
    Length of longest object (int); 1 when no item has a length.
    """
    lengths = set()
    for item in it:
        try:
            lengths.add(len(item))
        except TypeError:
            continue  # item has no len(); skip it
    return max(lengths) if lengths else 1
def list_flatten(input_list):
    """Flatten an N-dimensional list of lists into a single flat list."""
    # Flatten one nesting level...
    flattened = []
    for element in input_list:
        if isinstance(element, list):
            flattened.extend(element)
        else:
            flattened.append(element)
    # ...then recurse until no list elements remain.
    if any(isinstance(element, list) for element in flattened):
        return list_flatten(flattened)
    return flattened
def index_fixer(x_init, y_init):
    """
    Drop NaN-containing rows from X and y, then align both to the index
    they have in common.

    :param x_init: input X df
    :param y_init: input y
    :return: (X, y) restricted to their shared, NaN-free indices
    """
    x_clean = x_init.dropna(how='any', axis=0)
    y_clean = y_init.dropna(how='any', axis=0)
    shared = x_clean.index.intersection(y_clean.index)
    return x_clean.loc[shared], y_clean.loc[shared]
import re
def remove_comments(contents):
    """
    Remove '#' comments from *contents*.

    Unlike the previous ``#.*?\\n`` substitution, the newline terminating
    each comment line is preserved (so code lines are not merged together),
    and a comment on the final line without a trailing newline is removed too.

    Note: a '#' inside a string literal is still treated as a comment start.
    """
    return re.sub(r'#[^\n]*', '', contents)
import yaml
def replace_pcell_params_from_yaml(pcell_script, params_yaml_path):
    """Replace PCell params from YAML file
    Parameters
    ----------
    pcell_script : str
        Python PCell script containing '{param}' / '{cm-param}' tags.
    params_yaml_path : str
        Absolute path of design params YAML.
    Returns
    -------
    pcell_script : str
        PCell script with tags replaced with params in YAML file.
        Returned unchanged (best-effort) if the YAML is missing or invalid.
    """
    try:
        with open(params_yaml_path, 'r') as f:
            params_dict = yaml.load(f, Loader=yaml.FullLoader)
    except FileNotFoundError:
        print("YAML file does not exist... Cannot obtain design params... Passing...")
        return pcell_script
    except yaml.YAMLError:
        print("Error in YAML file... Cannot obtain design params... Passing...")
        return pcell_script
    # NOTE(review): an empty YAML file yields params_dict = None, which makes
    # .get() below raise AttributeError — confirm whether that can occur.
    # Each section is optional; substitution is skipped when a section is absent.
    design_params = params_dict.get('design-params')
    if design_params:
        for param_name, param_val in design_params.items():
            pcell_script = pcell_script.replace('{%s}' % param_name, str(param_val))
    simulation_params = params_dict.get('simulation-params')
    if simulation_params:
        for param_name, param_val in simulation_params.items():
            pcell_script = pcell_script.replace('{%s}' % param_name, str(param_val))
    # Compact-model parameters use a 'cm-' prefixed tag, e.g. '{cm-vth}'.
    compact_model_params = params_dict.get('compact-model')
    if compact_model_params:
        for param_name, param_val in compact_model_params.items():
            pcell_script = pcell_script.replace('{cm-%s}' % param_name, str(param_val))
    return pcell_script | 152e2689c9dacdd68aa09bbb141dfbc79de96539 | 95,684 |
def next_marker_by_offset(collection, limit, marker):
    """
    Returns the next marker that is just the current marker offset by the
    length of the collection or the limit, whichever is smaller
    :param collection: an iterable containing the collection to be paginated
    :param limit: the limit on the collection
    :marker: the current marker used to obtain this collection
    :return: the next marker that would be used to fetch the next collection,
        based on the offset from the current marker
    """
    # NOTE(review): despite the docstring, `collection` is never consulted —
    # the marker always advances by exactly `limit`, even when fewer than
    # `limit` items remain. Confirm whether min(len(collection), limit)
    # was intended before changing behavior callers may rely on.
    return (marker or 0) + limit | ae13d1568dc27ab64d8821ba530f2e294ec47082 | 95,690 |
import random
def evolve(pop, mutate_rate, mu, lambda_):
    """
    :param pop: populations of the parents
    :param mutate_rate: default rate is 0.02 in function mutate
    :param mu: select the size of parents who behave better
    :param lambda_: size of the mutated children
    :return: a new populations
    """
    # Sort ascending by fitness so the best individuals end up at the tail.
    pop = sorted(pop, key=lambda individual: individual.score)
    parents = pop[-mu:]
    # generate lambda new children via mutation
    offspring = []
    for i in range(lambda_ - 3):
        parent = random.choice(parents)
        offspring.append(parent.mutate(mutate_rate))
    # Elitism: the two fittest individuals each contribute three extra
    # children mutated at a slightly reduced rate.
    # NOTE(review): this adds 6 children, so len(offspring) == lambda_ + 3,
    # not lambda_ as the comment above implies — confirm intended.
    best = pop[-1]
    second_best = pop[-2]
    for i in range(3):
        offspring.append(best.mutate(mutate_rate - 0.01))
        offspring.append(second_best.mutate(mutate_rate - 0.01))
    return parents + offspring | d6c98156b22b6a1c53bc30b2d2a2152e1c6babf2 | 95,695 |
from typing import List
from typing import Dict
def build_url_filter(
    excluded_fields: List[str],
    included_fields: List[str]
) -> Dict[str, Dict[str, bool]]:
    """Return a URL filter based on included and excluded fields.
    If a field appears in both excluded_fields and included_fields, it
    is ultimately included.
    Args:
        excluded_fields (list[str]): names of the fields to explicitly
            exclude from the result.
        included_fields (list[str]): names of the fields to explicitly
            include in the result.
    Returns:
        dict: the query, as a dict in the format for the API
        (empty when no fields were given).
    """
    # Excluded fields first, then included ones — so inclusion wins.
    flags = {name: False for name in (excluded_fields or [])}
    flags.update({name: True for name in (included_fields or [])})
    # The API uses 'calibration' for what callers call 'properties'.
    if 'properties' in flags:
        flags['calibration'] = flags.pop('properties')
    return {'fields': flags} if flags else {}
def count_model_params(model):
    """
    Count the trainable and non-trainable parameters of a model.
    :param model: A PyTorch nn.Module object.
    :return: A tuple (train_params_count, non_train_params_count)
    """
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    frozen = sum(p.numel() for p in model.parameters() if not p.requires_grad)
    return trainable, frozen
def pad_pkcs5(data, block_size):
    """
    Return *data* padded using PKCS5.
    For a block size B and data with N bytes in the last block, PKCS5
    pads the data with B-N characters of value B-N (a full extra block
    when the data is already block-aligned).
    :param data: Data to be padded (:class:`str`).
    :param block_size: Size of the block (:class:`int`).
    :return: :class:`str` -- PKCS5 padded string.
    """
    pad_len = block_size - len(data) % block_size
    padding = chr(pad_len) * pad_len
    return data + padding
import string
import random
def _random_string(length, chars=string.ascii_lowercase, prefix='', suffix=''):
"""
Return a random string with given length, prefix and suffix.
The random characters are chosen from the chars iterable (lowercase
letters by default)
"""
r = ''.join(random.choice(chars) for i in range(length))
return prefix + r + suffix | 9d2f349d5ddcacf6f1b367bac0f8dbbbd9b31fa2 | 95,704 |
def split_context(method, args, kwargs):
    """ Pop the 'context' keyword out of *kwargs* (None when absent) and
    return the triple ``context, args, kwargs``.
    """
    context = kwargs.pop('context', None)
    return context, args, kwargs
def clean_text(text: str) -> str:
    """Clean the text of a string and make it URI-compatible
    (double-percent-encodes a small set of reserved characters).
    Args:
        text (str): the text to clean
    Returns:
        str: cleaned text
    """
    replacements = (
        (",", "%252C"),
        ("?", "%253F"),
        ("/", "%252F"),
        ("\\", "%255C"),
        ("#", "%2523"),
        (" ", "%2520"),
    )
    for raw, encoded in replacements:
        text = text.replace(raw, encoded)
    return text
def tupleized(a, b):
    """Pack the two arguments into a tuple, reversed: returns (b, a).

    (Python returns a single value — None by default — so multiple
    return values are packed into a tuple.)
    """
    return (b, a)
def lookup(dictionary, key, *keys):
    """
    Helper to look up a key, or a chain of nested keys, within a dictionary.
    Missing intermediate keys resolve to {}, so the final result is None
    when any step is absent.
    """
    current = dictionary
    path = (key,) + keys
    for step in path[:-1]:
        current = current.get(step, {})
    return current.get(path[-1])
def get_target(df):
    """Return the 'target' column of *df* as a numpy array."""
    target_column = df['target']
    return target_column.values
def link(href):
    """Build a regex matching an <a> tag whose href equals *href*."""
    return '<a ([^>]*?)href="' + href + '"'
from typing import Union
import json
def dump_json(path: str, data: Union[dict, list], indent: int = 4) -> str:
    """Serialize *data* as JSON into the file at *path*.

    Parameters
    ----------
    `path` : str
        Destination file path (created or overwritten).
    `data` : Union[dict, list]
        Any JSON-serializable object.
    `indent` : int
        Pretty-print indentation width. Defaults to 4.

    Returns
    -------
    `str` :
        The same *path*, for convenient chaining.
    """
    with open(path, "w+") as handle:
        json.dump(data, handle, indent=indent)
    return handle.name if False else path
def tesla_to_gauss(tesla):
    """Convert a magnetic flux density from tesla to gauss (1 T = 10^4 G)."""
    return tesla * 10000.0
def vector_to_probs(vec, counts):
    """ Return dict of probabilities.

    Pairs each key of ``counts`` (in iteration order) with the probability
    at the same position in ``vec``.

    Parameters:
        vec (ndarray): 1d vector of probabilities, ordered like ``counts``.
        counts (dict): Dict of counts; only its keys are used.

    Returns:
        dict: dict of probabilities keyed like ``counts``.
    """
    # enumerate() replaces the hand-maintained index counter; vec shorter
    # than counts still raises IndexError, matching the original behavior.
    return {key: vec[idx] for idx, key in enumerate(counts)}
def load_palette(filename):
    """
    Load a 64-entry palette from a binary .pal file (3 bytes per color).
    Palettes can be created from bisqwit's tool here: https://bisqwit.iki.fi/utils/nespalette.php
    :return: A list of rgb tuples (elements in range 0-255) corresponding to the palette colors
    """
    with open(filename, "rb") as handle:
        raw = handle.read()
    # Each color occupies 3 consecutive bytes; slice out the first 64 colors.
    return [tuple(raw[offset:offset + 3]) for offset in range(0, 64 * 3, 3)]
def get_engine(self, name):
    """
    Look up the (template) engine registered under the given name in the
    controller's currently set map of engines.

    :type name: String
    :param name: Short (plugin) name of the engine to retrieve.
    :rtype: Plugin
    :return: The matching template engine plugin, or None when no engine
    is registered under the given name; may later be used for rendering
    template based files.
    """
    engines_map = self.engines
    return engines_map.get(name)
def get_year(date):
    """Return the year portion (first four characters) of a DATE string."""
    return date[:4]
import tempfile
import json
def convert_txt_to_tmp_json_file(txt_inference: str) -> str:
    """Write *txt_inference* into a temp JSON file shaped for read_examples.

    The file holds a one-element list: ``[{"doc_id": 0, "doc_text": <text>}]``.
    Returns the temp file's path; the caller is responsible for removing it.
    """
    payload = [{"doc_id": 0, "doc_text": txt_inference}]
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as handle:
        json.dump(payload, handle)
    return handle.name
def _execute(executable_resource):
    """
    Invoke a get/list Google Classroom SDK function.

    NOTE(review): the original docstring claimed this retries on errors,
    but no retry logic is visible in this function — retries, if any, must
    live in the SDK resource or in a caller/decorator. Confirm before
    relying on it.

    Parameters
    ----------
    executable_resource: function
        is the get/list Google Classroom SDK request object to call; must
        expose an ``execute`` method.

    Returns
    -------
    object
        a Google Classroom SDK response object

    Raises
    ------
    IOError
        if the underlying request raises IOError
    RequestException
        if the underlying request raises RequestException
    """
    # NOTE: assert is validation only — it is stripped under `python -O`.
    assert hasattr(executable_resource, "execute")
    return executable_resource.execute()
from typing import List
def tokenize(expr: str) -> List[str]:
    """Split expression `expr` into a list of tokens on single spaces."""
    tokens = expr.split(" ")
    return tokens
from typing import TextIO
def read_sequence(input_io: TextIO) -> str:
    """Read sequence data: concatenate all lines with whitespace stripped."""
    stripped_lines = (line.strip() for line in input_io.readlines())
    return "".join(stripped_lines)
def get_number_of_epochs(opt, loader_train):
    """Derive the training epoch count from dataset length and step budget.

    When ``opt['epochs']`` is negative, the count is computed so that about
    ``opt['nsteps']`` optimizer steps are performed in total; otherwise it
    is taken as-is. Returns ``(epochs, iterations_per_epoch)``.
    """
    dataset_size = len(loader_train.dataset)
    batch_size = opt['bs']
    # Ceiling division: number of batches needed to cover the dataset once.
    iter_per_epoch = (dataset_size + batch_size - 1) // batch_size
    epochs = opt['epochs']
    if epochs < 0:
        epochs = 1 + opt['nsteps'] // iter_per_epoch
    return epochs, iter_per_epoch
import re
def parse_dependencies_from_ldd_output(content):
    """Parse the shared-library dependency paths out of `ldd` output.

    Args:
        content: The output of `ldd`, as a single string or a list of lines.

    Returns:
        list[str]: Absolute paths of the resolved dependencies, in order.
    """
    # isinstance() instead of type() ==; raw strings so the \s escapes are
    # real regex tokens rather than deprecated string escapes.
    if isinstance(content, str):
        content = content.split('\n')
    dependencies = []
    for line in content:
        # Special case of invoking the linker itself as `ldd`: this line is
        # the hardcoded INTERP path, and the full path cannot be recovered
        # from this command output, so exclude it.
        if re.search(r'^\s*(/.*?)\s*=>\s*ldd\s*\(', line):
            continue
        # Normal "name => /path (addr)" lines first, then bare
        # "/path (addr)" lines such as the dynamic loader entry.
        match = re.search(r'=>\s*(/.*?)\s*\(', line)
        match = match or re.search(r'\s*(/.*?)\s*\(', line)
        if match:
            dependencies.append(match.group(1))
    return dependencies
import logging
def run_initializer(initializer, initargs):
    """Run the Pool initializer, logging (not propagating) any failure.

    Returns True when the initializer completed, False when it raised.
    """
    try:
        initializer(*initargs)
    except Exception as error:
        # Broad on purpose: a Pool initializer must never kill the worker.
        logging.exception(error)
        return False
    return True
def convert_to_oneline(multiline: str) -> str:
    """Collapse a multiline Sleep command into a single line.

    Newlines, four-space runs (spaces used as tabs) and tab characters are
    removed, in that order, so the command can be sent as one line.

    Args:
        multiline (str): The multiline Sleep command.

    Returns:
        str: A single-lined version of the same Sleep command.
    """
    collapsed = multiline
    for fragment in ('\n', '    ', '\t'):
        collapsed = collapsed.replace(fragment, '')
    return collapsed
def list_from_file(filename, encoding='utf-8'):
    """Read a text file into a list of strings, one per line, with the
    trailing "\\r" and "\\n" of each line removed.

    Note:
        This will be replaced by mmcv's version after it supports encoding.

    Args:
        filename (str): Filename.
        encoding (str): Encoding used to open the file. Default utf-8.

    Returns:
        list[str]: A list of strings.
    """
    with open(filename, 'r', encoding=encoding) as handle:
        return [line.rstrip('\n\r') for line in handle]
def constrain(x, x_max, x_min=None):
    """
    Clamp a number into the closed interval [x_min, x_max].

    Parameters
    ----------
    x : scalar
        The number to constrain.
    x_max : scalar
        The upper bound for the constraint.
    x_min : scalar, default=None
        The lower bound; when omitted, the interval is symmetric,
        i.e. [-x_max, x_max].

    Returns
    -------
    scalar
        The constrained number.
    """
    lower = -x_max if x_min is None else x_min
    # Same min(max(...)) composition as before, so the corner case
    # lower > x_max still resolves to x_max.
    clamped = max(x, lower)
    return min(clamped, x_max)
import json
def ensure_serializable(data):
    """
    Return *data* unchanged when it is JSON-serializable, else its str() form.

    Args:
        data: Any data to serialize.

    Returns:
        data: The original object, or ``str(data)`` when json.dumps fails.
    """
    try:
        json.dumps(data)
        return data
    except (TypeError, ValueError):
        # TypeError: unsupported type; ValueError: e.g. circular reference.
        # Previously a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and programming errors.
        return str(data)
import math
def logistic_function(beta, h):
    """Logistic (sigmoid) of the linear predictor ``beta[0] + beta[1]*h``."""
    z = beta[0] + beta[1] * h
    return 1.0 / (1.0 + math.exp(-z))
def _check_if(cond, fn, value, name):
"""Performs a check if condition is true."""
return fn(value, name) if cond else value | 73adef5ad56934f3bf9c77bd75ac3c3a1dee034b | 95,796 |
import toml
def environment(file, transect):
    """
    Read environmental metadata, temperature and salinity, for one named
    transect from the file "transects.toml".

    Args:
        file (str): path to transects.toml.
        transect (str): Name of transect.

    Returns:
        float: Temperature (degrees Celsius)
        float: Salinity (PSU)
    """
    contents = toml.load(file)
    matches = [entry for entry in contents['transect'] if entry['name'] == transect]
    if not matches:
        raise Exception('Transect name doesn\'t exist')
    record = matches[0]
    return record['temperature'], record['salinity']
def get_points(view):
    """Return the caret position (``region.b``) of every selection in *view*."""
    points = []
    for selection_region in view.sel():
        points.append(selection_region.b)
    return points
def determine_elevations(grid_elevation_file, elevations, height):
    """Determine a list of adjusted elevations to use.

    Args:
        grid_elevation_file <file>: File the bottom elevation is written to
        elevations [<float>]: Initial list of elevations
        height <float>: Height for the bottom pressure layer

    Returns
        [<float>]: List of elevations to use
    """
    # Work on a copy so the caller's list is untouched.
    new_elevations = list(elevations)
    # Bottom layer becomes the geometric height, floored at 0.0 when
    # the height is negative.
    new_elevations[0] = max(height, 0.0)
    # Record the bottom elevation twice: with more precision for science
    # calculations and with less to exactly match the directory name.
    grid_elevation_file.write(
        '{0:05.8f} {0:05.3f}\n'.format(new_elevations[0]))
    return new_elevations
def energyNormFunc(v, mean, stddev):
    """Energy-normalize *v* toward the 0-255 range.

    For normally distributed data ~95% of values lie within 2 stddevs of
    the mean, so that band is scaled onto +/-127.5 and then shifted up by
    127.5. The divisor is floored at 0.01 to avoid near-zero stddevs.
    """
    scale = 127.5 / max(2 * stddev, 0.01)
    return (v - mean) * scale + 127.5
def all_exist(paths):
    """Return True if every path in *paths* exists (vacuously True when empty)."""
    # Generator instead of a materialized list: short-circuits on the first
    # missing path and avoids the intermediate allocation.
    return all(path.exists() for path in paths)
def get_current_light_reading(response):
    """Return the 'light' value from *response* (None when absent)."""
    return response.get('light')
import csv
from collections import namedtuple
def new_objects_from_csv(csv_file: str):
    """For each data row of *csv_file*, build a custom object whose
    attributes are named after the header-row column names.

    The first row is read as the header and used to declare a namedtuple
    type (``rename=True`` sanitizes names that are not valid identifiers);
    each following row becomes one instance.

    Reference:
        https://realpython.com/python-namedtuple/

    Args:
        csv_file (str): Reference an existing .csv.

    Returns:
        object: List of the custom objects, one per data row.
    """
    with open(csv_file, 'r') as handle:
        rows = csv.reader(handle)
        CustObj = namedtuple('CustObj', next(rows), rename=True)
        return [CustObj(*row) for row in rows]
def find_datafields(obj):
    """Return pairs of (`number`, `title`) for all available data fields in
    `obj`.

    A data field is any key of the form ``/<n>/data/title``; the paired
    title is the value stored under that key.

    Returns:
        zip: Iterator of (channel_number, title) pairs, in key order.
    """
    token = '/data/title'
    # Iterate keys directly; the previous .items() loop discarded the values.
    channels = [int(k[1:-len(token)]) for k in obj if k.endswith(token)]
    titles = [obj['/{}/data/title'.format(ch)] for ch in channels]
    return zip(channels, titles)
def recurrent_name(recurrent):
    """
    Gets a human readable name for the recurrent: its ``__name__`` with
    underscores turned into spaces.

    :param recurrent: recurrent instance
    :return: name
    """
    # split('_') + ' '.join(...) is equivalent to a plain replace, including
    # for consecutive underscores (split keeps the empty segments).
    return recurrent.__name__.replace('_', ' ')
def reactionStrToInt(reactionStr):
""" Groups GENIE reaction string into following categories
Reactions can be divided into two basic categories:
1) CC, 2) NC
These can then be further divided into subcategories of
interaction modes
1) QES, 2) RES, 3)DIS, 4) COH
And the target nucleon can be either:
1) n, 2) p, 3) other (e.g. C12 in case of COH)
We use a three digit integer to represent all the different
possibilities, with the order being:
first digit = target_nucleon
second digit = CC_NC
third digit = interaction_mode
In the case of a string that doesn't fit any of the
possibilities, a "0" is returned
"""
reactionInt = 0
if 'QES' in reactionStr:
reactionInt += 1
elif 'RES' in reactionStr:
reactionInt += 2
elif 'DIS' in reactionStr:
reactionInt += 3
elif 'COH' in reactionStr:
reactionInt += 4
if 'CC' in reactionStr:
reactionInt += 10
elif 'NC' in reactionStr:
reactionInt += 20
if '2112' in reactionStr:
reactionInt += 100
elif '2212' in reactionStr:
reactionInt += 200
else:
reactionInt += 300
return reactionInt | c64adc712bc996923ad4991e51d64abcb4b2e69a | 95,820 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.