content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k)
|---|---|---|
def get_latest_instance(model):
""" Returns the latest row in a model """
return model.objects.all().order_by('id').last()
|
0b8ba9e09637c25516b1c02d926835772664fe3d
| 464,164
|
def iter_range(items):
""" returns a zero based range list for iteration in templates """
return range(0,items)
|
15a22a26acdd8f0f2d4559d53f87837b634db5c2
| 609,317
|
def get_full_text(status):
"""Full text of a tweet."""
if hasattr(status, "retweeted_status"):
try:
return status.retweeted_status.extended_tweet["full_text"]
except AttributeError:
return status.retweeted_status.text
try:
return status.extended_tweet["full_text"]
except AttributeError:
return status.text
return None
|
a8d750f0319fec192cba12cfb3b74b16ec62b749
| 535,792
|
def get_start_middle_end_point(line):
"""
    Get three points on a line.
    The line geometry is first converted to a DB.Curve.
:param line: Line
:type line: DB.Line
:return: Start, middle and end point of line
:rtype: [DB.XYZ, DB.XYZ, DB.XYZ]
"""
curve = line.GeometryCurve
start = curve.GetEndPoint(0)
end = curve.GetEndPoint(1)
middle = (start + end)/2
return start, middle, end
|
b6b4f578cd8ce44663a3c7ebb0c6b7dff3ab18d8
| 668,754
|
def convert_ms(ms):
"""
Converts milliseconds into h:m:s
:param ms: int
:return: str
"""
seconds = (ms / 1000) % 60
seconds = int(seconds)
if seconds < 10:
seconds = "0" + str(seconds)
else:
seconds = str(seconds)
minutes = (ms / (1000 * 60)) % 60
minutes = int(minutes)
if minutes < 10:
minutes = "0" + str(minutes)
else:
minutes = str(minutes)
hours = (ms / (1000 * 60 * 60)) % 24
hours = int(hours)
if hours < 10:
hours = "0" + str(hours)
else:
hours = str(hours)
return hours + ":" + minutes + ":" + seconds
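# Added sanity check (not part of the original snippet): 3,661,000 ms is
# 1 hour, 1 minute and 1 second, so the result should be zero-padded.
assert convert_ms(3661000) == "01:01:01"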
|
1b39e0ac151fbdf6a3e7a6a15e83c36ff40e0810
| 42,011
|
def timedelta_to_seconds(td):
"""
Converts a timedelta to total seconds.
(This is built-in in Python 2.7)
"""
# we ignore microseconds for this
if not td:
return None
return td.seconds + td.days * 24 * 3600
|
ef4ebd88581d8a2a1f64b9f940afbe22da8f55bc
| 699,003
|
import math
def is_triangle(n):
"""Return True iff n is a triangle number."""
x = (-1 + math.sqrt(1 + 8 * n)) / 2
return x == int(x)
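# Added alternative sketch (not part of the original snippet): `is_triangle_exact`
# is a hypothetical integer-only variant that avoids floating-point equality by
# checking whether 8*n + 1 is a perfect square (requires Python 3.8+ for math.isqrt).
def is_triangle_exact(n):
    """Return True iff n is a triangle number, using only integer arithmetic."""
    d = 8 * n + 1
    r = math.isqrt(d)
    return r * r == d

assert is_triangle_exact(10) and not is_triangle_exact(11)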
|
ad81cf7e6ad5e8eb7a4a85ecd6a6c8d5f1e8201e
| 533,944
|
import itertools
def co_oridinates(l):
"""
    Returns a list of co-ordinates for slicing based on the value l. Used for resizing
    grids which contain 3 x 3 geometry or 9 square shapes within the grid in total.
Parameters:
l (int): Size of each square in grid to be re-sized
Returns:
    list: List of indexes for the starting point of each square within the shape
"""
# Get the step size for each square ie the length of each square shape within the grid
step =int(l/3)
lst = []
# For each square loop through and get the starting row and col index for that square
for i in range(0, l, step):
lst.append(list([i, i+step]))
# Get all combinations of lst with itself for all row and col slice combinations (9 in total)
return list(itertools.product(lst, lst))
|
162f59dcce607f0286d7611a090500af9af31543
| 150,052
|
def retirement_plan(estimated_savings, state_costs):
    """Filter states by whether the customer's savings cover the cost of living comfortably there.
    args:
        estimated_savings(float): The customer's expected amount of savings.
        state_costs: The list of available states where the user wants to live.
    QUESTIONS TO ANSWER:
        "WHERE DO YOU WANT TO LIVE / HOW MUCH DO YOU PLAN ON SAVING?"
    Return:
        a list of available states depending on how much the customer has in savings
"""
state_approval_list=[]
for state in state_costs:
if estimated_savings >= float(state[2]):
state_approval_list.append(state)
return state_approval_list
|
b1cf794f181d7e4d77f225958e9c6c990ea96d60
| 168,475
|
def _fake_is_smpl(*args):
"""Assume the Shadow Image pair status is SMPL."""
return True
|
4f059a0db244e51a17e4ecad5c3165e2cb458afb
| 286,158
|
from typing import Dict
from typing import Any
from typing import List
def validate_required_keys_for_update_asset(valid_json: Dict[str, Any], required_keys: List[str],
required_keys_for_bulk_update: List[str]) -> bool:
"""
Check if the required keys for updating an asset or required keys for bulk updating assets are present or not
:param valid_json: The valid input asset JSON
:param required_keys: The required keys for updating an asset
:param required_keys_for_bulk_update: The required keys for bulk updating assets
    :return: True if the required keys are present, else False
"""
return (all(key in valid_json.keys() for key in required_keys) or all(key in valid_json.keys()
for key in required_keys_for_bulk_update))
|
0c6a83fc606c235911e7937e3256707aaf7d8f5a
| 360,419
|
from typing import Optional
from typing import Tuple
import re
def _guess_show_num(episode_name: str) -> Optional[Tuple[int, int]]:
"""
Guess the show number from the episode name.
Supports:
Exx-Eyy
Exx
YYYYY show_num ZZZZZZZZ ...
Returns:
        (show_num_start, show_num_end) where show_num_end equals show_num_start if only one show number is present.
        If no show number is present, returns None.
"""
one_episode_regexp = re.compile(r"^E(?P<show_num_start>\d+)$")
two_episode_regexp = re.compile(r"^E(?P<show_num_start>\d+)-E(?P<show_num_end>\d+)$")
match = two_episode_regexp.match(episode_name)
if match:
return (
int(match.group("show_num_start")),
int(match.group("show_num_end")),
)
match = one_episode_regexp.match(episode_name)
if match:
return (int(match.group("show_num_start")), int(match.group("show_num_start")))
try:
show_num = int(episode_name.split(" ")[1])
return (show_num, show_num)
except (IndexError, ValueError):
return None
|
71708a616a57e852a1bb9c79e521e1432df5b5f5
| 518,070
|
def map_dict(d, fn):
"""Recursively does operations to all values in a nested dictionary and
returns a new dictionary of the result.
Arguments: \n
d = dictionary to do operations on \n
fn = function to apply\n"""
o = {}
for key, value in d.items():
if isinstance(value, dict):
o[key] = map_dict(value, fn)
else:
o[key] = fn(value)
return o
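# Added usage sketch (not part of the original snippet): fn is applied to every
# leaf value while the nesting structure is preserved.
assert map_dict({"a": 1, "b": {"c": 2}}, lambda v: v * 10) == {"a": 10, "b": {"c": 20}}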
|
caceb0cfb7e01a83c9c0dbc1cb21bf7e3e8b2002
| 138,543
|
def parse_input(fname: str):
"""Read the input file and return the parsed data."""
data = []
with open(fname, "rt") as inf:
for line in inf:
(op, arg) = line.strip().split()
data.append((op, int(arg)))
return data
|
b408dbddfd3f46a596d1b50bc7f72aab70a54c95
| 473,335
|
import csv
import itertools
def setup_temperature_csv(csv_location):
"""Return a list of CSV values containing day & temperature data. The index
of the list represents the day of the year, and the value at that index represents
the temperature at that day.
"""
with open(csv_location) as csvfile:
reader = csv.DictReader(csvfile)
rows = [list(itertools.repeat(float(row['Celsius']), 24)) for row in reader]
return rows
|
0a06c77fb7a1ce968384744b0069cda7af22b6ca
| 609,294
|
def is_guc_configurable(cursor, guc):
"""Check if guc is a preset parameter
https://www.postgresql.org/docs/current/static/runtime-config-preset.html
"""
cursor.execute("""
SELECT EXISTS
(SELECT 1
FROM pg_settings
WHERE context <> 'internal'
AND name = %s);
""",
(guc,)
)
return cursor.fetchone()[0]
|
ca01a26c7cd276eabc44414820cb8c10dd837a18
| 642,127
|
def formatdt(date, include_time=True):
"""Formats a date to ISO-8601 basic format, to minute accuracy with no
timezone (or, if include_time is False, omit the time)."""
if include_time:
return date.strftime("%Y-%m-%dT%H:%M")
else:
return date.strftime("%Y-%m-%d")
|
91cb0acaf6a20a68c00557bacbd9327db624b927
| 667,338
|
def _generate_magic_packet(mac_address):
"""Generate WoL magic packet.
A WoL 'magic packet' payload consists of six FF (255 decimal) bytes
followed by sixteen repetitions of the target's 6-byte MAC address.
Parameters
----------
mac_address : str
12-digit hexadecimal MAC address without separators.
Returns
-------
bytes
102-byte magic packet payload.
"""
return bytes.fromhex("FF" * 6 + mac_address * 16)
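# Added usage sketch (not part of the original snippet): `_send_magic_packet` is a
# hypothetical helper that broadcasts the payload over UDP port 9, a common
# Wake-on-LAN convention.
import socket

def _send_magic_packet(mac_address, broadcast_ip="255.255.255.255", port=9):
    packet = _generate_magic_packet(mac_address)
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        # Allow sending to the broadcast address.
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.sendto(packet, (broadcast_ip, port))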
|
3bf41766d18ecb12dd63515e05d7575a8aadfeca
| 303,635
|
def LB_commutation_grad(C, ev_sqdiff):
"""
Compute the gradient of the LB commutativity constraint
Parameters
---------------------
C : (K2,K1) Functional map
ev_sqdiff : (K2,K1) [normalized] matrix of squared eigenvalue differences
Output
---------------------
gradient : (K2,K1) gradient of the LB commutativity squared norm
"""
return C * ev_sqdiff
|
f19dd52d54ca3fb5c8c0f0327befd3f2d771674c
| 528,346
|
def is_numpy_file(filepath: str) -> bool:
"""
Helper function to check if the file extension is for npy
Parameters
---
filepath (str)
File path
Result
---
bool
Returns True if file path ends with the npy extension.
"""
return filepath.endswith(".npy")
|
e2cf8be27036abeba70bce3134fb306cd8084218
| 107,558
|
def lettergrade(value):
"""
Maps grade point average to letter grade.
"""
if value > 3.85:
return 'A'
elif value > 3.5:
return 'A-'
elif value > 3.15:
return 'B+'
elif value > 2.85:
return 'B'
elif value > 2.5:
return 'B-'
elif value > 2.15:
return 'C+'
elif value > 1.85:
return 'C'
elif value > 1.5:
return 'C-'
elif value > 1.15:
return 'D+'
elif value > 0.85:
return 'D'
elif value > 0.5:
return 'D-'
else:
return 'F'
|
149c6e29a54b199747cf64fe58f28561dda005f3
| 110,503
|
import math
def none_or_nan(v):
"""Returns True if the value is None or float('nan')"""
return v is None or (isinstance(v, float) and math.isnan(v))
|
21449517031fabe1ab3284ec1d38e11a1a8d001c
| 220,569
|
def typeAndClickFileName(language: str) -> str:
"""
The name of the file where the Type and
Click web application should be saved
"""
return language + "ClickTypist.html"
|
cb3eb5a2f58ba7e5ed7a7f6ce67f434c083b3475
| 467,140
|
import jinja2
def ps_filter(val):
"""Jinja2 filter function 'ps' escapes for use in a PowerShell commandline"""
if isinstance(val, jinja2.Undefined):
return "[undefined]"
escaped = []
for char in str(val):
if char in "`$#'\"":
char = "`" + char
elif char == '\0':
char = "`0"
elif char == '\a':
char = "`a"
elif char == '\b':
char = "`b"
elif char == '\f':
char = "`f"
elif char == '\n':
char = "`n"
elif char == '\r':
char = "`r"
elif char == '\t':
char = "`t"
elif char == '\v':
char = "`v"
escaped.append(char)
return ''.join(escaped)
|
495cd87bfc930089aaa5f4f9b282d20b4883bfb5
| 28,257
|
def _gf2mulxmod(a,m):
"""
Computes ``a * x mod m``.
*NOTE*: Does *not* check whether `a` is smaller in degree than `m`.
Parameters
----------
a, m : integer
Polynomial coefficient bit vectors.
Polynomial `a` should be smaller degree than `m`.
Returns
-------
c : integer
Polynomial coefficient bit vector of ``c = a * x mod m``.
"""
c = a << 1
c2 = c^m
if c2 < c:
c = c2
return c
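# Added worked example (not part of the original snippet): multiplying by x in
# GF(2^8) with the AES reduction polynomial x^8 + x^4 + x^3 + x + 1 (0x11B).
# 0x80 is x^7, so x * x^7 = x^8 reduces to x^4 + x^3 + x + 1 = 0x1B.
assert _gf2mulxmod(0x80, 0x11B) == 0x1B
# A value of smaller degree is simply shifted: x * x = x^2.
assert _gf2mulxmod(0x02, 0x11B) == 0x04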
|
b5065747dd5ae934cd86de6712844865df2bb0ff
| 476,491
|
def onlyinA(listA, listB):
""" return element that's only in list A but not in B"""
setA = set(listA)
setB = set(listB)
return list(setA.difference(setB))
|
2c3dd3b995430ab172706e49c306c97d3e1cd0c8
| 103,886
|
def get_neighbors(N, word, k):
"""
Args:
N (dict[dict]): A dict of dict, storing the neighbours of
each word with IDB values.
word (str): The target word whose neighbours are to be
            fetched.
k (int): top `k` neighbours.
Returns:
A list of top `k` neighbours for `word`.
"""
return list(N[word].keys())[1:k+1]
|
69f04338d2b917041912bdefa76dce5a046c6c44
| 270,412
|
def optimal_row_and_column_count_for_subplots(n):
"""Returns the optimal number of rows and columns for a given number of subplots
:param int n: number of subplots required
:return: n_cols, n_rows
"""
n_cols = 1
n_rows = 1
increase_next = 'cols'
while n_rows * n_cols < n:
if increase_next == 'cols':
n_cols += 1
increase_next = 'rows'
elif increase_next == 'rows':
n_rows += 1
increase_next = 'cols'
return n_cols, n_rows
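# Added usage sketch (not part of the original snippet): five subplots fit on a
# grid of 3 columns by 2 rows.
assert optimal_row_and_column_count_for_subplots(5) == (3, 2)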
|
46bb88eddc5900a4896dc5895b6d61a0beea76e3
| 313,935
|
def calc_correlation(data, refparam, lags=[0, 10], exclude=None):
"""
Calculates the correlations between parameters and a reference
parameter given as columns in a DataFrame.
Parameters
----------
data : pandas.DataFrame
DataFrame containing data in columns.
refparam : str
Reference parameter.
lags : list of int, optional
Time periods to shift parameter against refparam,
defaults to [0, 10].
exclude : string, optional
Variable which should not be used for calculation of the correlation.
Returns
-------
correlation : dict
Dictionary containing correlations and max time lags.
"""
correlation = {}
for param in data.keys():
if exclude is not None and exclude in param:
continue
correlation[param] = {'corr': None, 'lag': None}
for i in range(lags[0], lags[1]):
i += abs(lags[0]) + 1
corr = data[param].corr(data[refparam].shift(periods=i),
method='pearson')
if correlation[param]['corr'] is None:
correlation[param]['corr'] = abs(corr)
correlation[param]['lag'] = i
if abs(corr) > abs(correlation[param]['corr']):
correlation[param]['corr'] = abs(corr)
correlation[param]['lag'] = i
if abs(corr) == abs(correlation[param]['corr']):
if abs(i) < abs(correlation[param]['lag']):
correlation[param]['corr'] = abs(corr)
correlation[param]['lag'] = i
return correlation
|
5dfdd5023320017573e600683149944bf72de037
| 478,820
|
import random
import math
def number_of_bytes_to_modify(buf_len, fuzz_factor):
"""Calculate number of bytes to modify.
:param buf_len: len of data buffer to fuzz.
:param fuzz_factor: degree of fuzzing.
:return: number of bytes to change.
"""
return random.randrange(math.ceil((float(buf_len) / fuzz_factor))) + 1
|
28b3c08cbbf7c78bf5c75908c60467fc4a6b5721
| 465,117
|
import ntpath
def path_leaf(path):
"""Returns the leaf of a given path.
Args:
path (string): path that is going to be processed.
Returns:
string: path leaf.
"""
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
|
4fa581178cf7018148bb9b466766b5b80732a997
| 48,591
|
import torch
def get_pytorch_device() -> torch.device:
"""Checks if a CUDA enabled GPU is available, and returns the
    appropriate device, either CPU or GPU.
Returns
-------
device : torch.device
"""
device = torch.device("cpu")
if torch.cuda.is_available():
device = torch.device("cuda:0")
return device
|
0109f3146c96bec08bbe6fa62e5a8bc5638e461c
| 703,645
|
def update_parameters(parameters, grads, learning_rate):
"""
Update the parameters' values using gradient descent rule.
Arguments
---------
parameters : dict
contains all the weight matrices and bias vectors for all layers.
grads : dict
        stores all gradients (output of L_model_backward).
    learning_rate : float
        the step size used in the gradient descent update.
Returns
-------
parameters : dict
updated parameters.
"""
L = len(parameters) // 2
for l in range(1, L + 1):
parameters["W" + str(l)] = parameters[
"W" + str(l)] - learning_rate * grads["dW" + str(l)]
parameters["b" + str(l)] = parameters[
"b" + str(l)] - learning_rate * grads["db" + str(l)]
return parameters
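# Added usage sketch (not part of the original snippet), assuming the weights and
# gradients are numpy arrays: a single layer updated with learning rate 0.1.
import numpy as np

params = {"W1": np.array([[1.0]]), "b1": np.array([0.0])}
grads = {"dW1": np.array([[0.5]]), "db1": np.array([0.1])}
params = update_parameters(params, grads, learning_rate=0.1)
assert np.allclose(params["W1"], [[0.95]]) and np.allclose(params["b1"], [-0.01])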
|
4e0a93607ab975eebebc44221ec8d8acc84b8ccf
| 624,209
|
def backoff_constant(n):
"""
backoff_constant(n) -> float
Constant backoff implementation. This returns the constant 1.
See ReconnectingWebSocket for details.
"""
return 1
|
345ab3c2c0147a4f102e9518cb4022b55ef9cb7c
| 65,140
|
import math
def poslen(pos):
""" Returns the vector length of a vec2 used by SelCoord
"""
fpos = [float(pos[0]), float(pos[1])]
plen = math.sqrt(fpos[0]*fpos[0] + fpos[1]*fpos[1])
return plen
|
2f5e3a96ae95f2626a85e8c7b552fb7baf30a0ba
| 150,483
|
def average(v, state):
""" This function is used to output a stream
where the n-th value on the output stream is
the average of the first n values of the
input stream.
The state of the input stream is the pair (n,cum).
When used to create a stream, n is the number
of values received on the input stream, and cum
is the sum of all the values in the input stream
received so far.
v is the next value received in the input stream.
"""
n, cum = state
n += 1
cum += v
mean = cum/float(n)
state = (n, cum)
return (mean, state)
|
5e25313ba1724f29d20923da2f42280754749f89
| 349,736
|
import fnmatch
def match_filter(rfilter, title):
"""
Match title against list of rfilters
"""
if not rfilter:
return True
for f in rfilter:
if fnmatch.fnmatch(title, f):
return True
return False
|
575333daf93f6a7211375b52ca9945a0eca7b30d
| 63,542
|
def roundup(x):
"""
round up to a multiple of 100
:param x: The number to be rounded up.
:type x: float
:return: `x` rounded up to a multiple of 100.
:rtype: float
"""
return x if x % 100 == 0 else x + 100 - x % 100
|
556b94db3a8b81ab3e94e2c9665914002013b19b
| 172,615
|
def radiative_heat_transfer_coefficient_simplified_winter(effective_emissivity: float) -> float:
"""
    Calculates the radiative heat transfer coefficient [W/(m2·K)] (simplified calculation, winter conditions)
    :param effective_emissivity: effective emissivity, -
    :return: radiative heat transfer coefficient, W/(m2·K)
"""
return 5.054 * effective_emissivity
|
69421b30d4186b3c448e53cfd48734aa6a568b26
| 507,500
|
import random
def random_state(primes: dict, subspace={}):
"""
Generates a random state of the transition system defined by *Primes*.
If *Subspace* is given then the state will be drawn from that subspace.
**arguments**:
* *Primes*: prime implicants
* *Subspace* (str/dict): a subspace
**returns**:
* *State* (dict): random state inside *Subspace*
**example**::
>>> random_state(primes)
{'v1':1, 'v2':1, 'v3':1}
>>> random_state(primes, {"v1":0})
{'v1':0, 'v2':1, 'v3':0}
>>> random_state(primes, "0--")
{'v1':0, 'v2':0, 'v3':1}
"""
if type(subspace) == str:
assert (len(subspace) == len(primes))
x = {}
for name, value in zip(sorted(primes), subspace):
if value.isdigit():
x[name] = int(value)
subspace = x
else:
assert (set(subspace).issubset(set(primes)))
items = list(subspace.items()) + [(x, random.choice([0, 1])) for x in primes if x not in subspace]
return dict(items)
|
6ea31b78d79de9af694bf54918ed9b50640ae4c4
| 190,430
|
def parse_vars(variables):
"""Parse variables passed as args to list of tuples
    If a variable has a value then
    it'll be appended in the format (key, value).
    If a variable has no value (it should just exist)
    then it'll be appended as (key, None).
:param variables: string of variables in args format 'key:value,key2:value2,key3'
:type variables: str
:return: List of parsed variables
:rtype: list
"""
list_of_items = variables.split(',') if variables else []
parsed_variables = []
for variable in list_of_items:
if ':' in variable:
parsed_variables.append(tuple(variable.split(':')))
else:
parsed_variables.append((variable, None))
return parsed_variables
|
ed3944a85f5c85da15accbba855dd3e81489a0c1
| 502,533
|
import mimetypes
def _is_image(url):
"""
Checks whether a normal url contains an image.
Params:
(str) url: the url for the image
Returns:
(bool): True if url contains an image, False otherwise.
"""
mimetype, encoding = mimetypes.guess_type(url)
    return bool(mimetype and mimetype.startswith('image'))
|
32110d546abc67a067e80d499c6fd567e329bba1
| 566,614
|
def time2levels(tstamp):
"""Given a :class:`datetime.datetime` object,
return a list of directory levels (as strings).
For example, given "2013-09-08 13:01:44",
return ['2013', '09', '08', '13', '01']
"""
return [tstamp.strftime(xx) for xx in ('%Y', '%m', '%d', '%H', '%M')]
|
d4e83672b9dd613f16f1b34a4103584413621e09
| 627,405
|
def title(text):
"""Convert to title case."""
return text.title()
|
4e66499cb607f5656f2463b28ed84ba40cb6039a
| 40,069
|
def CreateRegionalEndpoint(region, url):
"""Returns a new endpoint string with the defined `region` prefixed to the netlocation."""
url_parts = url.split('://')
url_scheme = url_parts[0]
return url_scheme + '://' + region + '-' + url_parts[1]
|
c237d9a328ffb749b438caf4364537f5a52a3a78
| 194,048
|
def EscapeSConsVariableExpansion(s):
"""SCons has its own variable expansion syntax using $. We must escape it for
strings to be interpreted literally. For some reason this requires four
dollar signs, not two, even without the shell involved."""
return s.replace('$', '$$$$')
|
a053ed1305638aeb249eb334a1c2f77da43bd345
| 469,227
|
def calculate_roc_points(instances):
"""From a sorted list of instances, calculate the points that draw the ROC curve."""
# Calculate the number of positives and negatives (the real ones).
P = N = 0
for label, score in instances:
if label == 'p':
P += 1
else:
N += 1
# Calculate each point.
TP = FP = 0
points = []
for label, score in instances:
if label == 'p':
TP += 1
else:
FP +=1
point = (FP/N, TP/P)
points.append(point)
return points
|
015b3f65d53d17db027814563351650192faea1f
| 402,837
|
from typing import Tuple
from typing import Type
from typing import Any
def get_types_filter(desired_types: Tuple[Type[Any], ...]):
"""Returns a value filter that only keeps values with the given types."""
return lambda arg_value: arg_value.type in desired_types
|
a245199b6d7bfa7203828b45d32cd62e1900e811
| 19,947
|
import re
def normalize_reference(s):
"""Normalize reference label.
Collapse internal whitespace to single space, remove
leading/trailing whitespace, case fold.
"""
return re.sub(r'\s+', ' ', s.strip()).upper()
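# Added usage sketch (not part of the original snippet): internal whitespace
# collapses to a single space and the label is upper-cased.
assert normalize_reference("  foo\t\n bar ") == "FOO BAR"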
|
e90ec91cb0d50da8e871a0e5026952367143f803
| 622,060
|
import math
def compute_bearing(
start_lat: float, start_lon: float, end_lat: float, end_lon: float
) -> float:
"""
Get the compass bearing from start to end.
Formula from
http://www.movable-type.co.uk/scripts/latlong.html
"""
# make sure everything is in radians
start_lat = math.radians(start_lat)
start_lon = math.radians(start_lon)
end_lat = math.radians(end_lat)
end_lon = math.radians(end_lon)
dLong = end_lon - start_lon
if abs(dLong) > math.pi:
if dLong > 0.0:
dLong = -(2.0 * math.pi - dLong)
else:
dLong = 2.0 * math.pi + dLong
y = math.sin(dLong) * math.cos(end_lat)
x = math.cos(start_lat) * math.sin(end_lat) - math.sin(start_lat) * math.cos(
end_lat
) * math.cos(dLong)
bearing = (math.degrees(math.atan2(y, x)) + 360.0) % 360.0
return bearing
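# Added sanity check (not part of the original snippet): moving due east along
# the equator gives a bearing of 90 degrees.
assert abs(compute_bearing(0.0, 0.0, 0.0, 1.0) - 90.0) < 1e-9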
|
d27feb8d7766abb3c3b3a7e168b277c78cc09d2d
| 291,558
|
import math
def _s2_uv_to_st(component: float) -> float:
"""
Convert S2 UV to ST.
This is done using the quadratic projection that is used by default for S2. The C++ and Java S2
libraries use a different definition of the ST cell-space, but the end result in IJ is the same.
The below uses the C++ ST definition.
See s2geometry/blob/c59d0ca01ae3976db7f8abdc83fcc871a3a95186/src/s2/s2coords.h#L317-L320
"""
if component >= 0.0:
return 0.5 * math.sqrt(1.0 + 3.0 * component)
return 1.0 - 0.5 * math.sqrt(1.0 - 3.0 * component)
|
da51d6e7736f87858c36ca13a8df2a2f292fb7a2
| 444,560
|
def get_ffmpeg_codec_params_string() -> str:
"""Generate command line params for FFMPEG for a widely compatible codec with good compression"""
codec_params = [
"-vcodec libx264",
"-profile:v main",
"-level 3.1",
"-preset medium",
"-crf 23",
"-x264-params ref=4",
"-acodec copy",
"-movflags +faststart",
"-pix_fmt yuv420p",
"-vf scale=920:-2",
]
return " ".join(codec_params)
|
8f38ef24b7eccdbe5cf909244da198e68db01432
| 134,597
|
def generateTOD(step:int = 10):
""" Generates an order dict with time of day as key and 0 as value
This is later used to calculate time distributions.
Arguments:
    - step int (default: 10) determines the step for minute, increments by that amount every time
"""
step = 10 if step is None else step
result = dict()
for hour in range(0, 24):
for minute in range(0, 60, step):
current_time = '{:02d}:{:02d}'.format(hour, minute)
result[current_time] = 0
return result
|
508fc3a189bf4ee3a5c718de28a03c4906915d9a
| 522,566
|
from typing import List
def summation_s(q: List[float]) -> float:
"""
Calculate the summation for the improved letter frequency attack
Input:
q: the frequency of the ith letter of the alphabet in the ciphertext
"""
S = 0
for i in range(0, len(q)):
S += pow(q[i], 2)
return S
|
b19bd0e11c6fec6fdc90406b5a3cdc97079030d0
| 270,541
|
from typing import List
def max_profit(prices: List[int]) -> int:
"""
Find the maximum profit that can be made from buying and selling a stock once
"""
if prices is None or len(prices) < 2:
return 0
min_price = prices[0]
current_max_profit = 0
for price in prices:
if price < min_price:
min_price = price
elif price - min_price > current_max_profit:
current_max_profit = price - min_price
return current_max_profit
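# Added usage sketch (not part of the original snippet): buy at 1, sell at 6.
assert max_profit([7, 1, 5, 3, 6, 4]) == 5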
|
a411fde1659aa187484e6fbea7633d41ffae8868
| 645,096
|
def instance_name(instance):
"""Shortcut to get instance name."""
return getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', None)
|
f0668aa43b70c1d9dc3d873430f03fa7c4b11d62
| 386,977
|
def _getSVlist(ln: str, N: int, sv: list) -> list:
""" parse a line of text from RINEX2 SV list"""
for i in range(N):
s = ln[32+i*3:35+i*3].strip()
if not s.strip():
raise ValueError(f'did not get satellite names from {ln}')
sv.append(s)
return sv
|
63cf409ee47169ed0cb0676955d52b13f5514675
| 350,570
|
def convert_to_ascii(string: str) -> str:
"""Given a string, omit any non-ascii characters.
The knowledge graph only supports ascii.
Args:
string (str): the string to convert.
Returns:
str: the same string, without any non-ascii characters.
"ñàlom" would be converted to "lom".
"""
# Get rid of non-ascii characters.
# KG only supports ascii characters.
string_ascii: list = [i if ord(i) < 128 else '' for i in string]
return ''.join(string_ascii)
|
7c727eed92947199e14472036c7e866b63f863d8
| 626,262
|
def wrap_python_args_with_string(args):
"""Wrap argument values with string
Args:
        args: list of strings like ["--foo", "3", "--bar", "False"]
Returns:
list of string: like ["--foo", "'3'", "--bar", "'False'"]
"""
result = []
for value in args:
if not value.startswith("--"):
result.append("'{}'".format(value))
else:
result.append(value)
return result
|
387f2e400a1eaa218cd35d6aa7adc780bbbd8752
| 81,401
|
import math
def arcsine_sqrt_transform(rel_abd):
"""
Takes the proportion data from relative_abundance() and applies the
variance stabilizing arcsine square root transformation:
        X = arcsin(sqrt(p))
"""
arcsint = lambda p: math.asin(math.sqrt(p))
return {col_id: {row_id: arcsint(rel_abd[col_id][row_id])
for row_id in rel_abd[col_id]} for col_id in rel_abd}
|
58e9d5e10337ff289427538ca4fcc2a2c112a08d
| 427,857
|
def loadMeteoritesElements(meteorites_file):
""" Loads orbital elements of meteorites with orbits."""
meteorites_list = []
with open(meteorites_file) as f:
for line in f:
# Skip commented lines
if line.startswith("#"):
continue
line = line.split(';')
meteorite_name = line[1]
a = float(line[8])
e = float(line[10])
# Compute perihelion
q = a*(1 - e)
incl = float(line[12])
peri = float(line[14])
node = float(line[16])
meteorites_list.append([meteorite_name, q, e, incl, peri, node])
return meteorites_list
|
d26ea509a0597f59689caf1110031e239b93fceb
| 325,718
|
def get_urls(tweet, key='url'):
"""Extract urls from a tweet"""
ent = tweet.get('entities', [])
if not ent:
return []
return (tag[key] for tag in ent.get('urls', []))
|
c212672cbb511fb6178d1240d6167c8f400c0a43
| 220,990
|
def load_model_configurations(model):
"""
Arguments:
model: A SSD model with PriorBox layers that indicate the
parameters of the prior boxes to be created.
Returns:
model_configurations: A dictionary of the model parameters.
"""
model_configurations = []
for layer in model.layers:
layer_type = layer.__class__.__name__
if layer_type == 'PriorBox':
layer_data = {}
layer_data['layer_width'] = layer.input_shape[1]
layer_data['layer_height'] = layer.input_shape[2]
layer_data['min_size'] = layer.min_size
layer_data['max_size'] = layer.max_size
layer_data['aspect_ratios'] = layer.aspect_ratios
layer_data['num_prior'] = len(layer.aspect_ratios)
model_configurations.append(layer_data)
return model_configurations
|
34c6efbca820bd5461b2e5aeb2c7b30184afa250
| 20,540
|
import re
def strip_quotations(source_code):
"""
Strips quotations from code to prevent false positives due to keywords
inside strings.
First the function matches groups of quotations by looking for text within
quotations (not escaped quotations). Then it replaces every group with an
empty string.
:param source_code: the string that contains source code
:return: same source code as source_code but without any quoted strings
"""
# based on regex from
# http://stackoverflow.com/questions/171480/regex-grabbing-values-between-quotation-marks
quotation_regex = "([\"'])(?:(?=(\\\\?))\\2.)*?\\1"
return re.sub(quotation_regex, "", source_code)
|
8ea256a72f95bb588b968eff1f5d44ce6826efb1
| 284,312
|
import traceback
import time
def wrapLoop(loopfunc):
"""Wraps a thread in a wrapper function to restart it if it exits."""
def wrapped():
while True:
try:
loopfunc()
except BaseException:
print(f"Exception in thread {loopfunc},"
" restarting in 10s...")
traceback.print_exc()
else:
print(f"Thread {loopfunc} exited, restarting in 10s...")
time.sleep(10)
return wrapped
|
86c48bc850bb1cf17121130ee9349dd529acf5e3
| 5,807
|
def parse_subnets_by_tag(subnets, tag_key, tag_value, return_key='id'):
"""
Args:
subnets (list): List of dictionaries of the subnets that were created.
tag_key (str): The tag key you are searching by.
tag_value (str): The value of the tag you want to search by.
Kwargs:
return_key (str): The key you want returned.
Basic Usage:
>>> subnets = [
{
"az": "eu-west-1a",
"cidr": "10.1.0.0/24",
"id": "subnet-f6275193",
"resource_tags": {
"Environment": "dev",
"Name": "dev_public",
"Tier": "public"
}
},
{
"az": "eu-west-1a",
"cidr": "10.1.100.0/24",
"id": "subnet-f1275194",
"resource_tags": {
"Environment": "dev",
"Name": "dev_private",
"Tier": "private"
}
}
]
>>> tag_key = "Name"
>>> tag_value = "Development Private"
>>> subnet_ids = parse_subnets_by_tag(subnets, tag_key, tag_value)
Returns:
List of vpc subnet ids
"""
# return an attribute for all subnets that match
subnet_values = []
for item in subnets:
        for key, value in item['resource_tags'].items():
if key == tag_key and value == tag_value:
subnet_values.append(item[return_key])
subnet_values.sort()
return subnet_values
|
f61d483b89aff91e44dd161a18d19a6319752109
| 590,853
|
from typing import IO
import io
def with_mode_binary(stream: IO) -> bool:
"""
Return whether ``stream`` is a binary stream (i.e. reads bytes).
"""
result = False
try:
result = 'b' in stream.mode
except AttributeError:
if isinstance(stream, (io.RawIOBase, io.BufferedIOBase, io.BytesIO)):
result = True
return result
|
4c4bb068895bbb3e75b7390e11ebb7a431199e1f
| 484,733
|
import warnings
def get_config(default, config=None, warn=1):
"""
Return a dictionary containing default configuration settings and any
settings that the user has specified. The user settings will override the
default settings.
Args:
default(dict) : A dictionary of default configuration settings.
config(dict) : A dictionary of user-specified configuration settings.
warn: Issue a warning if `config` contains an unknown key (not found in
`default`).
Returns:
dict : User-specified configuration supplemented with default settings
        for fields the user has not specified.
"""
config_out = {}
# Set defaults
for key in default:
config_out[key] = default[key]
if not config:
return config_out
for key in config:
if key not in default and warn:
warnings.warn('Unknown option: %s in `config`' % key)
# assert 0
# Override defaults
for key in config:
config_out[key] = config[key]
return config_out
|
c2b1c7f32ad5141682a1167008e86bcf68860182
| 489,372
|
from typing import List
def find_max_subarray_bruteforce(sequence: List[int]):
"""Brute force implementation of the maximum subarray problem"""
max_sum = float("-inf")
for left_index in range(len(sequence)):
curr_sum = 0
for right_index in range(left_index, len(sequence)):
curr_sum += sequence[right_index]
max_sum = max(max_sum, curr_sum)
return max_sum
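# Added usage sketch (not part of the original snippet): the best subarray of
# [-2, 1, -3, 4, -1, 2, 1, -5, 4] is [4, -1, 2, 1] with sum 6.
assert find_max_subarray_bruteforce([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6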
|
3ba74f51dfdf3f750096a68651267fec39f9785e
| 141,067
|
import functools
import time
def timeit(func):
"""Decorator to measure the time spent by a function."""
@functools.wraps(func)
def timed(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
return end - start, result
return timed
|
5fe7f53238c8b000fa39865f0100ad6add34e1af
| 596,983
|
def default_replacement(random, population, parents, offspring, args):
"""Performs no replacement, returning the original population.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
return population
|
de493ef120787c35f94af5ebf327121fc24988dd
| 464,532
|
def circle(lat, lon, radius):
"""
Creates an API-ready circle from the given latitude, longitude,
and radius parameters
"""
return {'$circle': {'$center': [lat, lon], '$meters': radius}}
|
8771481241d83e252c584bba4941ee9191ba9182
| 412,400
|
from typing import Optional
from typing import Tuple
from pathlib import Path
def dataset_adress(
adress_archive_root: Optional[str],
corpus_name: str,
dataset_type: str,
preprocess_args: str,
) -> Tuple[str, Path]:
"""Path of dataset archive file and contents directory.
Args:
adress_archive_root:
corpus_name:
dataset_type:
preprocess_args:
Returns: [archive file adress, contents directory path]
"""
# Design Notes:
# Why not `Path` object? -> Archive adress could be remote url
#
    # Original Data (corpus) / Prepared Data (dataset) / Transformation (preprocess)
# If use different original data, everything change.
# Original item can be transformed into different type of data.
# Even if data type is same, value could be changed by processing parameters.
#
# Directory structure:
# datasets/{corpus_name}/{dataset_type}/
# archive/{preprocess_args}.zip
# contents/{preprocess_args}/{actual_data_here}
# Contents: Placed under default local directory
contents_root = local_root = "./tmp"
# Archive: Placed under given adress or default local directory
archive_root = adress_archive_root or local_root
rel_dataset = f"datasets/{corpus_name}/{dataset_type}"
archive_file = f"{archive_root}/{rel_dataset}/archive/{preprocess_args}.zip"
contents_dir = f"{contents_root}/{rel_dataset}/contents/{preprocess_args}"
return archive_file, Path(contents_dir)
|
77f87d18b5532b8381adc218a8c3a21fe30caf3c
| 630,519
|
import six
def _to_text(obj, encoding):
"""
In Python3:
Decode the bytes type object to str type with specific encoding
In Python2:
Decode the str type object to unicode type with specific encoding,
or we just return the unicode string of object
Args:
obj(unicode|str|bytes) : The object to be decoded.
encoding(str) : The encoding format
Returns:
decoded result of obj
"""
if obj is None:
return obj
if isinstance(obj, six.binary_type):
return obj.decode(encoding)
elif isinstance(obj, six.text_type):
return obj
elif isinstance(obj, (bool, float)):
return obj
else:
return six.u(obj)
|
d1ea66b7d27b6ebc90e5252038254235e3b599ce
| 43,888
|
def validate_rule_port(port):
"""
Validates that any value in a port list really is valid.
Valid values are an integer port, or a string range separated by a colon.
:param port: the port, which is validated for type
:returns: None or an error string if invalid
"""
if isinstance(port, int):
if port < 0 or port > 65535:
return "integer out of range"
return None
# If not an integer, must be format N:M, i.e. a port range.
try:
fields = port.split(":")
except AttributeError:
return "neither integer nor string"
if not len(fields) == 2:
return "range unparseable"
try:
start = int(fields.pop(0))
end = int(fields.pop(0))
except ValueError:
return "range invalid"
if start >= end or start < 0 or end > 65535:
return "range invalid"
return None
|
bdfe9cd488a1335fe82a09483400a5a3e155ce3c
| 229,390
|
def lower_mask(dqarr, bitmask):
"""
Function that lowers (unsets) all the bits in 'dqarr' contained
in the bitmask.
:Parameters:
dqarr: numpy array or integer
numpy array which represents a dq plane (or part of it).
The function also works when dqarr is a scalar integer
bitmask: integer
            A bit mask specifying all the bits to be logically "lowered"
in dqarr. For example,
* bitmask=1 = 2**0 will lower bit 0.
* bitmask=5 = 2**0 + 2**2 will lower bits 0 and 2.
:Returns:
newdqarr: numpy array or integer
Returns array 'dqarr' with the specified bits lowered in all
elements (pixels).
"""
assert isinstance(bitmask, int)
# The bits are lowered with a binary AND NOT operation.
return dqarr & ~bitmask
|
103053a1a8cac55928592a0f65b695c1a29a4f92
| 411,086
|
def str2tuple(s, sep=',', converter=None, *, maxsplit=-1):
"""Convert a string to a tuple.
If ``converter`` is given and not ``None``, it must be a callable that
takes a string parameter and returns an object of the required type,
or else a tuple with string elements will be returned.
>>> str2tuple('1, 2, 3,4', converter=int)
(1, 2, 3, 4)
>>> str2tuple('on, off, no, true, YES')
('on', 'off', 'no', 'true', 'YES')
>>> str2tuple('on, off, no, true, YES', converter=str2bool)
(True, False, False, True, True)
>>> str2tuple('a, b, , d')
('a', 'b', '', 'd')
:param str s: the string
:param str sep: the separator (whitespace around ``sep`` will be ignored)
:param converter: the converter function
:type converter: callable(str)
:param int maxsplit: max. number of splits (-1 means no limit)
:return: tuple with elements of the required type
:rtype: tuple
.. versionchanged:: 0.14.0 Add parameter ``maxsplit``
"""
if s:
f = converter or str
return tuple(f(x.strip()) for x in s.split(sep, maxsplit))
return ()
|
dd66a46377bec43f42ea3f04a2094bbfc1f2fae5
| 202,261
|
def tamper(payload, **kwargs):
"""
Appends special crafted string
Notes:
* Useful for bypassing Imperva SecureSphere WAF
* Reference: http://seclists.org/fulldisclosure/2011/May/163
>>> tamper('1 AND 1=1')
"1 AND 1=1 and '0having'='0having'"
"""
return payload + " and '0having'='0having'" if payload else payload
|
4ee160fd5be3d25848d842a98f4e8932e29f7f79
| 223,566
|
def estimate_fuse_passengers(fus_nb, FLOORS_NB, PASS_PER_TOILET, cabin_area,\
MASS_PASS, pass_density):
""" The function evaluates the number of passengers members on board in
case of an unconventional aircraft with fuselage.
Source : passengers density is defined averaging the ratio between
the maximum passengers allowed and the cabin area on different
conventional aircraft.
Args:
fus_nb (int): Number of fuselages.
FLOORS_NB (int): Number of floors.
PASS_PER_TOILET (int): Number of passengers per toilet.
cabin_area (float): Cabin Area [m^2].
MASS_PASS (float): Passenger mass [kg].
pass_density (float): Base passengers density [pass/m^2].
Returns:
pass_nb (int): Number of passengers.
toilet_nb (int): Number of toilet.
mass_pass_tot (int): Total passengers mass [kg].
"""
MF = FLOORS_NB * 0.5 + 0.5
pass_nb = 0
for i in range(0, fus_nb):
pass_nb += cabin_area[i-1] * pass_density
pass_nb = int(round(pass_nb * MF,0))
mass_pass_tot = round(pass_nb * MASS_PASS,3)
toilet_nb = round(pass_nb/PASS_PER_TOILET,0)
return(pass_nb, toilet_nb, mass_pass_tot)
|
65385f644302acde58ed2cf575f119ec210bc1ed
| 639,851
|
import posixpath
def is_special_file(name):
"""Returns true if a file is some special Git metadata and not content."""
return posixpath.basename(name) in ['.gitignore', '.gitattributes', '.gitmodules',
'.gitftpignore']
|
94aa516c0e2d0987d1096d6958203884dbf451c5
| 573,455
|
from typing import Optional
def _text_compare(t1: Optional[str], t2: Optional[str]) -> bool:
"""Compare two text strings, ignoring wrapping whitespaces.
Args:
t1:
First text.
t2:
Second text.
Returns:
True if they are equal, False otherwise.
"""
if not t1 and not t2:
return True
#if t1 == '*' or t2 == '*':
# return True
return (t1 or '').strip() == (t2 or '').strip()
|
965c61e565aa2608742bcd291fcdf04b26d3ff38
| 651,610
|
import torch
def inner_product(state_1, state_2):
"""Compute < state_1 | state_2 > (the left entry is conjugate-linear).
"""
return torch.sum(state_1.conj() * state_2, dim=-1)
|
806b8644207023ed96c8061e075aa66026b472df
| 243,305
|
def to_list(*args) -> tuple:
"""
Recives a positional args expanded and return it in list format
Args:
args:
Positional args expanded
Returns:
Compacted positional args (tuple)
"""
return args
|
290ba5eb5836c0f925370611ed370ffe1ba0cd7f
| 573,965
|
from datetime import datetime
def dt_to_dec(dt):
"""Convert a datetime to decimal year."""
year_start = datetime(dt.year, 1, 1)
year_end = year_start.replace(year=dt.year+1)
return dt.year + ((dt - year_start).total_seconds() / # seconds so far
float((year_end - year_start).total_seconds()))
|
0841f21c245b0f3a2a1404c7c8c5bff9a26aae21
| 700,360
|
def is_signed_integer(t):
"""
Return True if value is an instance of any signed integer type.
"""
return t.typecode in "csil"
|
1610c1f69a53f1f89176cc732e8f36ca9c5ac211
| 481,841
|
import re
def unformat_message(symbolic_string_list):
""" `symbolic_string_list` : a list of strings with symbolic formattation, example:
symbolic_string_list = ["{yellow}\tPluto\n{end}",
"{reverse}{blue}======>{end}{red}\t\tPaperino{end}"]
>>> unformat_message(symbolic_string_list)
    [['\tPluto\n'], ['======>', '\t\tPaperino']]
"""
pattern = r"(?<!\{)\{([^}]+)\}(?!\})"
picktxt, pickarrw = [ [] for _ in range(2) ]
for item in symbolic_string_list:
try:
# only for py-kms MsgMap.
picklist = re.sub(pattern, '*', item['text'])
except:
# generalization.
picklist = re.sub(pattern, '*', item)
picklist = list(filter(None, picklist.split('*')))
picktxt.append(picklist)
return picktxt
|
4e0aff47ca1f22e130264b50e80433e63fce3091
| 464,449
|
def load_loglikelihood_from_hdf5_group(group):
"""
Loads a loglikelihood from the given hdf5 group.
group: the hdf5 file group from which to load the Loglikelihood
returns: Loglikelihood object of the correct type
"""
try:
class_name = group.attrs['class']
cls = eval(class_name)
except KeyError:
raise ValueError("This group doesn't appear to contain a " +\
"Loglikelihood.")
except NameError:
raise ValueError("The class name of this loglikelihood is not known!")
return cls.load_from_hdf5_group(group)
|
c96e9cfff710c49dd850d64aa3a187589ac1dac9
| 618,889
|
def parse_view_size(value):
"""
Attempts to parse a view size from 000x111 format to the tuple required by the mosaic classes. Note that the input
string will have width first, whereas the format required by the mosaics has height first
"""
try:
dimensions = value.split("x")
return (int(dimensions[1]), int(dimensions[0]))
except:
return (600, 800)
|
2b07f17d4a8696a670fc722fd9a2eddd4e798cad
| 487,726
|
def _h_3ab(P):
"""Define the boundary between Region 3a-3b, h=f(P)
>>> "%.6f" % _h_3ab(25)
'2095.936454'
"""
return 0.201464004206875e4+3.74696550136983*P-0.0219921901054187*P**2+0.875131686009950e-4*P**3
|
68e8974cbcbeb873ac06fc1f61b32058b34899a3
| 656,602
|
def quality_to_proba_sanger(quality):
"""Quality to probability (Sanger)"""
return 10**(quality/-10.)
|
99f068e7d57d291802d8411ccac88897cb4b6110
| 71,693
|
from operator import mul
def multiplyLists(lst1, lst2):
"""Return the elementwise product of lst1 and lst2."""
assert len(lst1) == len(lst2), "The lists have to be the same length."
return list(map(mul, lst1, lst2))
|
fe26b0d143a1beec53c7ed771e383e7edff6154a
| 636,815
|
def split_pem(s):
"""
Split PEM objects. Useful to process concatenated certificates.
"""
pem_strings = []
while s != b"":
start_idx = s.find(b"-----BEGIN")
if start_idx == -1:
break
end_idx = s.find(b"-----END")
end_idx = s.find(b"\n", end_idx) + 1
pem_strings.append(s[start_idx:end_idx])
s = s[end_idx:]
return pem_strings
|
ba2200fe50a298cb38f111ffb66963277e0e7550
| 240,836
|
def checkbit(packedint, offset):
"""
Check for a bit flag in a given int value.
Args:
packedint: bit packed int
offset: binary offset to check
Returns:
bool
"""
bit = 1 << offset
return (packedint & bit) > 0
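# Added usage sketch (not part of the original snippet): 0b1010 has bit 1 set
# and bit 2 clear.
assert checkbit(0b1010, 1)
assert not checkbit(0b1010, 2)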
|
ced4c286e80c4f2eabc6a1586a8fc336a961a2aa
| 168,584
|
def get_entity_map_parameters(self, entity_map, delete_parameters = True):
"""
Retrieves the entity map parameters value.
The parameters are stored in a special part of
the entity map, that is removed after read.
The removal of the parameters is optional and may be
prevented.
:type entity_map: Dictionary
:param entity_map: The entity map.
:type delete_parameters: bool
:param delete_parameters: If the parameters value should be
removed from the entity map.
:rtype: Dictionary
:return: The parameters map for the entity.
"""
# retrieves the entity parameters
entity_parameters = entity_map.get("_parameters", {})
# removes the parameters from the entity map
# in order to avoid possible attribute problems
# in case the delete parameters is set
if delete_parameters and ("_parameters" in entity_map): del entity_map["_parameters"]
# returns the entity parameters
return entity_parameters
|
ec57eaabb09cce17746ec84c2d65024e6bc7cb5c
| 402,379
|
def base_b(n: int, b: int):
"""
Return the string representation of n in base-b.
Example:
base_b(11, 3) = '102'
"""
e = n // b
q = n % b
if n == 0:
return '0'
elif e == 0:
return str(q)
else:
return base_b(e, b) + str(q)
|
9bbd1faadb7c68004830d3de31ccd3c746678c70
| 399,809
|
from typing import Tuple
from typing import Any
def fst(tup: Tuple[Any, Any]) -> Any:
""" Return first member of tuple """
return tup[0]
|
3a6965611cbd5964efbab6346f5a4878f955a70e
| 287,284
|
import torch
def rss(data: torch.Tensor, dim: int = 0) -> torch.Tensor:
"""
Compute the Root Sum of Squares (RSS).
RSS is computed assuming that dim is the coil dimension.
Args:
data: The input tensor
dim: The dimensions along which to apply the RSS transform
Returns:
The RSS value.
"""
return torch.sqrt((data**2).sum(dim))
|
f30084fd0255a70b4c2df5f0a877757a0616a884
| 424,989
|
def is_valid_dict(val):
""" Validates if the value passed is of dict type or not.
Args:
val (any type): value to be tested
Returns:
bool: True if dict else False
"""
return type(val) is dict
|
fb7f47888e0c3a0c74fbcdc0e92afcb3c8736a18
| 535,107
|
import pickle
def load_object(path):
"""Load object from pickle file. """
with open(path, 'rb') as file:
return pickle.load(file)
|
4aaa685ef94137b51cd1983c1d7ab83d56fc4488
| 144,688
|
from typing import Tuple
def int_to_binary(val: int, dim: int) -> Tuple[int, ...]:
"""Convert an integer to corresponding binary tuple, with most significant bit as
the first element of tuple.
:param val: input integer
:type val: int
:param dim: Bit width
:type dim: int
:return: Binary tuple of width dim
:rtype: Tuple[int, ...]
"""
return tuple(map(int, format(val, "0{}b".format(dim))))
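# Added usage sketch (not part of the original snippet): 5 as a 4-bit tuple,
# most significant bit first.
assert int_to_binary(5, 4) == (0, 1, 0, 1)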
|
06177cf750132fba2a8062376e56618f7ab60749
| 489,959
|