content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def pad_program(bytecode):
    """Pad *bytecode* with NUL ('\\x00') characters up to a length of 256.

    Inputs already 256 characters or longer are returned unchanged.
    """
    return bytecode.ljust(256, '\x00')
def parse_kwargs(s):
    """Parse command line arguments into Python arguments for parsers.

    Converts an arguments string of the form key1=value1,key2=value2 into
    a dict of arguments that can be passed to Python initializers.

    This function also understands type prefixes and will cast values
    prefixed with 'int:' or 'float:'. For example magic_number=int:4 will
    be converted to {"magic_number": 4}.

    Args:
        s (str | None): comma-separated key=value pairs, or None.

    Returns:
        dict: parsed keyword arguments (empty for None input).
    """
    if s is None:
        return {}
    kwargs = {}
    for argument in s.split(","):
        # Split on the first '=' only so values may themselves contain '='
        # (the original raised ValueError on such input).
        key, value = argument.strip().split("=", 1)
        if value.startswith("int:"):
            value = int(value[len("int:"):])
        elif value.startswith("float:"):
            value = float(value[len("float:"):])
        kwargs[key] = value
    return kwargs
def generate_counter(start_at=0):
    """Return a function that increments an internal counter by 1 per call."""
    current = start_at

    def inc():
        nonlocal current
        current += 1
        return current

    return inc
def set_rules(memory, index, state):
    """Return (write, offset, next_state) for the Turing-machine transition.

    The rule is selected by whether the current tape cell is zero and by the
    current state letter A-F.
    """
    # Keyed by (tape cell is zero?, current state).
    transitions = {
        (True, 'A'): (1, 1, 'B'),
        (True, 'B'): (1, -1, 'C'),
        (True, 'C'): (1, -1, 'D'),
        (True, 'D'): (1, -1, 'E'),
        (True, 'E'): (1, -1, 'A'),
        (True, 'F'): (1, -1, 'E'),
        (False, 'A'): (0, -1, 'E'),
        (False, 'B'): (0, 1, 'A'),
        (False, 'C'): (0, 1, 'C'),
        (False, 'D'): (0, -1, 'F'),
        (False, 'E'): (1, -1, 'C'),
        (False, 'F'): (1, 1, 'A'),
    }
    # Unknown states fall back to the original default: write 1, move right,
    # keep the same state.
    return transitions.get((memory[index] == 0, state), (1, 1, state))
import ipaddress
def compare_addresses(allowed_networks, test_addresses):
    """Return the test addresses that fall inside none of the allowed networks.

    :param allowed_networks: iterable of ipaddress network objects
    :param test_addresses: iterable of IPv4 address strings (from A records)
    :return: list of the addresses not covered by any allowed network
    """
    bad_addresses = []
    for address in test_addresses:
        candidate = ipaddress.IPv4Address(address)
        if not any(candidate in network for network in allowed_networks):
            bad_addresses.append(address)
    return bad_addresses
def __atom_type_sortkey(atom, order=None):
    """
    Key function for sorting atom types for GPUMD in.xyz files.

    Args:
        atom (ase.Atom):
            Atom object.
        order (list(str)):
            A list of atomic symbol strings in the desired order.

    Returns:
        int: index of the atom's symbol within ``order``.

    Raises:
        ValueError: if ``order`` is missing/empty.
    """
    if order:
        for i, sym in enumerate(order):
            if sym == atom.symbol:
                return i
        # NOTE(review): a symbol absent from `order` falls through and
        # returns None, which would break sorting -- confirm intended
        # handling before tightening this.
    else:
        # Bug fix: the original constructed this ValueError but never
        # raised it, silently returning None instead.
        raise ValueError('type sortkey error: Missing order.')
def getPrice(building_type, line):
    """Return the text between the buy/sell phrase and ' for ' in *line*.

    Returns None for any building_type other than 'buy' or 'sell'.
    """
    phrases = {"buy": " is buying ", "sell": " is selling "}
    phrase = phrases.get(building_type)
    if phrase is None:
        return None
    return line.split(phrase)[1].split(" for ")[0].strip()
def getSeqRegions(seqs, header, coordinates):
    """Return the spliced sequence for *header* over the given coordinates.

    Parameters
    ----------
    seqs : dict
        Dictionary of sequences keyed by contig name/header.
    header : str
        Contig name (header) for the sequence in ``seqs``.
    coordinates : list of tuples
        1-based (start, end) pairs to extract, e.g. [(1, 10), (20, 30)].
        Pairs are processed in ascending order of their start position.

    Returns
    -------
    str
        The concatenated (spliced) DNA sequence.
    """
    contig = seqs[header]
    ordered = sorted(coordinates, key=lambda pair: pair[0])
    return "".join(contig[start - 1:end] for start, end in ordered)
import re
def make_into_file_name(str):
    """
    Collapse every run of whitespace in *str* into a single underscore.

    :param str: string (parameter name kept for backward compatibility,
        although it shadows the builtin ``str``)
    :return: string suitable for use as a file name
    """
    underscored = re.sub(r"\s+", '_', str)
    return underscored
from pathlib import Path
def pid_path_for_service(root_path: Path, service: str, id: str = "") -> Path:
    """
    Generate a path for a PID file for the given service name.

    Spaces and slashes in the service name are replaced with dashes.
    """
    safe_name = service.replace(" ", "-").replace("/", "-")
    return root_path.joinpath("run", f"{safe_name}{id}.pid")
import time
def wait_for_all_nodes_state(batch_client, pool, node_state):
    """Waits for all nodes in pool to reach any specified state in set
    :param batch_client: The batch client to use.
    :type batch_client: `batchserviceclient.BatchServiceClient`
    :param pool: The pool containing the node.
    :type pool: `batchserviceclient.models.CloudPool`
    :param set node_state: node states to wait for
    :rtype: list
    :return: list of `batchserviceclient.models.ComputeNode`
    :raises RuntimeError: if the pool reports resize errors while waiting

    .. note:: polls forever (no timeout) until the condition is met.
    """
    print('waiting for all nodes in pool {} to reach one of: {!r}'.format(
        pool.id, node_state))
    i = 0
    while True:
        # refresh pool to ensure that there is no resize error
        pool = batch_client.pool.get(pool.id)
        if pool.resize_errors is not None:
            resize_errors = "\n".join([repr(e) for e in pool.resize_errors])
            raise RuntimeError(
                'resize error encountered for pool {}:\n{}'.format(
                    pool.id, resize_errors))
        nodes = list(batch_client.compute_node.list(pool.id))
        # Done only once the pool has at least its target node count AND
        # every node is in one of the requested states.
        if (len(nodes) >= pool.target_dedicated_nodes and
                all(node.state in node_state for node in nodes)):
            return nodes
        i += 1
        # Progress message every third poll (roughly every 30 seconds).
        if i % 3 == 0:
            print('waiting for {} nodes to reach desired state...'.format(
                pool.target_dedicated_nodes))
        time.sleep(10) | caaf59d191179bea60d6a62b0f22f42b19683bc6 | 97,046 |
def get_workers(c):
    """
    Find worker containers for the current project.

    Lists docker containers carrying the project's worker label and returns
    their (non-empty) container ids.
    """
    listing = c.run('docker ps -a --filter "label=docker-starter.worker.%s" --quiet' % c.project_name, hide='both')
    return [container_id for container_id in listing.stdout.rsplit("\n") if container_id]
def get_shapefile_record_name_from_location(shapefile_record_names_df, location):
    """
    Get the shapefile record name for the given location.

    Args:
        shapefile_record_names_df (pandas DataFrame): dataframe of shapefile
            record names and the associated locations
        location (str): name of the location

    Returns:
        str: Record name for the location as listed in the shapefiles used.
    """
    matches_location = shapefile_record_names_df['location'] == location
    record_names = shapefile_record_names_df.loc[matches_location, 'record_name']
    return record_names.values[0]
from functools import partial
from random import choice
def picker(seq):
    """
    Return a zero-argument function that selects and returns a random entry
    from the provided sequence each time it is called.
    """
    def pick():
        return choice(seq)
    return pick
def compose(*funcs):
    """Return a function computing f(g(...(x))) for compose(f, g, ...).

    Intermediate results that are plain lists/tuples are splatted into the
    next function; anything else is passed as a single argument.
    """
    def composed(*args):
        value = args
        for func in reversed(funcs):
            if type(value) in (list, tuple):
                value = func(*value)
            else:
                value = func(value)
        return value
    return composed
def cb_detect_production(step):
    """
    Detection of production level build: True iff the step's
    'build_codename' property equals 'production'.
    """
    return step.getProperty('build_codename') == 'production'
def to_base(num, b, numerals='0123456789abcdefghijklmnopqrstuvwxyz'):
    """
    Python implementation of number.toString(radix)
    Thanks to jellyfishtree from https://stackoverflow.com/a/2267428
    """
    if num == 0:
        return numerals[0]
    # Recurse on the higher-order digits, stripping the leading zero that the
    # base case contributes.
    higher_digits = to_base(num // b, b, numerals).lstrip(numerals[0])
    return higher_digits + numerals[num % b]
from typing import Sequence
def _is_sequence(typ):
    """
    Return True iff *typ* is a parameterized Sequence[...] type from the
    typing module.
    """
    # PEP 560 deprecates issubclass for List-style types; for the time being
    # we support the __origin__ escape hatch instead.
    try:
        origin = typ.__origin__
    except AttributeError:
        return False
    return issubclass(origin, Sequence)
def dictfetchall(cursor):
    """
    Return all rows from *cursor* as a list of column-name -> value dicts.

    Args:
        cursor: A python database cursor.
    Returns:
        list[dict]: one dict per fetched row.
    """
    column_names = [description[0] for description in cursor.description]
    return [dict(zip(column_names, values)) for values in cursor.fetchall()]
import random
def _exponential_backoff(max_tries):
    """
    Return max_tries random delays; the i-th is uniform in [0, 2**i - 1).

    The first delay is always 0.0 (2**0 - 1 == 0).
    """
    delays = []
    for attempt in range(max_tries):
        delays.append(random.random() * (2 ** attempt - 1))
    return delays
def _modify_line(my_string, new_value):
    """Replace the value part of a 'key   value' line with *new_value*.

    The key is preserved, the gap widens by one space relative to the
    original run of spaces after the key, and a trailing newline is added.
    """
    key, remainder = my_string.split(' ', 1)
    # Width of the original leading-space run in the remainder, plus one.
    gap = len(remainder) - len(remainder.lstrip(' ')) + 1
    return key + ' ' * gap + new_value + '\n'
def is_translation(args):
    """Translations live in the site/<lang>/ directory of the docs-l10n repo."""
    resolved = str(args["path"].resolve())
    if "site/" not in resolved:
        return False
    # English sources and the en-snapshot mirror are not translations.
    return "site/en/" not in resolved and "site/en-snapshot/" not in resolved
import json
def _GetBotWithFewestNamedCaches(bots):
    """Selects the bot that has the fewest named caches.

    To break ties, the bot with the most available disk space is selected.

    Args:
        bots(list): A list of bot dicts as returned by the swarming.bots.list
            api with a minimum length of 1.

    Returns:
        One bot from the list.
    """
    # Each candidate is (cache_count, -free_space, original_index, bot).
    candidates = []
    for index, bot in enumerate(bots):
        try:
            caches_dimension = [
                d['value'] for d in bot['dimensions'] if d['key'] == 'caches'
            ][0]
            # We only care about caches whose name starts with 'builder_' as
            # that is the convention that we use in GetCacheName.
            cache_count = len([
                cache for cache in caches_dimension
                if cache.startswith('builder_')
            ])
            bot_state = json.loads(bot['state'])
            # Bug fix: dict.iteritems() does not exist on Python 3.
            free_space = sum(
                disk['free_mb'] for disk in bot_state['disks'].values())
        except (KeyError, TypeError, ValueError):
            # If we can't determine the values, rank the bot last.
            candidates.append((1000, 0, index, bot))
        else:
            # Negative free space so that more free space ranks first when
            # cache counts tie.
            candidates.append((cache_count, -free_space, index, bot))
    # Bug fix: the index tie-breaker prevents the comparison from ever
    # reaching the bot dicts (unorderable on Python 3) and makes selection
    # deterministic.
    return min(candidates)[3]
def _format_visibility(data):
    """Return a formatted visibility string.

    :param data:
        The server's visibility (is_public) status value: True, False
    :rtype:
        A string formatted to public/private
    """
    return 'public' if data else 'private'
def get_shapes_F1(F1):
    """
    Return a string listing the shape of every array in *F1*.

    Parameters
    ----------
    F1 : list of multidimensional np.arrays
        One entry per station-polarization.

    Returns
    -------
    str
    """
    shapes = [array.shape for array in F1]
    return str(shapes)
import struct
def write_message(out_file, message):
    """Serialize *message*, then write a 4-byte big-endian length header
    followed by the serialized bytes to *out_file*.

    Args:
        out_file (writeable file-like object): destination for header + proto.
        message (google.protobuf.Message subclass instance): message to write.

    Returns:
        bool: True iff both writes succeeded; False on IOError.

    Raises anything message.SerializeToString could raise.
    """
    payload = message.SerializeToString()
    header = struct.pack('!L', len(payload))
    try:
        out_file.write(header)
        out_file.write(payload)
    except IOError:
        return False
    return True
import math
def sphere_volume(r):
    """Volume of a sphere with radius *r*: (4/3) * pi * r**3."""
    return (4 / 3.0) * math.pi * r ** 3
def bins_poorstats(data, nbins, ndata, threshold_percent=1.0):
    """
    Percentage of histogram bins that are poorly populated.

    Parameters
    ----------
    data : numpy.ndarray[int, ndim=1]
        Observed frequency of each histogram bin.
    nbins : int
        Number of bins.
    ndata : int
        Number of sample points that were histogrammed.
    threshold_percent : float
        Value in (0.0, 100.0); bins whose occupancy is at or below this
        percent of the expected uniform occupancy (ndata / nbins) are
        labelled blank. The cutoff is floored at 2 counts.

    Returns
    -------
    float
        Percentage (0-100) of bins labelled blank, rounded to one decimal.
    """
    cutoff = threshold_percent * ndata / (100 * nbins)
    if cutoff < 2:
        cutoff = 2
    blanks = len(data[data <= cutoff])
    return round(blanks * 100.0 / nbins, 1)
def _get_frame_time(pkt):
    """Return the packet's arrival time as a float.

    Parameters
    ----------
    pkt :
        a packet exposing a ``time`` attribute

    Returns
    -------
    float
    """
    arrival = pkt.time
    return float(arrival)
def create_string(dict_input):
    """Creates a string where 'x', 'y' and 'data' fields are not mapped to str.
    For example:
    **{'data':dc_0,'index':0, 'dimension':'bands'}
    **{'data':[10000, nir_2, p1_6, p2_7]}
    **{'x':nir_2,'y':red_3}
    """
    # Rendered "'key': value" fragments for the special (unquoted) keys.
    inputs = []
    # Keys handled here get popped from dict_input afterwards.
    # NOTE: this mutates the caller's dict.
    to_remove = []
    for key, value in dict_input.items():
        if key in ('x', 'y', 'data', 'value', 'base', 'p','target'):
            to_remove.append(key)
            if isinstance(value, list):
                # Render list elements through str() so they stay unquoted.
                val_str = "["
                for val in value:
                    val_str += str(val) + ', '
                inputs.append(f"'{key}': {val_str[:-2]}]")
            else:
                inputs.append(f"'{key}': {value}")
        else:
            continue
    for key in to_remove:
        _ = dict_input.pop(key)
    # Splice the unquoted fragments into the repr of the remaining dict by
    # replacing its first '{' (a trailing ', ' separates them from the rest).
    replace_str = '{' + ','.join(inputs)
    if dict_input:
        replace_str += ', '
    return f"**{dict_input}".replace('{', replace_str, 1) | 0e05526c7e86fda6c04c07ed6db447b89bd08a71 | 97,107 |
def getint(string):
    """Parse *string* as an int (e.g. a port number); return 0 on failure."""
    try:
        return int(string)
    except ValueError:
        return 0
import torch
def euclidean_dist(x, y):
    """
    Squared Euclidean distance between every query and every prototype.

    :param x: size [n_query_total, out_dim] - queries
    :param y: size [n_ways, out_dim] - prototypes
    :return: [n_query_total, n_ways] tensor of squared distances
    """
    dim = x.size(1)
    if dim != y.size(1):
        raise ValueError(f'Pic embedding for prototype {y.size(1)} and query {dim} data arent equal')
    # Broadcast to [n_query_total, n_ways, dim] and reduce over the
    # embedding dimension.
    difference = x.unsqueeze(1) - y.unsqueeze(0)
    return difference.pow(2).sum(2)
def parsiraj_tablicu_KA(tablica):
    """Parse the tabular description of a finite automaton (Sipser p. 36).

    The first state listed is the start state; accepting states are marked
    with a '#' at the end of their row.

    Returns the tuple (states, alphabet, transition, start, accepting).
    """
    # First line holds the single-character alphabet symbols; the remaining
    # lines each describe one state's transitions.
    prva, *ostale = tablica.strip().splitlines()
    znakovi = prva.split()
    assert all(len(znak) == 1 for znak in znakovi)
    abeceda = set(znakovi)
    stanja, završna = set(), set()
    prijelaz, početno = {}, None
    for linija in ostale:
        stanje, *dolazna = linija.split()
        # The state on the first row becomes the start state.
        if početno is None: početno = stanje
        # At most one extra column: the accepting-state marker '#'.
        extra = len(dolazna) - len(znakovi)
        assert extra in {0, 1}
        if extra == 1:
            assert dolazna.pop() == '#'
            završna.add(stanje)
        # Remaining columns give the target state per alphabet symbol.
        for znak, dolazno in zip(znakovi, dolazna):
            prijelaz[stanje, znak] = dolazno
        stanja.add(stanje)
    return stanja, abeceda, prijelaz, početno, završna | e7153d421c4cb0eb0953fb476bde8172a04ab1ba | 97,122 |
def qml(yi, yi1, dqml):
    """
    Maximum-likelihood transition probability q(y_i | y_i-1).

    :param yi: a label/state y_i
    :param yi1: the previous label/state y_i-1
    :param dqml: nested dict of transition counts, dqml[prev][cur]
    :return: count(yi1 -> yi) divided by the total count out of yi1
    """
    outgoing = dqml[yi1]
    return outgoing[yi] / sum(outgoing.values())
def is_evenly_divisible(pair):
    """True iff pair[0] is evenly divisible by pair[1]."""
    return pair[0] % pair[1] == 0
def parse_interface_params(list):
    """
    Parse a variable list of key=value args into a dictionary suitable for
    kwarg usage.

    NOTE(review): the parameter name shadows the builtin ``list``; kept for
    backward compatibility with existing callers.

    :param list: iterable of 'key=value' strings, or None
    :returns: dict mapping keys to string values (empty dict for None)
    """
    if list is None:
        return {}
    # Split on the first '=' only so values may themselves contain '='
    # (the original raised ValueError on such input).
    return dict(item.split('=', 1) for item in list)
def validate_ecoli(seq_list, metadata_reports):
    """
    Record uidA and vt marker presence for each Seq ID.

    Looks each Seq ID up in its combinedMetadata sheet; for rows whose Genus
    is Escherichia, checks the GeneSeekr profile for 'uidA' and the Vtyper
    profile for 'vt'. Non-Escherichia rows get (False, False).

    :param seq_list: List of OLC Seq IDs
    :param metadata_reports: Dictionary retrieved from get_combined_metadata()
    :return: dict mapping Seq ID -> (uida_present, verotoxigenic) booleans
    """
    ecoli_seq_status = {}
    for seqid in seq_list:
        print('Validating {} uidA and vt marker detection'.format(seqid))
        df = metadata_reports[seqid]
        row = df.loc[df['SeqID'] == seqid]
        uida_present = False
        verotoxigenic = False
        if row['Genus'].values[0] == 'Escherichia':
            uida_present = 'uidA' in row['GeneSeekr_Profile'].values[0]
            verotoxigenic = 'vt' in row['Vtyper_Profile'].values[0]
        ecoli_seq_status[seqid] = (uida_present, verotoxigenic)
    return ecoli_seq_status
import struct
def _decode_str(reader):
    """
    Reads and decodes a length-prefixed UTF-8 string.

    The length is one byte; the escape value 0xFF signals that the real
    length follows as a little-endian 16-bit value (3 header bytes total).

    Parameters
    ----------
    reader : apptrace.Reader
        Trace reader object.

    Returns
    -------
    tuple
        (number of bytes consumed, decoded string).
    """
    sz, = struct.unpack('<B', reader.read(1))
    extended = sz == 0xFF
    if extended:
        lo, hi = struct.unpack('<2B', reader.read(2))
        sz = (hi << 8) | lo
    val, = struct.unpack('<%ds' % sz, reader.read(sz))
    val = val.decode("utf-8")
    # Bug fix: the original tested the *reassigned* sz to pick the header
    # size, so an extended-form length below 0xFF under-reported the bytes
    # consumed by 2. Track the form explicitly instead.
    header_len = 3 if extended else 1
    return (sz + header_len, val)
import torch
def make_batch_align_matrix(index_tensor, size=None, normalize=False):
    """
    Convert a sparse index_tensor into a batch of alignment matrix,
    with row normalize to the sum of 1 if set normalize.

    Args:
        index_tensor (LongTensor): ``(N, 3)`` of [batch_id, tgt_id, src_id]
        size (List[int]): Size of the sparse tensor.
        normalize (bool): if normalize the 2nd dim of resulting tensor.

    Returns:
        FloatTensor: dense tensor of shape ``size`` with a one at each
        listed index (row-normalized when ``normalize`` is set).
    """
    n_fill, device = index_tensor.size(0), index_tensor.device
    # One "vote" per listed (batch, tgt, src) alignment point.
    value_tensor = torch.ones([n_fill], dtype=torch.float)
    dense_tensor = torch.sparse_coo_tensor(
        index_tensor.t(), value_tensor, size=size, device=device).to_dense()
    if normalize:
        row_sum = dense_tensor.sum(-1, keepdim=True)  # sum by row(tgt)
        # threshold on 1 to avoid div by 0
        torch.nn.functional.threshold(row_sum, 1, 1, inplace=True)
        dense_tensor.div_(row_sum)
    return dense_tensor | 639a22823714789abb4ac5b28eb8a3252ec569b6 | 97,143 |
import re
def get_uncompressed_filename(filename):
    """
    For a .gz file, return the filename the file would have once
    uncompressed; any other name is returned unchanged.
    """
    if filename.endswith('.gz'):
        return filename[:-len('.gz')]
    return filename
def formatHex(n):
    """
    Format a 32-bit integer as 0x-prefixed, zero-padded hexadecimal;
    negative values are mapped into the upper half of the unsigned range.
    """
    value = n if n >= 0 else (1 << 31) - n
    return "0x{:08X}".format(value)
import json
def check_file_is_valid_json(path):
    """Checks if a file contains valid JSON.

    :param str path: path to json file
    :returns: True if valid, False if invalid or unreadable
    """
    try:
        # Bug fix: the original never closed the file handle; `with`
        # guarantees it is closed even when parsing fails.
        with open(path, 'r') as handle:
            json.load(handle)
    except (ValueError, IOError):
        return False
    return True
def get_on_diagonal_densities(block_ms, block_ns):
    """
    Density within each block (diagonal of the block matrix).

    Parameters
    ----------
    block_ms: 2D array
        Edge counts between pairs of blocks.
    block_ns: 1D array
        Node count of each block.

    Returns
    -------
    ps: list
        Density of each block; 0 for blocks with fewer than two nodes.
    """
    ps = []
    for idx in range(len(block_ns)):
        nodes = block_ns[idx]
        edges = block_ms[idx, idx]
        if nodes in (0, 1):
            ps.append(0)
        else:
            ps.append(2 * edges / (nodes * (nodes - 1)))
    return ps
def intersects(box, new_box):
    """
    Check whether two bounding boxes intersect (touching edges count).

    :param box: one bounding box as [x1, y1, x2, y2]
    :type box: list[int]
    :param new_box: another bounding box as [x1, y1, x2, y2]
    :type new_box: list[int]
    :return: whether the two bounding boxes intersect
    :rtype: bool
    """
    ax1, ay1, ax2, ay2 = box
    bx1, by1, bx2, by2 = new_box
    # De Morgan of the "clearly disjoint" tests.
    return ax1 <= bx2 and ax2 >= bx1 and ay1 <= by2 and ay2 >= by1
def split_params(params, ncomps):
    """Split a flat parameter vector into (amps, fwhms, offsets) thirds,
    each of length *ncomps*."""
    amps, fwhms, offsets = (
        params[k * ncomps:(k + 1) * ncomps] for k in range(3))
    return amps, fwhms, offsets
def stringify(sub):
    """Return *sub* wrapped in single quotes and in double quotes
    (both Python string spellings of the substring)."""
    single_quoted = f"'{sub}'"
    double_quoted = f'"{sub}"'
    return single_quoted, double_quoted
import torch
def argSoftmax2d(tensor, temperature, num_samples=1, return_1d_idx=False):
    """
    Sample 2D indices with probability proportional to
    softmax(tensor / temperature).

    Args:
        tensor: PyTorch tensor of size (n x 1 x d x d).
        temperature: softmax temperature (higher flattens the distribution).
        num_samples: samples drawn per batch row.
        return_1d_idx: if True, also return the flattened indices.

    Returns:
        (n*num_samples x 2) LongTensor of (row, col) indices; with
        return_1d_idx also the flattened LongTensor of 1d indices.
    """
    n = tensor.size(0)
    d = tensor.size(2)
    probs = (tensor / temperature).view(n, -1).softmax(dim=-1)
    flat = torch.multinomial(probs, num_samples)
    # Bug fix: `//` (floor division) on tensors is deprecated in modern
    # PyTorch; use torch.div with an explicit rounding mode.
    rows = torch.div(flat, d, rounding_mode='floor').view(-1, 1)
    cols = torch.remainder(flat, d).view(-1, 1)
    coords = torch.cat((rows, cols), dim=1).long()
    if not return_1d_idx:
        return coords
    return coords, flat.long()
from functools import reduce
import operator
def _vector_add(*vecs):
    """Pairwise-add any number of equal-length vectors.

    For x = (x_0, ..., x_{n-1}) and y = (y_0, ..., y_{n-1}) the result is
    (x_0+y_0, x_1+y_1, ..., x_{n-1}+y_{n-1}).
    """
    assert len({len(v) for v in vecs}) == 1
    return [sum(entries) for entries in zip(*vecs)]
def parse_rgb_txt_file(path):
    """
    Parse the given rgb.txt file into a Python dict mapping color names to
    (r, g, b) integer tuples.

    See https://en.wikipedia.org/wiki/X11_color_names for more information.

    :param str path: the path to the X11 rgb.txt file
    """
    color_dict = {}
    with open(path, 'r') as rgb_txt:
        for raw_line in rgb_txt:
            stripped = raw_line.strip()
            # Skip blank lines and '!'-prefixed comments.
            if not stripped or stripped.startswith('!'):
                continue
            fields = stripped.split()
            # Columns: R G B then the (possibly multi-word) color name.
            name = " ".join(fields[3:])
            color_dict[name] = (int(fields[0]), int(fields[1]), int(fields[2]))
    return color_dict
def parse_rose_stream_name(stream_name):
    """
    Split the Rose stream name given to an ESGF dataset into its components.

    :param str stream_name: The Rose stream name given to this ESGF data set.
    :returns: The dataset components from the stream name.
    :rtype: dict
    """
    parts = stream_name.split('_')
    component_names = ['source_id', 'experiment_id', 'variant_label',
                       'table_id', 'cmor_name']
    # Index explicitly (rather than zip) so a short stream name raises
    # IndexError, as before.
    return {name: parts[position]
            for position, name in enumerate(component_names)}
def get_float_formatter_func(precision=None, thousands_sep=False,
                             zero_string='0'):
    """
    Returns a function that gives ``zero_string`` if the float is 0, else
    a string with the given digits of precision.
    """
    def float_formatter(f):
        # Strings pass through untouched.
        if isinstance(f, str):
            return f
        if thousands_sep:
            ts = ','
        else:
            ts = ''
        if precision is not None:
            prec = '.' + str(precision) + 'f'
        elif float(f).is_integer():
            # No explicit precision: render whole floats without decimals.
            prec = '.0f'
        else:
            prec = ''
        fmt = '{:' + ts + prec + '}'
        try:
            if f == 0:
                return zero_string
            elif (precision is not None) or (thousands_sep is not None):
                # NOTE(review): `thousands_sep is not None` is True even for
                # the default False, so this branch is almost always taken
                # and the str(f) fallback below is nearly dead code --
                # confirm whether plain truthiness was intended.
                return fmt.format(f)
            else:
                return str(f)
        except ValueError:
            return f
    return float_formatter | 3e0ff275c1c658710c6363c1dc32e111fdc65483 | 97,174 |
import gzip
def read_aids(filename):
    """
    Read AIDs (one integer per line; blank lines ignored) from a file.

    Parameters
    ----------
    filename : str
        Filename containing AIDs; names ending in '.gz' are opened with gzip.
    """
    opener = gzip.open if filename.endswith('.gz') else open
    aids = []
    with opener(filename) as handle:
        for line in handle:
            if line.strip():
                aids.append(int(line))
    return aids
from datetime import datetime
def GetFilterQuery(query,
                   time_property,
                   start_date,
                   end_date,
                   property_values=None,
                   datetime_pattern='%Y-%m-%d'):  # pragma: no cover.
    """Gets query with filters.

    There are 2 kinds of filters:
    (1) The time range filter defined by ``time_property``, ``start_date``
        and ``end_date``. Note, the format of ``start_date`` and
        ``end_date`` should be consistent with ``datetime_pattern``.
    (2) The values of properties set by ``property_values``; list values use
        the property's IN operator, scalars use equality.
    """
    start_date = datetime.strptime(start_date, datetime_pattern)
    end_date = datetime.strptime(end_date, datetime_pattern)
    if property_values:
        # Bug fix: dict.iteritems() does not exist on Python 3; items()
        # works on both Python 2 and 3.
        for cls_property, value in property_values.items():
            if isinstance(value, list):
                query = query.filter(cls_property.IN(value))
            else:
                query = query.filter(cls_property == value)
    # Half-open interval: start inclusive, end exclusive.
    return query.filter(time_property >= start_date).filter(
        time_property < end_date)
from typing import List
def one_sided(value: float, distribution: List[float]) -> float:
    """One-sided probability of a value more extreme than *value* in
    *distribution* (fraction of elements strictly greater)."""
    assert distribution
    exceed_count = sum(1 for element in distribution if value < element)
    return exceed_count / len(distribution)
def hazard_to_survival(interval):
    """Convert interval hazards (event probabilities) into a survival curve.

    Args:
        interval ([pd.DataFrame, np.array]): interval event probabilities,
            usually a predict result or _get_point_probs_from_survival output.

    Returns:
        [pd.DataFrame, np.array]: survival curve — the cumulative product of
        the per-interval survival probabilities along axis 1.
    """
    survive_each_interval = 1 - interval
    return survive_each_interval.cumprod(axis=1)
def fmap(f, functor):
    """Function form of fmap: delegates to the functor's own fmap method."""
    lifted = functor.fmap
    return lifted(f)
def calc_check_digit(number):
    """Calculate the check digit. The number passed should not have the
    check digit included."""
    total = 0
    # Weights descend from 9 with the digit's position.
    for position, digit in enumerate(number):
        total += (9 - position) * int(digit)
    return str((11 - total) % 11 % 10)
def concat_host(host, port):
    """
    Join host and port with an underscore into a single string.

    Parameters:
        host - str
        port - int or str
    Returns:
        str
    """
    return f"{host}_{port}"
def create_outname(options):
    """Build the output GTF filename reflecting the input options used."""
    outname = options.outprefix + "_talon"
    # Keep the exact `== True` comparison from the original (truthy values
    # like 1 behave identically; arbitrary truthy objects would not).
    if options.observed == True:
        outname += "_observedOnly"
    return outname + ".gtf"
def get_issue_list_links(soup_object):
    """
    Collect absolute issue URLs from a parsed issue-list page.

    Finds the anchor tags inside the first <ul> of the main content div
    ("mw-content-text"), reads each anchor's href, prefixes the DC Wikia
    base URL, and returns the resulting URLs as a list.
    """
    content = soup_object.find("div", id="mw-content-text")
    anchors = content.find("ul").find_all("a")
    base_site_url = "https://dc.fandom.com"
    return [base_site_url + anchor["href"] for anchor in anchors]
async def iterate_roles(author, users: list):
    """Iterates through the author's Discord roles.

    Checks whether any of the author's Discord roles or the author's id
    itself are in a list of authorized `users`. Called by `check_roles`.

    Args:
        author (discord.Member): the command user
        users (list): users to iterate through

    Returns:
        bool: True if author is authorized; False otherwise
    """
    if users and author.id in users:
        return True
    elif author.roles:
        for role in author.roles:
            # Bug fix: narrowed the original bare `except:` (which even
            # swallowed KeyboardInterrupt/SystemExit) to the errors a
            # malformed role object could realistically raise here.
            try:
                if role.id in users:
                    return True
            except (AttributeError, TypeError):
                continue
    return False
import math
def distance(origin, destination):
    """Euclidean distance between two 2-D points.

    :param tuple origin: 2-tuple of x, y coordinates
    :param tuple destination: 2-tuple of x, y coordinates
    """
    x0, y0 = origin
    x1, y1 = destination
    return math.sqrt((x0 - x1) ** 2 + (y0 - y1) ** 2)
def change_to_pubs_test(pubs_url):
    """
    Flip a pubs.er.usgs.gov URL to its pubs-test.er.usgs.gov counterpart,
    working around an apache config quirk on the test tier.

    :param pubs_url: a pubs.er.usgs.gov url
    :return: a pubs-test.er.usgs.gov url
    """
    return pubs_url.replace('pubs.er', 'pubs-test.er')
from typing import Optional
import requests
def get_collection_uuid(gc, collection_name: str) -> Optional[str]:
    """Return the DSA collection uuid matching *collection_name*.

    Args:
        gc: girder client
        collection_name (string): name of the collection in DSA

    Returns:
        string: DSA collection uuid, or None when no collection matches or
        the get request fails.
    """
    try:
        candidates = gc.get(
            f"/collection?text={collection_name}"
        )
    except requests.exceptions.HTTPError as err:
        print(
            f"Error in collection id get request: {err.response.status_code}, {err.response.text}"
        )
        return None
    for candidate in candidates:
        print("collection_id_dict", candidate)
        if candidate["name"] == collection_name:
            collection_id = candidate["_id"]
            print(
                f"Collection {collection_name} found with id: {collection_id}"
            )
            return collection_id
    print(f"Collection {collection_name} not found")
    return None
def disable_caching(response):
    """Disable caching on a response and return it."""
    cache = response.cache_control
    cache.no_cache = True
    cache.no_store = True
    cache.must_revalidate = True
    return response
def py_source_to_app(py_source, env):
"""
Create a Dash app from a string defining the app.
"""
env = env or {}
exec(py_source, env)
return env["app"] | 796f0335d36b70fb3bdf043d3336b63e0ccaf5ae | 97,216 |
def pick_best_base_call( *calls ) -> tuple:
    """Select the highest-quality base call from a series of base calls.

    Example:
        >>> pick_best_base_call( ('A',32), ('C',22) )
        ('A', 32)
        >>> pick_best_base_call( ('A',32), ('C',32) )
        ('N', 0)

    Args:
        calls (generator) : generator/list of (base, quality, ...) tuples;
            None entries are skipped

    Returns:
        tuple: (best_base, best_q), or ('N', 0) when no call wins outright
        (tie between different bases, or no usable calls at all)
    """
    winner, winner_q = None, -1
    ambiguous = False
    for entry in calls:
        if entry is None:
            continue
        base, quality = entry[0], entry[1]
        if quality > winner_q:
            winner, winner_q = base, quality
            ambiguous = False
        elif quality == winner_q and base != winner:
            ambiguous = True
    if ambiguous or winner is None:
        return ('N', 0)
    return winner, winner_q
import torch
def ind2sub(shape, index):
    """
    A PyTorch implementation of MATLAB's "ind2sub" function

    Parameters
    ----------
    shape : torch.Size | list | tuple
        shape of the 2D matrix
    index : torch.Tensor
        (n,) linear indices

    Returns
    -------
    rows : torch.Tensor
        (n,) row subscripts
    cols : torch.Tensor
        (n,) column subscripts
    """
    # validate argument types and that every index fits inside the matrix
    assert isinstance(shape, (torch.Size, list, tuple))
    assert isinstance(index, torch.Tensor) and index.dim() == 1
    assert bool((index < shape[0] * shape[1]).all())
    if len(shape) != 2:
        raise NotImplementedError('only implemented for 2D case.')
    # NOTE(review): both subscripts are derived from shape[0], i.e. a
    # column-major (MATLAB-style) linearization with "cols" varying fastest.
    # The rows/cols naming looks swapped relative to MATLAB's ind2sub --
    # confirm against callers before changing.
    cols = index % shape[0]
    rows = index // shape[0]
    return rows, cols
def binexp(x: int, n: int) -> int:
    """Binary (square-and-multiply) exponentiation.

    Parameters:
        x (int): Base
        n (int): Exponent (power); non-positive exponents yield 1

    Returns:
        int: x raised to the n-th power
    """
    acc = 1
    while n > 0:
        if n % 2 == 1:
            acc *= x
        # square the base, halve the exponent each round
        x *= x
        n //= 2
    return acc
def unique_by(f):
    """Return a generator function that keeps only the first item for each
    distinct value of f, preserving order.

    >>> tuple(unique_by(len)(['cat', 'mouse', 'dog', 'hen']))
    ('cat', 'mouse')
    """
    def dedupe(iterable):
        observed = set()
        for element in iterable:
            key = f(element)
            if key in observed:
                continue
            observed.add(key)
            yield element
    return dedupe
import math
def get_rounded_pruned_element_number(total: int, sparsity_rate: float, multiple_of: int = 8) -> int:
    """
    Calculates the number of elements to sparsify (approximately matching
    `sparsity_rate`) such that the number of remaining items is a multiple
    of `multiple_of`. Always rounds the number of remaining elements up.

    :param total: Total elements number.
    :param sparsity_rate: Proportion of zero elements in total.
    :param multiple_of: Number of remaining elements must be a multiple of `multiple_of`.
    :return: Number of elements to be zeroed.
    """
    # round the kept count UP to the next multiple, then prune the rest;
    # keep the original float expression to avoid changing boundary rounding
    kept = math.ceil((total - total * sparsity_rate) / multiple_of) * multiple_of
    pruned = total - kept
    return pruned if pruned > 0 else 0
import string
def parse_letternumber(st):
    """
    Parse CDMS's two-letter QNs

    From the CDMS docs:
    "Exactly two characters are available for each quantum number. Therefore, half
    integer quanta are rounded up ! In addition, capital letters are used to
    indicate quantum numbers larger than 99. E. g. A0 is 100, Z9 is 359. Small
    types are used to signal corresponding negative quantum numbers."
    """
    pieces = []
    for ch in st:
        if ch in string.ascii_lowercase:
            # lowercase encodes a negative value: 'a' -> -10, 'b' -> -11, ...
            pieces.append('-' + str(string.ascii_lowercase.index(ch) + 10))
        elif ch in string.ascii_uppercase:
            # uppercase extends beyond 99: 'A' -> 10, ..., 'Z' -> 35
            pieces.append(str(string.ascii_uppercase.index(ch) + 10))
        else:
            pieces.append(ch)
    return int(''.join(pieces))
def get_table_name(element):
    """
    Return a fully qualified table name for *element*.

    The input record already carries the target table name in its 'type'
    field; this prefixes the hard-coded project and dataset. Extend here
    for more complex routing logic.
    """
    prefix = 'PROJECT_ID:DATASET.'
    return prefix + element['type']
def annot(xcrd, ycrd, zcrd, txt, xancr='left'):
    """
    Annotation structure function for plotly
    :param xcrd: x position
    :param ycrd: y position
    :param zcrd: z position
    :param txt: annotation name
    :param xancr: anchor position
    :return: annotation as dict
    """
    return {
        'showarrow': False,
        'x': xcrd,
        'y': ycrd,
        'z': zcrd,
        'text': txt,
        'xanchor': xancr,
        'font': {'color': 'white', 'size': 12},
    }
def test_module(client):
    """
    Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful.

    Args:
        client (Client): instance of the Client class

    Returns:
        'ok' if test passed, anything else will fail the test.
    """
    try:
        client.get_supported_languages()
    except Exception as e:
        return 'Test failed: {}'.format(str(e))
    return 'ok'
from datetime import datetime
def parse_date(date):
"""Parse date string to date epoch in millisecons."""
date_obj = datetime.strptime(date, "%d/%m/%Y")
return date_obj.timestamp() * 1000 | 47722d947f8ec0a2cf51b6c7b9452135380150f3 | 97,242 |
from typing import OrderedDict
def _ordered_dict(loader, node):
"""
Loads YAML mappings into an ordered dict to preserve key order.
"""
loader.flatten_mapping(node)
return OrderedDict(loader.construct_pairs(node)) | a0c749ecd56bd126452a92f8f87027bbaba4bab1 | 97,247 |
def _uniq(l):
"""Removes duplicates from a list, preserving order."""
r = []
for item in l:
if item not in r: r.append(item)
return r | 7344172661842eeca7329203718b1f20f9421ca6 | 97,251 |
def import_program(client, file_=None, filename=None, dirname=None):
    """Import a program file for a model.

    Cannot specify both file and filename parameters.

    Args:
        client (obj):
            creopyson Client.
        `file_` (str, optional):
            Destination Model name. Defaults is currently active model,
            or the model for the filename parameter if given.
        filename (str, optional):
            Source file name. Default is the model name with the
            appropriate file extension.
        dirname (str, optional):
            Source directory. Defaults is Creo's current working directory.

    Returns:
        str: Name of the model updated
    """
    payload = {}
    if file_ is None:
        # fall back to the currently active model, when there is one
        current = client.file_get_active()
        if current:
            payload["file"] = current["file"]
    else:
        payload["file"] = file_
    if filename is not None:
        payload["filename"] = filename
    if dirname is not None:
        payload["dirname"] = dirname
    return client._creoson_post("interface", "import_program", payload, "file")
def split(l, counts):
    """
    Split *l* into consecutive chunks whose sizes are given by *counts*.

    A count of 0 means "repeat the previous chunk size until the input is
    exhausted"; a count of -1 stops splitting; any leftover input becomes
    a final chunk.

    >>> split("hello world", [])
    ['hello world']
    >>> split("hello world", [1])
    ['h', 'ello world']
    >>> split("hello world", [2])
    ['he', 'llo world']
    >>> split("hello world", [2,3])
    ['he', 'llo', ' world']
    >>> split("hello world", [2,3,0])
    ['he', 'llo', ' wo', 'rld']
    >>> split("hello world", [2,-1,3])
    ['he', 'llo world']
    """
    chunks = []
    remainder = l
    previous = len(l)  # size to reuse when a zero count is encountered
    for size in counts:
        if not remainder or size == -1:
            break
        if size == 0:
            # exhaust the rest of the input using the last explicit size
            while remainder:
                chunks.append(remainder[:previous])
                remainder = remainder[previous:]
            break
        chunks.append(remainder[:size])
        remainder = remainder[size:]
        previous = size
    if remainder:
        chunks.append(remainder)
    return chunks
def prune(body):
    """Return a copy of *body* without keys whose values are falsy
    (False, 0, '', None, empty containers)."""
    return {key: value for key, value in body.items() if value}
import pickle
def load_dt_model(pickle_model):
    """
    Deserialize a model from its Pickle byte representation.

    :param bytes pickle_model: pickled model bytes
    :return: the unpickled model object for re-use
    :rtype: object

    .. warning:: pickle.loads can execute arbitrary code -- only unpickle
       data from trusted sources.
    """
    model = pickle.loads(pickle_model)
    return model
def triangular(n):
    """
    The triangular numbers are the numbers 1, 3, 6, 10, 15, 21, ...
    They are calculated as follows.

    1 = 1
    1 + 2 = 3
    1 + 2 + 3 = 6
    1 + 2 + 3 + 4 = 10
    1 + 2 + 3 + 4 + 5 = 15

    Returns nth triangular number.
    """
    # Closed form n*(n+1)/2 replaces the O(n) summation; negative n is
    # clamped to 0 to match the previous sum-over-empty-range behaviour.
    if n < 0:
        return 0
    return n * (n + 1) // 2
def first_dimension_length(array):
    """Returns the length of the first dimension of the provided array or list.

    Args:
        array (list, tuple or numpy.ndarray): An array-like object.

    Returns:
        int: The length of the first dimension of the array.
    """
    # isinstance (rather than `type(...) is list`) also accepts list
    # subclasses and tuples; anything else is assumed to expose .shape.
    if isinstance(array, (list, tuple)):
        return len(array)
    return array.shape[0]
def get_child_pages(page):
    """Return the child pages for a given page (delegates to page.get_children())."""
    children = page.get_children()
    return children
def orient2d(a, b, c):
    """
    The Orient2D geometric predicate.

    c can be 2x1 or 2xN: c[0] is x values, c[1] is y values
    The output is a scalar number which is:
        > 0 if abc forms an angle in (0, pi), turning left,
        < 0 if abc forms an angle in (0, -pi), turning right,
        = 0 if abc forms an angle equal to 0 or pi, or is straight.

    Equivalently: > 0 if c is to the left of the line ab, < 0 if c is to
    the right, = 0 if c lies on ab, seen from above and along ab.

    The algorithm does not use exact arithmetics, and may fail within
    machine tolerances.
    """
    # signed cross product of (a - c) and (b - c); same operation order as
    # the textbook determinant form, so float rounding is unchanged
    acx = a[0] - c[0]
    acy = a[1] - c[1]
    bcx = b[0] - c[0]
    bcy = b[1] - c[1]
    return acx * bcy - acy * bcx
def get_bbox_center(bbox):
    """Return the center of the bounding box
    :param bbox: the player bounding box [top_left x, top_left y, bottom_left x, bottom_left y]
    :return: the center x, y of the bounding box

    >>> get_bbox_center([23,12,35,20])
    (29.0, 16.0)
    """
    # keep the half-extent-plus-origin form so float results match exactly
    center_x = (bbox[2] - bbox[0]) / 2 + bbox[0]
    center_y = (bbox[3] - bbox[1]) / 2 + bbox[1]
    return (center_x, center_y)
def set_off_diagonal_to_zero(matrix):
    """Zero every off-diagonal element of the 2D *matrix* in place and
    return the same (mutated) matrix."""
    n_rows, n_cols = matrix.shape[0], matrix.shape[1]
    for row in range(n_rows):
        for col in range(n_cols):
            if row == col:
                continue
            matrix[row, col] = 0
    return matrix
def get_added_lines(patch):
    """
    Get lines added with a patch.
    (e.g., git diff between two versions of a file)

    :param patch: the content of the patch
    :return: the added lines, newline-separated (empty string if none)
    """
    # Fixes two defects of the previous version: the first added line was
    # concatenated onto the second with no separator, and unified-diff
    # '+++' file headers were counted as added lines.
    added = []
    for line in patch.split('\n'):
        if line.startswith("+") and not line.startswith("+++"):
            added.append(line[1:])
    return "\n".join(added)
def domain_to_aol_attr_convert(quad_attr):
    """Convert an attribute from the domain-level syntax (which should be a
    valid Python name) to the AOL-level syntax.

    Attributes not already prefixed with ``is_`` gain a ``has_`` prefix,
    then every underscore becomes a hyphen.
    """
    prefixed = quad_attr if quad_attr.startswith('is_') else f'has_{quad_attr}'
    return prefixed.replace('_', '-')
def find_in_dict(data, keys):
    """
    Finds the value in a potentially nested dictionary by a key or set of
    nested keys.

    Parameters:
    ----------
    data: :obj:`dict`
        The dictionary for which we want to find the value indexed by the
        potentially nested keys.
    keys: :obj:`list`, :obj:`tuple`, :obj:`str`
        Either an iterable of nested keys or a single key for which we want
        to locate the associated value in the dictionary for.

    Example:
    -------
    >>> data = {'foo': {'bar': 'banana'}}
    >>> find_in_dict(data, 'foo')
    >>> {'bar': 'banana'}
    >>> find_in_dict(data, ['foo', 'bar'])
    >>> 'banana'
    """
    # a plain string (or any non-iterable) is treated as a single key
    if isinstance(keys, str) or not hasattr(keys, '__iter__'):
        return data[keys]
    current = data[keys[0]]
    for key in keys[1:]:
        current = current[key]
    return current
def close_the_database(p_my_cursor, p_my_connection):
    """Called to close the current open database connection. Returns None.

    The cursor is closed first, then the connection, mirroring the order in
    which they must be released.
    """
    for closable in (p_my_cursor, p_my_connection):
        closable.close()
    return None
def plan_to_id(plan):
    """ Transform plan to string ID.

    Args:
        plan: plan represented as numpy array

    Returns:
        plan represented as string (property values concatenated in order)
    """
    return ''.join(str(plan[i]) for i in range(plan.shape[0]))
def dfs(labels, graph, start, index):
    """ Iterative (stack-based) depth-first search that labels one component.

    Starting from *start*, every reachable node whose current label is -1
    (unassigned) receives *index*; nodes that already carry another label
    act as barriers and are not traversed through.

    Input
    ______
    labels: array for labels of each sample in the cluster
    graph: adjacency structure; graph[n] lists the neighbours of n
    start: initial node; DFS explores the tree containing this node
    index: label index to assign

    Output
    ______
    labels : the same array, updated with *index* for the visited nodes
    """
    seen = []
    pending = [start]
    while pending:
        node = pending.pop()
        if node in seen or labels[node] != -1:
            continue
        seen.append(node)
        labels[node] = index
        # push unseen neighbours; set difference mirrors the original logic
        pending += set(graph[node]) - set(seen)
    return labels
def link(url):
    """Strip the host part of a link.

    Returns the path portion for local API links; None (implicitly the
    original behaviour) for anything else.
    """
    api_prefix = 'https://127.0.0.1:5000/api'
    if not url.startswith(api_prefix):
        return None
    return url[len(api_prefix):]
def calculate_traffic(incident):
    """Calculates the traffic level at the time of the incident.

    Averages travel duration with and without traffic over the resources of
    the first resource set, stores the relative slowdown under
    'traffic_level', and returns the (mutated) incident.
    """
    resources = incident['traffic']['resourceSets'][0]['resources']
    count = len(resources)
    with_traffic = sum(r['travelDurationTraffic'] for r in resources) / count
    free_flow = sum(r['travelDuration'] for r in resources) / count
    incident['traffic_level'] = (with_traffic - free_flow) / free_flow
    return incident
def reorder(df, columns):
    """Place the supplied columns at the front of the dataframe, keeping the
    relative order of the remaining columns. Raises ValueError if a column
    is not present."""
    trailing = list(df.columns)
    for col in columns:
        trailing.remove(col)
    return df[columns + trailing]
from bs4 import BeautifulSoup
def restore_strings(template, strings):
    """
    Inserts a list of strings into the template.

    This reverses the `extract_strings` function.

    Args:
        template (str): The HTML template.
        strings (list[tuple[StringValue, dict]]): a StringValue and HTML
            attributes dict for each string to reinsert into the template.

    Returns:
        str: A HTML blob with the strings inserted into the template.
    """
    soup = BeautifulSoup(template, "html.parser")
    # each <text position="N"> placeholder is swapped for its rendered string
    for placeholder in soup.findAll("text"):
        value, attrs = strings[int(placeholder.get("position"))]
        placeholder.replaceWith(value.render_soup(attrs))
    return str(soup)
import math
def angle_of_vector(vector):
    """ Calculate the angle of the vector in degrees relative to
    a normal 2d coordinate system. This is useful for finding the
    angle between two waypoints.
    vector: [[x0,y0],[x1,y1]]
    """
    dx = vector[1][0] - vector[0][0]
    dy = vector[1][1] - vector[0][1]
    rad = math.atan2(dy, dx)
    print("angle_of_vector %0.2f radians, %0.2f degrees" % (rad, math.degrees(rad)))
    return math.degrees(rad)
import json
def col_json_to_dict(df, cols):
    """Transform the json values inside a column into list of dictionaries

    Args:
        df(pd.Dataframe): dataframe
        cols(list): name of columns with json values

    Returns:
        A pandas Dataframe with the json columns parsed; the input
        dataframe itself is left untouched.
    """
    result = df
    for column in cols:
        # parse from the ORIGINAL df's column (as the previous version did)
        parsed = df[column].apply(json.loads)
        result = result.assign(**{column: parsed})
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.