content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def positions_at_t(points, t):
    """Compute where every point is at a given time.

    :param points: iterable of Point objects exposing ``at_time(t)``
    :param t: the time of interest
    :return: list with one position per point, in input order
    """
    positions = []
    for point in points:
        positions.append(point.at_time(t))
    return positions
|
750379504392e10994614ec0952d91782c073a58
| 46,826
|
import unicodedata
import re
def reduce_display_name(name):
    """Reduce a display name to a username-comparable form.

    Diacritics are removed, runs of whitespace become underscores, and the
    result is lowercased.
    """
    # NFD decomposition splits base characters from combining marks,
    # which are then dropped.
    decomposed = unicodedata.normalize('NFD', name)
    stripped = ''.join(ch for ch in decomposed if not unicodedata.combining(ch))
    underscored = re.sub(r'\s+', '_', stripped)
    return underscored.lower()
|
82d4163ff2a3c73eece890d5c94aee1af6ff0a92
| 46,833
|
def is_in(val, lvals):
    """ Replace the standard 'in' operator but uses 'is' to check membership.
    This method is mandatory if element to check overloads operator '=='.
    Args:
        val: Value to check
        lvals: List of candidate values
    Returns:
        True if value is in the list of values (presence checked with 'is')
    """
    for candidate in lvals:
        if candidate is val:
            return True
    return False
|
e667c756e16f35d8dbba8311fa98f193f02a05b9
| 46,837
|
def base_digits_decoder(alist: list, b: int) -> int:
    """
    The inverse function of 'to_base'. Interpret ``alist`` as the digits
    (most significant first) of a number written in base ``b`` and return
    its base-10 integer value.
    """
    # Horner's rule: fold left-to-right instead of summing powers.
    total = 0
    for digit in alist:
        total = total * b + digit
    return total
|
ae136b738716245b93f40668e4045df2ffd38a01
| 46,839
|
import torch
def pytorch_preprocess(batch):
    """
    Normalize a batch of images for the pretrained torchvision models.
    The scaling procedure is described in the docs:
    https://pytorch.org/docs/stable/torchvision/models.html

    :param batch: float tensor of shape (N, 3, H, W) with values in [-1, 1]
    :return: tensor rescaled to [0, 1] and normalized with ImageNet mean/std

    Bug fix: the original built an empty ``batch_color_transformed`` list and
    called ``torch.stack`` on it, which raises on every call; that dead code
    is removed.
    """
    batch = (batch + 1) * 0.5  # [-1, 1] -> [0, 1]
    batch = torch.clamp(batch, 0, 1)
    # ImageNet channel statistics, shaped (1, C, 1, 1) to broadcast over NCHW.
    mean = torch.tensor([.485, .456, .406], dtype=batch.dtype, device=batch.device)[None, :, None, None]
    batch = batch.sub(mean)  # subtract mean
    std = torch.tensor([.229, .224, .225], dtype=batch.dtype, device=batch.device)[None, :, None, None]
    batch = batch.div(std)
    return batch
|
008573834a0348cae5229c9d42d7293eb58242ca
| 46,843
|
def bed_map_region_id_to_seq_id(in_bed_file):
    """
    Read in .bed file, and store for each region ID (column 4) the sequence
    ID (column 1).

    :param in_bed_file: path to a tab-separated .bed file
    :return: dictionary with mappings region ID -> sequence ID

    Fix: removed the no-op ``f.closed`` attribute access left over from a
    pre-``with`` version; the context manager already closes the file.
    """
    regid2seqid_dic = {}
    with open(in_bed_file) as f:
        for line in f:
            cols = line.strip().split("\t")
            # column 1 = sequence ID (e.g. chromosome), column 4 = region ID
            regid2seqid_dic[cols[3]] = cols[0]
    return regid2seqid_dic
|
cc0adee384b954b1a33e61ed76f560026b0303e2
| 46,844
|
def is_abundant(number):
    """Determine if sum of divisors of number is greater than number."""
    # Proper divisors never exceed number // 2 (besides the number itself).
    divisor_sum = sum(d for d in range(1, number // 2 + 1) if number % d == 0)
    return divisor_sum > number
|
d895d562c359cd36e9ee3f7f1785c716475ae736
| 46,859
|
def summarize_repos(events):
    """Generate list of all repos in the iterable of events."""
    link = '[{0}/{1}](https://github.com/{0}/{1})'
    unique_repos = {event.repo for event in events}
    return [link.format(*pair) for pair in sorted(unique_repos)]
|
524000f40ae6f637fcbb809e110a1b36dee9a103
| 46,860
|
def parse_noun_line(line):
    """
    Parse one Leo dictionary noun entry line such as:
        (aktive) Langzeitverbindung {f}
    into a ``[word, gender]`` list, e.g. ``['Langzeitverbindung', 'f']``.
    An optional leading parenthesised qualifier is skipped.
    """
    brace_open = line.find('{')
    gender = line[brace_open + 1:line.find('}')]
    paren_end = line.find(')')
    # Skip "(qualifier) " prefix when present, otherwise start at 0.
    word_start = 0 if paren_end == -1 else paren_end + 2
    word = line[word_start:brace_open - 1]
    return [word, gender]
|
f993cd786229a6d02a61ede833bd280300f7550a
| 46,862
|
def filter_seq2seq_output(string_pred, eos_id=-1):
    """Truncate a seq2seq prediction at the first eos token (exclusive).

    Arguments
    ---------
    string_pred : list
        Output strings/ints predicted by the seq2seq system.
    eos_id : int, string
        The id of the eos.

    Returns
    -------
    list
        The prediction up to (not including) the first eos; unchanged if
        no eos occurs.

    Example
    -------
    >>> string_pred = ['a','b','c','d','eos','e']
    >>> string_out = filter_seq2seq_output(string_pred, eos_id='eos')
    >>> string_out
    ['a', 'b', 'c', 'd']
    """
    if not isinstance(string_pred, list):
        raise ValueError("The input must be a list.")
    # Default to the full length when no eos token is present.
    eos_index = len(string_pred)
    for idx, token in enumerate(string_pred):
        if token == eos_id:
            eos_index = idx
            break
    return string_pred[:eos_index]
|
fbf3a8900c83dfdd7d8309e1daa262e2d193d3b0
| 46,864
|
import random
def _random_subset(seq, m):
    """Taken from NetworkX.

    Draw a random subset of size ``m`` from ``seq`` by repeated sampling
    with replacement into a set until ``m`` distinct items are collected.

    Parameters
    ----------
    seq : list
        The population
    m : int
        The sample size

    Returns
    -------
    set
        The sample from seq of size m
    """
    # Copyright (C) 2004-2016 by
    #    Aric Hagberg <hagberg@lanl.gov>
    #    Dan Schult <dschult@colgate.edu>
    #    Pieter Swart <swart@lanl.gov>
    #    All rights reserved.
    #    BSD license.
    sample = set()
    # NOTE: loops forever if seq has fewer than m distinct elements.
    while len(sample) < m:
        sample.add(random.choice(seq))
    return sample
|
a5c29c4503bab87e193c23f3942642728232b6df
| 46,867
|
def mean_of_targets(dataset):
    """
    Compute the per-target mean over a dataset.

    Parameters
    ----------
    dataset : DenseDesignMatrix

    Returns
    -------
    mn : ndarray
        1-D vector whose entry i is the mean of target i (column mean of
        ``dataset.y``).
    """
    targets = dataset.y
    return targets.mean(axis=0)
|
20f2a8dc05b2747440b388268d05493930599459
| 46,868
|
import sqlite3
def create_connection(db_file):
    """ create a database connection to a SQLite database """
    try:
        connection = sqlite3.connect(db_file)
        # Announce success exactly as before (kept for log compatibility).
        print("Opened database successfully", type(connection))
    except Exception as e:
        raise e
    return connection
|
c414a49ee3e559869d995ac666a8fc409d5f23be
| 46,869
|
def populate(d, allow_overwrite=True):
    """
    Build a decorator factory that registers functions into ``d`` by name.

    Arguments
    ---------
    * d: a dict-like object to populate
    * allow_overwrite: if False, raise a ValueError when the name is
      already present in d (default=True)
    """
    def populator(name):
        def populated(fn):
            if name in d and not allow_overwrite:
                raise ValueError("'{}' is already present".format(name))
            d[name] = fn
            return fn
        return populated
    return populator
|
d0d4c254980787f43313dd3d6bd3655975bdf1f9
| 46,870
|
def sequence_generator(n):
    """
    Generate the Collatz (hailstone) sequence starting from a positive
    integer ``n``: halve if even, else 3n+1, until 1 is reached or a value
    repeats.

    Note: the original docstring attributed this iteration to the Goldbach
    conjecture; it is the Collatz conjecture.

    :param n: positive integer (values convertible to int are accepted)
    :return: (sequence, reached_one) — the generated values and a boolean
             indicating whether the last element is 1
    :raises ValueError: if n <= 0 (int() conversion errors propagate)
    """
    if not isinstance(n, int):
        # Will raise an exception if n cant be converted to int
        n = int(n)
    if n <= 0:
        raise ValueError("Input value has to be at least 1.")
    current = n
    sequence = [current]
    while current != 1:
        current = current // 2 if current % 2 == 0 else 3 * current + 1
        sequence.append(current)
        # Cycle guard: stop if the new value was seen before (prevents an
        # infinite loop should a non-trivial cycle exist).
        if current in sequence[:-1]:
            break
    return sequence, sequence[-1] == 1
|
b70d7820883ab41c1d41b2d5d8bfa7d4671f612c
| 46,872
|
import pyflakes.api
import pyflakes.reporter
def undefined_names(sourcecode):
    """
    Parses source code for undefined names
    Example:
        >>> print(ub.repr2(undefined_names('x = y'), nl=0))
        {'y'}
    """
    # NOTE(review): the doctest above uses `ub` (ubelt), which is not
    # imported in this module, so it will not run as written.
    # Collect pyflakes findings in lists instead of writing to streams.
    class CaptureReporter(pyflakes.reporter.Reporter):
        # NB: these methods name their first parameter `reporter` rather
        # than the conventional `self`; behavior is identical.
        def __init__(reporter, warningStream, errorStream):
            # Streams are deliberately ignored; base __init__ is not called.
            reporter.syntax_errors = []
            reporter.messages = []
            reporter.unexpected = []
        def unexpectedError(reporter, filename, msg):
            reporter.unexpected.append(msg)
        def syntaxError(reporter, filename, msg, lineno, offset, text):
            reporter.syntax_errors.append(msg)
        def flake(reporter, message):
            # Called once per lint finding (a pyflakes Message instance).
            reporter.messages.append(message)
    names = set()
    reporter = CaptureReporter(None, None)
    # Lint the source under a dummy filename; findings land in `reporter`.
    pyflakes.api.check(sourcecode, '_.py', reporter)
    for msg in reporter.messages:
        # Keep only UndefinedName findings; their single message_arg is the
        # undefined identifier.
        if msg.__class__.__name__.endswith('UndefinedName'):
            assert len(msg.message_args) == 1
            names.add(msg.message_args[0])
    return names
|
f1124db4af4ee2a37ba5949660073b4f8b3e651b
| 46,873
|
import math
def solar_radius_vector_aus(eccentricity_earth_orbit, solar_true_anomaly):
    """Compute the Solar Radius Vector in Astronomical Units (AUs).

    Uses the standard conic-orbit distance formula with the Eccentricity of
    Earth's Orbit and the Solar True Anomaly (in degrees).
    """
    e = eccentricity_earth_orbit
    true_anomaly_rad = math.radians(solar_true_anomaly)
    numerator = 1.000001018 * (1 - e ** 2)
    denominator = 1 + e * math.cos(true_anomaly_rad)
    return numerator / denominator
|
13f2458bea1d878e09cf207629621d812c5e7550
| 46,874
|
def _CppName(desc):
    """Return the fully qualified C++ name of the entity in |desc|."""
    qualified = desc.fqname.replace('.', '::')
    return '::{}'.format(qualified)
|
bd5985321918850bfb1f095c1587028194e9739b
| 46,875
|
def parse_geneseekr_profile(value):
    """
    Parse a GeneSeekr_Profile cell from combinedMetadata.csv into the list
    of recognized markers it contains. e.g. "invA;stn" -> ['invA', 'stn'].

    :param value: String delimited by ';' character containing markers
    :return: List of markers parsed from value (input order preserved)
    """
    known_markers = {'invA', 'stn', 'IGS', 'hlyA', 'inlJ', 'VT1', 'VT2', 'VT2f', 'uidA', 'eae'}
    return [marker for marker in value.split(';') if marker in known_markers]
|
e0ac1772f540b207272875f544f461ac633422b7
| 46,877
|
def _follow_path(json_data, json_path):
    """Get the value in the data pointed to by the dotted path."""
    node = json_data
    # Each dot-separated component is one level of key lookup.
    for key in json_path.split("."):
        node = node[key]
    return node
|
6c453125ba06a560b77d3f89ec4816a8393cd919
| 46,880
|
def epoch_timestamp_to_ms_timestamp(ts: int) -> int:
    """ Converts an epoch timestamps to a milliseconds timestamp
    :param ts: epoch timestamp in seconds
    :return: timestamp in milliseconds
    """
    milliseconds = ts * 1000
    return int(milliseconds)
|
485035d7effc0adfa6bbe8bff22df0b3480ec7f3
| 46,882
|
def fst(pair):
    """Return the first element of pair.
    """
    head = pair[0]
    return head
|
f31a63338a07548691d7354b6f948399cb9cfae5
| 46,885
|
def read_input(datafile, classfile):
    """Load data points and their class ids from two text files.

    Args:
        datafile (str): file with one "x y" pair per line.
        classfile (str): file with one integer class id per line.

    Returns:
        tuple: (points, classes) where points is a list of (index, x, y)
        and classes is a list of (index, class_id).
    """
    points = []
    with open(datafile, 'r') as handle:
        for idx, row in enumerate(handle):
            x_val, y_val = (float(tok) for tok in row.split())
            points.append((idx, x_val, y_val))
    classes = []
    with open(classfile, 'r') as handle:
        for idx, row in enumerate(handle):
            classes.append((idx, int(row)))
    return points, classes
|
49ed27b048d754bddd50fd59b521f6f564dc6a95
| 46,889
|
def map_to_range(x: int, from_low: int, from_high: int, to_low: int, to_high: int) -> int:
    """
    Re-map a number from one range to another (Arduino-style ``map``).

    ``from_low`` maps to ``to_low``, ``from_high`` to ``to_high``, and
    values in-between proportionally. Out-of-range inputs are deliberately
    NOT clamped, since such values are sometimes intended and useful.
    Inspired by https://www.arduino.cc/reference/en/language/functions/math/map/

    :param x: The number to map
    :param from_low: The lower bound of the value's current range
    :param from_high: The upper bound of the value's current range
    :param to_low: The lower bound of the value's target range
    :param to_high: The upper bound of the value's target range
    :return: The re-mapped value, truncated to int
    """
    # Same operation order as the classic formula to keep float behavior
    # identical: ((x - fl) * (th - tl)) / (fh - fl), then shift by to_low.
    scaled = (x - from_low) * (to_high - to_low) / (from_high - from_low)
    return int(scaled + to_low)
|
ab69a069c9544b8a2546f849f8544e81631a6870
| 46,890
|
import re
def has_cyrillic(text):
    """
    Report whether the given text contains Cyrillic characters.

    :param text: The text to validate
    :return: True if at least one Cyrillic character is present

    The character range covers the entire Cyrillic script block including
    the extended alphabet (e.g. ё, Є, ў).
    """
    match = re.search('[\u0400-\u04FF]', text)
    return match is not None
|
9556007206003534e8da7e6aa73a3d3a10962c55
| 46,891
|
def str_aligned(results, header=None):
    """
    Render a 2d sequence as a column-aligned, table-like string.

    >>> results = [["a","b","cz"],["d","ez","f"],[1,2,3]]
    >>> print str_aligned(results)
    a  b cz
    d ez  f
    1  2  3

    Args:
        results: 2d sequence of arbitrary types.
        header: optional header row; when given it is printed first,
            underlined with dashes, and participates in column sizing.

    Returns:
        Aligned string output in a table-like format.
    """
    columns = list(zip(*results))
    widths = []
    for col_idx, column in enumerate(columns):
        width = max(len(str(cell)) for cell in column)
        if header is not None:
            width = max(width, len(str(header[col_idx])))
        widths.append(width)
    fmt = " ".join("%" + str(w) + "s" for w in widths)
    prefix = ""
    if header is not None:
        head = fmt % tuple(header)
        prefix = head + "\n" + "-" * len(head) + "\n"
    body = "\n".join(fmt % tuple(row) for row in results)
    return prefix + body
|
8945813211548884193fd54a2e939eaabbb75f18
| 46,893
|
def make_prev_next(seg_table):
    """
    Expand a two-column segmentation table into four columns by adding
    prev/next group context.

    :param seg_table: Input rows of the form:
        They	They
        don't	do|n't
        know	know
    :return: Rows "prev	next	group	segmentation"; "_" stands in for
        the missing neighbor at either end:
        _	don't	They	They
        They	know	don't	do|n't
        don't	_	know	know
    """
    rows = [tuple(entry.split('\t')) for entry in seg_table]
    out_rows = []
    last = len(rows) - 1
    for idx, (group, segmentation) in enumerate(rows):
        prev_group = rows[idx - 1][0] if idx > 0 else "_"
        next_group = rows[idx + 1][0] if idx < last else "_"
        out_rows.append("\t".join([prev_group, next_group, group, segmentation]))
    return out_rows
|
f867755e079583c09a6aa47a6f8fc80f56066b07
| 46,895
|
def _coerce_to_number(value):
    """Attempt to coerce to a 'number' value.

    The json schema spec is loose here: return the int form when it equals
    the float form, otherwise the float.
    """
    as_int = int(value)
    as_float = float(value)
    return as_int if as_int == as_float else as_float
|
ec04e8116db7571f9d7edc38df785be49d8ed6fa
| 46,898
|
import re
def getTopBillLevel(dirName: str):
    """
    Decide whether a path is the top level of a bill,
    e.g. ../../congress/data/116/bills/hr/hr1

    Args:
        dirName (str): path to match
    Returns:
        [bool]: True if path is a top level (which will contain data.json);
        False otherwise
    """
    parts = dirName.split('/')
    # Short-circuit keeps parts[-3] from being evaluated when the leaf
    # does not look like "<chamber><number>".
    leaf_matches = re.match(r'[a-z]+[0-9]+', parts[-1]) is not None
    return leaf_matches and parts[-3] == 'bills'
|
9d6622ad45fa78b80a3aa3e8e59418fe4e6bfbee
| 46,899
|
import socket
def record_exists(record: str) -> bool:
    """
    Determines whether a DNS record exists (resolves via getaddrinfo).
    """
    try:
        socket.getaddrinfo(record, None)
        return True
    except socket.gaierror:
        return False
|
c9aaa2aaf855aa6e12363acc4da359f8597c9531
| 46,907
|
def convert_to_float(tensor, half=False):
    """
    Cast the tensor to float16 when ``half`` is set, else float32.
    """
    if half:
        return tensor.half()
    return tensor.float()
|
ff8ccfbc8de91c6eb059cc26c963c438a4adb0a5
| 46,913
|
def generate_words(words):
    """Create a list of words with their index appended, e.g. ["a", "b"] -> ["a0", "b1"].

    :param words: sequence of strings
    :return: list where element i is words[i] followed by i
    """
    # enumerate replaces the hand-rolled zip(words, range(len(words))).
    return ["{}{}".format(word, index) for index, word in enumerate(words)]
|
946a0558541230d9902540e865da7a3c9eb797fa
| 46,917
|
from typing import Sequence
from typing import Tuple
def get_range(shape: Sequence[int], itemsize: int, strides: Sequence[int]) -> Tuple[int, int]:
    """
    Given an array shape, item size (in bytes), and a sequence of strides,
    return ``(min_offset, max_offset)`` where ``min_offset`` is the minimum
    byte offset of an array element and ``max_offset`` is the maximum byte
    offset of an array element plus itemsize.

    The element address is a linear function of the indices
    (addr = i1*stride1 + i2*stride2 + ...), so per-axis extrema occur at the
    ends of each index interval; negative strides contribute to the minimum.
    """
    assert len(strides) == len(shape)
    min_offset = 0
    max_offset = 0
    for length, stride in zip(shape, strides):
        # Offset of the last element along this axis relative to the first.
        end = (length - 1) * stride
        if end < 0:
            min_offset += end
        else:
            max_offset += end
    return min_offset, max_offset + itemsize
|
40e9deb664941dec91cdd5bb08af5ff2de487f69
| 46,919
|
def escape(s):
    """Escape content of strings which will break the api using html entity type escaping.

    Newlines become spaces; '&', '(', ')', ',' and '\u00a7' become numeric/named
    HTML entities. '&' is replaced first so already-inserted entities are not
    double-escaped.

    Fix: the original replacements had been entity-decoded into no-ops
    (e.g. ``s.replace("&", "&")``); the entity targets are restored per the
    docstring's stated intent.
    """
    replacements = [
        ("&", "&amp;"),
        ("\r\n", " "),
        ("\n", " "),
        ("\r", " "),
        ("(", "&#40;"),
        (")", "&#41;"),
        (",", "&#44;"),
        ("\u00a7", "&#167;"),
    ]
    for old, new in replacements:
        s = s.replace(old, new)
    return s
|
efbc4820078d5e7f703c2443c531f01aa5b2d983
| 46,925
|
def add_domain(user):
    """
    Helper function that appends @linaro.org to the username. It does nothing if
    an '@' is already present.
    """
    return user if '@' in user else user + "@linaro.org"
|
505561ae6506226e16373b611a4e296351278b68
| 46,935
|
import re
from typing import OrderedDict
def split_tags(tags, separator=','):
    """
    Splits string tag list using comma or another separator char, maintains
    order and removes duplicate items.

    @param tags List of tags separated by attribute separator (default: ,)
    @param separator Separator char.
    @return Ordered list of unique tags ([] for empty input).
    """
    if not tags:
        return []
    # Collapse whitespace-padded separator runs into a single separator.
    tags = re.sub(r'\s*{0}+\s*'.format(re.escape(separator)), separator, tags)
    tags = re.sub(r'[\n\t\r]', '', tags)
    parts = filter(None, tags.strip().split(separator))
    # Return a real list: the original returned an odict_keys view while
    # both the docstring and the empty-input branch promise a list.
    return list(OrderedDict.fromkeys(parts))
|
8a5c850146201a801c74c4c102f141c193a20ea9
| 46,940
|
def is_string(atype):
    """find out if a type is str or not"""
    return str == atype
|
222398429f641e65b04867f7ef9a130d52881fc8
| 46,942
|
from textwrap import dedent
from datetime import datetime
def generate_header(model_name: str, use_async: bool) -> str:
    """Build the import header for a generated Python module.

    Args:
        model_name (str): The name of the system model the header is generated for.
        use_async (bool): True if asynchronous code should be generated, false otherwise.

    Returns:
        str: The Python code for the header (imports from the toptica lasersdk,
        switching to the asyncio client package when ``use_async`` is set).
    """
    async_prefix = 'asyncio.' if use_async else ''
    timestamp = str(datetime.now())
    template = """\
    # Generated from '{model}' on {date}
    from typing import Tuple
    from toptica.lasersdk.{_async}client import UserLevel
    from toptica.lasersdk.{_async}client import Client
    from toptica.lasersdk.{_async}client import DecopBoolean
    from toptica.lasersdk.{_async}client import DecopInteger
    from toptica.lasersdk.{_async}client import DecopReal
    from toptica.lasersdk.{_async}client import DecopString
    from toptica.lasersdk.{_async}client import DecopBinary
    from toptica.lasersdk.{_async}client import MutableDecopBoolean
    from toptica.lasersdk.{_async}client import MutableDecopInteger
    from toptica.lasersdk.{_async}client import MutableDecopReal
    from toptica.lasersdk.{_async}client import MutableDecopString
    from toptica.lasersdk.{_async}client import MutableDecopBinary
    from toptica.lasersdk.{_async}client import Connection
    from toptica.lasersdk.{_async}client import NetworkConnection
    from toptica.lasersdk.{_async}client import SerialConnection
    from toptica.lasersdk.{_async}client import DecopError
    from toptica.lasersdk.{_async}client import DeviceNotFoundError
    """
    return dedent(template).format(model=model_name, date=timestamp, _async=async_prefix)
|
ce1d06f5c41cc6aaf15d3f9ef4bbb474e07327de
| 46,943
|
from typing import Sequence
def default_cleaner(quotes: Sequence[str]) -> Sequence[str]:
    """
    Default cleaner function used by bot instance.

    :param quotes: Sequence of quotes which are to be pre-processed.
    :return:
        processed quotes (falsy entries dropped, the rest stripped).
    """
    cleaned = []
    for quote in quotes:
        if quote:
            cleaned.append(quote.strip())
    return cleaned
|
354b912e9074342c704a176ecec2d8661d209eb4
| 46,944
|
def group_days_by(days, criterion):
    """
    Group a vector of days by year, month, or (meteorological) season.

    Parameters
    ----------
    days: pd.DatetimeIndex
    criterion: str
        Either "year", "month" or "season" (meteorological seasons, not
        astronomical ones).

    Returns
    ----------
    list
        List of (label, pd.DatetimeIndex) pairs, one per group. Note that
        for "month"/"season" the groups may mix days of different years.

    Raises
    ----------
    ValueError
        When `criterion` is neither "year" nor "month" nor "season".
    """
    days = days.sort_values()
    if criterion == "year":
        return [(str(y), days[days.year == y]) for y in days.year.drop_duplicates()]
    if criterion == "month":
        month_names = ["January", "February", "March", "April", "May", "June",
                       "July", "August", "September", "October", "November", "December"]
        return [(month_names[m - 1], days[days.month == m])
                for m in days.month.drop_duplicates()]
    if criterion == "season":
        season_names = ["Winter", "Spring", "Summer", "Fall"]
        def season_of(month):
            # Dec/Jan/Feb -> Winter, Mar/Apr/May -> Spring, etc.
            return season_names[month % 12 // 3]
        seasons = days.month.map(season_of).drop_duplicates()
        return [(s, days[list(days.month.map(season_of) == s)]) for s in seasons]
    raise ValueError("criterion must be either \"year\" or \"month\" or \"season\" ")
|
46e71c3ecdd4dc3b62ddae02a323c214258bc30e
| 46,945
|
def urlpath2(url: bytes) -> bytes:
    """ Get url's path(strip params) """
    # Everything before the first b'?' (the whole url when absent).
    return url.partition(b'?')[0]
|
b5464b3617cbd6303f4438c92fd8f5271f6906e1
| 46,949
|
def leading_zeros(val, n):
    """ Zero-pad the decimal form of an integer on the left to width n. """
    # rjust (not zfill) keeps the original behavior for negative values:
    # zeros go before the sign, exactly like the manual concatenation did.
    return str(val).rjust(n, '0')
|
eb4d55caf41f71c4d953398c17c8980070892a0c
| 46,953
|
def get_vaf(mutations):
    """
    Extract the VAF from a mutation list of the form
        <TRANSCRIPT_1>_X123Y#0.56,<TRANSCRIPT_2>_X456Y#0.91,etc -> for SNVS
        <5'TRANSCRIPT_1>-<3'TRANSCRIPT_1>_FUSION_Junction:X-Spanning:Y,\
        <5'TRANSCRIPT_2>-<3'TRANSCRIPT_2>_FUSION_Junction:A-Spanning:B,etc -> for FUSIONS
    returning 0.XX for an SNV (the minimum across all entries) or 0.0 for a
    FUSION.

    :param str mutations: The mutations covered by the input IAR
    :return: The VAF
    :rtype: float
    >>> get_vaf('ENST1231.1_S123K#0.56')
    0.56
    >>> get_vaf('ENST1231.1_S123K#0.56,ENST1211.1_S143K#0.61')
    0.56
    >>> get_vaf('ENST1231.1_S123K#0.43_K124S#0.61,ENST1211.1_S143K#0.43_K144S#0.61')
    0.43
    >>> get_vaf('ENST1231.1-ENST4564.2_FUSION_Junction:5-Spanning:10')
    0.0
    """
    lowest = 1.0
    for entry in mutations.split(','):
        if 'FUSION' in entry:
            return 0.0
        # Each "_"-separated part after the transcript carries "#<vaf>".
        for part in entry.split('_')[1:]:
            lowest = min(lowest, float(part.split('#')[1]))
    return lowest
|
a736f3b40c8de59508f66a9db35833344ef873ca
| 46,956
|
def ishexdigit(c):
    """
    Report whether a single character is a hexadecimal digit (0-9, a-f, A-F).

    >>> ishexdigit('0')
    True
    >>> ishexdigit('9')
    True
    >>> ishexdigit('/')
    False
    >>> ishexdigit(':')
    False
    >>> ishexdigit('a')
    True
    >>> ishexdigit('f')
    True
    >>> ishexdigit('g')
    False
    >>> ishexdigit('A')
    True
    >>> ishexdigit('F')
    True
    >>> ishexdigit('G')
    False
    """
    if c.isdigit():
        return True
    # ord() keeps the single-character contract (TypeError on longer input).
    code = ord(c.lower())
    return ord('a') <= code <= ord('f')
|
b450b243bc40ea4f5c84ddfdeddcd8022839def3
| 46,959
|
import time
def get_day_num(dayname) -> int:
    """Converts dayname to 0 indexed number in week e.g Sunday -> 6"""
    parsed = time.strptime(dayname, "%A")
    return parsed.tm_wday
|
29e2b00066b6a0ca207ae3cced3f7efb7ce2c190
| 46,965
|
def clean(s):
    """Clean up a string: newlines become spaces, one pass of double-space
    collapsing is applied, and the ends are stripped. None passes through."""
    if s is None:
        return None
    # Single .replace pass: "a   b" becomes "a  b", not "a b" (preserved).
    return s.replace("\n", " ").replace("  ", " ").strip()
|
e706b68c7ed5b78ca54a3fd94af5de35ee142d43
| 46,968
|
def leapdays(y1, y2):
    """
    Return the number of leap years in the range [y1, y2).

    Assumes y1 <= y2 and no funny (non-leap century) years, i.e. a pure
    every-4-years rule.

    Fixes: uses floor division so an int is returned on Python 3 (the
    Python-2-style ``/`` yielded floats), and corrects the docstring — the
    formula counts the half-open range [y1, y2), not [y1, y2].
    """
    return (y2 + 3) // 4 - (y1 + 3) // 4
|
c7e7c3b4650ef1236fc70ba94240f7119d9397c0
| 46,973
|
def truncate_column(data_column, truncation_point):
    """
    Keep only the trailing ``truncation_point`` entries of a 1D array.

    :param data_column: 1D np.array
    :param truncation_point: int of truncation index to test
    :return: np.array view of the last ``truncation_point`` elements
    """
    assert (len(data_column.shape) == 1)  # must be a 1D numpy array
    start = -1 * truncation_point
    return data_column[start:]
|
bc444517953228d003fc18851f5c22b2b70f6e55
| 46,984
|
import math
def asinh(x):
    """Get asinh(x)"""
    result = math.asinh(x)
    return result
|
e0086dd83ca8a4dd005deed4ef0e58d11c306d0a
| 46,990
|
import io
def export_python(palette):
    """
    Serialize a named-color palette as a Python tuple literal.

    Arguments:
        palette (dict): Dictionnary of named colors (as dumped in JSON from
            ``colors`` command); values are ``(name, from_color)`` pairs.

    Returns:
        string: Python tuple source text, sorted by value.
    """
    buffer = io.StringIO()
    buffer.write(u'colors = (\n')
    for code, (name, _from_color) in sorted(palette.items(), key=lambda item: item[1]):
        buffer.write(" ('{}', '{}'),\n".format(name, code))
    buffer.write(u')\n\n')
    try:
        return buffer.getvalue()
    finally:
        buffer.close()
|
b32b97412612d36096aab088d6e3671e5059a32b
| 46,994
|
import json
def getStatus(response):
    """
    Extract the "status" field from an API response.

    :param response: Response object whose ``text`` is JSON
    :return: the status string, or None (with a message printed) when the
        "status" key is absent
    """
    payload = json.loads(response.text)
    try:
        return payload["status"]
    except KeyError:
        print('Retrieval unsuccessful.')
        return None
|
a72a056ef574fdf0fb8f5744073d351836c6db07
| 46,997
|
import itertools
def flatten_metas(meta_iterables):
    """
    Compose/flatten/project a collection of meta layers into a single
    iterable, where an earlier layer wins for a given name.

    For example:
         A: pkg1, pkg2a
         B: pkg2b, pkg3
         Flattened([A, B]) => [pkg1, pkg2a, pkg3]
         Flattened([B, A]) => [pkg1, pkg2b, pkg3]
    The result is not ordered in any particular way.
    """
    seen = {}
    for layer in meta_iterables:
        # Collect this layer's additions first so that duplicates *within*
        # a layer are all kept, while names from earlier layers shadow it.
        additions = {}
        for meta in layer:
            if meta.name() not in seen:
                additions.setdefault(meta.name(), []).append(meta)
        for name, metas in additions.items():
            seen.setdefault(name, []).extend(metas)
    return itertools.chain.from_iterable(seen.values())
|
4125a7bea44989140909e91392cd3adb740e26f1
| 47,004
|
def plural(singular, plural, seq):
    """Selects a singular or plural word based on the length of a sequence.

    Parameters
    ----------
    singlular : str
        The string to use when ``len(seq) == 1``.
    plural : str
        The string to use when ``len(seq) != 1``.
    seq : sequence
        The sequence to check the length of.

    Returns
    -------
    maybe_plural : str
        Either ``singlular`` or ``plural``.
    """
    return singular if len(seq) == 1 else plural
|
352d8fca4b2b7fb8139b11296defd65796e0e712
| 47,005
|
def static_feature_array(df_all, total_timesteps, seq_cols, grain1_name, grain2_name):
    """Build an array encoding the static features of every grain.

    Args:
        df_all (pd.DataFrame): Time series data of all the grains for multi-granular data
        total_timesteps (int): Total number of training samples for modeling
        seq_cols (list[str]): A list of names of the static feature columns, e.g. store ID
        grain1_name (str): Name of the 1st column indicating the time series graunularity
        grain2_name (str): Name of the 2nd column indicating the time series graunularity

    Return:
        fea_array (np.array): An array of static features of all the grains, e.g. all the
            combinations of stores and brands in retail sale forecasting
    """
    # Keep the first `total_timesteps` rows of every (grain1, grain2) group.
    head_per_grain = lambda grp: grp.iloc[:total_timesteps, :]
    fea_df = df_all.groupby([grain1_name, grain2_name]).apply(head_per_grain).reset_index(drop=True)
    return fea_df[seq_cols].values
|
58f4664fa318f1026a1c3e48616431f51b224704
| 47,007
|
import re
def check_for_title(line):
    """
    Check whether the current line reveals the title of a new entry.

    :param srtr line: the line to check
    :return: tuple (the entry title, the entry type) or (None, None)
    """
    pattern = re.compile(
        '^(?P<title>.+) \\((?P<type>EPHEMERA OBJECT|SPELL|INCANTATION|OBJECT OF POWER|CONJURATION|INVOCATION|ENCHANTMENT|RITUAL|CHARACTER SECRETS|HOUSE SECRETS|FORTE ABILITY)\\)$')
    match = pattern.match(line)
    if match is None:
        return None, None
    return match.group('title'), match.group('type')
|
73d7b73d29e51b87a37810d434582c975c6d0c07
| 47,015
|
def rotate_right(arr):
    """
    Return a new 2D list that is ``arr`` rotated 90 degrees clockwise
    about its center; the input is not modified.

    :param arr: A 2D-list of arbitrary dimensions.
    :return: The rotated copy (an n x m input yields an m x n output).
    """
    rows = len(arr)
    cols = len(arr[0])
    # Row i of the result is column i of arr read bottom-to-top.
    return [[arr[rows - j - 1][i] for j in range(rows)] for i in range(cols)]
|
2d393acbb8accaae90dd562b5360199698a032a9
| 47,017
|
def get_dl_dir(cd, t, chan):
    """Get the cached download destination directory for a file.

    Builds the destination directory path for a GOES 16 ABI L1B channel
    download; the directory is not checked for existence.

    Args:
        cd (pathlib.Path): Root path for cache directory
        t (pandas.Timestamp): Timestamp (down to the minute) to which the file
            corresponds
        chan (int): ABI channel number

    Returns:
        pathlib.Path object pointing to directory that should contain files
    """
    hour_dir = t.strftime("%Y/%m/%d/%H")
    return cd / "abi" / hour_dir / f"C{chan:>01d}"
|
33a315189feffe33a3c4ecc9d1851164b5b70e1c
| 47,019
|
def can_receive_blood_from(blood_group):
    """Return allowed blood groups given a specific blood group."""
    compatibility = {
        'A+': ['A+', 'A-', 'O+', 'O-'],
        'O+': ['O+', 'O-'],
        'B+': ['B+', 'B-', 'O+', 'O-'],
        'AB+': ['A+', 'O+', 'B+', 'AB+', 'A-', 'O-', 'B-', 'AB-'],
        'A-': ['O-', 'A-'],
        'O-': ['O-'],
        'B-': ['B-', 'O-'],
        'AB-': ['AB-', 'A-', 'B-', 'O-']
    }
    return compatibility[blood_group]
|
34c956f829a14c9b1ebe6f664c44d554451b11e0
| 47,021
|
def replace_ref_nan(row):
    """Fall back to the reference id when "combined" is missing.

    A float "combined" value signals NaN (references without a PubMed id);
    in that case the row's "reference_id" is returned instead.
    """
    combined = row["combined"]
    if isinstance(combined, float):
        return row["reference_id"]
    return combined
|
76f5effe6613178055e38721f8a09d459ab0a019
| 47,023
|
def list4ToBitList32(lst):
    """Convert a 4-byte list into a 32-bit list (big-endian, zero-padded)."""
    bits = []
    for byte in lst:
        # Most significant bit of each byte first.
        for shift in reversed(range(8)):
            bits.append((byte >> shift) & 1)
    return [0] * (32 - len(bits)) + bits
|
ac95e68927b703292913229f7a1d0316941f570e
| 47,024
|
def IpBinaryToDecimal(bin_ip):
    """
    :param bin_ip: IPv4 in binary notation, e.g. 00001010000000000000000000000001
    :return: IPv4 in decimal notation, e.g. 167772161
    """
    decimal_value = int(bin_ip, base=2)
    return decimal_value
|
20366a1667fd1f9c1f17e7c13c2292bd4a7e74b0
| 47,027
|
def _rescale_0_1(batch):
    """
    Rescale every image of the batch, per channel, to [0, 1] (in place).
    """
    for image in batch:
        for channel in image:
            # In-place ops on the iterated views mutate the batch itself.
            low = channel.min()
            span = channel.max() - low
            channel.sub_(low).div_(span)
    return batch
|
65e70cb6b3779f9ec776568a65fee13f56f0ca21
| 47,031
|
import re
def __safe_file_name(name: str) -> str:
    """
    Strip characters that are forbidden in OS file names from a string.

    :param name: String to be converted
    :return: Safe string
    """
    forbidden = re.compile(r'<|>|/|:|\"|\\|\||\?|\*')
    return forbidden.sub('', name)
|
6b52ededba763fa48c3e8c1d3f64c1487269e457
| 47,034
|
from pathlib import Path
def get_modality_from_name(sensor_path: Path):
    """Gets the modality of a sensor from its name.

    Args:
        sensor_path (Path): the Path of the sensor. Ex: CAM_FRONT_RIGHT, LIDAR_TOP, etc.

    Returns:
        str: the sensor modality ("camera", "radar", "lidar" or "unknown")
    """
    name = str(sensor_path)
    # Checked in the same order as the original if/elif chain.
    for token, modality in (("CAM", "camera"), ("RADAR", "radar"), ("LIDAR", "lidar")):
        if token in name:
            return modality
    return "unknown"
|
4d37ea2bf096032eb824f7c951099fc7caea09fd
| 47,036
|
def constant(value=0):
    """Build a flat initial condition.

    Rather boring — you probably want a source or some interesting boundary
    conditions for this to be fun!

    Args:
        value (float): The value the returned function takes everywhere. Defaults to 0.
    Returns:
        callable: a one-argument function that always returns ``value``.
    """
    def _flat(x):
        return value
    return _flat
|
9286cfe97bdf0a19831fab3fc7d69268e6660674
| 47,037
|
def _csstr_to_list(csstr: str) -> list[str]:
    """
    Split a comma-separated string into a list of whitespace-trimmed parts.
    """
    return list(map(str.strip, csstr.split(',')))
|
88b2197a6c86839426daf35bbddb212519ef1659
| 47,041
|
import requests
def get_header_contents() -> str:
    """
    Fetch the js_ReaScriptAPI header from GitHub and return it as raw text.

    Returns
    -------
    str
    """
    url = (
        'https://raw.githubusercontent.com/'
        'juliansader/ReaExtensions/master/js_ReaScriptAPI/'
        'Source%20code/js_ReaScriptAPI_def.h'
    )
    response = requests.get(url)
    return response.content.decode()
|
b2272dfbc131a422ec2aeb6e57d17a0ad6b41ae2
| 47,044
|
def actions(board):
    """
    Return the set of all possible actions (i, j) available on the board.

    An action is any cell whose value is None (an empty cell).

    :param board: 2D list representing the game board
    :return: set of (row, col) tuples for every empty cell
    """
    moves = set()
    for row_idx, row in enumerate(board):
        for col_idx, cell in enumerate(row):
            # `is None`, not `== None`: identity is the correct test for the
            # None sentinel (and avoids surprises from custom __eq__).
            if cell is None:
                moves.add((row_idx, col_idx))
    return moves
|
639a4c6fbf701b7d3afc09198911ab3a1a587460
| 47,046
|
import shutil
def executable_path(*tries):
    """Return the filesystem path of the first candidate found on PATH.

    Candidates are probed in order with :func:`shutil.which`; if none
    resolve, an Exception is raised naming the first candidate.
    """
    for candidate in tries:
        found = shutil.which(candidate)
        if found:
            return found
    raise Exception(f"Unable to find path to {tries[0]}")
|
825bb53c9b44678e43cdd2b9d68d905932027737
| 47,049
|
def sdp_term_p(f):
    """Return True if `f` has a single term or is zero (i.e. is empty)."""
    return len(f) < 2
|
1fbefa0f751d0583f4c20b81289df2b4ca4dfca6
| 47,063
|
def coding_problem_46(str):
    """
    Given a string, find the longest palindromic contiguous substring. If there are more than one with the maximum
    length, return any one. Examples:

    >>> coding_problem_46("aabcdcb")
    'bcdcb'
    >>> coding_problem_46("bananas")
    'anana'
    """
    # Parameter keeps its (builtin-shadowing) name for API compatibility.
    text = str
    # Try every window size from longest to shortest; the first palindrome
    # found is therefore a longest one.
    for size in range(len(text), 0, -1):
        for start in range(len(text) - size + 1):
            candidate = text[start:start + size]
            if candidate == candidate[::-1]:
                return candidate
|
af4033333274b23961edc048fdba034b29b801a7
| 47,066
|
def tostring(s):
    """
    Convert a value to a string wrapped in single quotes.
    """
    # %s applies str() to the value, matching explicit str(s) concatenation.
    return "'%s'" % (s,)
|
90c1cd55ecbf0e5562810399ab737f7b53b13305
| 47,067
|
def wifi_code(ssid, hidden, authentication_type, password=None):
    """Generate a WiFi QR-code payload for the given parameters.

    :ssid str: SSID
    :hidden bool: Specify if the network is hidden
    :authentication_type str: one of 'WPA', 'WEP' or 'nopass'
    :password Optional[str]: Password; required for WPA/WEP, must be None for nopass
    :return: The wifi code for the given parameters
    :rtype: str
    """
    hidden_flag = 'true' if hidden else 'false'

    if authentication_type in ('WPA', 'WEP'):
        if password is None:
            raise TypeError('For WPA and WEP, password should not be None.')
        return f'WIFI:T:{authentication_type};S:{ssid};P:{password};H:{hidden_flag};;'

    if authentication_type == 'nopass':
        if password is not None:
            raise TypeError('For nopass, password should be None.')
        return f'WIFI:T:nopass;S:{ssid};H:{hidden_flag};;'

    raise ValueError('Unknown authentication_type: {!r}'.format(authentication_type))
|
a479a2ea8c9a3aed40556e0c94f6dd5c8d67eca7
| 47,070
|
def calculate_polygon_area(corners):
    """Calculate the area of a 2D polygon from its ordered corners.

    Args:
        corners (Nx2 array): xy-coordinates of N corner points (assumed ordered)
    Returns:
        float: polygon area
    """
    # Shoelace formula: area = |sum over edges (i, j) of x_i*y_j - x_j*y_i| / 2
    n = corners.shape[0]
    total = 0.0
    for i in range(n):
        j = (i + 1) % n  # wrap around to close the polygon
        total += corners[i, 0] * corners[j, 1] - corners[j, 0] * corners[i, 1]
    return abs(total / 2)
|
87ed4e4c0c8e6655e70b6addf0588c37454f4968
| 47,071
|
def get_global_stats(df):
    """Compute metadata statistics for a DataFrame as a 2D list of
    [description, value] pairs."""
    n_rows, n_cols = df.shape
    n_dupes = len(df) - len(df.drop_duplicates())
    # NOTE(review): despite the label, this counts NaN *cells*, not rows —
    # kept identical to preserve behaviour.
    n_nan = df.isnull().values.ravel().sum()
    return [
        ["Number of rows", n_rows],
        ["Number of columns", n_cols],
        ["Number of duplicates", n_dupes],
        ["Number of rows with NaN values", n_nan],
        ["Header", [col for col in df.columns]],
        ["Data Types", [dtyp.name for dtyp in df.dtypes.values]],
    ]
|
bf08c620e5937d73f1f0ce3a9ce251253919a516
| 47,075
|
def factorial_zeroes(n):
    """Count the trailing zeroes of n!.

    A trailing zero is created with a multiple of 10, i.e. a pair of a
    5-multiple and a 2-multiple. There are always more factors of 2 than 5,
    so counting factors of 5 suffices. Powers of 5 contribute extra factors
    (25 = 5*5 contributes two), which is why we sum n // 5**k for all k.

    :param n: non-negative integer
    :return: number of trailing zeroes of n!
    :raises ValueError: if n is negative
    """
    if n < 0:
        raise ValueError("factorial is undefined for negative numbers")
    count = 0
    power = 5
    # Use floor division (//) rather than int(n / power): float division
    # loses precision for n beyond 2**53 and would give wrong counts.
    while n // power > 0:
        count += n // power
        power *= 5
    return count
|
f60b8be459cba3af795d360a51754fbc43959a63
| 47,076
|
def rootsearch(f, a, b, dx):
    """ x1,x2 = rootsearch(f,a,b,dx).
    Searches the interval (a,b) in increments dx for
    the bounds (x1,x2) of the smallest root of f(x).
    Returns x1 = x2 = None if no roots were detected.
    """
    lo, f_lo = a, f(a)
    hi = a + dx
    f_hi = f(hi)
    # Slide the [lo, hi] window right until f changes sign (product <= 0)
    # or the window's left edge passes b.
    while f_lo * f_hi > 0.0:
        if lo >= b:
            return None, None
        lo, f_lo = hi, f_hi
        hi += dx
        f_hi = f(hi)
    return lo, hi
|
f6bb3ed2183850b2add575953ad6acee298f1a1f
| 47,086
|
def eq_55_activation_of_heat_detector_device(
        u: float,
        RTI: float,
        Delta_T_g: float,
        Delta_T_e: float,
        C: float
) -> float:
    """Equation 55 in Section 8.9 PD 7974-1:2019: heat detector temperature rise rate.

    PARAMETERS:
    :param u: m/s, velocity of gases in proximity to heat sensing element.
    :param RTI: (m s)^0.5, response time index of heat sensing element.
    :param Delta_T_g: deg.C, gas temperature above ambient (T_g - T_0).
    :param Delta_T_e: deg.C, change in temperature of heat sensing element.
    :param C: (m/s)^0.5, conduction factor.
    :return: dTe_dt, rate of change of the sensing-element temperature.

    INDICATIVE NOTES
    Tsui and Spearpoint [37] quote C factors in the range of 0.33 – 0.65 (m/s)^0.5 depending upon the response type.
    RTI values are given in the literature, e.g. [38]. The rated temperature, permitting calculation of ΔTe, can be
    found in the relevant manufacturer’s specifications.

    REFERENCES
    [37] TSUI A. and SPEARPOINT M. J. Variability of sprinkler response time index and conduction factor using the
    plunge test. Building Services Engineering Research and Technology, 31 (2) pp. 163–176, 2010.
    DOI:10.1177/0143624410363064.
    [36] HESKESTAD G. and BILL R. Quantification of thermal responsiveness of automatic sprinklers including
    conduction effects. Fire Safety Journal, 14 (1-2) pp. 113–125, 1988.
    """
    sqrt_u = u ** 0.5
    response_factor = sqrt_u / RTI
    # Effective driving temperature: gas rise minus element rise, the latter
    # amplified by the conduction term C / sqrt(u).
    driving_temperature = Delta_T_g - Delta_T_e * (1 + C / sqrt_u)
    return response_factor * driving_temperature
|
480e33a54b769dce6d2167291c6058a8750418d0
| 47,087
|
def unique(hasDupes):
    """
    Return a sorted list of the distinct elements of a list.

    @param hasDupes : a list with duplicate items
    @return: list with duplicates removed, in ascending order
    """
    return sorted(set(hasDupes))
|
88d3dc491a3519a46b95138f514d57319ccc3f3b
| 47,091
|
def get_matrix_stride(mat):
    """Return the distance, in elements, between consecutive rows of a C matrix."""
    elem_size = mat.itemsize
    row_stride = mat.strides[0] // elem_size
    # The matrix must be row-major with contiguous elements within a row.
    assert mat.strides == (row_stride * elem_size, elem_size)
    return row_stride
|
eec8e2f79b6ff9df449079298829413cdc3d248f
| 47,096
|
from pathlib import Path
def pathwalk(dir: Path) -> list[Path]:
    """Recursively collect all file paths in a directory and its subdirectories.

    Args:
        `dir` (`Path`): The starting, top-level directory to walk.
    Returns:
        (list): Path objects for every file found under `dir`.
    """
    files: list[Path] = []
    subdirs: list[Path] = []
    # Classify the directory's entries in one pass.
    for entry in dir.iterdir():
        if entry.is_dir():
            subdirs.append(entry)
        elif entry.is_file():
            files.append(entry)
    # Recurse into each subdirectory, appending its files.
    for folder in subdirs:
        files.extend(pathwalk(folder))
    return files
|
e0cefd65166fb28b9c7c39acaf03978e9608c809
| 47,102
|
def kataSlugToKataClass(kataSlug):
    """Transform a kebab-case kata slug into a PascalCase Java class name."""
    pascal = kataSlug.title().replace('-', '')
    # Java identifiers cannot start with a digit, so prefix those with "Kata".
    return f"Kata{pascal}" if pascal[0].isdigit() else pascal
|
92698e9002f42dd6f9abea1f6970b530575e54ef
| 47,104
|
def least_significant_set_bit(n):
    """
    Return the value of the least-significant set bit in integer 'n'
    (0 when n is 0).
    """
    # Two's-complement trick: n & -n isolates exactly the lowest set bit.
    return n & -n
|
7813a92e53be724c14cbd68dc0cffd49490dc05e
| 47,105
|
def natural_key_fixed_names_order(names_order):
    """Convert symbol to natural key but with custom ordering of names.

    Consider a QAOA ansatz in which parameters are naturally ordered as:
    gamma_0 < beta_0 < gamma_1 < beta_1 < ...
    The above is an example of natural_key_fixed_names_order in which name 'gamma'
    precedes name 'beta'.

    Note that unlike natural_key and natural_key_revlex, this function returns
    a key, i.e. it is a key factory.
    """
    symbol_weights = {name: i for i, name in enumerate(names_order)}

    def _key(symbol):
        # rsplit on the LAST underscore so names that themselves contain
        # underscores (e.g. "my_param_3") are parsed correctly; plain
        # "name_index" symbols behave exactly as before.
        name, index = symbol.name.rsplit("_", 1)
        return int(index), symbol_weights[name]

    return _key
|
74e20a7e19305716d501da8ced49ec28bf85eac1
| 47,113
|
def find_set(x):
    """Return the representative (root) of the set containing *x*.

    Applies full path compression: every node on the walk from *x* to the
    root ends up pointing directly at the root, exactly as the recursive
    formulation does.
    """
    # First pass: locate the root.
    root = x
    while root.parent is not None:
        root = root.parent
    # Second pass: repoint every traversed node at the root.
    node = x
    while node is not root:
        node.parent, node = root, node.parent
    return root
|
47b332938e6a648f0d353d027979b63b0c1e8826
| 47,116
|
def not_submitted_count(drafts):
    """
    Get count of not-submitted services.

    Defaults to 0 if there are no not-submitted services.

    :param drafts: mapping of draft status -> count
    :return: the 'not-submitted' count (0 when the key is absent)
    """
    try:
        return drafts['not-submitted']
    except KeyError:
        return 0
|
dda309998234be6c560cf3b4ecafda41c43d20e3
| 47,119
|
def unpack_args(args, num):
    """
    Extract exactly *num* arguments from a tuple, padding with ``None`` when
    the tuple is too short and truncating when it is too long.

    Args:
        args (Tuple[object]): The tuple of arguments
        num (int): The number of elements desired.

    Returns:
        Tuple[object]: A tuple of length ``num``.
    """
    # A negative repeat count yields an empty tuple, so no pad when len >= num.
    pad = (None,) * (num - len(args))
    return (args + pad)[:num]
|
9f0a2ab601a7974f7ae5ba5dc008b04ee07f0678
| 47,121
|
from typing import Iterable
def _variable_or_iterable_to_set(x):
    """
    Normalize ``x`` into a frozenset of strings.

    If x is None, returns the empty set; a single string becomes a
    one-element set; any iterable of strings is converted directly.

    Arguments
    ---------
    x: None, str or Iterable[str]

    Returns
    -------
    x: frozenset[str]
    """
    if x is None:
        return frozenset()
    if isinstance(x, str):
        return frozenset((x,))
    if isinstance(x, Iterable) and all(isinstance(item, str) for item in x):
        return frozenset(x)
    raise ValueError(
        "{} is expected to be either a string or an iterable of strings"
        .format(x))
|
36ab763a3a4341c49fefb6cb3b10d88bef040fa8
| 47,123
|
import re
def workdir_from_dockerfile(dockerfile):
    """Parse the first WORKDIR directive from a Dockerfile.

    Returns the workdir with '$' escaped as '$$' (since '$' introduces
    substitutions in Container Builder builds), or None when no WORKDIR
    line is present.
    """
    pattern = re.compile(r'\s*WORKDIR\s*([^\s]+)')
    with open(dockerfile) as handle:
        for line in handle:
            match = pattern.match(line)
            if match:
                return match.group(1).replace('$', '$$')
    return None
|
33a927626a023ba988534afe5b3f8885e18db471
| 47,126
|
import pickle
def load_database(filename):
    """Deserialize and return the pickled database stored in *filename*."""
    with open(filename, 'rb') as handle:
        data = pickle.load(handle)
    return data
|
ee9f55e626585624f75eb17663b7393628941ece
| 47,131
|
import logging
def get_video_bitrate(dict_inf, stream_video):
    """Bitrate search. It may be in one of 2 possible places.

    Prefers the per-stream 'bit_rate'; falls back to the container-level
    value in dict_inf['format'].

    Args:
        dict_inf (dict): video metadata
        stream_video (dict): video stream data
    Raises:
        NameError: If the bitrate is not found in either place
    Returns:
        int: video bitrate
    """
    try:
        video_bitrate = stream_video['bit_rate']
    except KeyError:
        # Narrowed from `except Exception`: only a missing key should
        # trigger the fallback; other errors must propagate.
        try:
            video_bitrate = dict_inf['format']['bit_rate']
        except KeyError as e:
            print(f'{e}\n{dict_inf}')
            file = dict_inf['format']['filename']
            msg_err = "File bellow don't have 'bit_rate' in " + \
                f'detail file:\n{file}'
            logging.error(msg_err)
            raise NameError(msg_err)
    return int(video_bitrate)
|
cc7e2423c3288c9f392776cd5b6477973492874c
| 47,134
|
def logic(number: int) -> int:
    """Perform the even-odd transformation on a single integer.

    Each even-odd transformation:
    * Adds two (+2) to each odd integer.
    * Subtracts two (-2) from each even integer.
    """
    if number % 2:
        return number + 2
    return number - 2
|
64d10c9a605f09a1ecaaec695320a98094a63bc3
| 47,142
|
def get_label(timestamp, commercials):
    """Return 'ad' if *timestamp* falls inside any commercial interval
    (inclusive bounds), otherwise the clip-level class label."""
    in_commercial = any(
        com['start'] <= timestamp <= com['end']
        for com in commercials['commercials']
    )
    return 'ad' if in_commercial else commercials['class']
|
72d96f35c63d5f6e8859b2c5bc3a0b8dab37fc39
| 47,147
|
import io
def config_to_string(config):
    """
    Serialize a ConfigParser object to a string in INI format.

    Args:
        config (obj): ConfigParser object

    Returns:
        str: Config in one string
    """
    buffer = io.StringIO()
    config.write(buffer, space_around_delimiters=False)
    contents = buffer.getvalue()
    buffer.close()
    return contents
|
264159206b7367ed584c25ec36c2232a1a558880
| 47,149
|
def count_fn(true_fn_flags, ground_truth_classes, class_code):
    """
    Count how many true FN are left in true_fn_flags for the class given by class_code.

    Args
        true_fn_flags: flags left at 1 where a ground truth was never detected;
            parallel to ground_truth_classes
        ground_truth_classes: classes of the corresponding ground truths
        class_code: code of the class of interest

    Returns:
        number of 1s in true_fn_flags whose ground truth matches class_code
    """
    return sum(
        1
        for flag, cls in zip(true_fn_flags, ground_truth_classes)
        if flag == 1 and cls == class_code
    )
|
bb68474afe5d60fd30db59a9dc8e640f5ab3f00e
| 47,150
|
def read_IMDB(file_name):
    """
    Read the IMDB dataset file.

    :param file_name: IMDB dataset file name as a string
    :return: list of (sentence, target) pairs, where target is 0 for
        'negative' and 1 for 'positive'
    """
    label_map = {'negative': 0, 'positive': 1}
    samples = []
    with open(file_name, 'r', encoding="latin-1") as text_file:
        text_file.readline()  # skip the header row
        for line in text_file:
            # The sentiment is the text after the LAST comma; the review may
            # itself contain commas.
            sentence, target = line.rstrip().rsplit(',', 1)
            # Strip the surrounding quotes from the review text.
            samples.append((sentence[1:-1], label_map[target]))
    return samples
|
72638f65ff1d6f220f299f996ed59b5d24bd6c16
| 47,151
|
def toHexByte(n):
    """
    Convert a numeric value to a two-character uppercase hex byte.

    Arguments:
    n - the value to convert (max 255)

    Return:
    A string, representing the value in hex (1 byte)
    """
    return format(n, "02X")
|
e6a7ceab39731f38d1c3fdaa4cba847d1ad41929
| 47,154
|
def conj(coll, to_add):
    """
    Similar to clojure's conj: return a new collection with to_add 'added'.
    See https://clojuredocs.org/clojure.core/conj for more reading

    conj(None, item) returns item. Where the 'addition' happens depends on
    the concrete type of coll:

      [] - appends              [1, 2, 3, 4] == conj([1, 2], [3, 4])
      () - prepends in reverse  (4, 3, 1, 2) == conj((1, 2), (3, 4))
      {} - merges, existing keys win  {'a': 'A', 'b': 'B'} == conj({'a':'A'}, {'b':'B'})

    Parameters:
        coll: collection to add items to
        to_add: items to be added to coll

    Return:
        object of the same type as coll but with to_add items added
    """
    if coll is None:
        return to_add
    if isinstance(coll, list):
        return coll + to_add
    if isinstance(coll, tuple):
        # Inserting each item at index 0 in order == prepending them reversed.
        return tuple(reversed(to_add)) + tuple(coll)
    if isinstance(coll, dict):
        merged = dict(coll)
        for key in to_add:
            merged.setdefault(key, to_add[key])
        return merged
    # Unsupported types are returned unchanged, matching the original.
    return coll
|
62cca3766c3a4467db73372209d6f173647ed3af
| 47,155
|
import collections
def _format_expand_payload(payload, new_key, must_exist=None):
    """Format expand payloads into a dict from dcid to sorted lists of values.

    Args:
        payload: iterable of dict entries; only entries carrying both 'dcid'
            and new_key contribute values.
        new_key: key whose values are collected per dcid.
        must_exist: optional iterable of dcids that must appear in the result
            even with no values (they map to empty lists).
    """
    # Default changed from a mutable `[]` literal (shared-default
    # anti-pattern) to None; behaviour for callers is unchanged.
    if must_exist is None:
        must_exist = []
    # Create the results dictionary from payload.
    results = collections.defaultdict(set)
    for entry in payload:
        if 'dcid' in entry and new_key in entry:
            results[entry['dcid']].add(entry[new_key])
    # Ensure all dcids in must_exist have some entry in results.
    for dcid in must_exist:
        results[dcid]
    return {k: sorted(v) for k, v in results.items()}
|
ae9af5400f0bf38954c99afca62914e30f42ec32
| 47,158
|
def to_float_list(a):
    """
    Convert every element of an iterable to ``float``.

    :param a: iterable of values convertible to float
    :return: list of floats
    """
    return list(map(float, a))
|
8576c649802c9a88694097ceefb55c86ab845266
| 47,161
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.