content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def build_cores(cores, solr_cores):
    """Return the list of Solr cores to search for the given request.

    An empty string selects every available core; the literal string
    'test' selects only the test core.  Raises ValueError when any
    requested core is not present in ``solr_cores``.
    """
    if cores == '':
        return solr_cores
    if cores == 'test':
        return ['test']
    requested = cores.split(',')
    unknown = [core for core in requested if core not in solr_cores]
    if unknown:
        raise ValueError('Invalid type(s) requested: ' + ','.join(unknown))
    return requested
from warnings import warn
def impute_NA_with_arbitrary(data,impute_value,NA_col=[]):
    """Fill missing values in selected columns with a fixed value.

    For each column in ``NA_col`` that contains NaNs, a new column named
    ``<col>_<impute_value>`` is added holding the filled series.  Columns
    without missing values only trigger a warning.
    """
    filled = data.copy(deep=True)
    for column in NA_col:
        if filled[column].isnull().sum() > 0:
            new_name = column + '_' + str(impute_value)
            filled[new_name] = filled[column].fillna(impute_value)
        else:
            warn("Column %s has no missing cases" % column)
    return filled
def create_node_str(node_id):
    """Return the GML text fragment that declares a single node.

    :param node_id: The id of the node
    """
    return "\tnode\n\t[\n\t\tid %s\n\t]\n" % node_id
import os
import re
def read_dataset(data_path):
    """Read the raw sentiment dataset and label each line.

    Loads ``pos.txt`` and ``neg.txt`` from ``data_path`` and returns a
    list of ``(line, label)`` pairs where label is 1 for positive lines
    and 0 for negative lines.

    Note: the previous implementation also tokenised every line and
    "filtered" stop words, but that comparison matched token *lists*
    against raw stop-word *lines* (so it never matched) and its result
    was discarded anyway; the dead code has been removed.
    ``stopwords.txt`` is still opened so the file-layout contract (and
    any resulting FileNotFoundError) is unchanged.
    """
    # os.path can be used for seamless path construction across different
    # operating systems.
    with open(os.path.join(data_path, 'stopwords.txt')) as f:
        f.readlines()
    with open(os.path.join(data_path, 'pos.txt')) as f:
        pos_lines = f.readlines()
    with open(os.path.join(data_path, 'neg.txt')) as f:
        neg_lines = f.readlines()
    all_lines = pos_lines + neg_lines
    return list(zip(all_lines, [1] * len(pos_lines) + [0] * len(neg_lines)))
def generate_php_name(schema_name):
    """
    Create a PHP style (CamelCase) name for a schema name.

    Underscores are dropped; the character following each underscore (and
    the first character) is upper-cased, all others are lower-cased.
    :schema_name str:
    :return str:
    """
    pieces = []
    capitalize_next = True
    for ch in schema_name:
        if ch == '_':
            capitalize_next = True
            continue
        pieces.append(ch.upper() if capitalize_next else ch.lower())
        capitalize_next = False
    result = ''.join(pieces)
    assert len(result) > 0
    return result
def connect_char(words):  # pragma: no cover
    """Join runs of single alphabetic characters into whole tokens.

    Multi-character tokens and single non-alphabetic characters are kept
    as separate items; consecutive single letters are concatenated.
    """
    result = []
    pending = ""

    def flush():
        nonlocal pending
        if pending:
            result.append(pending)
            pending = ""

    for token in words:
        token = token.strip()
        if len(token) > 1:
            flush()
            result.append(token)
        elif len(token) == 1:
            if token.isalpha():
                pending += token
            else:
                flush()
                result.append(token)
    flush()
    return result
from typing import Dict
from typing import List
def get_extra_requires() -> Dict[str, List[str]]:
    """Return the mapping of extra/optional package groups to packages."""
    extras: Dict[str, List[str]] = {}
    extras['opencv'] = ['opencv-python']
    extras['opencv-headless'] = ['opencv-python-headless']
    return extras
def fuel_combustion(mpg: float, gram: bool = True) -> float:
    """Calculate CO2e emitted per mile for a US gallon of fuel.

    :param mpg: fuel economy in miles per US gallon (must be non-zero)
    :param gram: if True, return grams per mile rounded to 1 decimal;
        otherwise kilograms per mile rounded to 3 decimals
    :return: CO2e per mile
    """
    # 8.8 kg CO2e per US gallon of gasoline.  (A useless ``mpg = mpg``
    # self-assignment was removed from the original body.)
    rate = 8.8
    round_ = 3
    if gram is True:
        rate = 8.8 * 1000
        round_ = 1
    co2e_mile = rate / mpg
    return round(co2e_mile, round_)
def get_timestamp(milliseconds):
    """
    Generates timestamp for an amount of milliseconds
    Parameters
    ----------
    milliseconds : int
        Time in milliseconds
    Returns
    -------
    str
        Timestamp (in format H:M:S.milli)
    """
    hours, remainder = divmod(milliseconds, 60 * 60 * 1000)
    minutes, remainder = divmod(remainder, 60 * 1000)
    seconds, millis = divmod(remainder, 1000)
    return "%02d:%02d:%02d.%03d" % (hours, minutes, seconds, millis)
from typing import List
import requests
def get_ids(url: str, offset: str = "") -> List[str]:
    """Returns all available dialogs from the agent database.

    Follows the ``next`` cursor recursively until the server reports no
    further pages.
    """
    payload = requests.get(f"{url}{offset}").json()
    ids = payload["dialog_ids"]
    next_offset = payload["next"]
    if next_offset is not None:
        ids += get_ids(url, next_offset)
    return ids
def get_x_distribution_function(distribution_series):
    """Build a distribution function from a probability series.

    Each key is mapped to the cumulative probability of all strictly
    smaller keys (i.e. the running sum *excluding* the key itself).

    :type distribution_series: dict
    """
    cumulative = 0
    result = {}
    for value in sorted(distribution_series):
        result[value] = cumulative
        cumulative += distribution_series[value]
    return result
def _upnext_curr_ep_info(videoid):
"""Create the Up Next data for the current episode"""
return {
'episodeid': videoid.episodeid,
'tvshowid': videoid.tvshowid,
} | fec8d0d275b983252ffaaf0a8f0855d9d883360f | 31,122 |
def bracketing(node, labeled=True):
    """Generate bracketing ``(label, indices)`` for a given node."""
    label = node.label if labeled else ''
    return (label, node.bitset)
from typing import Optional
from typing import Dict
from typing import Any
def make_audit_event(
    type: Optional[str] = None,
    org: Optional[str] = None,
    member: Optional[Dict[str, Any]] = None,
    team: Optional[Dict[str, Any]] = None,
    repository: Optional[Dict[str, Any]] = None,
    count: int = 0,
    audit_id: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Create an audit event dictionary with a fixed data model.

    Every argument is copied into the result; ``None`` values are kept so
    the dictionary always contains the full set of keys.
    """
    return {
        "type": type,
        "org": org,
        "member": member,
        "team": team,
        "repository": repository,
        "count": count,
        "audit_id": audit_id,
    }
def getFilename(page, extension=None):
    """
    Create a filename that is unique for the page.
    @param page: page used to create the new filename
    @type page: Page
    @param extension: file extension
    @type extension: str
    @return: filename of <family>-<lang>-<page>.<ext>
    @rtype: str
    """
    parts = (page.site.family.name, page.site.code, page.titleForFilename())
    filename = '%s-%s-%s' % parts
    if extension:
        filename = '%s.%s' % (filename, extension)
    return filename
def up_to_first_space(s):
    """Return the substring of s up to the first space, or all of s if it
    does not contain a space."""
    return s.split(' ', 1)[0]
import torch
def _get_anchor_positive_triplet_mask(targets):
"""
Taken from:
https://github.com/omoindrot/tensorflow-triplet-loss/blob/master/model/triplet_loss.py
"""
indexes_not_equal = ~torch.eye(len(targets)).bool().to(targets.device)
# Check if labels[i] == labels[j]
# Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
labels_equal = targets.unsqueeze(0) == targets.unsqueeze(1)
# Combine the two masks
mask = indexes_not_equal & labels_equal
return mask | feb508f45f2a976b328a12ee135115cbf8e49f65 | 31,130 |
def calc_amp_len(x_start, x_end, y_start, y_end):
    """
    Determining the amplicon length based on fwd-rev primer blast hits
    """
    x_lo, x_hi = sorted((x_start, x_end))
    y_lo, y_hi = sorted((y_start, y_end))
    if y_lo > x_hi:
        return y_hi - x_lo
    return x_hi - y_lo
from typing import Dict
def extract_tables_from_base(base) -> Dict[str, type]:
    """Extract tables and sqlalchemy declarative base classes from a base.
    Parameters
    ----------
    base : DeclarativeBase
        base from which to extract the model classes
    Returns
    -------
    result : dict[str, type]
        dictionary from tablename to sqlalchemy model class

    NOTE(review): ``_decl_class_registry`` is a private SQLAlchemy
    attribute that was removed in SQLAlchemy 1.4+ in favour of
    ``registry`` -- confirm the pinned SQLAlchemy version still has it.
    """
    tables = {}
    registry = base._decl_class_registry
    # Registry values appear to be callables (weak references) that yield
    # the registered class when invoked -- TODO confirm for the pinned
    # SQLAlchemy version.
    for dbase in registry.data.values():
        try:
            # Entries without a ``__table__`` (e.g. module markers) raise
            # AttributeError and are skipped below.
            tablename = dbase().__table__.name
        except AttributeError:
            continue  # ignore _ModuleMarker
        else:
            modelclass = dbase().__mapper__.class_
            tables[tablename] = modelclass
    return tables | c5ea8e4e059cf40d8b0128aefae9e420bfc66888 | 31,133 |
def check_for_unittests( lines ):
    """ Check python source code for unittests. """
    # NOTE(review): stub -- always reports True regardless of ``lines``;
    # the actual scan for unittest usage has not been implemented.
    return True | f23329dfe590c7f28d8340f93ec2bca4689a4c83 | 31,134 |
from typing import Counter
def get_sorted_frequent_item_from_data(transactions, min_sup):
    """Map each frequent item to its rank in descending-frequency order.

    An item is frequent if and only if the number of its occurrences
    across all transactions is at least ``min_sup``.

    Args:
        transactions (list): list of transactions
        min_sup (int): the minimum threshold for a frequent item
    Returns:
        dict: maps each frequent item to its index after sorting by
        frequency, highest first (ties keep first-seen order)
    """
    counts = Counter()
    for transaction in transactions:
        counts.update(transaction)
    frequent = [(item, freq) for item, freq in counts.items() if freq >= min_sup]
    frequent.sort(key=lambda pair: pair[1], reverse=True)
    return {item: rank for rank, (item, _) in enumerate(frequent)}
def get_features_list(df, contains:list=[], contains_not:list=[], sort_results = True, verbose=True):
    """
    Returns list of continous or categorical features from DataFrame.
    contains: must contain all strings in list
    contains_not: must not contain any of strings in list
    """
    columns = list(df.columns)
    for needle in contains:
        columns = [c for c in columns if c.find(needle) > -1]
    for needle in contains_not:
        columns = [c for c in columns if c.find(needle) == -1]
    if sort_results:
        columns = sorted(columns)
    if verbose:
        print('found columns:', len(columns))
        n_dupes = len(columns) - len(set(columns))
        if n_dupes > 0:
            print('found', n_dupes, 'duplicate column names')
    return columns
def _circle_slice(lst, start, end):
    """
    Slices a list in a loop. Treats the list as a ring and slices from beginning to end,
    looping back to the beginning of the list if necessary.
    """
    # Plain contiguous slice entirely inside the list.
    if 0 <= start < end < len(lst):
        return lst[start:end]
    # Negative start: wrap the start index to the back of the list.
    elif start < 0:
        return lst[start+len(lst):] + lst[:end]
    # End beyond the last index: wrap the tail around to the front.
    elif end >= len(lst):
        return lst[start:] + lst[0:end-len(lst)]
    # Start after end: take the wrap-around span between them.
    elif start > end:
        return lst[start:] + lst[:end]
    # NOTE(review): start == end returns the whole ring rotated to start,
    # not an empty slice -- confirm this is the intended contract.
    elif start == end:
        return lst[start:] + lst[:end]
    # Unreachable for well-formed in-range input; kept as a diagnostic.
    print("SLICE FAILURE: ", lst, "FROM:", start, "END:", end)
    return [] | c2fad61408c08cd9c09bf426a71f5b0060da3021 | 31,139 |
def ahref(text,link):
    """
    Wraps text around an <a href=X> tag
    """
    return "<a href='{}'>{}</a>".format(link, text)
import math
def cartesian_to_polar(x, y, xorigin=0.0, yorigin=0.0):
    """
    Convert Cartesian coordinates to polar coordinates centred at a
    defined origin.  In the polar coordinates, theta is an angle measured
    clockwise from the Y axis.
    :Parameters:
    x: float
        X coordinate of point
    y: float
        Y coordinate of point
    xorigin: float (optional)
        X coordinate of origin (if not zero)
    yorigin: float (optional)
        Y coordinate of origin (if not zero)
    :Returns:
    (r, theta): tuple of 2 floats
        Polar coordinates of point. NOTE: theta is in radians.
    """
    two_pi = 2.0 * math.pi
    dx = float(x) - float(xorigin)
    dy = float(y) - float(yorigin)
    r = math.sqrt(dx * dx + dy * dy)
    # Clockwise-from-Y convention: subtract the usual atan2 angle from pi/2.
    theta = (math.pi / 2.0) - math.atan2(dy, dx)
    # Normalise theta into the range 0 - 2*PI.
    while theta < 0.0:
        theta += two_pi
    while theta > two_pi:
        theta -= two_pi
    return (r, theta)
from unittest.mock import Mock
def fixture_bigquery_job_config():
    """Return a mocked BigQuery job config object."""
    return Mock()
def byte_length(text: str) -> int:
    """
    Return the string length in terms of UTF-8 byte offset.
    """
    encoded = text.encode("utf8")
    return len(encoded)
def load_glove_vocab(filename):
    """Loads GloVe's vocab from a file.
    Args:
        filename (str): path to the glove vectors.
    Returns:
        set: a set of all words in GloVe.
    """
    print('Building vocab...')
    vocab = set()
    with open(filename) as f:
        for line in f:
            # The token is the first whitespace-separated field of each line.
            vocab.add(line.strip().split()[0])
    print('- done. {} tokens'.format(len(vocab)))
    return vocab
def calc_score(score):
    """
    Convert threatgrid score to dbot score
    """
    if not score:
        return 0
    if score >= 95:
        return 3
    if score >= 75:
        return 2
    return 1
from functools import reduce
from operator import mul
def product(iterable=(), start=1):
    """ kata currently supports only Python 3.4.3 """
    total = start
    for factor in iterable:
        total = mul(total, factor)
    return total
import os
def does_cuda_source_exist(sources):
    """
    Checks file extensions in the list ``sources``.  Returns ``True`` if
    any file has a ``.cu`` extension, otherwise ``False``.
    :param sources: A list of file names.
    :type sources: list
    """
    return any(os.path.splitext(name)[1] == '.cu' for name in sources)
def get_luminance(r:int, g:int, b:int) -> float:
    """get_luminance(r, g, b) -> float

    Perceptive luminance of an RGB colour, scaled to [0, 1].
    source: <https://stackoverflow.com/a/1855903/7517185>
    """
    # Weights reflect the human eye's higher sensitivity to green.
    weighted = 0.299 * r + 0.587 * g + 0.114 * b
    return weighted / 255
def high_trust(user, minscore=50):
    """
    Conditions for trusting a user
    """
    profile = user.profile
    return profile.is_moderator or profile.score >= minscore
def first(records):
    """ Return the first record in ``records``, with the same prefetching. """
    # A zero- or one-element recordset already *is* its own first record.
    if len(records) > 1:
        return next(iter(records))
    return records
def cluster_by_diff(data, max_gap):
    """
    Cluster numbers whose sorted neighbours differ by at most ``max_gap``.

    Based on http://stackoverflow.com/a/14783998
    Note: sorts ``data`` in place.
    :param data: any list of floats or ints (must be non-empty)
    :param max_gap: the largest gap between numbers until starting a new cluster
    :return: nested list of clusters
    """
    # Differences are only meaningful on sorted data.
    data.sort()
    clusters = [[data[0]]]
    for value in data[1:]:
        last_cluster = clusters[-1]
        if abs(value - last_cluster[-1]) <= max_gap:
            last_cluster.append(value)
        else:
            clusters.append([value])
    return clusters
def to_string(num, base):
    """Convert a non-negative integer to its string representation in ``base``.

    Supports bases up to 16 using digits 0-9A-F.

    :param num: non-negative integer to convert
    :param base: target base (2..16)
    :return: string representation of ``num`` in ``base``
    """
    convert_string = "0123456789ABCDEF"
    if num < base:
        return convert_string[num]
    # Recurse on the quotient, then append the digit for the remainder.
    # (A leftover debug ``print`` was removed from this branch.)
    return to_string(num // base, base) + convert_string[num % base]
def sort(iterable):
    """
    Selection Sort: repeatedly select the minimum of the unsorted suffix
    and swap it into position; the prefix before it is already sorted.
    This version does not work with generators.
    Complexity: O(n²)
    :param iterable: mutable indexable sequence (sorted in place)
    :return: Sorted Iterable
    """
    n = len(iterable)
    for i in range(n):
        min_index = i
        for j in range(i + 1, n):
            if iterable[j] < iterable[min_index]:
                min_index = j
        iterable[i], iterable[min_index] = iterable[min_index], iterable[i]
    return iterable
def ndec(x, max=3):  # --DC
    """Return the number of decimal places in a number (capped at ``max``)."""
    # Walk down from the cap: the first precision whose rounding differs
    # from one digit less is the number of significant decimals.
    for places in reversed(range(1, max + 1)):
        if round(x, places) != round(x, places - 1):
            return places
    return 0
def collect_semantic_labels_v_kitti(path):
    """
    collects the pixel values for a specific
    directory of the virtual Kitti dataset,
    avoiding repetition due to instance segmentation
    for specific classes
    """
    # Car and Van appear once per instance; record only the first of each.
    car_founded = False
    van_founded = False
    labels = {}
    with open(path) as data:
        # Skip the header line.
        next(data)
        for line in data:
            # Once both instance classes were seen, remaining lines can
            # only repeat them, so stop early.
            if car_founded and van_founded:
                return labels
            # Normalise "key: value" lines into whitespace-separated tokens;
            # the last three tokens are the (R, G, B) pixel values.
            line = line.replace(":", " ")
            line = line.split()
            if "Car" in line and not car_founded:
                labels['car'] = (int(line[-3]), int(line[-2]), int(line[-1]))
                car_founded = True
            elif "Van" in line and not van_founded:
                labels['van'] = (int(line[-3]), int(line[-2]), int(line[-1]))
                van_founded = True
            elif line[0] != "Car" and line[0] != "Van":
                # Non-instance classes: keyed by lower-cased class name.
                labels[line[0].lower()] = (int(line[-3]), int(line[-2]), int(line[-1]))
    return labels | 2c9bba2e7fdc29e62922d226cafc7e1859b24041 | 31,159 |
async def esi_names_to_lists(response: list) -> tuple:
    """Take the esi_names response and make an ids list and a names list and return them as a tuple.

    :param response: The esi_names response (list of dicts with 'id' and 'name').
    :return: A tuple with two lists, ids and names.

    Note: an unused ``categories`` list literal was removed from the
    original body; it was never read.
    """
    ids = []
    names = []
    for element in response:
        ids.append(element.get('id'))
        names.append(element.get('name'))
    return ids, names
import numpy
def simpson_int(a,b,f,N):
    """Composite Simpson's rule on [a, b] with N subintervals (N even)."""
    edges = numpy.linspace(a, b, N + 1)
    h = edges[1] - edges[0]
    total = 0.0
    # One Simpson panel per pair of subintervals: nodes k, k+1, k+2.
    for k in range(0, N - 1, 2):
        total += (h / 3.0) * (f(edges[k]) + 4.0 * f(edges[k + 1]) + f(edges[k + 2]))
    return total
def sub_string(string, start_str, end_str):
    """
    Dumb search for the first instance of a string wrapped by start_str
    and end_str. Could be a regex, but this should be (needlessly) faster.
    :param str string:
    :param str start_str:
    :param str end_str:
    :returns str:
    """
    begin = string.find(start_str) + len(start_str)
    tail = string[begin:]
    return tail[:tail.find(end_str)]
import argparse
def argument_handler():
    """
    Parse the command-line arguments for saving/restoring iptables rules.

    Returns
    -------
    (save, restore, chains) : tuple
        save : bool
            True when ``-s/--save`` was selected.
        restore : bool
            True when ``-r/--restore`` was selected.
        chains : list[str] or None
            Chains given via ``-c/--chains`` to preserve, if any.

    Raises
    ------
    Exception
        If the user tries to restore a rule file based on new chains.
        The intended use case is to selectively save a rule file based
        on an arbitrary chain and restore the saved version in its
        entirety.
    Exception
        If the user tries to save a rule file but doesn't specify at
        least one chain.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--chains",
                        help="Do further operations for which chains?",
                        nargs='+')
    arg_group = parser.add_mutually_exclusive_group(required=True)
    arg_group.add_argument("-s", "--save",
                           help="Save the rules for the desired chains",
                           action='store_true')
    arg_group.add_argument("-r", "--restore",
                           help="Restore the rules", action='store_true')
    args = parser.parse_args()
    if args.restore and args.chains:
        raise Exception("Can't selectively restore a rule file. You should " +
                        "instead selectively save a rule file and restore it" +
                        " in its entirety.")
    if args.save and not args.chains:
        raise Exception("Must choose a chain to selectivly save the rules. " +
                        "Use regular iptables-save if no specific is needed " +
                        "to be saved")
    return (args.save, args.restore, args.chains) | d5ca7319f975594e32fd452d994a0f6fb1428b09 | 31,163 |
def myinc(a):
    """Return ``a`` increased by one."""
    incremented = a + 1
    return incremented
def trim(value):
    """Removes whitespaces around the string.
    Example usage: {{ value|trim }}
    """
    stripped = value.strip()
    return stripped
def turn_on_off_event(b, name: str):
    """Toggle a named light on the bridge and report the new state.

    Parameter:
        b -> bridge (must expose ``connect`` and ``get_light_objects``)
        name -> name of the light to toggle
    Returns:
        'Light turned off' or 'Light turned on' depending on the action taken.
    """
    b.connect()
    lights = b.get_light_objects('name')
    # ``on`` is the light's boolean power state; flip it.
    if lights[name].on:
        lights[name].on = False
        return 'Light turned off'
    else:
        lights[name].on = True
        return 'Light turned on' | 72613b97dd6e663c726baea599c32a782d3b9db5 | 31,167 |
def get_vc_version(session):
    """Return the dot-separated vCenter version string. For example, "1.2".
    :param session: vCenter soap session
    :return: vCenter version
    """
    about = session.vim.service_content.about
    return about.version
def get_messages_str(messages):
    """
    Render the client/server conversation as "<sender>: <text>" lines.
    Arguments:
        messages {list} -- list of messages from get_messages
    Returns:
        str -- string of conversation
    """
    lines = ["{}: {}\n".format(m[0], m[1]) for m in messages]
    return "".join(lines).strip("\n\t ")
import torch
def convert_to_numpy(*inputs):
    """
    Converts input tensors to numpy ndarrays.
    Args:
        inputs (iterable of torch.Tensor): torch tensors
    Returns:
        tuple of ndarrays
    """
    def _to_numpy(i):
        assert isinstance(i, torch.Tensor), "Expected input to be torch.Tensor"
        return i.detach().cpu().numpy()
    # Materialise as a tuple: the original returned a lazy generator even
    # though the docstring promises a tuple, which also deferred the
    # isinstance check until iteration.
    return tuple(_to_numpy(i) for i in inputs)
def parse(stdin):
    """
    Calculate the gaps between a sorted sequence of integers, starting at 0.
    """
    adapters = sorted(int(line) for line in stdin.read().strip().split("\n"))
    previous = 0
    gaps = []
    for adapter in adapters:
        gaps.append(adapter - previous)
        previous = adapter
    return gaps
def is_string_matched_in_regular_expression_objects(string, regex_objects):
    """Return True when any compiled pattern occurs anywhere in ``string``.

    ``regex_objects`` contains regular expression objects compiled from
    patterns; each one is searched for any occurrence in ``string``.
    """
    return any(pattern.search(string) for pattern in regex_objects)
def GetModelDir(options):
    """Returns the model directory for a given model."""
    gan_type = options["gan_type"]
    if gan_type not in ("MultiGAN", "MultiGANBackground"):
        return "GAN"
    prefix = ""
    # Zero blocks and heads means the components are independent.
    if options["n_blocks"] == 0 and options["n_heads"] == 0:
        prefix = "Independent"
    return "%s%s-%d" % (prefix, gan_type, options["k"])
def htmldec(text):
    """Decode HTML entities in the given text."""
    # Decode numeric character references (&#NNN;).
    chunks = text.split('&#')
    for i in range(1, len(chunks)):
        number, rest = chunks[i].split(';', 1)
        chunks[i] = chr(int(number)) + rest
    text = ''.join(chunks)
    # Named entities; '&amp;' must be handled last to avoid double decoding.
    for entity, char in (('\xa0', ' '), ('&nbsp;', ' '), ('&lt;', '<'),
                         ('&gt;', '>'), ('&quot;', '"'), ('&amp;', '&')):
        text = text.replace(entity, char)
    return text
import numpy
def varsv(p, q, rp, rsv, d, iwat):
    """
    Find variables cosP, cosQ, sinP, sinQ...
    for Rayleigh-wave.

    Parameters (assumed from usage -- TODO confirm against caller):
        p, q    : complex vertical phase terms for P and SV
        rp, rsv : complex vertical slowness/wavenumber factors for P and SV
        d       : layer thickness, used as the small-argument limit of sin/r
        iwat    : 1 for a fluid (water) layer, otherwise elastic

    Returns:
        (cosp, cosq, rsinp, rsinq, sinpr, sinqr, pex, svex) where pex and
        svex are the factored-out exponential growth exponents.
    """
    pr = numpy.real(p)
    pi = numpy.imag(p)
    qr = numpy.real(q)
    qi = numpy.imag(q)
    # Growth exponents are factored out to keep the trig terms bounded.
    pex = pr
    svex = 0.0
    # Fluid layer
    if iwat == 1:
        epp = 0.5 * (numpy.cos(pi) + numpy.sin(pi) * 1j)
        epm = numpy.conj(epp)
        # Drop the decaying exponential once it would underflow (pr >= 30).
        pfac = numpy.exp(-2.0 * pr) if pr < 30.0 else 0.0
        cosp = numpy.real(epp + pfac * epm)
        sinp = epp - pfac * epm
        rsinp = numpy.real(rp * sinp)
        # Small-argument limit: sin(p)/rp -> d as both p and rp -> 0.
        sinpr = d if numpy.abs(pr) < 1.0e-5 and numpy.abs(rp) < 1.0e-5 else sinp / rp
        # No shear wave in a fluid: SV terms collapse to constants.
        cosq = 1.0
        rsinq = 0.0
        sinqr = 0.0
    # Elastic layer
    else:
        svex = qr
        epp = 0.5 * (numpy.cos(pi) + numpy.sin(pi) * 1j)
        epm = numpy.conj(epp)
        pfac = numpy.exp(-2.0 * pr) if pr < 30.0 else 0.0
        cosp = numpy.real(epp + pfac * epm)
        sinp = epp - pfac * epm
        rsinp = numpy.real(rp * sinp)
        sinpr = d if numpy.abs(pr) < 1.0e-5 and numpy.abs(rp) < 1.0e-5 else sinp / rp
        # Same construction for the SV branch, driven by q/rsv.
        eqp = 0.5 * (numpy.cos(qi) + numpy.sin(qi) * 1j)
        eqm = numpy.conj(eqp)
        svfac = numpy.exp(-2.0 * qr) if qr < 30.0 else 0.0
        cosq = numpy.real(eqp + svfac * eqm)
        sinq = eqp - svfac * eqm
        rsinq = numpy.real(rsv * sinq)
        sinqr = d if numpy.abs(qr) < 1.0e-5 and numpy.abs(rsv) < 1.0e-5 else sinq / rsv
    return cosp, cosq, rsinp, rsinq, sinpr, sinqr, pex, svex | 6be8543accfa5a57df8a026254b99e4ef578a770 | 31,177 |
def flatten_corner(corner_kick, game_id):
    """Flatten the schema of a corner kick into a single flat dict."""
    ck_id, ck_data = corner_kick
    coords = ck_data['coord']
    # Convert minutes+seconds into fractional minutes.
    event_minutes = ck_data['t']['m'] + (ck_data['t']['s'] / 60)
    return {
        'game_id': game_id,
        'ck_id': ck_id,
        'time_of_event(min)': event_minutes,
        # 'assist': ck_data.get('assBy', None),
        'player_id': float(ck_data['plyrId']),
        'ck_coord_x1': coords['1']['x'],
        'ck_coord_y1': coords['1']['y'],
        'ck_coord_z1': coords['1']['z'],
        'ck_coord_x2': coords['2']['x'],
        'ck_coord_y2': coords['2']['y'],
        'ck_coord_z2': coords['2']['z'],
    }
def exists(env):
    """Standard library tar function should always exist."""
    # Tool-existence hook: unconditionally available, so ``env`` is
    # intentionally ignored.
    return True | 472cc9581595c281657703737c376c33a797822a | 31,179 |
def _split_to_slist(s):
"""Splits coverage language string to a list of strings. Here we
make sure that parentheses () will be separate items in list.
???Consider: it would be better to separate only those parentheses
which are not escaped with backslash; this would allow using
(escaped) parentheses in the regular expressions."""
return s.replace("("," ( ").replace(")"," ) ").split() | ef5bde087f82dd261bbccaf7152956302dc72430 | 31,181 |
def are_bools(tuple_arg):
    """
    are_bools
    ---------
    - Param: tuple_arg. Tuple, required.
    - Returns: bool. True if all items in tuple_arg are booleans. False otherwise.
    """
    if type(tuple_arg) != tuple:
        raise TypeError(
            "Class type not supported; tuple expected."
        )
    return all(type(item) == bool for item in tuple_arg)
def get_ranks(keywords, script):
    """Return ranks of queried keywords in a given script.
    Parameters
    ----------
    keywords : str[]
        Array of keywords to search in the script.
    script : dict[]
        JSON object containing ranks of different keywords.
    Returns
    -------
    ranks : int[]
        Array of integers in the same order as their respective keywords;
        keywords without a specified rank get 0.
    """
    # Build a keyword -> rank lookup; the first occurrence wins.
    lookup = {}
    for entry in script:
        lookup.setdefault(entry['keyword'], entry['rank'])
    return [lookup.get(keyword, 0) for keyword in keywords]
import os
def is_files_exists(files):
    """Return True if at least one file (in files) already exists."""
    return any(os.path.isfile(path) for path in files)
def determine_eam_setfl_pairs(symbols):
    """ determines the order of eam pairs
    the order for eam pairs is different than for the rest of pypospack
    Args:
        symbols (list): a list of symbols
    Returns:
        (list): a list of tuples
    """
    return [
        (s1, s2)
        for i1, s1 in enumerate(symbols)
        for i2, s2 in enumerate(symbols)
        if i1 >= i2
    ]
def only1(l):
    """
    Checks if the list 'l' of booleans has one and only one True value
    :param l: list of booleans
    :return: True if list has one and only one True value, False otherwise
    """
    it = iter(l)
    # The first any() consumes up to and including the first truthy value;
    # the second any() must then find no further truthy values.
    return any(it) and not any(it)
def fields_to_batches(d):
    """
    Turn a dict of batched tensors into a list of per-entry dicts.
    Each output dict holds the slice of every value for one batch index.
    Example:
        d = {"a": [[1, 2], [3, 4]], "b": [1, 2]}
        -> [{"a": [1, 2], "b": 1}, {"a": [3, 4], "b": 2}]
    """
    # All batched values must have the same length.
    batch_sizes = {len(v) for v in d.values()}
    assert len(batch_sizes) == 1
    batch_size = batch_sizes.pop()
    return [{key: value[i] for key, value in d.items()} for i in range(batch_size)]
def num_spectral_coeffs_up_to_order(b):
    """
    The SO(3) spectrum consists of matrices of size (2l+1, 2l+1) for l=0, ..., b - 1.
    This function computes the number of elements in a spectrum up to (but excluding) b - 1.
    The number of elements up to and including order L is
    N_L = sum_{l=0}^L (2l+1)^2 = 1/3 (2 L + 1) (2 L + 3) (L + 1)
    :param b: bandwidth
    :return: the number of spectral coefficients
    """
    L = b - 1
    assert L >= 0
    return ((2 * L + 1) * (2 * L + 3) * (L + 1)) // 3
import logging
def metrics(time_dur, extremes, count, mean_hr, list_of_times,
            time, voltages):
    """Bundle the computed ECG metrics into a single dictionary.

    The resulting dictionary contains:
        duration: time duration of the ECG strip
        voltage_extremes: (min, max) lead voltages found in the data file
        num_beats: number of detected beats in the strip
        mean_hr_bpm: estimated average heart rate over the strip
        beats: list of times when a beat occurred
        times: raw time samples
        voltages: raw voltage samples

    Parameters
    ----------
    time_dur : float
        Duration of the ECG strip in seconds
    extremes : tuple
        Minimum and maximum voltages from the ECG strip
    count : int
        Number of detected peaks
    mean_hr : float
        Average heart rate (beats per minute)
    list_of_times : list
        Times at which peaks occurred
    time : list
        Raw time samples
    voltages : list
        Raw voltage samples

    Returns
    -------
    dict
        All of the metrics listed above
    """
    logging.info("Dictionary being established")
    result = dict(
        duration=time_dur,
        voltage_extremes=extremes,
        num_beats=count,
        mean_hr_bpm=mean_hr,
        beats=list_of_times,
        times=time,
        voltages=voltages,
    )
    logging.info("Dictionary filled")
    return result
import re
def remove_html_tags(text_obj, pandas_apply_mode=True):
    """Strip HTML tags and character entities from article content.

    :param text_obj: Either a single content string, or a Pandas DataFrame
        row whose ``content`` attribute holds the article text (the latter
        is the shape received when this function is used with
        ``DataFrame.apply``).
    :type text_obj: str or row of a Pandas DataFrame
    :param pandas_apply_mode: True (default) when cleaning rows via
        ``.apply()``; False when cleaning a single plain string.
    :type pandas_apply_mode: bool
    :returns: The cleaned text.
    :rtype: str

    **Notes**
    1. https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.apply.html
    """
    # Matches HTML tags as well as named/numeric character entities.
    tag_or_entity = re.compile("<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});")
    if pandas_apply_mode:
        # DataFrame row: the article text lives in the ``content`` attribute.
        return re.sub(tag_or_entity, "", text_obj.content)
    # Plain string: clean it directly.
    return re.sub(tag_or_entity, "", text_obj)
def CalculateSimilarityPybel(fp1, fp2):
    """
    #################################################################
    Calculate Tanimoto similarity between two molecules.

    Usage:

        result=CalculateSimilarityPybel(fp1,fp2)

    Input: fp1 and fp2 are two DataStructs (index 1 holds the
           on-bit dictionary of the fingerprint).

    Output: result is a Tanimoto similarity value rounded to 3
            decimals; 0.0 when both fingerprints are empty.
    #################################################################
    """
    keys1 = set(fp1[1].keys())
    keys2 = set(fp2[1].keys())
    union = keys1 | keys2
    if not union:
        # Two empty fingerprints would divide by zero; treat them as
        # having zero similarity (RDKit's convention for empty fps).
        return 0.0
    tanimoto = len(keys1 & keys2) / float(len(union))
    return round(tanimoto, 3)
def is_string_blank(string: str) -> bool:
    """Report whether a string is empty, whitespace-only, or falsy.

    Parameters:
    ----------
    string (str): String to be checked.

    Returns:
    -------
    bool: True when the string carries no non-whitespace content.
    """
    return not (string and string.strip())
import pandas
def session_detail_to_dataframe(metric, response_json):
    """ Build a one-row Pandas DataFrame for non-time-series session detail.

    The session summary json contains multiple inner dictionaries; this
    pulls out one of them by key and tabulates it.

    :param metric: Key of the inner dictionary wanted from the response
        json (eg: app, device, metric).
    :param response_json: response json returned from a session request.
    :return: Pandas DataFrame whose single row holds that dictionary.
    """
    detail = response_json[metric]
    return pandas.DataFrame(data=[detail])
def file_to_list(path_to_file, keep_duplicates=True, encoding='utf-8'):
    """Transforms a plain text (txt or csv) file to Python :obj:`list`.

    Args:
        path_to_file (str): A path to plain text file.
        keep_duplicates (bool): If set to False, the output list will not
            contain duplicates (first occurrence kept, order preserved).
            Defaults to True.
        encoding (str): File's encoding. Defaults to "utf-8".

    Returns:
        list: The processed plain text file as a Python :obj:`list`.
    """
    # ``with`` guarantees the handle is closed even if reading raises,
    # unlike the previous open()/close() pair.
    with open(path_to_file, "r", encoding=encoding) as handler:
        lines = [line.replace("\n", "") for line in handler]
    # dict.fromkeys preserves first-seen order while deduplicating.
    return lines if keep_duplicates else list(dict.fromkeys(lines))
import os
def bak_file(path):
    """
    Return *path* with its file extension replaced by ".bak".

    :param path: (unicode) String path.
    :return: (unicode) String path with the extension changed to ".bak"
    """
    root, _old_ext = os.path.splitext(path)
    return root + ".bak"
def get_crash_id(line):
    """
    Takes a raw CSV line and returns a crash_id

    :param str line: The raw CSV line
    :return str: The Crash ID, or "" when the line is not a string
    """
    try:
        # The crash id is the first comma-separated field.
        return line.strip().split(",")[0]
    except (AttributeError, TypeError) as e:
        # A non-string (e.g. None) was passed in. Narrowed from a blanket
        # ``except Exception`` so unrelated bugs are no longer swallowed;
        # keep the legacy report-and-return-"" behavior.
        print("Error: " + str(e))
        return ""
def combine(*dicts):
    """Given multiple dicts, merge them into a new dict as a shallow copy.

    Later dicts win on duplicate keys; the inputs are left untouched.
    """
    merged = {}
    for mapping in dicts:
        merged.update(mapping)
    return merged
import numpy
import math
def getTemperaturesForModel(model, Tmin, Tmax, Tcount):
    """
    Returns an array of temperatures based on the interpolation `model`,
    minimum and maximum temperatures `Tmin` and `Tmax` in K, and the number of
    temperatures `Tcount`. For Chebyshev polynomials a Gauss-Chebyshev
    distribution is used; for all others a linear distribution on an inverse
    temperature domain is used. Note that the Gauss-Chebyshev grid does *not*
    place `Tmin` and `Tmax` at the endpoints, yet the interpolation is still
    valid up to these values.
    """
    if model[0].lower() == 'chebyshev':
        # Gauss-Chebyshev nodes in (-1, 1), mapped onto the inverse
        # temperature interval [1/Tmax, 1/Tmin].
        Tlist = numpy.zeros(Tcount, numpy.float64)
        for index in range(Tcount):
            node = -math.cos((2 * index + 1) * math.pi / (2 * Tcount))
            Tlist[index] = 2.0 / ((1.0 / Tmax - 1.0 / Tmin) * node
                                  + 1.0 / Tmax + 1.0 / Tmin)
    else:
        # Evenly spaced on a 1/T axis, then inverted back to temperature.
        Tlist = 1.0 / numpy.linspace(1.0 / Tmax, 1.0 / Tmin, Tcount)
    return Tlist
def get_area(ptlist):
    """ Calculate the area of a polygon defined by a list of points.

    The variable ptlist is a list of (x, y) point pairs. Be careful,
    the implementation can give unexpected results with self-intersecting
    polygons.

    The output will always be non-negative.

    Created: 2015 April 29, msswan
    """
    # Trapezoid (shoelace) contribution of one directed edge p -> q.
    def _trap(p, q):
        return (q[1] + p[1]) * (q[0] - p[0]) / 2.0

    # Pair every vertex with its successor, wrapping the last back to
    # the first, and accumulate the signed area.
    closed_pairs = zip(ptlist, ptlist[1:] + ptlist[:1])
    signed_area = sum(_trap(p, q) for p, q in closed_pairs)
    return abs(signed_area)
import re
def __dxf_trans_main_body(raw_main_body):
    """
    Convert the mainBody section of a scraped statute into a nested dict
    keyed by piece / chapter / article headings (must be rewritten if the
    crawler output format changes).

    :param raw_main_body: the unprocessed mainBody section — a list of
        piece dicts with "pieceNum" and "content" keys, whose chapters in
        turn hold "title"/"content", and whose items hold "title"/"text"
    :return: the mainBody after format conversion: a dict of
        {piece: {"title": ..., "content": {chapter: {"title": ...,
        "content": {item: {"title": ..., "content": text}}}}}}
    """
    def handle_title(title):
        # Split a heading such as "第X编/章/节/条 ..." (or "附则") into
        # [number-part, optional title-part]; on no match, return the raw
        # string as a one-element list.
        try:
            title = re.findall(u'(第[零一二三四五六七八九十百千万亿]+[编章条节]之*[零一二三四五六七八九十百千万亿]*\s|附则)(.*)$', title)[0]
        except:
            # NOTE(review): bare except swallows every error, not just the
            # IndexError from an empty findall — confirm this is intended.
            return [title]
        title = list(title)
        title[0] = title[0].strip()
        if len(title) == 2:
            # Drop full-width spaces; discard the title part if it is empty.
            title[1] = title[1].replace(u'\u3000', u'')
            if title[1] == '':
                del title[1]
        return title
    def handle_content(content):
        # return re.sub('\s*', '', content)
        return content  # keep the line breaks inside the article text
    main_body = {}
    for piece in raw_main_body:
        piece_num = piece["pieceNum"]
        piece_content = piece["content"]
        piece_num_split = handle_title(piece_num)
        # Create the piece entry on first sight, recording its title if any.
        if piece_num_split[0] not in main_body:
            main_body[piece_num_split[0]] = {}
            main_body[piece_num_split[0]]["content"] = {}
            if len(piece_num_split) == 2:
                main_body[piece_num_split[0]]["title"] = piece_num_split[1]
        for chapter in piece_content:
            chapter_title = chapter["title"]
            chapter_content = chapter["content"]
            chapter_title_split = handle_title(chapter_title)
            # Same pattern one level down: create the chapter slot once.
            if chapter_title_split[0] not in main_body[piece_num_split[0]]["content"]:
                main_body[piece_num_split[0]]["content"][chapter_title_split[0]] = {}
                main_body[piece_num_split[0]]['content'][chapter_title_split[0]]["content"] = {}
                if len(chapter_title_split) == 2:
                    main_body[piece_num_split[0]]['content'][chapter_title_split[0]]["title"] = \
                        chapter_title_split[1]
            for item in chapter_content:
                item_title = item["title"]
                item_text = item["text"]
                item_title_split = handle_title(item_title)
                # NOTE(review): the item's content is only written inside this
                # first-sight guard, so a duplicated item number would keep the
                # first text — confirm that is the desired behavior.
                if item_title_split[0] not in main_body[piece_num_split[0]]["content"][chapter_title_split[0]]:
                    main_body[piece_num_split[0]]['content'][chapter_title_split[0]]["content"][
                        item_title_split[0]] = {}
                    if len(item_title_split) == 2:
                        main_body[piece_num_split[0]]['content'][chapter_title_split[0]]["content"][
                            item_title_split[0]]["title"] = \
                            item_title_split[1]
                    main_body[piece_num_split[0]]['content'][chapter_title_split[0]]["content"][
                        item_title_split[0]][
                        'content'] = handle_content(item_text)
    return main_body | 2b4fcb4281d94e6d48e28f65aaac0dfa5e1e98ac | 31,209
def auto_int(x):
    """Convert a string into an integer, honoring 0x/0o/0b prefixes.

    :param x: String to convert
    :return: value in x
    :rtype: int
    """
    # base=0 makes int() infer the base from the literal's prefix.
    return int(x, base=0)
def map_transactions(transactions, mapping):
    """
    Compare all transactions to a dictionary with payees and categories.

    :param transactions: list of [date, amount, desc, payee, category] rows
    :param mapping: dict of identifier -> (payee, category); the first
        identifier contained (case-insensitively) in a description wins
    :return: new list of [date, amount, desc, payee, category] rows
    """
    mappedtransactions = []
    mapcounter = 0
    for date, amount, desc, payee, category in transactions:
        for identifier, (mapped_payee, mapped_category) in mapping.items():
            if identifier.lower() in desc.lower():
                payee, category = mapped_payee, mapped_category
                mapcounter += 1
                break
        mappedtransactions.append([date, amount, desc, payee, category])
    # Guard the percentage against division by zero when no transactions
    # were supplied (previously crashed with ZeroDivisionError).
    total = len(transactions)
    pct = 100. * mapcounter / total if total else 0.0
    print(f'Mapped {mapcounter} out of {total} ' +
          f'transactions ({pct:.0f}%)')
    return mappedtransactions
def xor(m, i, j):
    """ XOR row ``i`` of matrix ``m`` into row ``j`` in place (GJE step).

    Returns ``m`` for convenience; the matrix is modified in place.
    """
    width = len(m[0])
    for col in range(width):
        m[j][col] = m[j][col] ^ m[i][col]
    return m
from typing import Callable
def patch_http_client(monkeypatch) -> Callable:
    """
    Return a factory that monkeypatches the parser's HTTP GET call.

    The returned callable takes a ``response`` function and installs a
    stub that ignores headers and delegates to it.
    """
    target: str = 'booking_sites_parser.http_client.HttpClient.client.get'

    def _make_patch(response: Callable):
        # Replace the client's ``get`` with a stub forwarding only the URL.
        monkeypatch.setattr(target, lambda x, headers: response(x))

    return _make_patch
def get_cycle_end(year):
    """Round year up to the last year of the two-year election cycle. Used
    when querying partitioned itemized data for election cycle.
    """
    # Even years are already cycle ends; odd years advance by one.
    return year + year % 2
def different_local_with_stage(stage: str, ssm_env: dict, local_env: dict, filter_env: list = []):
    """Return env keys whose values differ between the SSM stage and local.

    :param stage: stage name (kept for interface compatibility; unused here)
    :param ssm_env: env values fetched from SSM for the stage
    :param local_env: env values of the local environment
    :param filter_env: keys to exclude from the comparison
    :return: dict of key -> {'stage': ssm value, 'local': local value}
        for every key present in both inputs with differing values

    The input dicts are no longer mutated (the previous implementation
    popped the filtered keys out of the caller's dictionaries).
    """
    ignored = set(filter_env)
    # Compare filtered copies so the caller's dicts stay intact.
    stage_vals = {k: v for k, v in ssm_env.items() if k not in ignored}
    local_vals = {k: v for k, v in local_env.items() if k not in ignored}
    different_env_values = {k: {'stage': stage_vals[k], "local": local_vals[k]}
                            for k in local_vals
                            if k in stage_vals and local_vals[k] != stage_vals[k]}
    return different_env_values
def score1(closest):
    """ Average of all pairwise distances in ``closest``.

    ``closest`` is a list of (t1, (t2, dist)) pairs; returns 0.5 when the
    list is empty.
    """
    distances = [dist for _t1, (_t2, dist) in closest]
    if not distances:
        return 0.5
    return sum(distances) / float(len(distances))
import sh
def rfasc(bin_fname,
          asc_fname,
          clk_fname,
          station_id='NONE',
          header='',
          bin='~/src/emtf/EMTF/RF/RFASC/rfasc'):
    """
    Drive the external EMTF ``rfasc`` binary via :mod:`sh`, feeding its
    interactive prompts to convert ``bin_fname`` into ASCII/clock files.

    :param bin_fname: path to the binary input file; also the return value
    :param asc_fname: path for the ASCII output file
    :param clk_fname: path for the clock output file
    :param station_id: station identifier given to the binary
    :param header: header line given to the binary
    :param bin: path to the ``rfasc`` executable
    :return: ``bin_fname`` unchanged
    """
    cmd = sh.Command(bin)
    # Answers the binary's prompts in order: station id, input file,
    # header, ascii output, clock output, then two "n" (no) answers —
    # presumably declining optional post-processing steps; confirm
    # against the rfasc prompt sequence.
    input_str = f'{station_id}\n{bin_fname}\n{header}\n{asc_fname}\n{clk_fname}\nn\nn\n'
    # NOTE(review): the command's output is captured but never inspected
    # or returned — errors surface only as sh exceptions.
    output = cmd(_in=input_str)
    return bin_fname | a28498f4676cb2f13ccaea311e7d36f1eea6af4f | 31,219
import inspect
def is_coroutine(coro):
    """Returns true if the argument is a coroutine object or a coroutine
    function."""
    checks = (inspect.iscoroutine, inspect.iscoroutinefunction)
    return any(check(coro) for check in checks)
import attr
def struct(*args, **kw):
    """
    Wrapper around ``attr.s``.

    Forces ``slots=True`` and ``auto_attribs=True``; all other arguments
    are forwarded to ``attr.s`` unchanged.
    """
    forced = dict(slots=True, auto_attribs=True)
    return attr.s(*args, **forced, **kw)
import dataclasses
def is_missing(value):
    """
    Check whether a dataclass field value is the ``MISSING`` sentinel.

    A field without a default that was never assigned holds the special
    value ``dataclasses.MISSING``. MISSING is not guaranteed to be a
    singleton and there is no official API to test for it, so compare by
    type instead of identity.
    """
    missing_type = type(dataclasses.MISSING)
    return isinstance(value, missing_type)
def health():
    """
    Health-check endpoint: always reports the service as alive.
    """
    status = 'OK'
    return status
def exb_effective_parallel_heat_convection(
    density, temperature, effective_parallel_exb_velocity, norm
):
    """
    The effective parallel heat convection due to the ExB velocity (computed as the poloidal ExB component multiplied
    by the inverse magnetic field pitch)
    """
    # 5/2 n T v_parallel,ExB — the convected heat flux.
    heat_flux = 2.5 * density * temperature * effective_parallel_exb_velocity
    # Normalisation carried on the result, converted to MW/m^2.
    flux_norm = (norm.n0 * norm.c_s0 * temperature.norm).to("MW/m^2")
    return heat_flux.assign_attrs(norm=flux_norm)
def genAliases(name):
    """
    Generates aliases for metabolite names, e.g.:

        val --> ['val', 'val', 'val-L', 'val_L', 'Val', 'Val-L', 'Val_L']

    Any '-L'/'-l' suffix is stripped first; the result is a list (which
    may contain duplicates).
    """
    base = name.replace('-L', '').replace('-l', '')
    lower = base.lower()
    cap = base.capitalize()
    return [
        base,
        lower,
        lower + '-L',
        lower + '_L',
        cap,
        cap + '-L',
        cap + '_L',
    ]
import numpy
def translateit_fast_2d(image, offset, fill_value=0):
    """
    Translate the content of an image by ``offset`` pixels along its
    first two axes, filling vacated pixels with ``fill_value``.

    :param image: numpy array of at least 3 dimensions (H, W, C assumed)
    :param offset: (shift_y, shift_x) translation in pixels; positive
        values shift toward higher indices
    :param fill_value: value written into the vacated region (the old
        implementation ignored this parameter and, for a zero row
        offset, mutated/aliased the input array)
    :return: a new array of the same shape and dtype; the input is
        never modified
    """
    shift_y, shift_x = int(offset[0]), int(offset[1])
    height, width = image.shape[0], image.shape[1]
    # Start from a fully "vacated" canvas so every uncovered pixel gets
    # fill_value (fixes the stale-data / aliasing bugs of the old code).
    newimg = numpy.full_like(image, fill_value)
    # Source/destination windows for each axis; empty when the shift
    # moves the whole image out of frame.
    src_y = slice(max(0, -shift_y), height - max(0, shift_y))
    dst_y = slice(max(0, shift_y), height - max(0, -shift_y))
    src_x = slice(max(0, -shift_x), width - max(0, shift_x))
    dst_x = slice(max(0, shift_x), width - max(0, -shift_x))
    if src_y.start < src_y.stop and src_x.start < src_x.stop:
        newimg[dst_y, dst_x, :] = image[src_y, src_x, :]
    return newimg
from datetime import datetime
def str2datetime(st):
    """Convert a ISO 8601 string to datetime object.

    Picks the parse format based on whether a fractional-seconds part
    (a '.') is present in the input.
    """
    fmt = "%Y-%m-%dT%H:%M:%S.%f" if '.' in st else "%Y-%m-%dT%H:%M:%S"
    return datetime.strptime(st, fmt)
from typing import Optional
def binary_usage(a: Optional[str], op: str, b: Optional[str], add_brackets=True):
    """
    Utility for generating usage strings for binary operators.

    ``None`` operands are dropped; when both remain the joined string is
    wrapped in brackets (unless ``add_brackets`` is False). Returns
    ``None`` when the result would be empty.
    """
    parts = [operand for operand in (a, b) if operand is not None]
    joined = op.join(parts)
    if add_brackets and len(parts) > 1:
        joined = f"[{joined}]"
    return joined or None
import re
def ocd_type_id(text, strip_leading_zeros=True):
    """
    Format a string in a way that's suitable for an OCD type ID.

    Args:
        text: String to format.
        strip_leading_zeros: Remove leading zeros from name. Default is True.
            For example, '08' would become '8'.

    Returns:
        Formatted string. See https://github.com/opencivicdata/ocd-division-ids
        for more on the Open Civic Data division identifier spec:

        * Valid characters are lowercase UTF-8 letters, numerals (0-9),
          period (.), hyphen (-), underscore (_), and tilde (~).
        * Uppercase characters should be converted to lowercase.
        * Spaces should be converted to underscores.
        * All invalid characters should be converted to tildes (~).
        * Leading zeros should be dropped unless doing so changes the
          meaning of the identifier.
    """
    # Lowercase and drop parentheses before any other substitution.
    normalized = text.lower().replace('(', '').replace(')', '')
    # Whitespace becomes underscores; anything outside the allowed
    # character set becomes a tilde.
    normalized = re.sub(r'\s', '_', normalized)
    normalized = re.sub(r'[^\w.\-~]', '~', normalized)
    if strip_leading_zeros:
        normalized = normalized.lstrip('0')
    return normalized
import itertools
def self_cross_product(it, length=2):
    """
    Return every ``length``-tuple drawn from ``it`` crossed with itself
    (the Cartesian power), as a tuple of tuples.

    >>> self_cross_product(('happy', 'feet'))
    (
        ('happy', 'happy'),
        ('happy', 'feet'),  # wombo combo
        ('feet', 'happy'),
        ('feet', 'feet')
    )
    """
    combos = itertools.product(it, repeat=length)
    return tuple(combos)
def min_for_key(key, client):
    """
    Get the minimum score for a completion on this key.

    Returns 0 when the sorted set at ``key`` is empty.
    """
    entries = client.zrange(key, 0, 0, withscores=True)
    return entries[0][1] if entries else 0
def get_index_data(computed_values, snapshot_tag_map):
"""Store new computed values in full text index table."""
data = []
for attr, objects in computed_values.iteritems():
for obj, computed_value in objects.iteritems():
value = (computed_value["value_datetime"] or
computed_value["value_string"] or "")
tags = u""
if obj[0] == "Snapshot":
tags = snapshot_tag_map.get(obj[1], u"")
data.append({
"key": obj[1],
"type": obj[0],
"tags": tags,
"property": attr.attribute_definition.name,
"content": value,
"subproperty": u"",
})
return data | 50420b3e65690596fcdcd53db06204dc6cc41977 | 31,235 |
import torch
def to_value(v):
    """
    Convert where appropriate from tensors to numpy arrays.

    Args:
        v: an object. If ``torch.Tensor``, the tensor will be converted to a numpy
            array. Else returns the original ``v``

    Returns:
        ``torch.Tensor`` as numpy arrays. Any other type will be left unchanged
    """
    is_tensor = isinstance(v, torch.Tensor)
    return v.cpu().data.numpy() if is_tensor else v
def script_js_templates():
    """
    Returns the javascript templates which are used by the scripts.

    Returns
    -------
    An empty context
    """
    context = {}
    return context
import torch
def _set_coords(num_shots, num_per_shot, nx, dx, location='top'):
    """Create an array of coordinates at the specified location.

    :param num_shots: number of shots (first output dimension)
    :param num_per_shot: sources/receivers per shot (second dimension)
    :param nx: per-dimension model size; presumably a torch tensor of
        ints, since its elements are used with ``.float()`` — TODO confirm
    :param dx: grid cell size; multiplies the index coordinates on return
    :param location: 'top', 'bottom', or 'middle' placement along dim 0
    :returns: tensor of shape (num_shots, num_per_shot, len(nx)), scaled by dx
    :raises ValueError: for an unsupported ``location``
    """
    ndim = len(nx)
    coords = torch.zeros(num_shots, num_per_shot, ndim)
    # Dim 0 gets a unique consecutive index for every entry across shots.
    coords[..., 0] = torch.arange(num_shots * num_per_shot)\
        .reshape(num_shots, num_per_shot)
    if location == 'top':
        # Indices count from the top surface; nothing to adjust.
        pass
    elif location == 'bottom':
        # Mirror the indices about the last cell of dim 0.
        coords[..., 0] = (nx[0] - 1).float() - coords[..., 0]
    elif location == 'middle':
        # Shift the indices so they start at the middle of dim 0.
        coords[..., 0] += int(nx[0] / 2)
    else:
        raise ValueError("unsupported location")
    # All remaining dimensions are centered at the model midpoint.
    for dim in range(1, ndim):
        coords[..., dim] = torch.round(nx[dim].float() / 2)
    return coords * dx | c38d05784dc802c502bb8123057fa1370dbb76a2 | 31,240
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.