| content (string, 35–416k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
|---|---|---|
def _get_resources(raml):
    """Gets relevant resources from RAML
    """
    # only dealing with "get" and "post" method resources for now
    usable_methods = ["get", "post"]
    usable_rs = [r for r in raml.resources if r.method in usable_methods]
    rs_without_resource_id = [
        r for r in usable_rs if not r.uri_params and not r.body
    ]
    rs_with_resource_id = [r for r in usable_rs if r.uri_params]
    rs_file_upload = [
        r
        for r in usable_rs
        if r.body and r.body[0].mime_type == "multipart/form-data"
    ]
    if (
        len(usable_rs) == 0
        or len(usable_rs) > 3
        or len(rs_without_resource_id) > 1
        or len(rs_with_resource_id) > 1
        or len(rs_file_upload) > 1
    ):
        raise ValueError(
            (
                "RAML must contain one to three resources with a method of '{}'. "
                "At most one resource each with and without uri parameter (resource id) "
                "or one file upload resource.\n"
                "There are {} resources with matching methods. Resources in RAML: {}"
            ).format(usable_methods, len(usable_rs), raml.resources)
        )
    res_normal = rs_without_resource_id[0] if rs_without_resource_id else None
    res_with_id = rs_with_resource_id[0] if rs_with_resource_id else None
    res_file = rs_file_upload[0] if rs_file_upload else None
    return res_normal, res_with_id, res_file
|
4fb89e549dea5e3c8222054073f37a90c0c2c40e
| 24,136
|
import requests
from bs4 import BeautifulSoup
def map_url(s):
    """Takes a url as a string and returns a word count dictionary"""
    c = {}
    r = requests.get(s)
    soup = BeautifulSoup(r.text, "html.parser")
    for script in soup(["script", "style"]):
        script.extract()
    for word in soup.get_text().split():
        word = word.strip()  # str.strip() returns a new string, so reassign it
        if word not in c:
            c[word] = 0
        c[word] += 1
    return c
|
920467e787f45baf852456648ca53aa766a08029
| 24,139
|
def split_join_text(text, n, prefix, joiner):
    """Splits the text into chunks that are n characters long, then joins them up again."""
    return joiner.join(f"{prefix}{text[i:i+n]}" for i in range(0, len(text), n))
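# Usage sketch (illustrative values, not part of the original record): chunk
# an 8-character string into 4-character pieces, prefix each with "> ", and
# join with newlines.
assert split_join_text("abcdefgh", 4, "> ", "\n") == "> abcd\n> efgh"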
|
a44c5944c6718360af287db2b2520ebd643aaf76
| 24,141
|
import os
def mkdir(path, parents=False):
    """
    parents: no error if existing, make parent directories as needed
    """
    func = os.makedirs if parents else os.mkdir
    try:
        return func(path)
    except FileExistsError:
        # a bare except would swallow unrelated errors; only an existing
        # directory is tolerated, and only when parents=True
        if not parents:
            raise
|
2981496d23779dcd05b308777a16a11677c26ee8
| 24,142
|
def linear(x, old_range, new_range):
    """
    Rescale each channel to the new range as follows:
    new = x / old_range * new_range
    Parameters
    ----------
    x : DataFrame
        data to be rescaled
    old_range : float | array | Series
        Maximal data value before rescaling
        (If old_range is not given, use the one specified in
        self.meta['_channels_']['$PnR'])
        Deprecated!!!
    new_range : float | array | Series
        Maximal data value after rescaling
    """
    y = x / old_range * new_range
    return y
|
509c6ea28c0adcbbe4da8e6f3c6cacf68a795cc9
| 24,143
|
def moving_average(input_list):
    """Compute the moving average with window size of 3
    Args:
        input_list (list of floats or ints): list of numbers
    Returns:
        list of floats: moving average with window size of 3
    Example:
        moving_average([2,3,4,5,6]) == [3.0, 4.0, 5.0]
    """
    N = 3
    output = []
    for i in range(len(input_list)):
        if i < N - 1:
            continue
        tmp_sum = 0
        for k in range(N):
            tmp_sum += input_list[i - k]
        output.append(tmp_sum / N)
    return output
|
e0412ffb9c348c60c0e471405708757cd7917cfe
| 24,145
|
def bumpVersion(oldVersion):
    """ Bump the version number. Is dumb and expects a form like '0.0';
    increments the part after the last dot by 1. """
    dot = oldVersion.rfind('.')
    prefix = oldVersion[0:dot + 1]
    decimal = int(oldVersion[dot + 1:])
    decimal += 1
    return prefix + str(decimal)
|
d52f07ec14262bd484c7158638479b5b493b5b92
| 24,147
|
import os
def png_filename(infile, label):
    """Return filename with 'label' and suffix 'png'"""
    (filename, _ending) = os.path.splitext(infile)
    return filename + "_" + label + ".png"
|
6902fcc75f51a3dd46d1dbae30286dbffe3e781b
| 24,148
|
def estimate_am_time(model):
    """ estimate the time required for analytic marching
    Args:
        model (torch.nn.Module): our specified nn module (i.e. loaded by load_model function)
    Returns:
        time (float): estimated am_time (sec.)
    """
    nodes = model.nodes
    depth = len(nodes) - 2
    width = sum(nodes[1:-1]) / depth
    a, b, c, d, e, f = 1.94452188, 0.13816182, -0.14536181, 0.59338494, -1.20459825, 1.17841059
    fn = lambda l, n: (a * n) ** (b * l + c) * (n ** d) * (l ** e) * f
    time = fn(depth, width)
    return time
|
019ae9d3ed020c080283dca6a55fbb672661768a
| 24,149
|
def resolve_package_module(module, pkg, level, default=None):
    """Returns a 2-tuple of package and module name, even for relative
    imports
    """
    if level == 0:
        p, _, m = module.rpartition(".")
    elif level == 1:
        p = pkg
        m = module or default
    else:
        # relative imports deeper than one level (level >= 2) are not resolved
        p = m = None
    return p, m
|
07495144e6f52526e484d381f77fa36ff5733898
| 24,150
|
def keypoints_scale(keypoints, scale_x, scale_y):
    """Scales keypoints by scale_x and scale_y.
    Args:
        keypoints (numpy.ndarray): Array of keypoints with rows `(x, y, angle, scale)`.
        scale_x (float): Scale coefficient for the x-axis.
        scale_y (float): Scale coefficient for the y-axis.
    Returns:
        The keypoints array with the `(x, y)` columns scaled.
    """
    keypoints[:, :2] = keypoints[:, :2] * (scale_x, scale_y)
    return keypoints
|
c5c8deae6ab834eb966889c971575749145ba671
| 24,152
|
import os
def checkversion(GEOS_dir):
    """check geos C-API header file (geos_c.h)"""
    try:
        f = open(os.path.join(GEOS_dir, 'include/geos_c.h'))
    except IOError:
        return None
    geos_version = None
    with f:  # ensure the header file is closed after scanning
        for line in f:
            if line.startswith('#define GEOS_VERSION'):
                geos_version = line.split()[2]
    return geos_version
|
2da5698d2fefaa7d80fbe7f028f8335dcab0afbd
| 24,154
|
def checkElementInArrayLast(array: list, x: int, n: int = 0):
    """
    Check for the element `x` in the list `array`. If `x` occurs
    multiple times, return the index of its last occurrence, using
    recursion and starting from index n=0. Returns -1 if absent.
    """
    if len(array) == n:
        return -1
    lastIndex = checkElementInArrayLast(array, x, n + 1)
    if lastIndex == -1:
        if array[n] == x:
            return n
        else:
            return -1
    else:
        return lastIndex
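# Illustrative checks (assumed inputs, not from the original record): the
# last occurrence of 2 in [1, 2, 3, 2] is at index 3; a missing element
# yields -1.
assert checkElementInArrayLast([1, 2, 3, 2], 2) == 3
assert checkElementInArrayLast([1, 2, 3], 9) == -1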
|
df7fe7261f1443b2ce739ff062ab6a767e3d2491
| 24,155
|
def get_users_as_list(users):
    """
    Retrieves all Users as a list.
    """
    return [(user.pk, '{}: {}'.format(user.username, user.get_full_name())) for user in users]
|
41fd27f6988c7327bafcd3cdc518dd6ffbfb00ff
| 24,156
|
import re
import os
def resolve_paths(libs, excludes):
    """
    Ensure all paths are realpaths -- includes soft-link resolution even
    though this is typically not needed
    """
    rplibs = []
    rplinks = {}
    for lib in libs:
        skip = False
        for exclude in excludes:
            if re.search(exclude, lib) is not None:
                print("--> excluding '{}' matching regex '{}'".format(lib, exclude))
                skip = True
                break
        if skip:
            continue
        reallib = os.path.realpath(lib)
        rplibs.append(reallib)
        if reallib != lib:
            lname = os.path.basename(lib)
            ltarget = os.path.basename(reallib)
            rplinks[ltarget] = lname
    return rplibs, rplinks
|
267f15e782028563cc93f6e5dfa7c73ec9cf72b3
| 24,157
|
import socket
def get_container_hostname(device):
    """Maps a device to its container hostname."""
    this_host = socket.gethostname().split('.')[0]
    if device.physical_port is not None:
        return '%s--device%d' % (this_host, device.physical_port)
    else:
        return '%s--%s' % (this_host, device.serial)
|
c1ee92ddc7dc2fd310b56a657098f42e9645abd5
| 24,158
|
def setActiveTab(session_a_tab):
    """Determines what tab should be open initially"""
    # session values map onto tab keys; 'supplier' selects the 'suppliers' tab
    key_map = {'supplier': 'suppliers', 'p_agg': 'p_agg',
               'invoices': 'invoices', 'hold': 'hold'}
    active = key_map.get(session_a_tab, 'orders')  # <-- 'orders' is the default tab
    return {tab: tab == active
            for tab in ('orders', 'p_agg', 'suppliers', 'invoices', 'hold')}
|
67d6eb421b76507271eb43464697fb9b4a104e2d
| 24,160
|
def getNode(head, positionFromTail):
    """Function that gets the node information as specified from the position from the tail of the list
    Args:
        head (SinglyLinkedListNode): The head of a linked list to retrieve data from
        positionFromTail (int): The integer position from the tail of the list which we want to retrieve data from
    Returns:
        (int): The data for the specified node
    """
    counter = 0
    traverse = head
    while traverse.next is not None:
        counter += 1
        traverse = traverse.next
    pos = counter - positionFromTail
    for i in range(pos):
        head = head.next
    return head.data
|
d131c0b391cb6867744653e3ae94593ddf63cc7a
| 24,161
|
def object_list_check_any_has_attribute(object_list, attr_name):
    """
    check if any object in the list has the attribute.
    """
    return any(hasattr(obj, attr_name) for obj in object_list)
|
cac90dab958b4bfc25493a2dba86b72454230222
| 24,162
|
def notfound_view(request):
    """Will route people to the 404 not-found page."""
    request.response.status = 404
    return {}
|
4b7355cc759ce0fb208be56a88b9a5f007351f4c
| 24,163
|
def _ppos_to_cpos(_, start, end=None):
    """Compute the equivalent CDS position for a protein (amino acid) position.
    Args:
        start (int): amino acid position
        end (int): optional, second amino acid position
    Returns:
        tuple of int: CDS positions of the first and last base of the codon range
    """
    convert = lambda x: (x - 1) * 3 + 1
    cstart = convert(start)
    # add 2 to the end position to get the end of the codon
    if end:
        cend = convert(end) + 2
    else:
        cend = cstart + 2
    return cstart, cend
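# Worked example (illustrative values; the unused first argument mirrors the
# original signature): amino acid 2 occupies CDS bases 4 through 6, and the
# range 1-3 spans bases 1 through 9.
assert _ppos_to_cpos(None, 2) == (4, 6)
assert _ppos_to_cpos(None, 1, 3) == (1, 9)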
|
fe7bf74bc918c1d50550bfe91c8a395a58cbb183
| 24,164
|
def file_to_array(file_name):
    """Read file and extract lines to list.
    Parameters
    ----------
    file_name : str
        Path of file to read.
    Returns
    -------
    array : list
        List of lines contained in file.
    """
    with open(file_name, 'r') as f:
        array = [line.strip() for line in f]
    array = [line for line in array if line]
    # '\#' is an invalid escape sequence and a regex is unnecessary here
    array = [line for line in array if not line.startswith('#')]
    return array
|
762e96b2098d6a5435af549d2a72c3033d12ae33
| 24,166
|
def dotProduct(D1, D2):
    """
    Returns the dot product of two documents
    """
    Sum = 0.0
    for key in D1:
        if key in D2:
            Sum += (D1[key] * D2[key])
    return Sum
|
d3d81029a85d34269de3454511a193aa8e433370
| 24,167
|
def normal_from_lineseg(seg):
    """ Returns a normal vector with respect to the given line segment. """
    start, end = seg
    x1, y1 = start
    x2, y2 = end
    dx = x2 - x1
    dy = y2 - y1
    return (dy, -dx)
|
a64d18a9bd82ee8a7f28ac96f84403bac4ea2981
| 24,168
|
def escape_backslash(s: str) -> str:
    """Replaces any \\ character with \\\\"""
    return s.replace('\\', '\\\\')
|
e185c40d4cfe52eeed77e5fab2ee40a15c6a760c
| 24,169
|
def is_collection(v):
    """
    Decide if a variable contains multiple values and therefore can be
    iterated, discarding strings (single strings can also be iterated, but
    shouldn't qualify)
    """
    # The 2nd clause is superfluous in Python 2, but (maybe) not in Python 3
    # Therefore we use 'str' instead of 'basestring'
    return hasattr(v, '__iter__') and not isinstance(v, str)
|
e51ee293566e0be9f7143524abb055da0e35671e
| 24,170
|
def format_node(cluster_name, node):
    """Formats a string representation of a node."""
    return '<{0}[{1}]>'.format(cluster_name, node)
|
7a8b35dd2d8845d3fdf9484769d8ee2db819aaa6
| 24,171
|
def ts_since_two_per_country(df):
    """
    Takes a global per-country time series and extracts a time series
    for each country starting from when the value exceeds 2.
    Parameters
    ----------
    df : Pandas Dataframe
        Dataframe containing one time series per country.
    Returns
    -------
    countries: list of Dataframes, one per country
    """
    df = df.loc[:, ~df.columns.duplicated()]
    countries = [df[df[country] > 2][[country]] for country in df.columns]
    return countries
|
4076a97958d1556daf261f9ee9f98e6ecbabdcdb
| 24,172
|
import hashlib
def sha256(s):
    """ Return the SHA256 HEX digest related to the specified string.
    """
    m = hashlib.sha256()
    m.update(bytes(s, "utf-8"))
    return m.hexdigest()
|
e868e345cf942127c1bbd0b56402d3a007bb8ff6
| 24,173
|
def ListaAnadirSinRepetir(primera_lista, segunda_lista):
    """
    Merges two lists without repeating elements.
    Parameters
    ----------
    primera_lista : [], required
        List with the first elements.
    segunda_lista : [], required
        List with the second elements.
    Returns
    -------
    []
        Merged list without repeated elements.
    """
    en_primera = set(primera_lista)
    en_segunda = set(segunda_lista)
    en_segunda_pero_no_en_primera = en_segunda - en_primera
    return primera_lista + list(en_segunda_pero_no_en_primera)
|
d9a28bf154e9702184e94ff19e14d9c3e387c200
| 24,174
|
def zusammensetzen(int1, int2):
    """Concatenates two numbers and returns the combined number"""
    str1 = str(int1)
    str2 = str(int2)
    return int(str1 + str2)
|
d2b6cb963ba8e10f71228de276256053c7ab6a6e
| 24,176
|
def get_glyph_horizontal_advance(font, glyph_id):
    """Returns the horiz advance of the glyph id."""
    hmtx_table = font['hmtx'].metrics
    adv, lsb = hmtx_table[glyph_id]
    return adv
|
75af73d3dd824391c9058d501fa0883ebdce8bcf
| 24,177
|
import json
def assignment_string(lhs, rhs):
    """
    inputs:
        lhs: name of variable to be assigned value
        rhs: python object that will be assigned
    returns a string
    """
    if isinstance(rhs, bool):
        rhs_str = "True" if rhs else "False"
    else:
        rhs_str = json.dumps(rhs)
    return f"{lhs} = {rhs_str}\n"
|
e15f045e8a1583c15d4657075cf997e9987f362a
| 24,178
|
import os
def _override_where():
    """Helper function"""
    # change this to match the location of cacert.pem
    return os.path.abspath(
        os.path.join("venv", "Lib", "site-packages", "certifi", "cacert.pem")
    )
|
7e3757b5f7761822c34afeb4c1dbd0c036bed166
| 24,181
|
def mp_tmpdir(tmpdir):
    """
    For multiprocessing, sharing the same tmpdir across all processes
    """
    return tmpdir.make_numbered_dir()
|
caa7b311b9310043b5bf238c2a04ae3d200f6afc
| 24,183
|
def _is_array(data):
    """Return True if object implements all necessary attributes to be used
    as a numpy array.
    :param object data: Array-like object (numpy array, h5py dataset...)
    :return: boolean
    """
    # add more required attributes if necessary
    for attr in ("shape", "dtype"):
        if not hasattr(data, attr):
            return False
    return True
|
72e8225d9fa74ac31f264d99764d94c62013cb06
| 24,185
|
def unify_size(h, w, ms):
    """
    Rescale so that the longest side matches a standard size.
    :h  height
    :w  width
    :ms target size for the longest side
    """
    # scale the longest side to the standard size
    if w > h:
        r = ms / w
    else:
        r = ms / h
    h = int(h * r)
    w = int(w * r)
    return h, w
|
92236acbb0b71e8a4e4d0fd17b7daa5527ab3e64
| 24,186
|
from typing import Iterator
def deepmap(func, *seqs):
    """ Apply function inside nested lists
    >>> inc = lambda x: x + 1
    >>> deepmap(inc, [[1, 2], [3, 4]])
    [[2, 3], [4, 5]]
    >>> add = lambda x, y: x + y
    >>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
    [[11, 22], [33, 44]]
    """
    if isinstance(seqs[0], (list, Iterator)):
        return [deepmap(func, *items) for items in zip(*seqs)]
    else:
        return func(*seqs)
|
4a3ee7d15b150cc3cbf1ee22b254c224783ecece
| 24,187
|
def update_AcmeOrderless_deactivate(ctx, dbAcmeOrderless):
    """
    `deactivate` should mark the order as:
        `is_processing = False`
    """
    dbAcmeOrderless.is_processing = False
    dbAcmeOrderless.timestamp_updated = ctx.timestamp
    ctx.dbSession.flush(objects=[dbAcmeOrderless])
    return True
|
4bed69bf7f0bb5870c7e8300dab3327f58c49142
| 24,190
|
import logging
def get(name=""):
    """
    Get the specified logger
    :param name: the name of the logger
    :return: the logger
    """
    return logging.getLogger(f"wafflebot{'.' + name if name else ''}")
|
8102a2f55d4b9badf290fec68d63e11434bcd45d
| 24,191
|
import torch
def val_loss(model, valid_dl, loss_fn, use_gpu=False):
    """
    Controls validation loop for MatchNN
    :param model: pytorch model
    :param valid_dl: pytorch dataloader on top of MatchDataset
    :param loss_fn: loss function
    :param use_gpu: boolean for whether or not GPU will be used
    :return: list with loss, precision and recall
    """
    model.eval()
    total = 0
    sum_loss = 0
    false_positives = 0
    true_positives = 0
    false_negatives = 0
    with torch.no_grad():  # gradients are not needed during validation
        for x1, x2, y in valid_dl:
            if use_gpu:
                x1, x2, y = x1.cuda(), x2.cuda(), y.cuda()
            current_batch_size = y.shape[0]
            out = model(x1, x2)
            loss = loss_fn(out, y)
            sum_loss += current_batch_size * loss.item()
            total += current_batch_size
            pred = torch.round(out)
            true_positives += (pred[y == 1] == 1).float().sum().item()
            false_positives += (pred[y == 0] == 1).float().sum().item()
            false_negatives += ((y == 1).float().sum().item()) - (pred[y == 1] == 1).float().sum().item()
    precision = true_positives / max(1, (true_positives + false_positives))
    recall = true_positives / max(1, (true_positives + false_negatives))
    print("valid loss %.3f, precision %.3f, recall %.3f" % (sum_loss / total, precision, recall))
    return sum_loss / total, precision, recall
|
7f8352ede299577f63c89ff0dd357f0b5cf9b838
| 24,192
|
def sort(table_df, col_key, descending=False):
    """ sort a table by column
    """
    return table_df.sort_values(by=col_key, ascending=not descending)
|
10897c760e303bd7cfb98a84095666cf19534084
| 24,193
|
def fmt_percent(x, total):
    """ Compute percent as x/total, format for a table cell. """
    if total == 0:
        percent = 0
    else:
        percent = float(x) / total * 100
    return str(round(percent, 2)) + '%'
|
0346a40b9c6bbf5223829da0db39ce3585f0c730
| 24,194
|
import subprocess
import shlex
def syn_tree(text):
    """
    Takes text, processes it with the Stanford syntactic parser, and returns the generated trees.
    If the text consists of a single sentence, a single tree is returned.
    """
    with open('/Users/erick/B2W/IntelligentSearch/temp/tokenized_query.txt', 'w') as temp:
        temp.write(text)
    command_LX_Parser = '''java -Xmx256m -cp /Users/erick/B2W/IntelligentSearch/LX_Parser/stanford-parser-2010-11-30/stanford-parser.jar edu.stanford.nlp.parser.lexparser.LexicalizedParser -tokenized -sentences newline -outputFormat oneline -uwModel edu.stanford.nlp.parser.lexparser.BaseUnknownWordModel /Users/erick/B2W/IntelligentSearch/LX_Parser/stanford-parser-2010-11-30/cintil.ser.gz /Users/erick/B2W/IntelligentSearch/temp/tokenized_query.txt'''
    with open('/Users/erick/B2W/IntelligentSearch/temp/parsed_query.txt', 'w') as file_out:
        p = subprocess.call(shlex.split(command_LX_Parser), stdout=file_out)
    with open('/Users/erick/B2W/IntelligentSearch/temp/parsed_query.txt', 'r') as syntax_trees:
        trees = syntax_trees.readlines()
    return trees
|
77c735497747a48ccd6aaacff35ed5f455dc89d8
| 24,199
|
def add_columns(filtered_df):
    """This function adds additional columns for TITEL, BETREUER, VORTRAG for each student with a fixed place, the details can be manually filled after the Master Sheet is generated"""
    print("adding additional columns for TITEL, BETREUER, VORTRAG")
    extended_df = filtered_df.assign(TITEL='', BETREUER='', VORTRAG='')
    return extended_df
|
322d896a84dac419279a67be7733734e1872a1d3
| 24,200
|
def success_response(data):
    """ When an API call is successful, this function is used as a simple envelope for the results,
    using the data key, as in the following:
    {
        status : "success",
        data : {
            "posts" : [
                { "id" : 1, "title" : "A blog post", "body" : "Some useful content" },
                { "id" : 2, "title" : "Another blog post", "body" : "More content" },
            ]
        }
    }
    required keys:
    status: Should always be set to "success".
    data: Acts as the wrapper for any data returned by the API call. If the call returns no data, data should be set to null.
    """
    return {'status': 'success', 'data': data}
|
26ca231fcb9e0204c80307b7eebf03b434faca70
| 24,201
|
def solve(n, ar):
    """
    Given an integer array ar of size n, return the decimal fraction of the
    number of positive numbers, negative numbers and zeroes. Test cases are
    scaled to 6 decimal places.
    """
    num_pos = 0
    num_neg = 0
    num_zero = 0
    for i in ar:
        if i > 0:
            num_pos += 1
        elif i < 0:
            num_neg += 1
        else:
            num_zero += 1
    # Return the ratios but cast to float to ensure preservation of precision.
    return (float(num_pos) / float(n),
            float(num_neg) / float(n),
            float(num_zero) / float(n))
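# Worked example (assumed input, not from the original record): of the six
# values below, 3 are positive, 2 negative, and 1 zero.
assert solve(6, [-4, 3, -9, 0, 4, 1]) == (3 / 6, 2 / 6, 1 / 6)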
|
b61b179c357dcf70ac887af20588324624a84ea3
| 24,202
|
from datetime import datetime
def datetime_f(dttm):
    """Formats datetime to take less room when it is recent"""
    if dttm:
        dttm = dttm.isoformat()
        now_iso = datetime.now().isoformat()
        if now_iso[:10] == dttm[:10]:
            dttm = dttm[11:]
        elif now_iso[:4] == dttm[:4]:
            dttm = dttm[5:]
    return "<nobr>{}</nobr>".format(dttm)
|
00f24b17b4c38e04ae046cfab77b107aa32a56af
| 24,203
|
from collections import Counter
def get_timeline_stats(timeline):
    """
    :type timeline list[now_playing_graph.timeline.TimelineEntry]
    :rtype: dict
    """
    top_artists = Counter()
    top_songs = Counter()
    longest_songs = dict()
    longest_artists = Counter()  # artist whose songs are played for the longest time
    for entry in timeline:
        top_artists.update((entry.artist_name,))
        top_songs.update((entry.song_title,))
        if entry.song_title not in longest_songs:
            longest_songs[entry.song_title] = entry.duration
        longest_artists += Counter({entry.artist_name: entry.duration})
    return dict(
        top_artists=top_artists.most_common(10),
        top_songs=top_songs.most_common(10),
        longest_songs=Counter(longest_songs).most_common(10),
        longest_artists=Counter(longest_artists).most_common(10),
    )
|
3a441c2bf1d5e9194e7ba65c1f24fb774134debb
| 24,204
|
def create_phone_number_v2(n):
    """
    Turns an array of integers into phone number form.
    :param n: an array of integers.
    :return: numbers in phone number form.
    """
    return "({}{}{}) {}{}{}-{}{}{}{}".format(*n)
|
659daecdbb8a0e8a1f02826c35fc26eadef14598
| 24,205
|
def isInSequence(word):
    """
    Checks if the string passed is a sequence of digits with a constant step (e.g. "369")
    """
    if len(word) < 3:
        return False
    increment = int(word[0]) - int(word[1])
    for i in range(len(word) - 2):
        if int(word[i + 1]) - int(word[i + 2]) != increment:
            return False
    return True
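# Illustrative checks (assumed inputs): "369" steps by a constant 3, so it
# qualifies; "964" steps by 3 then 2, so it does not.
assert isInSequence("369") is True
assert isInSequence("964") is False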
|
d4749bc5cd656ec2fd2886743ad1848062d3e1db
| 24,206
|
import sys
from collections import OrderedDict
def is_ordered_dict(d):
    """
    Predicate checking for ordered dictionaries. OrderedDict is always
    ordered, and vanilla Python dictionaries are ordered for Python 3.6+
    """
    py3_ordered_dicts = (sys.version_info.major == 3) and (sys.version_info.minor >= 6)
    vanilla_odicts = (sys.version_info.major > 3) or py3_ordered_dicts
    return isinstance(d, OrderedDict) or (vanilla_odicts and isinstance(d, dict))
|
2a5684a54a83b816337a622e68c7fac73e3d36c0
| 24,208
|
def get_file_id(string):
    """
    Returns file_id from an information string like Ferrybox CMEMS: <file_id>
    :param string:
    :return:
    """
    return string.split(':')[-1].strip()
|
a5e58147e4a6e08298ecb6cd34614cff99e7bdac
| 24,209
|
def excel_cache(df, name='summary'):
    """outfile = excel_cache(tr.summary, 'summary'); ! open $outfile"""
    outfile = f'{name}.xlsx'
    df.to_excel(outfile, index=None)
    return outfile
|
d54691205221f311c30eb60b025d2a05e3b826ec
| 24,210
|
def make_ticks_int(tick_list):
    """
    Converts axis ticks to integers.
    :param tick_list: Iterable of the axis ticks to be converted. Should be sorted in the order they shall be put on \
        the axis.
    :return:
    """
    return [int(tick) for tick in tick_list]
|
84eeb8cd31724c5c849233586709937506cfe464
| 24,211
|
from datetime import datetime
import calendar
def month_bounds(date=None):
    """
    Return month start and end datetimes
    """
    if date is None:
        date = datetime.now()
    # monthrange returns (weekday of the 1st, day count); the first value is
    # a weekday, not a day number -- a month always starts on day 1
    _, lastday = calendar.monthrange(date.year, date.month)
    start = datetime(date.year, date.month, 1, 0, 0, 0)
    end = datetime(date.year, date.month, lastday, 23, 59, 59)
    return (start, end)
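# Quick check of the fixed behavior (illustrative, not from the original
# record): February 2021 runs from the 1st through the 28th.
start, end = month_bounds(datetime(2021, 2, 15))
assert (start.day, start.hour) == (1, 0)
assert (end.day, end.hour, end.minute, end.second) == (28, 23, 59, 59)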
|
f007bbea414f4ea5756c22ba9228ff74e93a5a09
| 24,212
|
def config_section(config_dict):
    """A section with test data"""
    return config_dict.gps
|
fa0181bafcb19260bd95c4ad63717a557c33fb55
| 24,213
|
def get_job_result(ibs, jobid):
    """
    Web call that returns the result of a job
    """
    result = ibs.job_manager.jobiface.get_job_result(jobid)
    return result
|
ee3481b3625c241382c0a4fc3f948dffb225945f
| 24,214
|
def mean_abs_deviation(data, axis=0):
    """
    Compute the MAD (mean absolute deviation)
    """
    return ((data - data.mean(axis=axis)).abs()).mean(axis=axis)
|
dfeb17e4b9290d59ddf8361233453019e1061713
| 24,215
|
import pickle
def load_model(filepath="models/"):
    """Load model into variable
    Args:
        filepath (string): path to model (e.g. models/LinRegression.sav)
    """
    with open(filepath, "rb") as f:  # close the file handle after loading
        loaded_model = pickle.load(f)
    return loaded_model
|
169ed225a45cd403f9b4f500c68ae2d27cce9816
| 24,216
|
from typing import List
import os
def extract_additional_words(file_name: str) -> List[str]:
    """
    Extracts words from an additional text file for the purpose of
    extending the lexicon to words for which there is no sound data.
    :param file_name: the name of the file to extract words from.
    :return: a list of words
    """
    words = []
    if os.path.exists(file_name):
        with open(file_name, "r") as f:
            print(f"Extracting additional words from {file_name}")
            for line in f.readlines():
                new_words = line.strip().split(" ")
                words += [word for word in new_words]
    else:
        print(f"WARNING: Additional word list file at {file_name} does not exist, skipping!")
    return words
|
6eb0805195fe4777fcaf6eb85cacb1452bf223ed
| 24,218
|
import re
def enum_calculate_value_string(enum_value):
    """
    Calculate the value of the enum, even if it does not have one explicitly
    defined.
    This looks back for the first enum value that has a predefined value and then
    applies addition until we get the right value, using C-enum semantics.
    Args:
        enum_value: an EnumValue node with a valid Enum parent
    Example:
        <enum>
            <value>X</value>
            <value id="5">Y</value>
            <value>Z</value>
        </enum>
        enum_calculate_value_string(X) == '0'
        enum_calculate_value_string(Y) == '5'
        enum_calculate_value_string(Z) == '6'
    Returns:
        String that represents the enum value as an integer literal.
    """
    enum_value_siblings = list(enum_value.parent.values)
    this_index = enum_value_siblings.index(enum_value)
    def is_hex_string(instr):
        return bool(re.match('0x[a-f0-9]+$', instr, re.IGNORECASE))
    base_value = 0
    base_offset = 0
    emit_as_hex = False
    this_id = enum_value_siblings[this_index].id
    while this_index != 0 and not this_id:
        this_index -= 1
        base_offset += 1
        this_id = enum_value_siblings[this_index].id
    if this_id:
        base_value = int(this_id, 0)  # guess base
        emit_as_hex = is_hex_string(this_id)
    if emit_as_hex:
        return "0x%X" % (base_value + base_offset)
    else:
        return "%d" % (base_value + base_offset)
|
b6df34ea221cd485f1ca730192bc1fda3fa34a98
| 24,221
|
import os
def get_video_names(containing_folder="./AlgonautsVideos268_All_30fpsmax"):
    """
    Returns a list of all video names
    """
    video_names = []
    for file in os.listdir(containing_folder):
        if file.endswith(".mp4"):
            video_names.append(file)
    # remove file extension
    for i in range(len(video_names)):
        video_names[i] = video_names[i][:-4]
    return video_names
|
13d89a9f17f5600cf7bc6397010ce1a4513af555
| 24,222
|
def mean_annual_hydrograph(ds):
    """Return a graphic showing the discharge simulations and observations."""
    basin_name = ds.basin_name.values[0]  # selected basin name
    mq_sim = ds.q_sim.groupby("time.dayofyear").mean()
    g = mq_sim.hvplot.line(
        x="dayofyear",
        line_width=1.5,
        label="Mean simulation",
        ylabel="Mean streamflow (m³/s)",
        xlabel="Day of year",
        title=basin_name,
    )
    # Plot the observed streamflows if available
    if hasattr(ds, "q_obs"):
        mq_obs = ds.q_obs.groupby("time.dayofyear").mean()
        g *= mq_obs.hvplot.line(
            x="dayofyear", line_width=2, color="k", label="Mean observations"
        )
    return g
|
e13432f5b1d47540906e8db4ab369f51a352e777
| 24,223
|
def get_parent_column_ref(table_name, table_ref, schema, relation="child"):
"""
Get column name where the child table is being referenced
Returns column name
"""
table = schema.ovs_tables[table_name]
for column_name, reference in table.references.iteritems():
if reference.ref_table == table_ref and reference.relation == relation:
return column_name
|
fe032a61a1e347164585b6065fb701ff2373bb85
| 24,224
|
def _is_gs_offloader(proc):
    """Return whether proc is a gs_offloader process."""
    cmdline = proc.cmdline()
    return (len(cmdline) >= 2
            and cmdline[0].endswith('python')
            and cmdline[1].endswith('gs_offloader.py'))
|
d8a8bd1ec03bcdef05b683e2e7cb88b4521de0ab
| 24,225
|
import os
def getTopPathNotExist(topDirPath):
    """
    function: find the top path to be created
    output: tmpDir
    """
    tmpDir = topDirPath
    while True:
        # find the top path to be created
        (tmpDir, topDirName) = os.path.split(tmpDir)
        if os.path.exists(tmpDir) or topDirName == "":
            tmpDir = os.path.join(tmpDir, topDirName)
            break
    return tmpDir
|
af6d507e21ea0b615fb6934b4d5fa56fa474a006
| 24,227
|
from typing import List
def split_annotated_data(input_sentences: List[str], desired_output_sentences: List[str],
                         split_point: int = 120):
    """
    Splits data into training and validation part.
    Task: Filter out the correct sentences from a list of input sentences.
    Input: input_sentences is such a set of input sentences
    Input: desired_output_sentences are those among the input sentences which
        should NOT be filtered out, i.e. which survive the filtering.
    Input: split_point: The index where the dataset should be split into two.
    """
    train_input = input_sentences[:input_sentences.index(desired_output_sentences[split_point])]
    train_output = desired_output_sentences[:split_point]
    valid_input = input_sentences[input_sentences.index(desired_output_sentences[split_point]):]
    valid_output = desired_output_sentences[split_point:]
    return train_input, train_output, valid_input, valid_output
|
f34352984e419f3ddf09aded1bf5452d61220bc8
| 24,228
|
import os
def os_mkdir(dir_path, name):
    """
    Create a directory; do nothing if it already exists.
    Returns the resulting directory path.
    :param dir_path:
    :param name:
    :return: your dir path
    """
    path = os.path.join(dir_path, name)
    if not os.path.exists(path):
        os.makedirs(path)
    return path
|
0a0bbe06bcc350ed5c87605105d29269baccf8bf
| 24,230
|
def drop_features(df, to_drop):
    """Drop unwanted features
    Parameters
    ----------
    df : pandas dataframe
    to_drop : array with names of features to be dropped
    Returns
    -------
    Original dataframe with all original features but those in to_drop
    """
    return df.drop(to_drop, axis=1)
|
30d6c270a7c4ac2c63a9472362b844a11eb5c119
| 24,232
|
def RGBtoHSL(r, g, b):
    """
    Function manually written to convert RGB to HSL (not used anywhere)
    """
    r, g, b = r / 255, g / 255, b / 255
    min_channel = min(r, g, b)
    max_channel = max(r, g, b)
    delta = max_channel - min_channel
    hue, saturation, lightness = 0, 0, 0
    # Calculating hue
    if delta == 0:
        hue = 0
    elif max_channel == r:
        hue = ((g - b) / delta) % 6
    elif max_channel == g:
        hue = (b - r) / delta + 2
    else:
        hue = (r - g) / delta + 4
    hue = int(hue * 60)
    if hue < 0:
        hue += 360
    # Calculating lightness
    lightness = (max_channel + min_channel) / 2
    # Calculating saturation
    if delta == 0:
        saturation = 0
    else:
        saturation = delta / (1 - abs(2 * lightness - 1))
    saturation = abs(int(saturation * 100))
    lightness = abs(int(lightness * 256))
    return hue, saturation, lightness
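# Sanity check (illustrative): pure red has hue 0 and full saturation; note
# this function scales lightness to 0-256 rather than the usual 0-100.
assert RGBtoHSL(255, 0, 0) == (0, 100, 128)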
|
b0a87e6fc705a5adb20783f4dd747fb9455f1008
| 24,233
|
def alt_file_map_xxx():
    """
    Sample alternate file map.
    """
    return """
abc 123xxxMY NEW FILE 1
xyz 999xxxMY NEW FILE 2
"""
|
d4ef3d5f45c2bb3502e564411bfe953043f03c93
| 24,234
|
def first(collection, callback):
    """
    Find the first item in collection that, when passed to callback, returns
    True. Returns None if no such item is found.
    """
    return next((item for item in collection if callback(item)), None)
|
ebecfb1b4264a17dc24c4aeedb8c987e9ddde680
| 24,235
|
import numpy
def _split_cmc_scores(
    score_lines,
    real_id_index,
    probe_name_index=None,
    claimed_id_index=0,
    score_index=-1,
):
    """Takes the output of :py:func:`four_column` or :py:func:`five_column` and return cmc scores."""
    if probe_name_index is None:
        probe_name_index = real_id_index + 1
    # extract positives and negatives
    pos_dict = {}
    neg_dict = {}
    # read four column list
    for line in score_lines:
        which = pos_dict if line[claimed_id_index] == line[real_id_index] else neg_dict
        probe_name = line[probe_name_index]
        # append score
        if probe_name not in which:
            which[probe_name] = []
        which[probe_name].append(line[score_index])
    # convert to lists of tuples of ndarrays (or None)
    probe_names = sorted(set(neg_dict.keys()).union(set(pos_dict.keys())))
    # get all scores in the desired format
    return [
        (
            numpy.array(neg_dict[probe_name], numpy.float64)
            if probe_name in neg_dict
            else None,
            numpy.array(pos_dict[probe_name], numpy.float64)
            if probe_name in pos_dict
            else None,
        )
        for probe_name in probe_names
    ]
|
778660855ed28049692ee91460344e2a5b073092
| 24,236
|
def reverse(x):
    """
    :type x: int
    :rtype: int
    """
    if x == 0:
        return 0
    flag = 0
    if x < 0:
        x = -x
        flag = 1
    a_ = 0
    ans = []
    Max = 0x7fffffff  # 2**31 - 1, the largest 32-bit *signed* integer
    while True:
        m = divmod(x, 10)
        x = m[0]
        ans.append(m[1])
        if x == 0:
            break
    # strip leading zeros of the reversed number
    while ans[0] == 0:
        ans.remove(ans[0])
    b = len(ans)
    c = 1
    for i in ans:
        a_ += i * (10 ** (b - c))
        c += 1
    if a_ > Max:
        return 0
    if flag:
        a_ = -a_
    return a_
|
166e5ebad1dc40d8d6b49b62c0e96f063d7c90fb
| 24,239
|
import re
def _MakeXMLSafe(s):
    """Escapes <, >, and & from s, and removes XML 1.0-illegal chars."""
    # Note that we cannot use cgi.escape() here since it is not supported by
    # Python 3.
    s = s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
    # Remove characters that cannot appear in an XML 1.0 document
    # (http://www.w3.org/TR/REC-xml/#charsets).
    #
    # NOTE: if there are problems with current solution, one may move to
    # XML 1.1, which allows such chars, if they're entity-escaped (&#xHH;).
    s = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f\x80-\xff]', '', s)
    # Convert non-ascii characters to entities. Note: requires python >=2.3
    s = s.encode('ascii', 'xmlcharrefreplace')  # e.g. u'\xce\x88' -> '&#206;&#136;'
    return s
|
d64a0c51d79f2384fd14dae8c46ef26fc49c888c
| 24,240
|
def deg_to_arcmin(angle: float) -> float:
    """
    Convert degrees to arcminutes.
    Args:
        angle: Angle in units of degrees.
    Returns:
        Angle in units of arcminutes.
    """
    return float(angle) * 60.
|
2c075362f4163cb70587eb9fac69ef669c337e2d
| 24,241
|
def load_to_array(name):
    """
    reads into list of lists - should use AIKIF cls_datatable
    """
    arr = []
    with open(name, 'r') as f:
        for row in f:
            if row:
                arr.append([r.strip('\n').strip('"') for r in row.split(',')])
    return arr
|
b9e40fdbfaf99ec6b6858166ce0ad9d7cdbd7a07
| 24,242
|
def _get_datetime_beginning_of_day(dt):
    """
    Truncates hours, minutes, seconds, and microseconds to zero on given datetime.
    """
    return dt.replace(hour=0, minute=0, second=0, microsecond=0)
|
12ddcaed68db08740e4edc851a299aa08c23f91c
| 24,243
|
def _llvm_get_formatted_target_list(repository_ctx, targets):
    """Returns a list of formatted 'targets': a comma separated list of targets
    ready to insert in a template.
    Args:
        repository_ctx: the repository_ctx object.
        targets: a list of supported targets.
    Returns:
        A formatted list of targets.
    """
    fmt_targets = []
    for target in targets:
        fmt_targets.append(' "' + target + '",')
    return "\n".join(fmt_targets)
|
85b53fe4c72202c3fe7cee7a21fa0814b80598ed
| 24,244
|
def catdog(char_list):
    """Solution to exercise P-1.29.
    Write a Python program that outputs all possible strings formed by using
    the characters c , a , t , d , o , and g exactly once.
    --------------------------------------------------------------------------
    Solution:
    --------------------------------------------------------------------------
    A permutation of n elements without repetition has n! possible
    permutations. 'Catdog' should have 6! = 720 unique permutations. Use
    recursion to swap characters and produce every possible permutation.
    """
    str_container = []
    n = len(char_list)
    def permutations(chars, step=0):
        if step == n:
            str_container.append("".join(chars))
        for idx in range(step, n):
            c_copy = chars.copy()  # Pass copy to avoid swapping same list
            c_copy[idx], c_copy[step] = c_copy[step], c_copy[idx]
            permutations(c_copy, step + 1)
    permutations(char_list)
    return str_container
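# Quick check (illustrative): six distinct characters yield 6! = 720
# permutations, all unique, including the identity ordering.
perms = catdog(list("catdog"))
assert len(perms) == 720 and len(set(perms)) == 720
assert "catdog" in perms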
|
2f32b793b4657c39f5e2c599242cc79995eb18a0
| 24,246
|
def ordinal(n):
    """Translate a 0-based index into a 1-based ordinal, e.g. 0 -> 1st, 1 -> 2nd, etc.
    :param int n: the index to be translated.
    :return: (*str*) -- Ordinal.
    """
    ord_dict = {1: "st", 2: "nd", 3: "rd"}
    m = n + 1
    # 11th-13th (and 111th, 211th, ...) take "th" despite ending in 1-3, so
    # teens are tested modulo 100 rather than only for the first twenty numbers
    suffix = "th" if 10 <= m % 100 <= 20 else ord_dict.get(m % 10, "th")
    return str(m) + suffix
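# Spot checks (illustrative), including the teens handled by the fix above:
assert ordinal(0) == "1st"
assert ordinal(10) == "11th"
assert ordinal(20) == "21st"
assert ordinal(110) == "111th"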
|
e6b8583bfb9fbb8a2b2443bc5fad233b1f7a9038
| 24,247
|
def bresenham(p0, p1):
    """
    Bresenham's line algorithm is a line drawing algorithm that determines the
    points of an n-dimensional raster that should be selected in order to form
    a close approximation to a straight line between two points. It is commonly
    used to draw line primitives in a bitmap image (e.g. on a computer screen),
    as it uses only integer addition, subtraction and bit shifting, all of
    which are very cheap operations in standard computer architectures.
    Args:
        p0 (np.array): Starting point (x, y)
        p1 (np.array): End point (x, y)
    Returns:
        A list of (x, y) intermediate points from p0 to p1.
    """
    x0, y0 = p0
    x1, y1 = p1
    dx = abs(x1 - x0)
    dy = abs(y1 - y0)
    # integer steps keep every emitted point an integer, as the docstring promises
    sx = 1 if x0 < x1 else -1
    sy = 1 if y0 < y1 else -1
    err = dx - dy
    line = []
    while True:
        line.append([x0, y0])
        if x0 == x1 and y0 == y1:
            return line
        e2 = 2 * err
        if e2 > -dy:
            # overshot in the y direction
            err = err - dy
            x0 = x0 + sx
        if e2 < dx:
            # overshot in the x direction
            err = err + dx
            y0 = y0 + sy
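# Worked example (illustrative endpoints): the raster line from (0, 0) to
# (3, 2) visits four points.
assert bresenham((0, 0), (3, 2)) == [[0, 0], [1, 1], [2, 1], [3, 2]]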
|
8948059454213c35b11917ac02a846d1d22e26e3
| 24,248
|
import re
def extract_start_page(string):
    """Extracts the start page from the information extracted from Zoobank.
    This is a very similar function to extract_end_page().
    Args:
        string (str): String being passed by the Item Pipeline
    Returns:
        str: String with the available start page
    """
    # This finds the ':' in the provided string, which is the bibliographic info
    # only, and starts retrieving from there. This often works because page ran-
    # ges are provided after volume(issue), with a preceding ':' and a space.
    page_range = string[string.find(':'):].strip()
    # This regex attempts to find a page-range separator (hyphen or en dash)
    # in the remaining string.
    sep = re.search(r'[-–]', page_range)
    # If there is a ':' and the separator actually found something, then
    # return only the first of the two numbers available.
    if ':' in string and sep is not None:
        return page_range[1:sep.start()].strip()
    # If there is a ':' but no separator, return the entire number,
    # dropping the leading ':'.
    elif ':' in string:
        return page_range[1:].strip()
    # If none of these were present, then return an empty string
    else:
        return ''
|
22ddac2a7b50de86fae3fb974b6c1318d5b632d7
| 24,250
|
def c_git_commit_handler(value, **kwargs):
    """
    Return a git VCS URL from a package commit.
    """
    return {'vcs_url': f'git+http://git.alpinelinux.org/aports/commit/?id={value}'}
|
e9036241be3816a0296caf895a9b323cb297f35a
| 24,251
|
import requests
from bs4 import BeautifulSoup
def get_movies_list(url):
    """
    Gets the list of movies present at the given url.
    Each movie is returned with its corresponding rating and link, so its
    review can be fetched later.
    Return Type : Dictionary
    A dict is used instead of a list so each movie name maps to its own
    (rating, link) pair and the data stays self-describing.
    """
    # sending a request to access the particular url
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'lxml')
    content = soup.find_all('tbody', class_="lister-list")
    rows = content[0].find_all('tr')
    # movie names, via a list comprehension
    movies_names = [row.find('td', class_="titleColumn").a.text for row in rows]
    # no list comprehension here because some movies do not have ratings yet
    rating = []
    for row in rows:
        try:
            rating.append(row.find('td', class_="ratingColumn imdbRating").strong.text)
        except AttributeError:
            # mark missing ratings with a blank; they can be filled in later
            # by a proper imputation step
            rating.append(" ")
    # links for each movie
    links = [row.find('td', class_="titleColumn").a['href'] for row in rows]
    # build the movies dictionary holding the data for each movie
    movies = {}
    for i in range(len(rows)):
        link = "https://www.imdb.com" + links[i]
        movies[movies_names[i]] = (rating[i], link)
    return movies
|
3b25e8eea55566cd66c6e750828d0a9a9707f7e0
| 24,252
|
def psi_ising(x_1, x_2, alpha):
    """ Ising potential function
    :param float x_1: first argument of the potential, eventually ndarray.
    :param float x_2: second argument, eventually ndarray of the same size as x_1.
    :param float alpha: granularity parameter
    :returns: **res** *(ndarray)* - output of the potential, eventually ndarray.
    """
    res = alpha * (1. - 2. * (x_2 == x_1))
    return res
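# Worked values (illustrative): equal arguments give -alpha, unequal ones
# give +alpha.
assert psi_ising(1., 1., 0.5) == -0.5
assert psi_ising(0., 1., 0.5) == 0.5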
|
071d6256f400b836c392eef739e60a9e1eb4cbbe
| 24,253
|
def map_obj_to_string(obj):
    """
    Function to create a string version from an object.
    Parameters
    ----------
    obj
        Some python object
    Returns
    -------
    str_version
    Examples
    --------
    >>> map_obj_to_string(1)
    "1"
    >>> map_obj_to_string("1")
    "1"
    >>> map_obj_to_string([1,"2"])
    ["1", "2"]
    >>> map_obj_to_string(("1", 2))
    ("1", "2")
    >>> map_obj_to_string({"a" : 1, "b" : "2"})
    {"a" : "1", "b" : "2"}
    """
    if isinstance(obj, list):
        rv = list(map_obj_to_string(i) for i in obj)
    elif isinstance(obj, tuple):
        rv = tuple(map_obj_to_string(i) for i in obj)
    elif isinstance(obj, dict):
        rv = dict((k, map_obj_to_string(v)) for k, v in obj.items())
    else:
        rv = str(obj)
    return rv
|
913e2c99021976a745fcf5ede5c224060defabc1
| 24,256
|
def check_tile(board, row, col, size):
    """ Check if a queen could be placed at board[row][col]
    Arguments:
        board {array[int]} -- [matrix of size nxn]
        row {[int]} -- [row position of tile]
        col {[int]} -- [col position of tile]
        size {[int]} -- [size of the board nxn]
    Returns:
        [bool] -- [True if no queen to the left attacks the tile, False otherwise]
    """
    # check on the left side
    for y in range(col):
        if board[row][y] == 1:
            return False
    # check left upper diagonal
    x, y = row, col
    while x >= 0 and y >= 0:
        if board[x][y] == 1:
            return False
        x -= 1
        y -= 1
    # check lower diagonal on left side
    x, y = row, col
    while x < size and y >= 0:
        if board[x][y] == 1:
            return False
        x += 1
        y -= 1
    return True
|
8768313366c2b0434e969be129b4db563bf6516a
| 24,257
|
def find_gist_by_id(github, _id):
    """Find a gist by _id"""
    dest = None
    for gist in github.iter_gists():
        if _id == gist.id or _id == gist.html_url:
            dest = gist
            break
    if dest is None:
        raise SystemExit('The gist ID or URL is not found, is it right?')
    return dest
|
bbef60282a2818f866e8779f0dcffed9d414aced
| 24,258
|
def clear_grid(ax, pos=["x","y"]):
"""Clear a grid
Args:
ax (Axes) : The ``Axes`` instance.
pos (list) : Positions to clean a grid
Examples:
>>> from pyutils.matplotlib import clear_grid, FigAxes_create
>>> fig,ax = FigAxes_create(nplots=1)[0]
>>> ax = clear_grid(ax=ax, pos=["x", "y"])
>>> ax = clear_grid(ax=ax, pos=list("ltrb"))
"""
if isinstance(pos, str):
pos = [pos]
for p in pos:
if p in ["x", "b", "bottom"]:
ax.tick_params(labelbottom=False, bottom=False)
ax.set_xticklabels([])
elif p in ["y", "l", "left"]:
ax.tick_params(labelleft=False, left=False)
ax.set_yticklabels([])
elif p in ["r", "right"]:
ax.tick_params(labelright=False, right=False)
elif p in ["t", "top"]:
ax.tick_params(labeltop=False, top=False)
elif p == "all":
ax.set_axis_off()
return ax
|
1f5a148f2e8c885735aaef0c676355299f1a21a4
| 24,261
|
def queenCanTattack(board, size, row, column):
    """Check if the new queen will not be able to attack an other one.
    Args:
        board (array): board on which the queen will be
        size (int): size of the board
        row (int): row position on the board
        column (int): column position on the board
    Returns:
        [boolean]: True, if unable to attack
    """
    can_t_attack = True
    # check cardinals
    for idx_row in range(size):
        if idx_row != row and board[idx_row][column] == 1:
            return not can_t_attack
    for idx_column in range(size):
        if idx_column != column and board[row][idx_column] == 1:
            return not can_t_attack
    # check diagonals
    for idx_row, idx_column in zip(range(row - 1, -1, -1),
                                   range(column + 1, size)):
        if board[idx_row][idx_column] == 1:
            return not can_t_attack
    for idx_row, idx_column in zip(range(row + 1, size),
                                   range(column + 1, size)):
        if board[idx_row][idx_column] == 1:
            return not can_t_attack
    for idx_row, idx_column in zip(range(row - 1, -1, -1),
                                   range(column - 1, -1, -1)):
        if board[idx_row][idx_column] == 1:
            return not can_t_attack
    for idx_row, idx_column in zip(range(row + 1, size),
                                   range(column - 1, -1, -1)):
        if board[idx_row][idx_column] == 1:
            return not can_t_attack
    return can_t_attack
|
b06c60b797c928caf1fed405f820e8b45a2a61f0
| 24,262
|
import csv
def get_writer(file_, delimiter=',', quotechar='"'):
    """ Return a writer object """
    return csv.writer(
        file_,
        delimiter=delimiter,
        quotechar=quotechar,
        quoting=csv.QUOTE_MINIMAL
    )
| 24,263
|
def xor(bytes_1: bytes, bytes_2: bytes) -> bytes:
    """
    Return the exclusive-or of two 32-byte strings.
    """
    return bytes(a ^ b for a, b in zip(bytes_1, bytes_2))
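# Usage example (illustrative bytes; the helper works for any equal-length
# inputs, though the docstring targets 32-byte strings):
assert xor(b"\x00\xff", b"\x0f\x0f") == b"\x0f\xf0"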
|
1f9eb00848ab963536c01d0c0321321f0c3be887
| 24,265
|
def residual_variance(y_true, y_pred):
    """
    Manual calculation of residual variance. It is obtained calculating the square error for every entry of every point,
    summarizing all of them and then dividing the sum by n-2, where n is the length of the array.
    Please refer to this specification: https://www.coursera.org/lecture/regression-models/residual-variance-WMAET
    :param y_true: ground truth
    :param y_pred: the estimated y
    :return: float, residual variance of our model
    """
    sse = 0  # avoid shadowing the built-in sum()
    for idx, value in enumerate(y_true):
        sse += (value - y_pred[idx]) ** 2
    # divide by n - 2, as stated in the docstring and the linked lecture
    return float(sse / (len(y_true) - 2))
|
013f2b0d220ce3bd4e26c506302d4a843f8fdd95
| 24,266
|
from typing import Callable
from typing import Any
from typing import Sequence
from typing import Tuple
from typing import Optional
def _find_last(condition: Callable[[Any], bool], sequence: Sequence) -> Tuple[Optional[int], Any]:
    """Search the sequence from the end for an element matching the condition.
    If several elements match, the one with the largest index is returned.
    Args:
        condition: the predicate to satisfy
        sequence: the sequence to search
    Returns:
        The index and the element if one is found; otherwise None and None.
    """
    target_list = tuple(sequence)
    for i in reversed(range(len(target_list))):
        if condition(target_list[i]):
            return i, target_list[i]
    return None, None
|
362575e13e1f0a64560b4c29cf4e97fbff8a248b
| 24,267
|
import subprocess
def get_modified_files(commit="HEAD^"):
    """
    Return all files that changed since `commit`.
    """
    return [
        file
        for file in (
            subprocess.check_output(
                ["git", "diff", "HEAD", commit, "--name-only"],
                stderr=subprocess.DEVNULL,
            )
            .decode()
            .split("\n")
        )
        if file
    ]
|
5c68efafab8d09ba3b42d58e7896b978cff74efb
| 24,268
|
def _defaultHeaders(token):
    """Default headers for GET or POST requests.
    :param token: token string."""
    return {'Accept': '*/*',
            'Content-Type': 'application/json',
            'Authorization': 'Bearer ' + token}
|
33125604bcb3c7af3d71ae2c7c21f0dd82d141ac
| 24,270
|
def first(seq, key=lambda x: bool(x), default=None, apply=lambda x: x):
    """Give the first value that satisfies the key test.
    Args:
        seq (iterable):
        key (callable): test for each element of iterable
        default: returned when all elements fail test
        apply (callable): applied to element before return, but not to default value
    Returns: first element in seq that passes key, mutated with optional apply
    Examples:
        >>> first([0, False, None, [], (), 42])
        42
        >>> first([0, False, None, [], ()]) is None
        True
        >>> first([0, False, None, [], ()], default='ohai')
        'ohai'
        >>> import re
        >>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
        >>> m.group(1)
        'bc'
    The optional `key` argument specifies a one-argument predicate function
    like that used for `filter()`. The `key` argument, if supplied, must be
    in keyword form. For example:
        >>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
        4
    """
    return next((apply(x) for x in seq if key(x)), default() if callable(default) else default)
|
635fb2a216e815c7b458fad46f7d1b34f847e42f
| 24,271
|
def min_(a, b, operator):
    """:yaql:min
    Returns min from a and b.
    :signature: min(a, b)
    :arg a: input value
    :argType a: number
    :arg b: input value
    :argType b: number
    :returnType: number
    .. code::
        yaql> min(8, 2)
        2
    """
    if operator(b, a):
        return a
    return b
|
26561ebf2e91774f5921a19b3d43625d215edcb3
| 24,272