content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def safe_template_context(ticket):
    """
    Build a plain-dict template context exposing a whitelisted subset of
    ticket and queue attributes.

    The Ticket and Queue model objects are deliberately NOT handed to the
    template, since they could reveal confidential fields such as
    ``queue.email_box_password`` or ``assigned_to.password``.  The trade-off
    is that this whitelist must be kept in sync with the models.
    """
    queue_fields = ('title', 'slug', 'email_address', 'from_address', 'locale')
    ticket_fields = (
        'title', 'created', 'modified', 'submitter_email',
        'status', 'get_status_display', 'on_hold', 'description',
        'resolution', 'priority', 'get_priority_display',
        'last_escalation', 'ticket', 'ticket_for_url',
        'get_status', 'ticket_url', 'staff_url', '_get_assigned_to',
    )

    queue_ctx = {}
    for name in queue_fields:
        value = getattr(ticket.queue, name, None)
        # Zero-argument accessors are called; plain attributes copied verbatim.
        queue_ctx[name] = value() if callable(value) else value

    ticket_ctx = {}
    for name in ticket_fields:
        value = getattr(ticket, name, None)
        if callable(value):
            # Ticket accessor results are additionally coerced to strings.
            ticket_ctx[name] = '%s' % value()
        else:
            ticket_ctx[name] = value

    ticket_ctx['queue'] = queue_ctx
    ticket_ctx['assigned_to'] = ticket_ctx['_get_assigned_to']
    return {'queue': queue_ctx, 'ticket': ticket_ctx}
# | ebc907bb69f458f6aa91608043e7696488ddce54 | 34,796 |
def coords_dict_to_coords_string(coords):
    """
    Given a dict of long/lat values, return a "long_lat" string with each
    value rounded to 2 decimal places.

    Keys are matched loosely: any key containing "at" is taken as latitude
    and any key containing "ong" as longitude (so "lat"/"latitude" and
    "long"/"longitude" all work).

    Returns an empty string (after printing a warning) when either
    coordinate cannot be identified.
    """
    longitude, latitude = None, None
    for key, value in coords.items():
        if "at" in key:
            latitude = value
        if "ong" in key:
            longitude = value
    # BUG FIX: the original `if not longitude and latitude:` only triggered
    # when longitude was missing AND latitude was present (precedence bug),
    # and it also treated a legitimate 0.0 longitude as missing.  Compare
    # against None explicitly instead.
    if longitude is None or latitude is None:
        print("Unable to identify longitude and latitude keys")
        return ""
    return "{:.2f}_{:.2f}".format(longitude, latitude)
# | 5b85c3bd3908f7fc5f925810efc10e231b0b143a | 34,798 |
def get_inheritance_models(variant, family_id, inheritance_keyword):
    """Return the set of genetic models annotated for ``family_id`` in this variant."""
    models_found = set()
    # Annotation format: 'fam_1:AR_hom,fam_2:AR_hom|AR_hom_dn', already split
    # into one 'family:models' entry per element.
    family_models = variant['info_dict'].get(inheritance_keyword)
    if family_models:
        for family_info in family_models:
            parts = family_info.split(':')
            if parts[0] == family_id:
                # Models for one family are '|'-separated.
                models_found = set(parts[1].split('|'))
    return models_found
# | e039a205008174b4a88de2859d4c7c00195164de | 34,799 |
import random
def set_rnd(cookie_string, prefix='', rconn=None):
    """Store a fresh 8-digit random number against the cookie key in redis.

    Returns the random number on success, None on failure (no connection,
    empty cookie string, unknown key, or any redis error).
    """
    if rconn is None:
        return
    if not cookie_string:
        return
    cookiekey = prefix + cookie_string
    rnd = random.randint(10000000, 99999999)
    try:
        # Only update keys that already exist in the database.
        if not rconn.exists(cookiekey):
            return
        # Store the random number at list position 1.
        rconn.lset(cookiekey, 1, str(rnd))
    except Exception:
        # FIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; redis errors still just return None.
        return
    return rnd
# | 1c3ed14476e795f7d6fa5072de4c75f2de2f1e1f | 34,801 |
import math
def largest_prime_factor_square_optimized(number):
    """
    Return the distinct prime factors of ``number`` in increasing order.

    Every number n has at most one prime factor greater than sqrt(n).
    After dividing out each found factor we recompute the square root of the
    remaining number and use it as the upper limit for trial factors; once
    the trial factor exceeds it, the remainder (if > 1) is itself prime.
    """
    factors = []
    factor = 2
    if number % factor == 0:
        number = number // factor
        factors.append(factor)
        while number % factor == 0:
            number = number // factor
    factor = 3
    max_factor = math.sqrt(number)
    while number > 1 and factor <= max_factor:
        if number % factor == 0:
            factors.append(factor)
            number = number // factor
            while number % factor == 0:
                number = number // factor
            max_factor = math.sqrt(number)
        factor += 2
    # BUG FIX: anything > 1 left here is a prime factor larger than the
    # square root; the original dropped it (e.g. 10 -> [2] instead of [2, 5]).
    if number > 1:
        factors.append(number)
    return factors
# | 5d55de7e9eca6c5e1a8e1a097db0390978496471 | 34,802 |
import struct
import socket
def ip2num(ip):
    """Convert a dotted-quad IPv4 string to its 32-bit integer value.

    >>> ip2num('1.1.1.1')
    16843009
    """
    packed = socket.inet_aton(ip)
    (value,) = struct.unpack('>I', packed)
    return value
# | 86ba7891ffd27c7b4100faefbe7b0878f2a4d3f5 | 34,803 |
def modified_ylim(ylim, mag, ypos):
    """
    Return a special ylim (tuple) adjusted for magnification ``mag`` and
    vertical position ``ypos``: the span is divided by ``mag`` and the lower
    bound shifts linearly with ``ypos``.
    """
    span = (ylim[1] - ylim[0]) / mag
    lower = -(span / 2) * (ypos + 1)
    upper = lower + span
    return (lower, upper)
# | f2d04a7fc4a47d803f695f6a97614e6106c63456 | 34,805 |
from math import sqrt
def circle(radio1, radio2, radio3):
    """
    Descartes' circle theorem ("The Kiss Precise"): given the radii of three
    mutually tangent circles, return the radii of the two circles tangent to
    all three (the inner solution, and the absolute value of the outer one).
    """
    curvatures = (1.0 / radio1, 1.0 / radio2, 1.0 / radio3)
    total = sum(curvatures)
    squares = sum(c * c for c in curvatures)
    root = sqrt(2 * total * total - 2 * squares)
    return 1.0 / (total + root), abs(1.0 / (total - root))
# | 6c473ee4a477bad92d46393ec83c289a3792f616 | 34,806 |
def get_colour(image, p):
    """
    Return a char with the colour of the given point.

    Currently a stub: the pixel lookup (image[p[0]+3, p[1]+3] in the original
    debug print) is not implemented, so the empty string is always returned.
    """
    return ''
# | 02278dd312c282efa49caa5eade3caa03e48371e | 34,807 |
from unittest.mock import Mock
def get_response_mock(url):
    """Build a mock standing in for a response to a request.

    :param url: response URL
    :returns: a Mock instance whose ``url`` attribute is set to ``url``
    """
    mock = Mock()
    mock.url = url
    return mock
# | 4af8b016fc1c1f227b83ab33edff248eef833e61 | 34,808 |
import torch
def draw_samples_from(gen, device, N=128, rescale=False):
    """
    Draw N samples from the generator network.

    If ``rescale`` is True, pixels are mapped from [-1, 1] to [0, 255].
    The result is always clamped to [0, 255], converted to uint8 and moved
    to the CPU.
    """
    gen.eval()
    with torch.no_grad():
        latent = torch.randn(N, gen.z_dim, 1, 1, device=device)
        samples = gen(latent)
        if rescale:
            samples = (samples + 1.0) / 2.0 * 255.
        samples = torch.clamp(samples, 0., 255.).byte().cpu()
    return samples
# | 07914208f99ffae22d6029ffca25edc76dc96ed4 | 34,810 |
def hexToRgb(hex):
    """
    Convert a hex colour code such as "#FFF" or "#00FF0F" to an RGB list.

    Args:
        hex (string): colour code, '#' followed by 3 or 6 hexadecimal digits
        (the leading '#' is optional)
    Returns:
        list [r, g, b], each component in the range 0 - 255 inclusive
    """
    # Drop a single leading '#'.
    if hex[0] == "#":
        hex = hex[1:]
    if len(hex) == 3:
        # Shorthand "03F" expands to "0033FF": each digit is doubled.
        return [int(digit * 2, 16) for digit in hex]
    return [int(hex[pos:pos + 2], 16) for pos in (0, 2, 4)]
# | dbcbde5feda73b6c9a03f0758aa06be14731e86f | 34,811 |
import numpy
def getWeights(K):
    """
    Return the weights of the Gauss points.

    Arguments
    ----------
    K: int, number of Gauss points per element (1, 3, 4 and 7 are supported).

    Returns
    --------
    w: K-size array of Gauss-point weights; all zeros when K is unsupported.
    """
    known_weights = {
        1: [1],
        3: [1 / 3., 1 / 3., 1 / 3.],
        4: [-27. / 48, 25. / 48, 25. / 48, 25. / 48],
        7: [0.225,
            0.125939180544827, 0.125939180544827, 0.125939180544827,
            0.132394152788506, 0.132394152788506, 0.132394152788506],
    }
    w = numpy.zeros(K)
    if K in known_weights:
        w[:] = known_weights[K]
    return w
# | 595f39fff20ca0af307493d891987908a06385eb | 34,812 |
import re
def regex_prettifier(scraped_data, regex):
    """Prettify scraped data by deleting every regex match from each item.

    Positional Arguments:
        scraped_data (list): data scraped from a website
        regex (str): a regular expression
    Return:
        list: the regex-stripped data
    """
    pattern = re.compile(regex)
    return [pattern.sub('', item) for item in scraped_data]
# | 5eb42d0df2a0f93dbc14ec5fb5dd68bc6fe127ca | 34,814 |
def image_mode(image):
    """
    Return the mode of an image, e.g. 'RGB' or 'RGBA'.

    image: an Image object (anything exposing a ``mode`` attribute)
    return: the mode string
    """
    return image.mode
# | 4295d60664b4555fd4990105545dccb0a7d0f766 | 34,815 |
import numpy
def _check_list_of_string(obj, objName):
"""
Check that object is a list of strings
Parameters
----------
obj: str or list
Object to check
objName: str
Name of the object
Returns
-------
obj: list
List of strings
"""
if obj is not None:
obj = numpy.asarray(obj, dtype="str")
if obj.ndim == 0:
obj = obj.reshape(1)
elif obj.ndim > 1:
raise TypeError("Invalid `{}`".format(objName))
return obj | bb3ce3e670d8ff8c4cdd7d90ab63058e3664c013 | 34,816 |
def alias(*aliases):
    """Decorator adding aliases for Cmdln.do_* command handlers.

    Example:
        class MyShell(cmdln.Cmdln):
            @cmdln.alias("!", "sh")
            def do_shell(self, argv):
                #...implement 'shell' command
    """
    def decorate(handler):
        # Create the list lazily, then extend in place so stacked
        # decorators accumulate their aliases.
        if not hasattr(handler, "aliases"):
            handler.aliases = []
        handler.aliases.extend(aliases)
        return handler
    return decorate
# | ea94335cfeb1f1e4f02a67df39c834041df41fcf | 34,817 |
def get_linear(from_interval, to_interval):
    """Return the linear transformation mapping one interval onto another.

    Parameters
    ----------
    from_interval : ndarray, tuple or list
        Length-2 sequence (llim, ulim) defining the domain interval.
    to_interval : ndarray, tuple or list
        Length-2 sequence defining the image interval.

    Returns
    -------
    function
        The linear transformation.
    """
    src_lo, src_hi = from_interval
    dst_lo, dst_hi = to_interval
    # Coefficients of the affine map y = slope * x + offset.
    slope = (dst_hi - dst_lo) / (src_hi - src_lo)
    offset = dst_lo - slope * src_lo

    def linear(x):
        """Apply the transformation."""
        return slope * x + offset

    return linear
# | f9ffc4a9d76b9e177d86f730ea7cdabbc18a3b9e | 34,819 |
from typing import List
def const_evaluate(population: List, value) -> List:
    """Assign the constant ``value`` as fitness to every individual.

    The Problem associated with each individual is deliberately ignored.
    Useful for algorithms that need an arbitrary initial fitness before
    their normal evaluation runs (e.g. some cooperative coevolution forms).
    """
    for individual in population:
        individual.fitness = value
    return population
# | 110e054a0eef875d58135c814ae99c17fd960ee7 | 34,820 |
import codecs
def is_known_encoding(encoding: str) -> bool:
    """
    Return True if ``encoding`` names a codec known to Python.
    """
    try:
        codecs.lookup(encoding)
        return True
    except LookupError:
        return False
# | 2914728aa14ec295fa647051141ed897875f153c | 34,821 |
from typing import List
def get_column_names_types(table: List[List[str]]) -> List[List[str]]:
    """
    From a markdown table in list-of-rows form, return its first two rows:
    the column names and the column types.

    :param table: table as a list of lists (rows); row 0 holds the names
        and row 1 the types (types are assumed to be present)
    :return: [column_names, column_types]
    """
    column_names, column_types = table[0], table[1]
    return [column_names, column_types]
# | 38230484b0fc32702b8eab48c2b56df1db29ee0a | 34,822 |
import argparse
def create_parser():
    """Create and return the command-line argument parser."""
    parser = argparse.ArgumentParser(
        description='[Flow] Generates capacity diagrams for the bottleneck.',
        epilog="python capacity_diagram_generator.py </path/to/file>.csv",
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # Single positional argument: the input csv.
    parser.add_argument('file', type=str, help='path to the csv file.')
    return parser
# | 2422eb7e5b9e4ab460bfa192f9d491fc4d69218d | 34,824 |
import os
def _GetInputByPath(path, sources):
"""Get the source identified by a path.
Args:
path: str, A path to a file that identifies a source.
sources: An iterable collection of source objects.
Returns:
The source from sources identified by path, if found. Converts to
absolute paths for comparison.
"""
for js_source in sources:
# Convert both to absolute paths for comparison.
if os.path.abspath(path) == os.path.abspath(js_source.GetPath()):
return js_source | 9b481a608d6c6c64f87d462c48dbf4609844ecdd | 34,825 |
import os
def get_logger_name_for_module(module, exclude_module_name=False):
    """
    Build the fully qualified logger name for a module
    (e.g. ``st2common.cmd.sensormanager``).

    The package prefix is derived from the module's file path: every path
    component from the last directory starting with ``st2`` onwards is kept.

    :type: ``str``
    """
    module_file = module.__file__
    base_dir = os.path.dirname(os.path.abspath(module_file))
    module_name = os.path.basename(module_file).replace('.pyc', '').replace('.py', '')

    components = [part for part in base_dir.split(os.path.sep) if part]

    # Scan from the end for the last component starting with "st2" and use
    # it as the starting point of the logger name.
    start = 0
    for i in range(len(components) - 1, -1, -1):
        if components[i].startswith('st2'):
            start = i
            break
    components = components[start:]

    if exclude_module_name:
        return '.'.join(components)
    return '.'.join(components) + '.' + module_name
# | 278fa3fc59ad1c98c3fdf7fec1f0eeafcb4b58d1 | 34,827 |
import re
def maxxmatches(regex, text, x):
    """Return the substrings of ``text`` matching ``regex`` whose length is at most ``x``."""
    matches = re.compile(regex).findall(text)
    return [match for match in matches if len(match) <= x]
# | d361a828e941465f138188051a431eb3a81cc5a2 | 34,828 |
import os
import pickle
def annots_from_node(annot_dir, node_id):
    """
    Fetch pickled annotations for a node id.

    The annotation file name is derived from the first element of ``node_id``
    by replacing its '.nx' suffix with '_annot.p'.

    :param annot_dir: directory containing the *_annot.p files
    :param node_id: sequence whose first element is the graph file name
    :return: the unpickled annotation object
    """
    graph_path = os.path.join(annot_dir, node_id[0].replace('.nx', '_annot.p'))
    # FIX: use a context manager so the file handle is closed deterministically
    # (the original left it open until garbage collection).
    with open(graph_path, 'rb') as handle:
        return pickle.load(handle)
# | e9c4dbd3c81601cd3551ad6450a683662e047cbe | 34,831 |
import torch
def boolean_mask(img, color):
    """
    Return a boolean mask flagging the pixels of ``img`` equal to ``color``.

    Arguments:
        img {torch.Tensor} -- image tensor [shape = (..., 3)]
        color {torch.Tensor} -- RGB colour tensor [shape = (3, )]
    Returns:
        torch.BoolTensor -- boolean mask of image [shape = (..., )]
    """
    last_axis = img.dim() - 1
    # Broadcast the colour across all leading dimensions, then require all
    # three channels to match.
    broadcast_color = color.view(*([1] * last_axis), 3)
    return (img == broadcast_color).all(dim=last_axis)
# | 39d7a75ac1a47574ebb333b1247813761986e636 | 34,832 |
import io
def _read_file(file_):
"""Reads a file, returns the stripped contents."""
with io.open(file_, "r", encoding="utf-8") as openfile:
return openfile.read().strip() | 66d0dcf2454ea259ea46cb5295edd7c7b4ffec77 | 34,834 |
def clip(value_before_switch, value_after_switch, t_switch, t):
    """
    Step function of time: returns the first value up to and including the
    threshold time ``t_switch``, and the second value afterwards.
    """
    return value_before_switch if t <= t_switch else value_after_switch
# | 103a5aede1c1d0589e0acfc9ef058e011813f789 | 34,835 |
from functools import reduce
def join_bits(byteseq) -> int:
    """
    Join a sequence of 0/1 (or True/False) bits, most significant first,
    into the int of the same magnitude.

    >>> join_bits([1, 1, 0, 1])
    13

    An empty sequence yields 0 (the original raised TypeError from
    ``reduce`` with no initializer).
    """
    # Seeding the fold with 0 handles the empty sequence and leaves all
    # non-empty results unchanged.
    return reduce(lambda acc, bit: (acc << 1) | int(bit), byteseq, 0)
# | 6cb925c4d5acc99e656802738565a957471af62f | 34,836 |
def metersToInches(meters):
    """Convert a length in meters to inches (1 m = 39.3701 in)."""
    inches_per_meter = 39.3701
    return meters * inches_per_meter
# | 27061202cb72e5a98be6230e491bef148f6dbd13 | 34,838 |
import json
def check_results(test_results):
    """
    Check the AWS run results file and return non-zero when there were test
    failures (only "failed" counts — "errored" does not).

    Expected JSON shape:
        {"run": {"counters": {"total": ..., "passed": ..., "failed": ...,
                              "warned": ..., "errored": ..., "stopped": ...,
                              "skipped": ...}}}

    A missing or malformed counters section is tolerated and treated as
    "no failures" (returns 0), matching the original best-effort behaviour.
    """
    with open(test_results) as f:
        results = json.load(f)
    try:
        counters = results["run"]["counters"]
        print(counters)
        failure_count = counters["failed"]
        if failure_count != 0:
            print("Detected", failure_count, "test failures :(")
            return 1
    except (KeyError, TypeError):
        # FIX: narrowed from a bare `except:` so genuine bugs (and
        # KeyboardInterrupt/SystemExit) are no longer silently masked;
        # structural problems in the JSON are still tolerated.
        pass
    return 0
# | 724595ab180e0b04c238cd6a8f7a99b7547429c5 | 34,839 |
def isInteger(n, epsilon=1e-6):
    """
    Return True if ``n`` is within ``epsilon`` of an integer.

    BUG FIX: the original compared ``(n - int(n)) < epsilon``, which is wrong
    on two counts: values just *below* an integer (e.g. 2.9999999) were
    rejected, and negative halves such as -2.5 were accepted (``int`` truncates
    toward zero, giving a negative difference that is always < epsilon).
    Comparing the absolute distance to the nearest integer fixes both.
    """
    return abs(n - round(n)) < epsilon
# | 8ef0960cffadc063317830dca77d1177569ad178 | 34,840 |
def maximalSquare(matrix):
    """
    Return the area of the largest square of '1's in a binary matrix.

    :type matrix: List[List[str]]
    :rtype: int

    Dynamic programming: dp[i+1][j+1] holds the side length of the largest
    all-'1' square whose bottom-right corner is matrix[i][j].
    """
    # FIX: guard against an empty matrix / empty rows (the original raised
    # IndexError on `matrix[0]`).
    if not matrix or not matrix[0]:
        return 0
    m, n, side = len(matrix), len(matrix[0]), 0
    # dp has one extra zero row/column so the borders need no special-casing.
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m):
        for j in range(n):
            if matrix[i][j] == '1':
                # Extend the smallest of the three neighbouring squares by one.
                dp[i + 1][j + 1] = min(dp[i][j + 1], dp[i + 1][j], dp[i][j]) + 1
                if side < dp[i + 1][j + 1]:
                    side = dp[i + 1][j + 1]
    return side * side
# | 900048693299dbe1d54be08fd468e3eb30b0c81a | 34,841 |
def add_pkg_to_pkgs(pkg, pkgs):
    """Insert ``pkg`` into the two-level mapping ``pkgs[source][version]``.

    ``pkgs`` must auto-create the inner mapping (e.g. a ``defaultdict(dict)``).
    Returns the updated mapping.
    """
    pkgs[pkg["Source"]][pkg["Version"]] = pkg
    return pkgs
# | 296cfd39f56858c548171cddf464435a9832ae74 | 34,842 |
def actual_bilinear_interp(field, x0, y0, x, y, len_x, len_y, x_index, y_index):
    """!Bilinear interpolation of ``field`` at the point (x0, y0).

    (x_index, y_index) identify the grid cell, i.e. x[x_index-1] <= x0 <=
    x[x_index] and likewise for y.  ``len_x``/``len_y`` are unused but kept
    for interface compatibility.  (Originally numba-jitted: the decorator
    adds a small one-off compile cost the first time it runs.)
    """
    # Standard bilinear weights over the four cell corners.
    # BUG FIX: the third term must use the (x_index-1, y_index) corner; the
    # original repeated field[y_index, x_index], double-counting one corner
    # and skipping another.
    field_interp = ((field[y_index-1, x_index-1]*(x[x_index] - x0)*(y[y_index] - y0) +
                     field[y_index-1, x_index]*(x0 - x[x_index-1])*(y[y_index] - y0) +
                     field[y_index, x_index-1]*(x[x_index] - x0)*(y0 - y[y_index-1]) +
                     field[y_index, x_index]*(x0 - x[x_index-1])*(y0 - y[y_index-1])) /
                    ((y[y_index] - y[y_index-1])*(x[x_index] - x[x_index-1])))
    return field_interp
# | d34e6eed0e508a0d938f137f9181e5b25b6ff00f | 34,843 |
def bitboard(state):
    """Flatten a python-chess board (chess.Board) into a 768-entry 0/1 list.

    The result is 12 concatenated 64-square vectors (squares ordered a1, b1,
    ... h1, a2, ... h8), one per piece kind: white pawn, knight, bishop, rook,
    queen, king, then the same six for black.
    """
    board_bits = []
    # Colours: white (1) first, then black (0); piece types run 1..6.
    for colour in (1, 0):
        for piece_type in range(1, 7):
            square_bits = [0] * 64
            for square in state.pieces(piece_type, colour):
                square_bits[square] = 1
            board_bits.extend(square_bits)
    return board_bits
# | 7a1b809e436320133b4e914f8770d7842ca83710 | 34,845 |
def getNodesByName(parent, name):
    """
    Return a list of all child nodes of ``parent`` whose local
    (namespace-free) tag name equals ``name``.

    ``parent`` must expose an lxml-style ``xpath`` method.
    """
    query = "*[local-name() = '%s']" % name
    return parent.xpath(query)
# | f171c4642b3a129c5ccc092f26ee6d402451873f | 34,846 |
def degree(G):
    """Auxiliary function: return the degree of each element of ``G``
    (delegates to ``G.degree()``)."""
    return G.degree()
# | f1fd4f0903c7d03f7ae406c567620a2289d256a8 | 34,848 |
def optimize_distance_with_mistake(distance: float, mistake: float) -> float:
    """Use the observed mistake to tune the walk distance at runtime.

    The mistake (capped at 1) is mapped from [0, 1] onto [-1, 1], so the
    walk is shortened or lengthened by at most a single hop.
    """
    capped = min(mistake, 1)
    adjustment = (capped - 0.5) / 0.5
    return distance + adjustment
# | 0062a2263764dbaa1b5ee11740be0ea727a85a0f | 34,849 |
import random
def random_bytes(n):
    """Return a random bytes object of length ``n``.

    Uses the ``random`` module, so NOT suitable for security purposes
    (use ``secrets`` for tokens).
    """
    return bytes(random.getrandbits(8) for _ in range(n))
# | 61af296f8d2272b5100988b345942f21dc78603f | 34,850 |
def get_serie_group(serie_change_bool):
    """
    Turn a boolean "group starts here" series into integer group ids via a
    cumulative sum:
        true, false, false, true, false  ->  1, 1, 1, 2, 2
    """
    group_ids = serie_change_bool.cumsum()
    return group_ids
# | 038195657fd33eb9a626344b335c7faefaa48a50 | 34,851 |
import subprocess
import sys
def _run_subprocess_split(command):
"""Run command in subprocess and return the splited output.
Returns:
str: Splited output from command execution.
"""
output = subprocess.check_output(command, shell=False)
if sys.version_info >= (3, 0):
output = str(output, 'utf-8').strip()
else:
output = unicode(output, 'utf-8').strip() # noqa, pylint: disable=undefined-variable
return output | 21276487122f60f1c6f53b8ecab309a2788097d8 | 34,853 |
import sys
import os
def read_pipe_input():
    """Return one line of pipe input from stdin, without the trailing
    line separator.  Blocks until the pipe provides a line."""
    line = sys.stdin.readline()
    return line.rstrip(os.linesep)
# | b106146f807931309f276ab373ca6d68ac4e54c0 | 34,854 |
def parse_bool(section, optionname):
    """
    Parse a string config option as a boolean.  Accepts "True"/"False",
    "yes"/"no" and digit strings ("0" is False); anything else raises
    ValueError.
    """
    raw = section.dict[optionname]
    lowered = raw.lower()
    if lowered in ("true", "yes"):
        return True
    if lowered in ("false", "no"):
        return False
    if raw.isdigit():
        return bool(int(raw))
    raise ValueError("Option " + optionname + " in section " + section.name
                     + " is not a valid boolean!")
# | a16c7eb9169c04bc6cf03309c0b7d7dbfbdd511c | 34,855 |
import pathlib
import os
def search_req_file_until_root() -> str:
    """Locate a requirements.txt in the current directory or in any parent
    directory up to the filesystem root.

    Returns the bare name ``requirements.txt`` as a fallback when no such
    file is found."""
    target = pathlib.Path("requirements.txt")
    if target.is_file():
        return str(target)
    # Walk every ancestor of the current working directory up to root.
    for parent_dir in pathlib.Path(os.getcwd()).parents:
        candidate = pathlib.Path(parent_dir, target)
        if candidate.is_file():
            return str(candidate)
    return str(target)
# | 3db5ff1b678b7d6a3a28b6d521dadc8186f920be | 34,857 |
def normPts(pts, shape):
    """
    Normalize point coordinates into [-1, 1].

    :param pts: tensor of (y, x) points
    :param shape: tensor (y, x) image shape
    :return: points rescaled so 0 maps to -1 and ``shape`` maps to +1
    """
    normalized = pts / shape * 2 - 1
    return normalized
# | b074d679591a57eff40c06c2429e9b74eea8cd1b | 34,859 |
import json
def get_image2anno(json_path):
    """
    Parse a COCO-style json into {image_index_in_images: [anno1, anno2, ...]}.

    The key is the image's position inside the 'images' list, so the image's
    metadata can be looked up directly from that list.
    """
    with open(json_path, 'r') as handle:
        data = json.load(handle)
    # Map image id -> position in the 'images' list.
    id_to_index = {img['id']: idx for idx, img in enumerate(data['images'])}
    index_to_annos = {}
    for anno in data['annotations']:
        index_to_annos.setdefault(id_to_index[anno['image_id']], []).append(anno)
    return index_to_annos
# | 105456a24f7e6e991e20520f43419c79e9182861 | 34,862 |
def _create_image_path(image_path, image_id):
"""Generates path to a specific image.
Args:
image_path: String with path to the folder containing training images.
image_id: String representing name of the file.
Returns:
String with path to the specific image.
"""
return image_path + image_id | 2d122666d4dbf1a8efae210dd022bf25af78df87 | 34,863 |
def suggest_tags_comments_column_name(df):
    """Suggest names for the tags and comments columns.

    "tags" / "comments" are proposed only when the DataFrame does not already
    contain a column of that name; an empty string is suggested otherwise.

    Args:
        df (DataFrame): DataFrame of tuple pairs
    Returns:
        [tags_col, comments_col] (str, str): suggested column names
        (possibly empty strings).
    """
    tags_col = "" if "tags" in df.columns else "tags"
    comments_col = "" if "comments" in df.columns else "comments"
    return [tags_col, comments_col]
# | 250e8e1ea9107f084104fbb7b29b7a76195a8e94 | 34,864 |
def set_type(values, new_type):
    """Coerce every element of ``values`` to ``new_type``.

    Args:
        values: A list of values.
        new_type: Target type; one of str, int or float.

    Returns:
        A new list of coerced values.  For int, elements are parsed as
        floats first and then rounded to the nearest integer.

    Raises:
        ValueError: if ``new_type`` is not str, int or float.
    """
    if new_type is str:
        return [str(value) for value in values]
    if new_type is int:
        return [int(round(float(value))) for value in values]
    if new_type is float:
        return [float(value) for value in values]
    raise ValueError("{} not supported for coercing types".format(new_type.__name__))
# | a1aa1cc74800a1add464e8ac124e0873a455f59a | 34,866 |
def verbose_print(verbose: bool):
    """
    Return ``print`` itself when ``verbose`` is truthy, otherwise a no-op
    callable accepting the same arguments.

    Parameters
    ----------
    verbose: whether output should actually be printed.
    """
    if verbose:
        return print

    def _noop(*args, **kwargs):
        return None

    return _noop
# | d23f68e28357076cadb0b371a93316f9b33692fb | 34,867 |
import json
import requests
def get_server_version(conn):
    """
    Query the KairosDB server for its version.

    :type conn: KairosDBConnection
    :param conn: a connection object exposing ``schema``, ``server`` and ``port``
    :rtype: string
    :return: String containing the version of the KairosDB server.
    """
    version_path = "api/v1/version"
    url = "{0.schema}://{0.server}:{0.port}/{1}".format(conn, version_path)
    response = requests.get(url)
    return json.loads(response.content)['version']
# | cdff75b423321ced462d70dcdc87b5c5324d2cec | 34,868 |
def retrieve_terminal_exon(tcons, oxford, flair, talon):
    """
    Collect the terminal exon positions of each matched transcript trio.

    :param tcons: {tcons_id: [oxford_id, flair_id, talon_id]}
    :param oxford: {transcript_id: [(exon_start, exon_end), ...]}
    :param flair: {transcript_id: [(exon_start, exon_end), ...]}
    :param talon: {transcript_id: [(exon_start, exon_end), ...]}
    :return: {tcons_id: [oxford_exons, flair_exons, talon_exons]} where each
        entry is the full exon list for single-exon transcripts, or the
        [first, last] exon pair for multi-exon transcripts.

    Note: only the oxford transcript's exon count is inspected — the data is
    assumed to contain three-way matches with equal exon counts.
    """
    terminal_exons = {}
    for tcon_id, transcript_ids in tcons.items():
        oxford_exons = oxford[transcript_ids[0]]
        flair_exons = flair[transcript_ids[1]]
        talon_exons = talon[transcript_ids[2]]
        if len(oxford_exons) == 1:
            # Single-exon transcripts are taken whole.
            terminal_exons[tcon_id] = [oxford_exons, flair_exons, talon_exons]
        else:
            # Multi-exon: keep only the first and last exon of each tool.
            terminal_exons[tcon_id] = [
                [oxford_exons[0], oxford_exons[-1]],
                [flair_exons[0], flair_exons[-1]],
                [talon_exons[0], talon_exons[-1]],
            ]
    return terminal_exons
# | 2fd14584f540fc7c7fbe8a80ac8cad31e25e3e27 | 34,870 |
import math
def lookup_hosts_with_cpu_and_memory(admin_session, harvester_api_endpoints,
                                     cpu, memory):
    """Lookup nodes that satisfies the given CPU and memory requirements.

    :param admin_session: authenticated requests-style session used to call
        the Harvester API
    :param harvester_api_endpoints: object exposing the API URL templates
        (``list_nodes`` and the ``get_node_metrics`` format string)
    :param cpu: required number of free CPU cores
    :param memory: required free memory, in gigabytes
    :return: list of node names with at least ``cpu`` free cores and at
        least ``memory`` GB of free allocatable memory
    """
    resp = admin_session.get(harvester_api_endpoints.list_nodes)
    assert resp.status_code == 200, 'Failed to list nodes: %s' % (resp.content)
    nodes_json = resp.json()['data']
    nodes = []
    for node in nodes_json:
        # look up CPU usage for the given node
        resp = admin_session.get(harvester_api_endpoints.get_node_metrics % (
            node['metadata']['name']))
        assert resp.status_code == 200, (
            'Failed to lookup metrices for node %s: %s' % (
                node['metadata']['name'], resp.content))
        metrics_json = resp.json()
        # NOTE: Kubernets CPU metrics are expressed in nanocores, or
        # 1 billionth of a CPU. We need to convert it to a whole CPU core.
        # (the [:-1] strips the trailing unit suffix — assumes it is a
        # single character such as 'n'; TODO confirm against the API)
        cpu_usage = math.ceil(
            int(metrics_json['usage']['cpu'][:-1]) / 1000000000)
        available_cpu = int(node['status']['allocatable']['cpu']) - cpu_usage
        # NOTE: Kubernets memory metrics are expressed Kibibyte so convert it
        # back to Gigabytes
        # (the [:-2] strips a two-character suffix — presumably 'Ki'; verify)
        memory_usage = math.ceil(
            int(metrics_json['usage']['memory'][:-2]) * 1.024e-06)
        # NOTE: we want the floor here so we don't over commit
        allocatable_memory = int(node['status']['allocatable']['memory'][:-2])
        allocatable_memory = math.floor(
            allocatable_memory * 1.024e-06)
        available_memory = allocatable_memory - memory_usage
        if available_cpu >= cpu and available_memory >= memory:
            nodes.append(node['metadata']['name'])
    return nodes | dc4fa48a681666bb4abb852f1e1819cb5aee2e2b | 34,871 |
import re
def quarter_to_months(when):
    """Expand the form 'YYYY-QX' into its three 'YYYY-MM' month strings.

    Returns None when ``when`` does not match the quarter pattern.
    """
    match = re.match(r'^(\d\d\d\d-)[qQ]([1-4])$', when)
    if match is None:
        return None
    year_prefix = match.group(1)
    # First month of the quarter: Q1 -> 1, Q2 -> 4, Q3 -> 7, Q4 -> 10.
    first_month = (int(match.group(2)) - 1) * 3 + 1
    return ['%s%02d' % (year_prefix, month)
            for month in range(first_month, first_month + 3)]
# | b66917d15d157bebf1c590e5781a89205ab64aa7 | 34,872 |
def xyxy2xywh(box):
    """
    Convert a bounding box from corner form (x1, y1, x2, y2) to centre form
    (x_center, y_center, width, height).

    :param box: array-like containing (x1, y1, x2, y2)
    """
    x1, y1, x2, y2 = box
    width = x2 - x1
    height = y2 - y1
    return x1 + width / 2, y1 + height / 2, width, height
# | af8b5d4568dfc29a71164ccef58f15b9c06f695a | 34,874 |
from typing import Any
from typing import Type
def ensure_namespace(obj: Any, name: str = 'orphan') -> Type:
    """Convert a ``dict`` to an object that provides ``getattr``.

    Parameters
    ----------
    obj : Any
        An object; may be a ``dict`` or a regular namespace object.
    name : str, optional
        Name for the new namespace type when one is created, 'orphan' by
        default.

    Returns
    -------
    type
        When ``obj`` is a ``dict``, a new ``type`` named ``name`` whose
        attributes are the key:value pairs of ``obj``.  Any other ``obj``
        is returned unchanged.

    Raises
    ------
    ValueError
        If ``obj`` is a ``dict`` containing keys that are not valid Python
        identifiers.
    """
    if not isinstance(obj, dict):
        return obj
    bad_keys = [str(k) for k in obj.keys() if not str(k).isidentifier()]
    if bad_keys:
        raise ValueError(
            f"dict contained invalid identifiers: {', '.join(bad_keys)}"
        )
    return type(name, (), obj)
# | ea83ba109520f2da68ea2b3d823c66fa5c2a6a82 | 34,875 |
# Hoisted to module level so the mapping is built once instead of on every
# call (the original rebuilt a 38-entry dict literal per invocation).
# 'tinyint' is handled separately because its target depends on the column
# type; unknown types fall back to VARCHAR.
_MYSQL_TO_SNOWFLAKE = {
    'char': 'VARCHAR',
    'varchar': 'VARCHAR',
    'binary': 'BINARY',
    'varbinary': 'BINARY',
    'blob': 'VARCHAR',
    'tinyblob': 'VARCHAR',
    'mediumblob': 'VARCHAR',
    'longblob': 'VARCHAR',
    'geometry': 'VARCHAR',
    'text': 'VARCHAR',
    'tinytext': 'VARCHAR',
    'mediumtext': 'VARCHAR',
    'longtext': 'VARCHAR',
    'enum': 'VARCHAR',
    'int': 'NUMBER',
    'smallint': 'NUMBER',
    'mediumint': 'NUMBER',
    'bigint': 'NUMBER',
    'bit': 'BOOLEAN',
    'decimal': 'FLOAT',
    'double': 'FLOAT',
    'float': 'FLOAT',
    'bool': 'BOOLEAN',
    'boolean': 'BOOLEAN',
    'date': 'TIMESTAMP_NTZ',
    'datetime': 'TIMESTAMP_NTZ',
    'timestamp': 'TIMESTAMP_NTZ',
    'time': 'TIME',
    'json': 'VARIANT',
}


def tap_type_to_target_type(mysql_type, mysql_column_type):
    """Data type mapping from MySQL to Snowflake.

    ``tinyint(1)`` is treated as BOOLEAN (the conventional MySQL boolean);
    any other tinyint maps to NUMBER.  Types without an explicit mapping
    fall back to VARCHAR.
    """
    if mysql_type == 'tinyint':
        return 'BOOLEAN' if mysql_column_type == 'tinyint(1)' else 'NUMBER'
    return _MYSQL_TO_SNOWFLAKE.get(mysql_type, 'VARCHAR')
# | ffcc66a1f31a45eb875cd8bf5d5a09f725edfc8d | 34,876 |
import sys
def get_main_globals():
    """
    Return the main global namespace
    EXAMPLES::
        sage: from sage.misc.misc import get_main_globals
        sage: G = get_main_globals()
        sage: bla = 1
        sage: G['bla']
        1
        sage: bla = 2
        sage: G['bla']
        2
        sage: G['ble'] = 5
        sage: ble
        5
    This is analogous to :func:`globals`, except that it can be called
    from any function, even if it is in a Python module::
        sage: def f():
        ...     G = get_main_globals()
        ...     assert G['bli'] == 14
        ...     G['blo'] = 42
        sage: bli = 14
        sage: f()
        sage: blo
        42
    ALGORITHM:
    The main global namespace is discovered by going up the frame
    stack until the frame for the :mod:`__main__` module is found.
    Should this frame not be found (this should not occur in normal
    operation), an exception "ValueError: call stack is not deep
    enough" will be raised by ``_getframe``.
    See :meth:`inject_variable_test` for a real test that this works
    within deeply nested calls in a function defined in a Python
    module.
    """
    # Walk outward through the call stack, one frame at a time, until a
    # frame whose globals belong to the top-level __main__ module is found.
    depth = 0
    while True:
        G = sys._getframe(depth).f_globals
        # A genuine script/interactive __main__ has __package__ set to None,
        # distinguishing it from a module executed via `python -m`.
        if G["__name__"] == "__main__" and G["__package__"] is None:
            break
        depth += 1
    return G | 55397b986121f7bb0943372b6bb7bf1579558a5e | 34,877 |
import numpy
def h2exp(hmag, sn=100, exptime=15.0):
    """Return the number of exposures required for each H magnitude at the
    given signal-to-noise, based on Hmag = 11 reaching S/N 100 in one hour.

    The required time is t = (60 min) * (sn/100)^2 * 10^(0.4*(H-11)),
    split into ``exptime``-minute exposures with a minimum of one.
    NaN magnitudes yield NaN exposure counts.

    :param hmag: numpy array of H magnitudes
    :param sn: target signal-to-noise ratio
    :param exptime: single-exposure length in minutes
    """
    minutes = 60 * (sn ** 2 / 100.0 ** 2) * 10 ** (0.4 * (hmag - 11))
    nexp = numpy.array(numpy.round(minutes / exptime))
    # At least one exposure.
    nexp[(nexp == 0)] = 1
    # Propagate NaN magnitudes.
    nexp[numpy.isnan(hmag)] = numpy.nan
    return nexp
# | d556889c9d61362f158130b44e6b9e51f98173ca | 34,881 |
from typing import Mapping
from typing import Any
def format_like_dict(
    mapping: Mapping[Any, Any],
) -> str:
    """
    Render a mapping in a human-readable, dict-like ``key=value`` format,
    comma-separated, with values shown via ``repr``.

    :param mapping: values to be formatted
    """
    pairs = [f"{key}={value!r}" for key, value in mapping.items()]
    return ", ".join(pairs)
# | 16cacd5e1537f0c5e9b5e2adffe0f2d5372845a2 | 34,883 |
def main(args=None):
    """Console script for t3.  Currently a no-op that returns exit code 0."""
    return 0
# | bde0c2511a92ee155dd2d1ad9adf0a182acd1514 | 34,884 |
import re
def fmtlog(txt):
    """
    Reformat the text of the one-line log as LaTeX.
    Arguments:
        txt: string to reformat.
    Returns:
        A LaTeX formatted version of the input.
    """
    # Replace TeX special characters in the whole text.
    # Each entry doubles as a regex fragment: '$' must appear pre-escaped
    # as r"\$", while the other characters are literal in a regex.
    specials = ("_", "#", "%", r"\$", "{", "}")
    for s in specials:
        # (?<!\\) leaves characters that are already backslash-escaped alone.
        txt = re.sub(r"(?<!\\)" + s, "\\" + s, txt)
    # Remove periods at the end of lines.
    txt = re.sub(r"\.$", "", txt, flags=re.MULTILINE)
    lines = txt.split("\n")
    # Remove reference to HEAD
    # (the parenthesised ref list on the first line, e.g. "(HEAD -> main) ").
    lines[0] = re.sub(r"\(.*\) ", "", lines[0])
    # Use typewriter font for the commit id.
    # Only the first space is replaced (count=1), which also closes the
    # \texttt{...} group right after the commit hash; empty lines are dropped.
    lines = [r"\texttt{" + re.sub(" ", r"} ", ln, count=1) for ln in lines if ln]
    return "\\\\\n".join(lines) | fbd49446b027c58303edabd60f96978ce19b2c57 | 34,887 |
def login_sysadmin_superuser(self):
    """
    Authenticate the test client as the sysadmin superuser.

    Returns self so that calls can be chained.
    """
    credentials = {"username": 'supersysadmin', "password": 'supersysadmin'}
    self.client.login(**credentials)
    return self
import psutil
import os
def mem():
    """Return the resident-set memory of the current process in MiB."""
    current = psutil.Process(os.getpid())
    rss_bytes = current.memory_info().rss
    return rss_bytes / (1024 * 1024)
def goodness_of_fit(ac):
    """
    Return an aggregated quality-of-fit metric per fit group.

    For every entry in ``ac.df_model_fits`` the per-row SSE is normalised
    by the sample count and summed.
    """
    return {
        group: (frame.fit_sse_y / frame.fit_n).sum()
        for group, frame in ac.df_model_fits.items()
    }
def get_longest_orf(orfs):
    """Return the ORF dict with the longest 'sequence' from *orfs*.

    Ties resolve to the earliest such ORF in the list.
    """
    return max(orfs, key=lambda orf: len(orf['sequence']))
def is_protected_variable(name: str) -> bool:
    """
    Tell whether *name* follows the protected-name pattern
    (exactly one leading underscore, not a dunder/private prefix).

    >>> is_protected_variable('_protected')
    True
    >>> is_protected_variable('__private')
    False
    >>> is_protected_variable('common_variable')
    False
    """
    if not name.startswith('_'):
        return False
    return not name.startswith('__')
def truncate(ys, n):
    """Trim wave array *ys* to at most its first *n* samples.

    ys: wave array
    n: integer length
    returns: wave array
    """
    head = ys[:n]
    return head
def f(L):
    """Sum selected components of nested tuples.

    Iterable[tuple[Number, tuple[Number, Number,
    tuple[Number, Number, Number], Number], Number]] -> Number
    """
    total = 0
    for outer_left, (x1, x2, (y1, _skip1, y2), _skip2), outer_right in L:
        total += outer_left + x1 + x2 + y1 + y2 + outer_right
    return total
def split_wo(s):
    """Strip the "-seg" suffix from each work order in Series *s*,
    returning only the part before the first dash."""
    parts = s.str.split('-', expand=True)
    return parts[0]
def check_login(db, usernick, password):
    """Return True when *password* (after ``db.encode``) matches the stored
    password for *usernick*; False otherwise, including unknown users."""
    cursor = db.cursor()
    cursor.execute("SELECT password FROM users WHERE nick = ?", [usernick])
    row = cursor.fetchone()
    if row is None:
        return False
    return bool(db.encode(password) == row[0])
import errno
import glob
import os
def usb_device_by_serial(arg_serial, sibling_port = None, *fields):
    """Locate a USB device by serial number in sysfs.

    Given a device with USB serial number *arg_serial*, return its sysfs
    path and optionally the contents of a list of its sysfs fields.

    Optionally operate on one of its siblings instead (a device plugged
    into another port of the same hub); this helps pinpoint devices that
    carry no USB serial number when they sit next to one that does.

    :param str arg_serial: USB serial number

    >>> usb_device_by_serial("4cb7b886a6b0")
    >>> '/sys/bus/usb/devices/1-3.2'

    :param int sibling_port: (optional) work instead on the device that
        shares the hub with the given device but is plugged into this
        port number; e.g. with serial *4cb7b886a6b0* and port 4:

    >>> usb_device_by_serial("4cb7b886a6b0", sibling_port = 4)
    >>> '/sys/bus/usb/devices/1-3.4'

    :param str fields: (optional) names of sysfs attribute files in the
        device directory whose contents shall also be returned.
    :return: the sysfs path when no *fields* were requested, else a list
        of the path followed by the field values. When the device (or
        the requested sibling port) does not exist, *None* — or a list
        of *None*s when fields were requested. Individual field values
        are *None* when the corresponding sysfs file does not exist.
    """
    def _read(path):
        # Best-effort read of a sysfs attribute; missing files yield None.
        try:
            with open(path) as f:
                return f.read().strip()
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
    # Scan every USB device's "serial" attribute, kinda like
    #   $ grep -r YK18738 /sys/bus/usb/devices/*/serial
    for serial_file in glob.glob("/sys/bus/usb/devices/*/serial"):
        if _read(serial_file) != arg_serial:
            continue
        device_path = os.path.dirname(serial_file)
        if sibling_port != None:
            # Redirect to the sibling: swap the trailing port number in
            # the sysfs name for the requested one. Top-level devices are
            # named BUSNUM-PORT, deeper ones BUSNUM-PORT.[PORT[.PORT...]],
            # so the separator before the last port number differs.
            if '.' in device_path:
                sep = "."
            else:
                sep = "-"
            prefix, _sep, _last = device_path.rpartition(sep)
            device_path = prefix + sep + str(sibling_port)
            if not os.path.isdir(device_path):
                # Requested sibling port is not populated.
                break
        if not fields:
            return device_path
        return [ device_path ] + [
            _read(os.path.join(device_path, name)) for name in fields
        ]
    # Not found (or the sibling port was empty).
    if not fields:
        return None
    return [None] * (1 + len(fields))
import numpy
def fold(time, period, T0):
    """Phase-fold *time* on *period* with epoch *T0*; phase lies in [0, 1)."""
    phase = (time - T0) / period
    return phase - numpy.floor(phase)
def quote_plus(s):
    """
    Convert some URL elements to be HTTP-safe.

    Unlike urllib's version, characters such as parentheses and commas
    pass through untouched; only '/' and spaces are percent-encoded.

    Parameters
    ----------
    s: input URL/portion

    Returns
    -------
    corrected URL
    """
    return s.replace('/', '%2F').replace(' ', '%20')
def get_activation_details(name, layer_type, layer, keyword_arguments):
    """
    Build the layer-details record for an activation function.
    """
    details = {'layer_details': None}
    details['name'] = name
    details['type'] = layer_type
    details['layer'] = layer
    details["keyword_arguments"] = keyword_arguments
    return details
def cc_parse_path_text(path_text):
    """Parse host/module/custom-level text paths into list form, tolerant
    of stray spaces and blank lines.

    :param path_text: newline-separated text paths, nodes joined by ">"
    :return: list of paths, each path being a list of node names

    example::

        a > b > c > s
        a>v>c
        a

    parses to::

        [
            [a, b, c, s],
            [a, v, c],
            [a]
        ]
    """
    text_path_list = path_text.split("\n")
    path_list = []
    for text_path in text_path_list:
        text_path = text_path.strip()
        if len(text_path) == 0:
            # Tolerate blank lines.
            continue
        path = []
        for text_node in text_path.split(">"):
            text_node = text_node.strip()
            # Bug fix: the original tested len(text_path) here, so empty
            # node segments (e.g. "a > > b") slipped through as "".
            if len(text_node) == 0:
                continue
            path.append(text_node)
        path_list.append(path)
    return path_list
def resolve_timestamp_field(field):
    """
    Timestamp fields should respect configuration: ISO-formatted fields
    map to a date-time string schema, all others to a float UNIX timestamp.
    """
    if field.use_isoformat:
        return {"type": "string", "format": "date-time"}
    return {"type": "float", "format": "timestamp"}
def getGuessedWord(secretWord: str, lettersGuessed: list) -> str:
    """
    secretWord: the word the user is guessing
    lettersGuessed: letters that have been guessed so far
    returns: space-separated string showing each letter of secretWord if
      it has been guessed, or an underscore otherwise.
    """
    revealed = []
    for ch in secretWord:
        revealed.append(ch if ch in lettersGuessed else '_')
    return ' '.join(revealed)
def is_palindromic_number(numb: int) -> bool:
    """
    Return whether *numb* reads the same backwards — a palindromic
    number, https://oeis.org/A002113
    """
    reversed_digits = str(numb)[::-1]
    return int(reversed_digits) == numb
def GetAtomicWeightsForModel(probeMol, fpFunction, predictionFunction):
    """
    Calculate per-atom weights for the probe molecule from a fingerprint
    function and the prediction function of an ML model: each weight is
    the drop in predicted value when that atom is removed.

    Parameters:
      probeMol -- the probe molecule
      fpFunction -- the fingerprint function
      predictionFunction -- the prediction function of the ML model
    """
    if hasattr(probeMol, '_fpInfo'):
        delattr(probeMol, '_fpInfo')
    baseProba = predictionFunction(fpFunction(probeMol, -1))
    # Re-predict with each atom removed; weight = base - perturbed.
    weights = [
        baseProba - predictionFunction(fpFunction(probeMol, atomId))
        for atomId in range(probeMol.GetNumAtoms())
    ]
    if hasattr(probeMol, '_fpInfo'):
        delattr(probeMol, '_fpInfo')
    return weights
def validate_required_keys_in_dict(dictionary, key_list):
    """
    Check that *dictionary* contains every required key; raise otherwise.

    Fixes the original docstring, which documented a nonexistent ``args``
    request parameter and a wrong return value.

    :param dictionary: the mapping to validate.
    :param key_list: keys that must be present (only 1st level keys).
    :raises ValueError: naming the first missing key.
    :return: the validated dictionary, unchanged.
    """
    for key in key_list:
        # Direct membership test; materialising .keys() is unnecessary.
        if key not in dictionary:
            raise ValueError("Missing JSON key: '{}'.".format(key))
    return dictionary
def variable_filter(constraints, variables):
    """Return True when *variables* fulfills the variable *constraints*."""
    var_types = [u'variable', u'cf_standard_name', u'variable_long_name']
    success = True
    # Check the different kinds of variable identifiers.
    for var_type in var_types:
        if var_type not in constraints:
            # No constraint of this type: nothing to satisfy.
            continue
        # A constraint exists, so at least one value must now match.
        success = False
        if var_type in variables:
            allowed_values = constraints.getall(var_type)
            for candidate in variables[var_type]:
                if candidate in allowed_values:
                    # A single matching variable satisfies the filter.
                    return True
    return success
def MakeUnique(s, in_use):
    """Return an identifier based on C{s} that is absent from C{in_use}.

    Uniqueness is achieved by appending an underscore and, if necessary,
    a serial number; the probe order is C{x}, C{x_}, C{x_2}, C{x_3}, ...
    C{in_use} is updated to contain the returned identifier.

    @param in_use: The set of identifiers already in use in the
        relevant scope.
    @rtype: C{str}
    """
    if s in in_use:
        base = s.rstrip('_')
        s = '%s_' % (base,)
        serial = 2
        while s in in_use:
            s = '%s_%d' % (base, serial)
            serial += 1
    in_use.add(s)
    return s
def reverse_builtin(value):
    """Reverse *value* by joining the output of the builtin ``reversed``."""
    backwards = reversed(value)
    return "".join(backwards)
def calc_min_vms_for_availability(zk_servers, bk_servers, ss_servers, cc_servers):
    """
    Compute the minimum number of VMs that preserves failure tolerance.

    Given per-service instance counts sized to tolerate some number of
    failures, the VM count must be at least the largest of them: with
    fewer VMs, a single VM failure would take down multiple instances of
    one Pravega service, voiding the tolerance guarantees.

    :param zk_servers: Number of Zookeeper instances.
    :param bk_servers: Number of Bookkeeper instances.
    :param ss_servers: Number of Segment Stores.
    :param cc_servers: Number of Controllers.
    :return: Minimum number of VMs satisfying the failure tolerance requirements.
    """
    counts = (zk_servers, bk_servers, ss_servers, cc_servers)
    return max(counts)
from typing import Dict
from typing import Optional
from typing import Set
def remove_zero_statistics(
    statistics: Dict[str, Dict[str, int]],
    force_keep: Optional[Set[str]] = None,
) -> Dict[str, Dict[str, int]]:
    """
    Drop modules whose available statistics are all zero.

    This can help declutter the reporting of statistics when many
    submodules have nothing to report.

    Args:
        statistics (dict(str, dict(str, int))) : per-module dictionaries
            of statistic values to filter.
        force_keep (set(str) or None) : modules to retain even when all
            of their statistics are zero.

    Returns:
        dict(str, dict(str, int)) : a new dictionary containing only the
            qualifying modules; inner dictionaries are copies.
    """
    keep = force_keep if force_keep is not None else set()
    return {
        module: stats.copy()
        for module, stats in statistics.items()
        if module in keep or any(value != 0 for value in stats.values())
    }
def guard(f, *args, **kwargs):
    """
    Run a function, capturing any exception it raises.

    Return (is_error, result): on success ``is_error`` is False and
    ``result`` is the return value; on failure ``is_error`` is True and
    ``result`` is the raised exception instance.
    """
    try:
        value = f(*args, **kwargs)
    except Exception as exc:
        return (True, exc)
    return (False, value)
def _predictions_inner(d1, d2):
"""
:param d1: dict of model k-step ahead prediction lists
:param d2: dict of weights
:return:
"""
the_sum = None
for k1,v1 in d1.items():
if k1 in d2:
if the_sum is None:
the_sum = [ v1i*d2[k1] for v1i in v1 ]
else:
the_sum = [ si + v1i*d2[k1] for si,v1i in zip(the_sum,v1) ]
return the_sum | 7de90aa46b3b5b1abcfd4a138024b2fbd6c0357e | 34,922 |
def get_kind_and_id(thing_id):
    """
    Split a reddit thing id of the form ``t#_#+`` at its first underscore.

    Args:
        thing_id (str): a reddit thing id in the form t#_#+
    Returns:
        list[str]: two elements — the kind (e.g. ``t3``) and the id part
    """
    parts = thing_id.split("_", 1)
    return parts
def helper(n, current_max):
    """
    Recursively find the largest digit of *n*.

    :param n: int, the number whose digits are scanned.
    :param current_max: int, largest digit seen so far.
    :return: int, largest digit found.
    """
    if n < 0:
        # Work on the absolute value.
        n = -n
    last_digit = n - (n // 10) * 10
    if 0 < last_digit <= 9 and last_digit > current_max:
        current_max = last_digit
    remaining = n // 10
    if remaining == 0:
        # Base case: no digits left to examine.
        return current_max
    # Recursive case: drop the last digit and continue.
    return helper(remaining, current_max)
def rescale_eigenfunctions(eigenfunctions, scale_factor=1.0):
    """Multiply *eigenfunctions* by *scale_factor* and return the result."""
    scaled = scale_factor * eigenfunctions
    return scaled
def cast_value_to_bool(value):
    """Cast a value to bool: the strings "1"/"true"/"yes" (any case) are
    True, booleans pass through unchanged, everything else is False."""
    if isinstance(value, bool):
        return value
    if isinstance(value, str):
        return value.lower() in ["1", "true", "yes"]
    return False
def sample_cov(r, **kwargs):
    """
    Return the sample covariance of the supplied returns *r*.

    Extra keyword arguments are accepted for interface compatibility and
    ignored.
    """
    covariance = r.cov()
    return covariance
def handleRawTBL(inDS):
    """
    Normalise a table extracted from a PDF file.

    Fixes up the column-layout quirks of the AZ and BNT PDF extracts,
    then adds a CAT column marking SOC and PT rows.

    Args:
        inDS: pandas dataframe as extracted from the PDF.
    Returns:
        pandas dataframe with harmonised columns plus CAT.
    """
    table = inDS
    columns = list(table.columns)
    # ---- AZ layout: spurious unnamed second column.
    if len(columns) == 4 and "Unnamed: 0" in columns and columns.index("Unnamed: 0") == 1:
        table = table.drop(["Unnamed: 0"], axis=1)
    # ---- AZ layout: three headers fused into a single column name.
    elif len(columns) == 3 and "Reaction NameTotalFatal" in columns:
        table = table.rename(index=str, columns={
            "Reaction NameTotalFatal": "Reaction Name",
            "Unnamed: 0": "Total",
            "Unnamed: 1": "Fatal"}
        )
    # Re-read the columns: the AZ fix-ups above may have changed them.
    columns = list(table.columns)
    # ---- BNT layout: Fatal values shifted into an unnamed column.
    if len(columns) == 4 and "Reaction Name" in columns and "Unnamed: 0" in columns:
        table = table.drop(["Fatal"], axis=1)
        table = table.rename(index=str, columns={"Unnamed: 0": "Fatal"})
    # ---- BNT layout: two extra columns before the real Fatal values.
    elif len(columns) == 5 and "Reaction Name" in columns and "Unnamed: 0" in columns:
        table = table.drop(["Unnamed: 0", "Fatal"], axis=1)
        table = table.rename(index=str, columns={"Unnamed: 1": "Fatal"})
    result = table.reset_index()
    result.loc[:, "index"] = result.loc[:, "index"].astype(int)
    # Row 0 is the System Organ Class; rows lacking a Total are Preferred Terms.
    result.loc[result.loc[:, "index"] == 0, "CAT"] = "SOC"
    result.loc[(result.loc[:, "Total"].isna()) & (result.loc[:, "index"] != 0), "CAT"] = "PT"
    print(result.shape)
    result = result.drop(["index"], axis=1)
    return result
import json
def mkccj_read_existing_json(parsedArgs):
    """Load the existing JSON file, if one was supplied on the command line.

    Returns (outputList, crossRefDict): the parsed list of records and a
    dict keyed by each record's 'file' field. Both are empty when no
    existing file was given.
    """
    if not parsedArgs.existing:
        return [], {}
    with open(parsedArgs.existing) as handle:
        records = json.load(handle)
    by_file = {record['file']: record for record in records}
    return records, by_file
def getPerimeterFromSiteLength(a, b, c):
    """Compute the perimeter of a triangle from its three sides a, b, c.

    Parameters: a: float, b: float, c: float
    Returns: the perimeter of the triangle (float)
    """
    perimeter = a + b + c
    return perimeter
import json
def js_to_json(js_text):
    """
    Convert a JS snippet of the form 'var objectName = {...};' into a
    dictionary of the form {'objectName': {...}}.

    :param js_text: JS source containing only a single variable definition.
    :return: a python dictionary mapping the variable name to its value.
    """
    first_equals = js_text.find('=')
    var_name = js_text[:first_equals].strip()
    var_value = js_text[first_equals + 1:].strip()
    # Remove the leading 'var' keyword from the name.
    var_name = var_name[3:].strip()
    # Bug fix: the documented input form ends with ';', which json.loads
    # rejects as extra data — strip it before parsing.
    if var_value.endswith(';'):
        var_value = var_value[:-1].rstrip()
    return {var_name: json.loads(var_value)}
def street_not_in_use(street, items):
    """
    Check that no element of *street* is already planned for play.

    For example, with items [2, 3, 4, 5] a candidate street containing
    3, 4 and 5 cannot be played because those values are in use.

    :param street: list of street elements that will be checked.
    :param items: list of items already planned to be played.
    :return: True when the new street shares no element with items.
    """
    return all(element not in items for element in street)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.