content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def commit_diff(c):
    """Return the set of file paths changed by a commit.

    Args:
        c (git.Commit): commit to inspect; must have at least one parent.

    Returns:
        set[str]: file paths (relative to the git repo's root directory)
        touched by ``c`` relative to its first parent.
    """
    parent = c.parents[0]
    changed = set()
    for diff_entry in c.diff(parent):
        # Either side of the diff may be None (file added or deleted).
        for blob in (diff_entry.a_blob, diff_entry.b_blob):
            if blob is not None:
                changed.add(blob.path)
    return changed
|
de6d7a2a1dfbadec2c4237259670118b7538ce81
| 25,661
|
def sort_donors(donor_dict):
    """Return donor names sorted by total amount donated, largest first.

    Args:
        donor_dict: mapping of donor name -> iterable of donation amounts.

    Returns:
        list: donor names ordered by descending donation total.
    """
    totals = {name: sum(amounts) for name, amounts in donor_dict.items()}
    return sorted(totals, key=totals.get, reverse=True)
|
5fe6a6a426294a6c67dd808f39c04dfd6fe74daa
| 25,663
|
from typing import List
import ast
def get_class_names_in_files(path: str) -> List[str]:
    """Return the names of all top-level classes defined in a Python file.

    Args:
        path: path of the source file to parse.

    Returns:
        List of class names in the order they appear at module level.
    """
    with open(path) as handle:
        tree = ast.parse(handle.read())
    names = []
    for node in tree.body:
        if isinstance(node, ast.ClassDef):
            names.append(node.name)
    return names
|
d72a4ea6570dc6a261f65e3b441e42b1e46852c3
| 25,664
|
import numpy
def _encoder_checksum(modes: int) -> numpy.ndarray:
""" Helper function for checksum_code that outputs the encoder matrix.
Args:
modes (int): matrix size is (modes - 1) x modes
Returns (numpy.ndarray): encoder matrix
"""
enc = numpy.zeros(shape=(modes - 1, modes), dtype=int)
for i in range(modes - 1):
enc[i, i] = 1
return enc
|
616b1b5af552eae5eddf095275666401b0e3b3e7
| 25,665
|
import re
def calc_query_pos_from_cigar(cigar, strand):
    """Compute the query start/end positions of a read from its CIGAR string.

    Args:
        cigar (str): CIGAR string, e.g. ``'86M65S'``.
        strand (bool): True for the forward strand, False for reverse.  For
            reverse-strand reads the CIGAR operations are scanned in reverse
            order so clipping is counted from the correct end of the read.

    Returns:
        tuple[int, int]: (query start, query end) positions.
    """
    cigar_ops = [(int(length), op) for length, op in
                 re.findall(r'(\d+)([A-Za-z])', cigar)]
    # BUG FIX: the original built this reversed list but then iterated the
    # unreversed one, so `strand` had no effect at all.
    if not strand:  # - strand
        cigar_ops = cigar_ops[::-1]
    qs_pos = 0
    qe_pos = 0
    q_len = 0
    for op_position, (op_len, op_type) in enumerate(cigar_ops):
        if op_position == 0 and op_type in ('H', 'S'):
            # A leading clip shifts both the start and end of the alignment.
            qs_pos += op_len
            qe_pos += op_len
            q_len += op_len
        elif op_type in ('H', 'S'):
            q_len += op_len
        elif op_type in ('M', 'I', 'X'):
            qe_pos += op_len
            q_len += op_len
    return qs_pos, qe_pos
|
a3a5366b52aefbf628a92155193684f91c60c208
| 25,666
|
def mesh_error(mesh1, mesh2):
    """Intersection-over-union score of two meshes.

    Note: despite the name, larger values mean closer agreement
    (1.0 for identical meshes).

    Args:
        mesh1, mesh2: mesh objects exposing ``intersection``/``union``
            (each returning a mesh) and a ``volume`` attribute.

    Returns:
        float: intersection volume divided by union volume.
    """
    inter_volume = mesh1.intersection(mesh2).volume
    union_volume = mesh1.union(mesh2).volume
    return inter_volume / union_volume
|
f6fb92b950020a7e5f0945179839838f5a613d44
| 25,667
|
from typing import List
def split_filter(string: str, delim: str) -> List[str]:
    """Split ``string`` on ``delim``.

    :param string: String to split.
    :param delim: Delimiter character.
    :return: List of fields; an empty string yields an empty list.
    """
    return string.split(delim) if string else []
|
743b29818fb13b90e2f9eff9ddd7c778dbc1b3dc
| 25,668
|
from typing import Tuple
def index_to_tuple(index: int, size: int) -> Tuple[int, int]:
    """Convert a flat index into (row, column) coordinates of a size x size grid.

    :param index: flat index, 0 <= index < size * size
    :param size: side length of the square 2-d array
    :return: (row, column) tuple
    :raises AssertionError: if ``index`` is out of bounds
    """
    assert 0 <= index < size * size, "Out of bound"
    # divmod keeps everything in integer arithmetic; the original
    # int(index / size) went through float division, which loses precision
    # for indices beyond 2**53.
    return divmod(index, size)
|
d3f5af92671bf5680dd2328f0d0b1f4a53615964
| 25,669
|
def find_maxProfit(weights: "list[int]", profits: "list[int]", capacity: "int",
                   greedy_method: "str" = "max") -> float:
    """Fractional knapsack solved with a greedy strategy.

    Args:
        weights: item weights.
        profits: item profits (parallel to ``weights``).
        capacity: total capacity of the knapsack.
        greedy_method: ordering strategy -- "min" (lightest items first),
            "max" (heaviest first) or "optimal" (best profit/weight first).

    Returns:
        float: the achieved profit, or -1 when the input lists differ
        in length.

    Raises:
        Exception: if ``greedy_method`` is not a known strategy.
    """
    if len(weights) != len(profits):
        print("Please provide correct values for profits and weights")
        return -1
    # Pair each weight with its profit.
    items = [{"weight": w, "profit": p} for w, p in zip(weights, profits)]
    if greedy_method == "min":
        items.sort(key=lambda item: item["weight"])
    elif greedy_method == "max":
        items.sort(key=lambda item: item["weight"], reverse=True)
    elif greedy_method == "optimal":
        items.sort(key=lambda item: item["profit"] / item["weight"], reverse=True)
    else:
        raise Exception("please provide correct value for 'greedy_method' ")
    cur_weight, total_profit = 0, 0
    for item in items:
        if cur_weight + item["weight"] <= capacity:
            # Whole item fits.
            cur_weight += item["weight"]
            total_profit += item["profit"]
        else:
            # Take the fractional part of the first item that doesn't fit.
            remains = capacity - cur_weight
            total_profit += item["profit"] * (remains / item["weight"])
            break
    return total_profit
|
75d12d63877bd792704fab00caf6690ff0e13f31
| 25,671
|
def replace_at(word, line, index):
    """Overwrite part of ``line`` with ``word`` starting at ``index``.

    The characters of ``line`` are replaced (not inserted); the result is
    clipped so it never grows longer than the original line.

    Arguments
    ---------
    word : str
        The text to copy into the line.
    line : str
        The line where the copy takes place.
    index : int
        The index to start copying at.

    Returns
    -------
    str
        The line with the replacement applied.
    """
    spliced = "".join([line[:index], word, line[index + len(word):]])
    return spliced[:len(line)]
|
1587e97e4886d75d509ec6558aedd66759028b06
| 25,673
|
def check_positioners(positioner_ids, command, fps, initialised=False):
    """Fail ``command`` if any positioner is missing (or uninitialised).

    Args:
        positioner_ids: IDs to validate.
        command: command object; ``command.fail`` is called on error.
        fps: FPS instance with a ``positioners`` mapping.
        initialised: when True, additionally require every positioner to
            report ``initialised``.

    Returns:
        bool: True when all checks pass, False otherwise.
    """
    connected = fps.positioners
    if any(pid not in connected for pid in positioner_ids):
        command.fail(error="some positioners are not connected.")
        return False
    if initialised:
        if any(not fps[pid].initialised for pid in positioner_ids):
            command.fail(error="some positioners are not initialised.")
            return False
    return True
|
7f6da172cdde8afedd9a4027e3d9b3af407975f5
| 25,675
|
def coverage(comm, edge_attr=None):
    """Fraction of (optionally weighted) edge weight inside communities.

    Args:
        comm: community object exposing ``graph.es`` (iterable of edges with
            ``source``/``target``) and a ``membership`` list mapping vertex
            index -> community id.
        edge_attr: optional edge attribute name used as weight; each edge
            counts as 1 when omitted.

    Returns:
        float: intra-community edge weight divided by total edge weight.
    """
    intra, total = 0., 0.
    for edge in comm.graph.es:
        weight = edge[edge_attr] if edge_attr else 1
        total += weight
        if comm.membership[edge.source] == comm.membership[edge.target]:
            intra += weight
    return intra / total
|
5be108f8ccc8594e77db20b8d8c4eb7f4b58dbc7
| 25,677
|
def _normalize_string(text):
"""Trims the text, removes all the spaces from text and replaces every sequence of lines with a single line.
Args:
text (str): The schema definition.
Returns:
str: The normalized text.
"""
text = text.strip()
result = ""
whitespaces = [" ", "\t"]
in_comment = False
in_string = False
string_char = None
for c in text:
if c == "#":
in_comment = True
if c == "\n":
in_comment = False
if c == '"' or c == "'":
if in_string and c == string_char:
in_string = False
elif not in_string and not in_comment:
in_string = True
string_char = c
if not in_comment and not in_string and c in whitespaces:
continue
if c == "\n" and result.endswith("\n"):
continue
result += c
return result
|
0bc387e7563a4c961a9fe189547c5146337899bc
| 25,680
|
def load(file, width, height, data_start, colors, color_depth, *, bitmap=None, palette=None):
    """Loads indexed bitmap data into bitmap and palette objects.
    :param file file: The open bmp file
    :param int width: Image width in pixels
    :param int height: Image height in pixels
    :param int data_start: Byte location where the data starts (after headers)
    :param int colors: Number of distinct colors in the image
    :param int color_depth: Number of bits used to store a value
    :param bitmap: factory/class used to build the bitmap; skipped when None
    :param palette: factory/class used to build the palette; skipped when None
    :return: (bitmap, palette) tuple; either element is None when not requested"""
    # pylint: disable=too-many-arguments,too-many-locals
    if palette:
        palette = palette(colors)
        # The BMP color table sits immediately before the pixel data,
        # 4 bytes (BGRx) per entry.
        file.seek(data_start - colors * 4)
        for value in range(colors):
            c_bytes = file.read(4)
            # Need to swap red & blue bytes (bytes 0 and 2)
            # NOTE(review): c_bytes[3:1] is an empty slice, so the 4th
            # (padding/alpha) byte is dropped and each palette entry ends up
            # 3 bytes long -- confirm this is what the palette type expects.
            palette[value] = bytes(b''.join([c_bytes[2:3],
                                             c_bytes[1:2],
                                             c_bytes[0:1],
                                             c_bytes[3:1]]))
    if bitmap:
        # Smallest power-of-two bit depth able to index all colors.
        minimum_color_depth = 1
        while colors > 2 ** minimum_color_depth:
            minimum_color_depth *= 2
        bitmap = bitmap(width, height, colors)
        file.seek(data_start)
        # BMP rows are padded out to a multiple of 4 bytes.
        line_size = width // (8 // color_depth)
        if width % (8 // color_depth) != 0:
            line_size += 1
        if line_size % 4 != 0:
            line_size += (4 - line_size % 4)
        chunk = bytearray(line_size)
        mask = (1 << minimum_color_depth) - 1
        # BMP stores rows bottom-up, so walk y from the last row to the first.
        for y in range(height - 1, -1, -1):
            file.readinto(chunk)
            pixels_per_byte = 8 // color_depth
            offset = y * width
            for x in range(width):
                i = x // pixels_per_byte
                # Pixels are packed MSB-first within each byte.
                pixel = (chunk[i] >> (8 - color_depth*(x % pixels_per_byte + 1))) & mask
                bitmap[offset + x] = pixel
    return bitmap, palette
|
5d69ff0dc0188b3128be59c0c1b955e72256345b
| 25,683
|
import os
def get_imu_csv_files(input_dir):
    """Recursively collect csv files whose names start with ``imu``.

    Args:
        input_dir: directory to search; a missing directory yields [].

    Returns:
        list[str]: full paths of the matching files.
    """
    matches = []
    if not os.path.exists(input_dir):
        return matches
    for root, _, filenames in os.walk(input_dir):
        for name in filenames:
            if name.startswith('imu') and name.endswith('.csv'):
                matches.append(os.path.join(root, name))
    return matches
|
1bec4ed4f9549e5949962103058126c389a98b90
| 25,684
|
def normalize_rgb(rgb):
    """
    Scale 8-bit RGB values into the 0~1 range.

    :param rgb: a tuple of rgb values, or a list of such tuples
    :return: the normalized tuple (or list of normalized tuples)
    :raises NotImplementedError: for any other input type
    """
    if isinstance(rgb, tuple):
        return tuple(float(channel) / 255 for channel in rgb)
    if isinstance(rgb, list):
        # Recurse on each entry so nested lists of tuples also work.
        return [normalize_rgb(entry) for entry in rgb]
    raise NotImplementedError('Data type: {} not understood'.format(type(rgb)))
|
b2eecc75cdae5d26714768a26d888bad18adb548
| 25,685
|
def justReturn(inval):
    """
    Identity function: hand back the argument unchanged.

    Parameters
    ----------
    inval : anything

    Returns
    -------
    anything
        The very same object that was passed in.
    """
    return inval
|
9e7ae43cf4aa2456e67cbe12f08e01a6fbc682a9
| 25,687
|
def find_dialogue_event(dialogue_event_name, dialogue_event_defs):
    """Look up a dialogue event definition by name.

    :param dialogue_event_name: name to search for.
    :param dialogue_event_defs: iterable of dialogue event definitions.
    :return: the first definition whose ``name`` matches, or None.
    """
    return next(
        (d for d in dialogue_event_defs if d.name == dialogue_event_name),
        None,
    )
|
114ec4a2ee426d789ebcf76eb46756194e108d19
| 25,688
|
def split_by_pred(pred, iterable, constructor=list):
    """Partition ``iterable`` into (matching, non-matching) collections.

    Returns a tuple (l1, l2) where
      * l1 holds the elements for which ``pred(elem)`` is true,
      * l2 holds the rest.
    ``constructor`` builds the two output collections; it must produce
    something with an ``append`` method.
    """
    matched, unmatched = constructor(), constructor()
    for element in iterable:
        target = matched if pred(element) else unmatched
        target.append(element)
    return matched, unmatched
|
dc0c43cd5d34869566f283d92038aaa4a53d5c62
| 25,689
|
from typing import List
def selection_sort(arr: List[int]) -> List[int]:
    """Sort ``arr`` in place (ascending) and return it.

    Classic selection sort: repeatedly find the largest element of the
    unsorted prefix and swap it to the end of that prefix.
    """
    for boundary in range(len(arr) - 1, 0, -1):
        max_idx = boundary
        for candidate in range(boundary):
            if arr[candidate] > arr[max_idx]:
                max_idx = candidate
        arr[max_idx], arr[boundary] = arr[boundary], arr[max_idx]
    return arr
|
e3792982518312b158695873af1dafac5b22a92d
| 25,690
|
def add_size(string):
    """Prefix ``string`` with its length, netstring-style (``len:payload``)."""
    return '{}:{}'.format(len(string), string)
|
bc740c0e91b2bd5879f684b522dce5bf3dc1da27
| 25,691
|
from typing import Dict
def _map_dtypes(sql_dtypes: Dict[str, str]) -> Dict[str, type]:
"""
Create mapping from SQL data types to Python data types.
:param sql_dtypes: A mapping from the column names in a SQL table
to their respective SQL data types.
Example: {"ct_id": int(10) unsigned NOT NULL AUTO_INCREMENT}
:type sql_dtypes: Dict[str, str]
:return: A mapping from the column names in a SQL table
to their respective Python data types. Example: {"ct_id": int}
:rtype: Dict[str, type]
"""
types: Dict[str, type] = {}
for key, val in sql_dtypes.items():
if "int" in val:
types[key] = int
elif any(dtype in val for dtype in ("float", "double", "decimal", "numeric")):
types[key] = float
else:
types[key] = str
return types
|
70360c323f9193f49de40acf37ddaa58010467f8
| 25,692
|
def crc16(data):
    """
    Calculate an ISO13239 CRC checksum of the input buffer
    (reflected poly 0x8408, init 0xFFFF, no final XOR).

    Args:
        data: a str (each character taken via ord) or a bytes-like object.
            Accepting bytes is a backward-compatible generalization; str
            input behaves exactly as before.

    Returns:
        int: the 16-bit checksum.
    """
    m_crc = 0xffff
    for this in data:
        # Iterating bytes yields ints already; str yields 1-char strings.
        m_crc ^= this if isinstance(this, int) else ord(this)
        for _ in range(8):
            j = m_crc & 1
            m_crc >>= 1
            if j:
                m_crc ^= 0x8408
    return m_crc
|
41739f0c1e38b3e4c579634bfbbde0ae128b8146
| 25,693
|
from typing import Sequence
from typing import Any
from typing import Counter
def sequence_equals(a: Sequence[Any], b: Sequence[Any]) -> bool:  # pylint: disable=invalid-name
    """
    Check whether two sequences contain the same multiset of elements.

    ``True`` when both sequences have the same length and every distinct
    element occurs with the same frequency in each.  Runs in time and extra
    space proportional to the size of the arguments.

    :param Sequence[Any] a: one collection
    :param Sequence[Any] b: the other collection
    :return: ``True`` if ``a`` and ``b`` contain the same elements, otherwise ``False``
    :rtype: bool
    """
    if a is b:
        return True
    if len(a) != len(b):
        return False
    return Counter(a) == Counter(b)
|
bb4bc84e7be1491f49f7e3212e34d755298178ea
| 25,694
|
def _get_gpus_by_instance_type(instances, instance_type):
"""
Get gpus for the given instance type from the pricing file.
:param instances: dictionary conatining the content of the instances file
:param instance_type: The instance type to search for
:return: the number of GPU for the given instance type
:raise CriticalError if unable to find the given instance or whatever error.
"""
try:
gpus = int(instances[instance_type]["gpu"])
return gpus
except KeyError:
# If instance has no GPU, return 0
return 0
|
f47989f6d17fe185c15b742d8d1f3ac615a20240
| 25,697
|
from bs4 import BeautifulSoup
def process_sitemap(s):
    """
    Extract all URLs from a sitemap document.

    :param s: Sitemap content in xml format
    :return: list of the URLs found in ``<loc>`` elements, in document order
    """
    soup = BeautifulSoup(s, features='html.parser')
    return [loc.text for loc in soup.findAll('loc')]
|
a5bc184794284cba83705f418833093f1f7f7976
| 25,698
|
def backward_cross_entropy_loss(probabilities, labels):
    """
    Gradient of the cross entropy loss w.r.t. the predicted probabilities.

    probabilities is of the shape (# classes)
    labels is of the shape (# classes)

    Returns:
        Array of shape (# classes): d(loss)/d(probabilities) = -labels / probabilities.
    """
    grad = labels / probabilities
    return -grad
|
78c860d9acf6501826ff63fb24a9002dba616ae0
| 25,699
|
def pad_docket_by_year(y):
    """
    Left-pad a docket year number toward its canonical 7-digit form.

    6-digit values get one leading zero (an ought's 2000's case), values of
    5 digits or fewer get two (an exactly-2000 case); anything with 7 or
    more digits is returned unchanged.
    """
    digits = str(y)
    length = len(digits)
    if length == 6:
        return "0" + digits
    if length <= 5:
        return "00" + digits
    return digits
|
0a1db17338a50bff8fd6da4beb42516d431b78fd
| 25,700
|
def add_mixin(klass, *mixins):
    """
    Dynamically declare a new class named ``Extended{old_name}`` that
    inherits from the given mixins *and* the original class.

    :param klass: the class to redeclare with mixins
    :param mixins: the mixins to use
    :return: the newly created class object

    Note: the previous version built the new type from the mixins only,
    dropping ``klass`` from the bases, so the result never actually
    inherited the original class's behavior.
    """
    new_name = 'Extended{}'.format(klass.__name__)
    # Mixins come first in the MRO so their methods override the base class.
    return type(new_name, tuple(mixins) + (klass,), {})
|
6049bd876c5318d6b9a8f53e4ab8febd4368a802
| 25,701
|
import json
def read_class_names(class_names_path: str):
    """Read class names from a file; supports .txt and .json.

    Args:
        class_names_path: `str`, path to json/txt file containing classes.
            A text file should contain one class name per line.
            A json file should contain a single `Mapping[int, str]` dict
            (a `Mapping[str, int]` is inverted automatically).

    Returns:
        dict mapping int index -> class name string.

    Raises:
        NotImplementedError: for any other file extension.
        ValueError: when the loaded mapping has wrong key/value types.
    """
    names = {}
    if class_names_path.endswith('.txt'):
        with open(class_names_path, 'r') as data:
            names = {idx: line.strip('\n') for idx, line in enumerate(data)}
    elif class_names_path.endswith('.json'):
        with open(class_names_path) as f:
            names = json.load(f)
        if type(list(names.keys())[0]) == str and type(list(names.values())[0]) == int:
            # Mapping was saved as name -> index; invert it.
            names = {v: k for k, v in names.items()}
    else:
        raise NotImplementedError('File type is not .txt or .json, path %s' %class_names_path)
    first_key = list(names.keys())[0]
    if type(first_key) != int:
        raise ValueError('Loaded dict %s has wrong key type %s' %(
            class_names_path, type(list(names.keys())[0])))
    first_value = list(names.values())[0]
    if type(first_value) != str:
        raise ValueError('Loaded dict %s has wrong value type %s' %(
            class_names_path, type(list(names.values())[0])))
    return names
|
8808507206c491a297a1ad04d4a30f58b3977ca9
| 25,703
|
def validate_file_input(file: dict):
    """Validate that a file payload is present.

    :param file: dictionary payload to check
    :returns: ``(True, 200)`` when the payload is truthy, otherwise an
        error message paired with status 400.
    """
    if file:
        return True, 200
    return "The input was not a correct file type", 400
|
0214b63abd91ec47e3722ed6b423b4eb57d7fe6f
| 25,704
|
import math
def factorial(x):
    """Return x! (thin wrapper around math.factorial)."""
    result = math.factorial(x)
    return result
|
ad881ed82c3bc40fc726b15597563774286ba681
| 25,705
|
def pd_rolling_mean(series, window_size):
    """
    Rolling mean of ``series`` over ``window_size`` samples, with the
    leading NaN rows dropped.

    The first ``window_size - 1`` positions of a rolling mean are undefined,
    so only rows from position ``window_size - 1`` onward are returned.
    """
    means = series.rolling(window_size).mean()
    return means[window_size - 1:]
|
1f2b03d29b61c1f2d1bf1b98dfa92a9e852667a2
| 25,706
|
def bufsize_type_to_bufsize(bf_type):
    """Map a bufsize *type* to the actual chunk size we will read.

    Type 1 means "newline-buffered", but we still read 1024-byte chunks
    because we have to read something; a StreamBufferer instance handles
    splitting the chunk on newlines.  Type 0 (unbuffered) reads one byte at
    a time, and any other value is used directly as the buffer size.
    """
    special = {1: 1024, 0: 1}
    return special.get(bf_type, bf_type)
|
810e56c15871caa5df58518f586a721ae55b92ae
| 25,707
|
import os
def PathExists(v):
    """Return True when ``v`` names an existing filesystem path of any type."""
    exists = os.path.exists(v)
    return exists
|
36de7b06ca228e162244bb710f54d566e7707521
| 25,708
|
def kida_parser(kida_file):
    """
    KIDA used a fixed format file so we read each line in the chunks they specify
    and use python built in classes to convert to the necessary types.
    NOTE KIDA defines some of the same reaction types to UMIST but with different names
    and coefficients. We fix that by converting them here.

    Args:
        kida_file: path to the KIDA-format reaction file; the first line is
            treated as a header and discarded.

    Returns:
        list: one parsed row per accepted reaction, with the reaction-type
        column and one coefficient column removed (``row[:7] + row[8:-1]``).
    """
    str_parse = lambda x: str(x).strip().upper()
    # Fixed-width layout table: each entry is [repeat_count, {converter: width}].
    # A "skip" key consumes characters without emitting a field.
    kida_contents = [
        [3, {str_parse: 11}],
        [1, {"skip": 1}],
        [5, {str_parse: 11}],
        [1, {"skip": 1}],
        [3, {float: 10, "skip": 1}],
        [1, {"skip": 27}],
        [2, {int: 6, "skip": 1}],
        [1, {int: 2}],
        [1, {"skip": 11}],
    ]
    rows = []
    with open(kida_file, "r") as f:
        f.readline()  # throw away header
        for line in f:  # then iterate over file
            row = []
            # Consume the line left to right, converting each fixed-width field.
            for item in kida_contents:
                for i in range(item[0]):
                    for func, count in item[1].items():
                        if func != "skip":
                            a = line[:count]
                            row.append(func(a))
                        line = line[count:]
            # Some reformatting required
            # KIDA gives CRP reactions in different units to UMIST
            # row[-1] is the KIDA reaction-type code.
            if row[-1] == 1:
                # Amazingly both UMIST and KIDA use CRP but differently.
                # Translate KIDA names to UMIST
                if row[1] == "CRP":
                    row[1] = "CRPHOT"
                    # with beta=0 and gamma=1, the KIDA formulation of
                    # CRPHOT reactions becomes the UMIST one
                    row[10] = 1.0
                elif row[1] == "CR":
                    row[1] = "CRP"
                    # UMIST alpha includes zeta_0 but KIDA doesn't. Since UCLCHEM
                    # rate calculation follows UMIST, we convert.
                    row[8] = row[8] * 1.36e-17
                rows.append(row[:7] + row[8:-1])
            elif row[-1] in [2, 3]:
                rows.append(row[:7] + row[8:-1])
            elif row[-1] == 4:
                # NOTE(review): types 4/5 write their marker into row[2] while
                # type 1 rewrites row[1] -- confirm this asymmetry is intended.
                row[2] = "IONOPOL1"
                rows.append(row[:7] + row[8:-1])
            elif row[-1] == 5:
                row[2] = "IONOPOL2"
                rows.append(row[:7] + row[8:-1])
    return rows
|
e5c9484a9f4ccbc6549c7558ed5087f85ec2571a
| 25,709
|
import itertools
def flatten(iterable):
    """Flatten one level of nesting, lazily.
    >>> list(flatten([[0, 1], [2, 3]]))
    [0, 1, 2, 3]
    >>> list(flatten([[0, 1], [2, 3, 4, 5]]))
    [0, 1, 2, 3, 4, 5]
    """
    for inner in iterable:
        yield from inner
|
6860f65582952819ae56178cf97cd2eb2133bbf1
| 25,710
|
def build_url(url, store_name, key):
    """Combine the parts of the REST-API url.

    Parameters
    ----------
    url : str
        the base url
    store_name : str
        the name of the voldemort store
    key : str
        the url part which represents the key or keys

    Returns
    -------
    str
        the combined url of the REST-API
    """
    return f"{url}/{store_name}/{key}"
|
6cf0c8e86721b139907dda491d92e413a913ae38
| 25,711
|
import math
def generateParetoScaledVariate(rand, alpha, beta):
    """Inverse-transform sample of a Pareto distribution.

    Inverts F(x) = 1 - (b/x)^a for x >= b, so a uniform ``rand`` in [0, 1)
    maps to a Pareto(alpha) variate with scale ``beta``.
    """
    tail = (1 - rand) ** (1 / alpha)
    return beta / tail
|
5a3aaa7407a37b9837d3eaab8c5162df017b6511
| 25,712
|
import os
def read_config(fin):
    """Read ``key = value`` search parameters from file ``fin``.

    The file is resolved relative to this module's directory.  Lines are
    lower-cased and stripped; blank lines, ``#`` comments and lines with an
    empty value (ending in ``=``) are skipped.

    Returns:
        dict: stripped key -> stripped value.
    """
    app_data = os.path.dirname(os.path.abspath(__file__))
    params = {}
    with open(os.path.join(app_data, fin), 'rb') as handle:
        for raw in handle:
            text = raw.decode('utf-8').lower().strip()
            if not text or text.startswith('#') or text.endswith('='):
                continue
            key, value = text.split('=')
            params[key.strip()] = value.strip()
    return params
|
6bcc857b51ba35068ddc226291216f94109bdb4b
| 25,714
|
def interpret_numbers(user_range):
    """
    Parse a comma/dash range spec into a list of ints.

    :param user_range: A string specifying a range of numbers. Eg.
        interpret_numbers('4-6')==[4,5,6]
        interpret_numbers('4,6')==[4,6]
        interpret_numbers('4,6-9')==[4,6,7,8,9]
    :return: A list of integers, or None if the input is not numeric
    """
    if not all(ch in '0123456789-,' for ch in user_range):
        return None
    numbers = []
    for piece in user_range.split(','):
        if '-' in piece:
            low, high = piece.split('-', 1)
            numbers.extend(range(int(low), int(high) + 1))
        else:
            numbers.append(int(piece))
    return numbers
|
dc3a156bdb392e8a54edf95fc4182dfd5965010a
| 25,716
|
def square_sum(numbers):
    """
    Square each number in ``numbers`` and sum the results.

    :param numbers: an array of integers.
    :return: the sum of the squared elements.
    """
    total = 0
    for value in numbers:
        total += value * value
    return total
|
a9827057e287711dfc449d9169e9b6da7fd0e1da
| 25,717
|
from typing import Any
from typing import Optional
from typing import Dict
from typing import Iterable
def get_attributes(
    obj: Any,
    remap: Optional[Dict[str, str]] = None,
    attrs: Optional[Iterable[str]] = None,
) -> Dict[str, Any]:
    """Extract a list of attributes from a Reddit object.

    :param obj: object to read attributes from.
    :param remap: mapping of source attribute name -> output key.
    :param attrs: attribute names copied under their own name.
    :return: dict of the collected values; falsy attribute values are
        skipped, and missing attributes raise AttributeError.
    :raises ValueError: if neither ``remap`` nor ``attrs`` is given.
    """
    if not remap and not attrs:
        raise ValueError("Must provide remap or attributes.")
    collected: Dict[str, Any] = {}
    for source, target in (remap or {}).items():
        if value := getattr(obj, source):
            collected[target] = value
    for name in attrs or ():
        if value := getattr(obj, name):
            collected[name] = value
    return collected
|
3437f373d911ba845e0acf0601128479db594404
| 25,719
|
from datetime import datetime
def same_date(date1: datetime, date2: datetime) -> bool:
    """Check whether two datetime objects fall on the same calendar date."""
    return (date1.year, date1.month, date1.day) == (date2.year, date2.month, date2.day)
|
27abc8d14dfdc7e02f004696ceed028aef27aa0e
| 25,720
|
import numpy
def iplanck(radiance, wavelength=10.25):
    """
    Invert Planck's function: black-body temperature from thermal radiance.
    Reference: https://ncc.nesdis.noaa.gov/data/planck.html

    Parameters
    ----------
    radiance : float
        Thermal radiance of the black body at the given wavelength [W/m2].
    wavelength : float
        Central wavelength used for the calculation [micrometers]. Default is 10.25um.

    Returns
    -------
    float: Object temperature [Kelvin]
    """
    planck = 6.6260755e-34    # Planck constant [J s]
    light = 2.9979246e8       # speed of light [m/s]
    boltzmann = 1.380658e-23  # Boltzmann constant [J/K]
    cwl = wavelength * 1e-6   # central wavelength in meters
    ratio = (2.0 * planck * light * light) / (radiance * 1e6 * numpy.power(cwl, 5)) + 1.0
    return planck * light / (boltzmann * cwl) / numpy.log(ratio)
|
3205f0e18fd68daad5cf1a7f1a25151e51d0f036
| 25,724
|
def find_metadata_end(raw_file):
    """Return the line index of the closing ``---`` metadata fence.

    The first line containing ``---`` opens the metadata block and the
    index of the next such line closes it.  (The previous version kept an
    unused ``end`` variable and fell off the end implicitly.)

    Args:
        raw_file (str): full text of the file.

    Returns:
        int | None: 0-based index of the closing fence line, or None when
        no complete metadata block is found.
    """
    start = None
    for idx, line in enumerate(raw_file.split("\n")):
        if "---" not in line:
            continue
        if start is None:
            start = idx
        else:
            return idx
    return None
|
87ade1898d0a6e227dee6e6ba1495453e0bd8098
| 25,727
|
def newton(f, df, x, tol=1e-12, n=0):
    """
    Newton's method for root finding, implemented recursively.

    Parameters
    ==========
    f : callable, function
    df : callable, derivative of f
    x : initial value
    tol : tolerance
    n : number of steps taken so far

    Returns
    =======
    (x, n) : approximate root of f, and the number of steps used
    """
    x_next = x - f(x) / df(x)
    converged = abs(x - x_next) < tol or abs(f(x_next)) < tol
    if converged:
        return x_next, n + 1
    return newton(f, df, x_next, tol, n + 1)
|
c8a4bc23025cd5cc6552da3c66ff46dabd5e54ec
| 25,728
|
def cli(ctx, dataset_id, published=False):
    """Toggle a dataset between publicly available and private.

    For finer-grained control (assigning different permissions to specific
    roles), use the ``update_permissions()`` method instead.

    Output:
        Current roles for all available permission types.

    .. note::
        This method can only be used with Galaxy ``release_19.05`` or later.
    """
    datasets_client = ctx.gi.datasets
    return datasets_client.publish_dataset(dataset_id, published=published)
|
44bfafc3fc5a1dcd28c0439f31e757b57d158d8b
| 25,730
|
import torch
def simple_nms(scores, nms_radius: int):
    """Fast non-maximum suppression: keep only scores that are the maximum
    of their square (2*nms_radius+1) neighbourhood; everything else is
    zeroed out."""
    assert nms_radius >= 0

    def local_max(t):
        window = nms_radius * 2 + 1
        return torch.nn.functional.max_pool2d(
            t, kernel_size=window, stride=1, padding=nms_radius)

    zeros = torch.zeros_like(scores)
    keep = scores == local_max(scores)
    # Two refinement rounds: suppress neighbourhoods of accepted maxima,
    # then re-detect maxima among the scores that survived.
    for _ in range(2):
        suppressed = local_max(keep.float()) > 0
        remaining = torch.where(suppressed, zeros, scores)
        new_keep = remaining == local_max(remaining)
        keep = keep | (new_keep & (~suppressed))
    return torch.where(keep, scores, zeros)
|
21a60b7be5f029a5be3e6d9d56fa6a576b111e8d
| 25,731
|
import numpy
def AdamBashforth2Corrector(predicted, calculated, actual, timeStep):
    """
    Adam-Bashforth 2 corrector method.

    Starting from actual[0], accumulates (predicted[i] - calculated[i-1])
    * timeStep / 2 at each subsequent index and records the running total.
    """
    count = actual.size
    output = numpy.zeros((count,))
    running = actual[0]
    output[0] = running
    for i in range(1, count):
        running += (predicted[i] - calculated[i-1]) * (1/2) * timeStep
        output[i] = running
    return output
|
9515fe42e3a125b939c69f8b1636139a0ca79ee1
| 25,732
|
import random
def random_hero_image():
    """Pick a random image from the hero folder; the returned relative
    location is meant for use in a static tag."""
    index = random.randint(1, 43)
    return "img/hero/{}.jpg".format(index)
|
968cdc5f7b8910b05f9716442f5d331843f4dfcb
| 25,734
|
import math
def _cuda_line_ellipsoid_intersection(r0, step, semiaxes):
"""Calculate the distance from r0 to an axis-aligned ellipsoid centered at
origin along step. r0 must be inside the ellipsoid.
Parameters
----------
r0 : numba.cuda.cudadrv.devicearray.DeviceNDArray
step : numba.cuda.cudadrv.devicearray.DeviceNDArray
semiaxes : numba.cuda.cudadrv.devicearray.DeviceNDArray
Returns
-------
float
"""
a = semiaxes[0]
b = semiaxes[1]
c = semiaxes[2]
A = (step[0] / a) ** 2 + (step[1] / b) ** 2 + (step[2] / c) ** 2
B = 2 * (
a ** (-2) * step[0] * r0[0]
+ b ** (-2) * step[1] * r0[1]
+ c ** (-2) * step[2] * r0[2]
)
C = (r0[0] / a) ** 2 + (r0[1] / b) ** 2 + (r0[2] / c) ** 2 - 1
d = (-B + math.sqrt(B ** 2 - 4 * A * C)) / (2 * A)
return d
|
96c768a0073459b682eade39304550b4d5a7ae6a
| 25,735
|
def safe_for_db_retry(f):
    """Mark ``f`` as retry-enabled for the DB layer and return it.

    The retry machinery looks for the ``enable_retry`` attribute; it only
    takes effect when the corresponding config option is enabled.
    """
    setattr(f, 'enable_retry', True)
    return f
|
824f66c2de035e69bb811e0aaf70a8df3b876c32
| 25,737
|
def get_targ_pairs(start_cond, end_cond):
    """Return boolean mask for paired target conditions.

    An element is True when the reversed condition (end -> start) also
    appears somewhere in the inputs.  E.g. 'T1 -> T5' is marked paired
    when 'T5 -> T1' exists in the data as well.

    Inputs:
        start_cond - List of strings indicating starting conditions
        end_cond - List of strings indicating ending conditions

    Returns:
        mask - List of booleans, one per input element
    """
    # Set of all forward condition strings for O(1) lookup.
    forward = {s + e for s, e in zip(start_cond, end_cond)}
    # Each element is paired iff its flipped string occurs among the
    # forward conditions.
    return [(e + s) in forward for s, e in zip(start_cond, end_cond)]
|
aa353dc9f5c17f210e1f82669d7fef24a43aa035
| 25,739
|
import argparse
def make_parser():
    """
    Build the command line parser for the program.

    Options are grouped by dataset type (SBM, Gaussian+KNN, mindsets,
    questionnaire), preprocessing, run id and plotting.

    Returns
    -------
    parser: ArgumentParser
        The parser
    """
    parser = argparse.ArgumentParser(description='Program to compute tangles')
    # Core experiment settings.
    parser.add_argument('-t', dest='dataset_name', action='store')
    parser.add_argument('-p', dest='preprocessing_name', action='store')
    parser.add_argument('-a', dest='agreement', action='store', type=int)
    parser.add_argument('-o', dest='percentile_orders', action='store', type=int)
    parser.add_argument('-s', dest='seed', action='store', type=int)
    # SBM
    parser.add_argument('--sbm_bs', dest='sbm_bs', nargs='+', type=int)
    parser.add_argument('--sbm_p', dest='sbm_p', action='store', type=float)
    parser.add_argument('--sbm_q', dest='sbm_q', action='store', type=float)
    # Gaussian + KNN
    parser.add_argument('--gauss_bs', dest='gauss_bs', nargs='+', type=int)
    parser.add_argument('--gauss_cs', dest='gauss_cs', nargs='+', type=float)
    parser.add_argument('--gauss_k', dest='gauss_k', action='store', type=int)
    # Mindsets
    parser.add_argument('--mind_sizes', dest='mind_sizes', nargs='+', type=int)
    parser.add_argument('--mind_questions', dest='mind_questions', action='store', type=int)
    parser.add_argument('--mind_useless', dest='mind_useless', action='store', type=int)
    parser.add_argument('--mind_noise', dest='mind_noise', action='store', type=float)
    # Questionaire
    parser.add_argument('--q_nb_samples', dest='q_nb_samples', action='store', type=int)
    parser.add_argument('--q_nb_features', dest='q_nb_features', action='store', type=int)
    parser.add_argument('--q_nb_mindsets', dest='q_nb_mindsets', action='store', type=int)
    parser.add_argument('--q_range_answers', dest='q_range_answers', nargs='+', type=int)
    # Preprocessing
    parser.add_argument('--nb_cuts', dest='nb_cuts', action='store', type=int)
    parser.add_argument('--lb_f', dest='lb_f', action='store', type=float)
    # ID
    parser.add_argument('--id', dest='unique_id', action='store', default=0)
    # Plotting
    # Both flags write to the same dest so the last one given wins; the
    # default of None lets callers detect "not specified".
    parser.add_argument('--yes_plots', dest='no_plots', action='store_false', default=None)
    parser.add_argument('--no_plots', dest='no_plots', action='store_true', default=None)
    return parser
|
9e15ff5a6d93002e17357a451f8717e7252e2ac6
| 25,740
|
def fls(val: int, v6: bool) -> int:
    """Find last set - returns the index, counting from 0 (from the right),
    of the most significant set bit in `val`.

    Returns 0 when `val` is 0 (indistinguishable from val == 1).

    The `v6` flag is kept for interface compatibility only: the previous
    bit-smearing implementation needed to know the operand width (and its
    comments even disagreed with the code about that width), whereas
    int.bit_length() is exact for integers of any size.
    """
    # if val is zero, there is no set bit to report
    if val == 0:
        return 0
    # bit_length() is the MSB position + 1; this also fixes the silent
    # wrong answers the smearing loop produced for values wider than its
    # smear range.
    return val.bit_length() - 1
|
913127b72e3cab96423d5a7fcee8b5f5f1fb3f19
| 25,741
|
def water_evapotranspiration_flux(evap):
    """Water evapotranspiration flux ``evspsbl`` [mm].

    Derives ``evspsbl`` by flipping the sign of the surface evaporation.
    """
    return -evap
|
eb1233cfb26e8a08722e691cc3880fbdc37ebf13
| 25,743
|
def _canonize_validator(current_validator):
"""
Convert current_validator to a new list and return it.
If current_validator is None return an empty list.
If current_validator is a list, return a copy of it.
If current_validator is another type of iterable, return a list version of it.
If current_validator is a single value, return a one-list containing it.
"""
if not current_validator:
return []
if isinstance(current_validator, (list, tuple)):
current_validator = list(current_validator)
else:
current_validator = [current_validator]
return current_validator
|
32c3df654e048c3551a1e665ff773b57e59524d6
| 25,744
|
def hexcolor_to_rgbcc(hexcolor):
    """ Converts an Hex color to its equivalent Red, Green, Blue
    Converse = hexcolor = (r << 16) + (g << 8) + b

    NOTE(review): each component is masked with 0x7F (7 bits, range 0-127)
    rather than 0xFF, so the stated converse only holds when every component
    is <= 0x7F. The "cc" in the name suggests the 7-bit clamp may be
    intentional (e.g. MIDI control-change values) -- confirm before changing.
    """
    # Extract each 8-bit field, then clamp to 7 bits via the 0x7F mask.
    r = (hexcolor >> 16) & 0x7F
    g = (hexcolor >> 8) & 0x7F
    b = hexcolor & 0x7F
    return r, g, b
|
d8e58fc0e42bfa4bd987a0526c824735a80432ab
| 25,745
|
def ra_as_hours(ra_degrees, seconds_decimal_places=2):
    """ Takes Right Ascension degrees as float, returns RA string.
    :param ra_degrees: Right Ascension in degrees, limited to 0 through 360. [float]
    :param seconds_decimal_places: number of places at end of RA string (no period if zero). [int]
    :return: RA in hours/hex format. [string, or None if outside RA range]
    """
    # Boolean 'or' instead of the original bitwise '|' (idiomatic; same result for bools).
    if (ra_degrees < 0) or (ra_degrees > 360):
        return None
    seconds_decimal_places = int(max(0, seconds_decimal_places))  # ensure int and non-negative.
    # 15 degrees of RA = 1 hour = 3600 seconds of RA.
    total_ra_seconds = ra_degrees * (3600 / 15)
    int_hours = int(total_ra_seconds // 3600)
    remaining_seconds = total_ra_seconds - 3600 * int_hours
    int_minutes = int(remaining_seconds // 60)
    remaining_seconds -= 60 * int_minutes
    if seconds_decimal_places > 0:
        seconds, fract_seconds = divmod(remaining_seconds, 1)
        int_fract_seconds = int(round(fract_seconds * 10 ** seconds_decimal_places))
    else:
        seconds, fract_seconds, int_fract_seconds = round(remaining_seconds), 0, 0
    int_seconds = int(seconds)
    if seconds_decimal_places > 0:
        if int_fract_seconds >= 10 ** seconds_decimal_places:
            int_fract_seconds -= 10 ** seconds_decimal_places
            int_seconds += 1
    # Carry cascade must run for ANY decimal-place setting: with zero decimal
    # places, round() can push seconds to 60 (the original only carried inside
    # the >0 branch, so e.g. "00:00:60" could be emitted).
    if int_seconds >= 60:
        int_seconds -= 60
        int_minutes += 1
    if int_minutes >= 60:
        int_minutes -= 60
        int_hours += 1
    if int_hours >= 24:
        int_hours -= 24
    if seconds_decimal_places > 0:
        format_string = '{0:02d}:{1:02d}:{2:02d}.{3:0' + str(int(seconds_decimal_places)) + 'd}'
    else:
        format_string = '{0:02d}:{1:02d}:{2:02d}'
    ra_string = format_string.format(int_hours, int_minutes, int_seconds, int_fract_seconds)
    return ra_string
|
e23cce78633cdb4d182babe095cf13b351ddf68f
| 25,747
|
def instrumentState(actor, cmd, user, instrument=None, state="up"):
    """Set the up/down state of an instrument and resync alert broadcasts.

    :param actor: alerts actor holding ``instrumentDown`` flags plus the
        active/disabled alert collections and broadcast methods
    :param cmd: command object; marked Done via ``setState`` at the end
    :param user: user performing the change (recorded when disabling alerts)
    :param instrument: name of the instrument being changed
    :param state: "down" marks the instrument down; any other value marks it up
    :return: False  # NOTE(review): presumably a command-framework flag -- confirm
    """
    if state == "down":
        actor.instrumentDown[instrument] = True
        # Disable every active alert that involves the downed instrument.
        for a in actor.activeAlerts:
            if instrument in a.instruments:
                a.disable(user)
    else:
        actor.instrumentDown[instrument] = False
        # Re-enable disabled alerts no longer blocked by a downed instrument.
        for a in actor.disabledAlerts:
            if not a.instDown:
                a.enable()
    # Push the updated alert/instrument state to all listeners.
    actor.broadcastActive()
    actor.broadcastDisabled()
    actor.broadcastAll()
    actor.broadcastInstruments()
    cmd.setState(cmd.Done, '{} set to {}'.format(instrument, state))
    return False
|
ae1db62cd4ccde1bd8929de4f158973ea4058799
| 25,749
|
def str_block(block):  # pragma: nocover
    """ Get title based on block type
    The default `__str__` methods do not operate within migrations
    :return: model title
    """
    kind = block.type
    if kind == 'static':
        suffix = block.node.title
    elif kind == 'menu':
        suffix = block.menu_item.title
    elif kind == 'signal':
        suffix = block.signal
    else:
        return kind
    return '-'.join((kind, suffix))
|
5eaa2a5a4329ba2f3a064dfdc4c2ab8f6ac5bba9
| 25,751
|
def add(x: int, y: int):
    """Return the sum of two numbers.

    :param x: A number, x
    :param y: A number, y
    :return: A number, x + y
    """
    result = x + y
    return result
|
3358800af03e094463b22296b393f6e935bf154c
| 25,752
|
def to_title(str):
    """Format *str* as a title: underscores become spaces and the first
    character is capitalized.

    (The parameter name shadows the builtin ``str``; kept for caller
    compatibility.)
    """
    spaced = str.replace("_", " ")
    return spaced.capitalize()
|
13336936174445f61d5209ce1fabd19d7ae66fa2
| 25,753
|
def _read_count_normalize(X):
"""Read depth normalization by sample. Assumes samples are columns and guides are rows."""
return (X / X.sum(axis=0)) * 1e6
|
799b5b7b4c207a6e89da68775c8f708acd1feb7f
| 25,754
|
def compFirstFivePosInt(iset={1, 2, 3, 4, 5}):
    """
    task 0.5.5
    a comprehension over the given set whose value is the set consisting
    of the squares of the first five positive integers
    """
    # The original computed 2**x (powers of two); the docstring and the
    # exercise ask for squares, i.e. x**2.
    return {x ** 2 for x in iset}
|
472e8e4d36d34ec47429be15c65c45341ab3f3a8
| 25,755
|
import struct
from functools import reduce
def fix(d):
    """Turn mono data into stereo by duplicating each 2-byte (16-bit) frame.

    :param d: raw mono audio data as a str of 2-byte frames
    :return: data with every 2-byte frame repeated (interleaved stereo)
    """
    # The original contained an unreachable struct-based implementation after
    # the first return (it also relied on Python-2 integer division); removed.
    n = 2  # bytes per 16-bit mono sample
    return ''.join([d[i:i + n] * 2 for i in range(0, len(d), n)])
|
4a644d151d67f851280911680495ba32df059c61
| 25,756
|
def flat_cell(cell):
    """
    Flatten a cell: for a dict, return its first value; anything else is
    returned unchanged.
    """
    if not isinstance(cell, dict):
        return cell
    return list(cell.values())[0]
|
d6a3e1bdcc1416c078759a62c6f5af1bb3768509
| 25,757
|
def _mock_kwargs_from_env(**kwargs):
"""
Method to mock docker.utils.kwargs_from_env method.
"""
return {'base_url': None}
|
ffc7f994efbf08b154e23378e1f7231227399a0b
| 25,758
|
def assignOrder(order):
    """
    Decorator factory that attaches an ``order`` attribute to a function.

    :param order: Order in which the function will be called
    """
    def _decorator(func):
        func.order = order
        return func
    return _decorator
|
d8f15e0e21c227a000b1d1e2d4179e5d9b2eed1f
| 25,759
|
def partial_positionals(func, fix_args, **fix_kwargs):
    """Like functools.partial, but positional arguments can be pinned at
    arbitrary indices via the *fix_args* mapping {index: value}."""
    def wrapper(*args, **kwargs):
        remaining = iter(args)
        total = len(args) + len(fix_args)
        merged = [
            fix_args[pos] if pos in fix_args else next(remaining)
            for pos in range(total)
        ]
        return func(*merged, **{**fix_kwargs, **kwargs})
    return wrapper
|
a3efcfb126e506656f87d36d781a8a0b4f512083
| 25,760
|
import os
def load_required(key: str) -> str:
    """
    Load value from env, fails if not found.
    Args:
        key: key to lookup
    Returns:
        the found value
    Raises:
        EnvironmentError: key not found
    """
    value = os.getenv(key)
    if value is None:
        # Fixed typos in the original message ("envioroment varible").
        raise EnvironmentError(f"Missing environment variable {key!r}")
    return value
|
26f02615a29b72025305b1887b96b0a990cc1501
| 25,761
|
def get_key_value_from_tokens(tokens):
    """ Converts a list of tokens into a single key/value pair.

    Tokens before the first "=" form the key, tokens after it form the
    value; every "=" token is dropped. An empty side yields None.

    :param tokens: The tokens, as strings.
    :type tokens: [str]
    :returns: (key, value)
    :rtype: (string, string)
    """
    key_parts = []
    value_parts = []
    seen_equals = False
    for tok in tokens:
        if tok == "=":
            # Every equals-sign token is skipped, not just the first.
            seen_equals = True
        elif seen_equals:
            value_parts.append(tok)
        else:
            key_parts.append(tok)
    key = "".join(key_parts) if key_parts else None
    value = "".join(value_parts) if value_parts else None
    return (key, value)
|
3af0003992a07fb8daf70c17cbaa3a414a59a9e0
| 25,763
|
def attributes_present(variables, attr_map):
    """Return the attributes from *attr_map* that at least one variable
    has set (truthy) in its ``attributes`` mapping."""
    present = []
    for attr in attr_map:
        if any(v.attributes[attr] for v in variables):
            present.append(attr)
    return present
|
ade75f103ede7f74c8b67cdaf23497d1983169eb
| 25,764
|
def is_consecutive(l):
    """
    Determine whether a collection of numbers is consecutive: every element
    is distinct and together they form an unbroken integer run.

    The original sum-based shortcut (sum == n*(min+max)/2) also accepted
    non-consecutive arithmetic coincidences such as [0, 2, 4] and crashed
    on empty input; both are fixed here.

    :param l: A collection of numbers
    :return: True or False (an empty collection is not consecutive)
    """
    if not l:
        return False
    distinct = set(l)
    # No duplicates, and the span min..max contains exactly len(l) values.
    return len(distinct) == len(l) and max(distinct) - min(distinct) == len(l) - 1
|
ec74d48eead8c3bf9eb300ee2135968eaac65d48
| 25,765
|
import requests
import json
def number_of_subscribers(subreddit):
    """api call to reddit to get the number of subscribers

    Returns 0 when the subreddit is invalid, the response body is not JSON,
    or the subscriber count is absent.
    """
    base_url = 'https://www.reddit.com/r/'
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) \
            Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)'
    }
    # grab info about all users
    url = base_url + '{}/about.json'.format(subreddit)
    response = requests.get(url, headers=headers)
    try:
        # grab the info about the users' tasks
        resp = json.loads(response.text)
        subscribers = resp.get('data').get('subscribers')
    except (ValueError, AttributeError):
        # ValueError: body was not valid JSON.
        # AttributeError: no 'data' object in the payload.
        # (Replaces the original bare 'except:', which hid every error.)
        return 0
    if subscribers is None:
        return 0
    return int(subscribers)
|
05964f0f8a8b3c1901b5ad7667fbbeb19f25ff78
| 25,766
|
def parse_squad(dataset):
    """
    Parses SQUAD database into more readable format. In this case I only care
    about question/answers pairs in order to make a seq2seq model that would
    generate questions out of a paragraph.
    Inputs:
        dataset: squad dataset in json format
    Returns:
        squad_json: parsed squad dataset in json format
    """
    total_topics = 0
    total_questions = 0
    squad_json = []
    for topic in dataset:
        total_topics += 1
        title = topic['title']
        for passage in topic['paragraphs']:
            context = passage['context']
            for qas in passage['qas']:
                total_questions += 1
                squad_json.append({
                    'topic': title,
                    'paragraph': context,
                    'question': qas['question'],
                    'answers': [answer['text'] for answer in qas['answers']],
                })
    print('Found ' + str(total_topics) + ' topics in total.')
    print('Found ' + str(total_questions) + ' questions in total.')
    return squad_json
|
15971b1bd8dd241af5e458fafe363b8859303e4f
| 25,767
|
import glob
import os
def get_file_fullname(searching_root, file_name):
    """
    :param searching_root: directory in which file is going to be searched
    :param file_name: name of a file without extension
    :return: file path with its extension, or None when nothing matches
    """
    # First try "<file_name>.<anything>"; among multiple hits prefer the one
    # with the fewest dots (the shortest extension chain).
    output = glob.glob(os.path.join(searching_root, file_name + '.*'))
    if output:
        choice = output[0]
        dots = output[0].count('.')
        for possible in output:
            if possible.count('.') < dots:
                dots = possible.count('.')
                choice = possible
        return choice
    # Then an exact, extensionless match.
    output = glob.glob(os.path.join(searching_root, file_name))
    if output:
        return output[0]
    # NOTE(review): the two ".local" fallbacks below look unreachable --
    # any "<file_name>.local*" candidate would already have matched the
    # "<file_name>.*" pattern above. Presumably kept for safety; confirm.
    output = glob.glob(os.path.join(searching_root, file_name + '.local.*'))
    if output:
        choice = output[0]
        dots = output[0].count('.')
        for possible in output:
            if possible.count('.') < dots:
                dots = possible.count('.')
                choice = possible
        return choice
    output = glob.glob(os.path.join(searching_root, file_name + '.local'))
    if output:
        return output[0]
|
01ef995f4396bd843b073ca9157f24bc45a35fce
| 25,769
|
def get_current(isamAppliance):
    """
    Retrieve the features the current user is permitted to use.
    """
    uri = "/permissions/v1"
    return isamAppliance.invoke_get("Get permitted features for current user",
                                    uri)
|
60f540471e45813c8fddab3a24726915f76cf9b9
| 25,770
|
import os
def _get_ssh_config_file(opts):
"""
:return: Path to the .ssh/config file - usually <home>/.ssh/config
"""
ssh_config_file = opts.get("ssh_config_file")
if not os.path.isfile(ssh_config_file):
raise IOError("Cannot find SSH config file")
if not os.access(ssh_config_file, os.R_OK):
raise IOError("Cannot access SSH config file: {}".format(ssh_config_file))
return ssh_config_file
|
f0edb7ad1e716d60fc28d49b3c0c42598015d00a
| 25,772
|
def nbi_growth(nbi, nvi, nb, nb0, nv, C, f, R, g, c0, alpha, e, pv, eta):
    """
    Growth rate of a single bacterial clone, per bacterial generation.

    Inputs:
    nbi : bacteria clone sizes (1D vector in time)
    nvi : corresponding phage clone sizes
    nb : total bacteria population size (not used in the rate expression)
    nb0 : number of bacteria without spacers
    nv : total phage population size
    C : total nutrients
    f, R, g, c0, alpha, e, pv, eta : simulation parameters
    Output:
    growth rate per bacterial generation (not multiplied by population size)
    """
    flow_loss = f * g * c0
    r_loss = R * g * c0
    phage_loss = alpha * pv * (nv - e * nvi)
    gain = alpha * eta * nb0 * nvi * (1 - pv) / nbi
    rate = g * C - flow_loss - r_loss - phage_loss + gain
    return rate / (g * c0)
|
b647bdbdda973407854e59ab98b9d290d00a2961
| 25,773
|
def strip_if_first_match(data):
    """
    Return a copy of *data* where any value that begins with its own key
    (case-insensitively) has that leading key -- plus an optional ':' and
    any leading spaces -- removed.
    """
    result = {}
    for key, value in data.items():
        key_lower = key.lower()
        if not str(value).lower().startswith(key_lower):
            result[key] = value
            continue
        trimmed = value[len(key_lower):]
        # optionally remove ':' and trailing space
        if trimmed and trimmed[0] == ":":
            trimmed = trimmed[1:]
        while trimmed and trimmed[0] == " ":
            trimmed = trimmed[1:]
        result[key] = trimmed
    return result
|
8205d28890d2f0cda73ec8b63b9cbb6a52e522c8
| 25,774
|
def write_aed2_eco_configs(config_dict, out_path, config_type):
    """Write zoo / pathogen / phyto configs.

    :param config_dict: two-level nested dict; the inner-most dict maps keys
        to lists of string values written as tab-separated, comma-joined rows
    :param out_path: file path to write the config block to
    :param config_type: one of 'zoo', 'pathogen', 'phyto'; non-zoo configs
        get a 'pd = ' prefix and tab-indented continuation rows
    :return: 0 on success
    """
    # The original used dict.keys()[0], which is Python-2-only (dict views
    # are not indexable on Python 3); use next(iter(...)) instead.
    first_level = next(iter(config_dict))
    block_name = '&' + first_level + "\n"
    second_level = next(iter(config_dict[first_level]))
    real_keys = list(config_dict[first_level][second_level])
    if config_type != 'zoo':
        first_line_prefix = 'pd = '
    else:
        first_line_prefix = ''
    with open(out_path, 'w') as out_handle:
        out_handle.write(block_name)
        out_handle.write(first_line_prefix)
        for idx, rk in enumerate(real_keys):
            real_value = config_dict[first_level][second_level][rk]
            real_string = ",\t".join(real_value) + "\n"
            if idx != 0 and config_type != 'zoo':
                out_handle.write("\t" + real_string)
            else:
                out_handle.write(real_string)
        out_handle.write("/")
    return 0
|
fa077e0fb9a37009bb8df1767ca343be70b90aa6
| 25,775
|
import copy
def chain(*renderables):
    """Chains renderable objects
    pgmock allows renderable objects (selectors and patches) to
    be chained to one another like so::
        pgmock.statement(0).table('table').patch(...)
    Sometimes this syntax is undesirable, especially when working
    with multiple patches. This function essentially implements that
    syntax. For example, calling::
        pgmock.patch('patch1').patch('patch2')
    is equivalent to calling::
        chain(pgmock.patch('patch1'), pgmock.patch('patch2'))
    Raises:
        `TypeError`: When no renderables are supplied
        `SelectorChainingError`: When the selectors are not compatible for
            chaining
    """
    if not renderables:  # pragma: no cover
        raise TypeError('At least one renderable must be given to chain()')
    remaining = iter(renderables)
    result = copy.deepcopy(next(remaining))
    for renderable in remaining:
        result.chain(renderable)
    return result
|
6957a677aaf2d1a931613b14517378a18c33bc7c
| 25,776
|
import json
def load_ee_params(filepath):
    """
    Loads parameters for a posteriori error estimation for the PINN from the json file provided via param filepath.
    returns
    - K: as used for trapezoidal rule
    - mu: smoothing parameter for delta function
    - L_f: Lipschitz constant or spectral abscissa
    - delta_mean: average deviation of approximated ODE/PDE from target ODE/PDE
    """
    # The 'with' block closes the file; the original's explicit close() was
    # redundant.
    with open(filepath, "r") as jsonfile:
        data = json.load(jsonfile)
    return float(data['K']), float(data['mu']), float(data['L_f']), float(data['delta_mean'])
|
9c226d39e1c14d66ff1da1ea4afd99a5a9498e7a
| 25,777
|
import math
def euclidean_distance(xyz1, xyz2):
    """
    Return the Euclidean distance between two points of equal dimension.
    """
    total = 0
    for a, b in zip(xyz1, xyz2):
        total += (a - b) ** 2
    return math.sqrt(total)
|
eb14ec300a4e4eab65a725b8d6b013f33ca09ae5
| 25,778
|
def contenttypes_uuid(obj):
    """
    Build a unique content string "<app_label>|<model_name>|<pk>" for *obj*
    from its model metadata and primary key; easy to filter in a queryset.
    """
    meta = obj._meta
    return "|".join((meta.app_label, meta.model_name, str(obj.pk)))
|
db5bea9dcfd6f384591e336902ae8fc0ed379d1f
| 25,785
|
import functools
import traceback
def wrap_exceptions(target_exception):
    """ Wrap exceptions into generic exception (for RPC transport) """
    def _decorator(func):
        @functools.wraps(func)
        def _decorated(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except target_exception:
                # Already the target type: propagate unchanged.
                raise
            except BaseException:
                # Anything else is re-packaged with the full traceback text.
                raise target_exception(traceback.format_exc())
        return _decorated
    return _decorator
|
3cc0ffaf8b87ef44f05702e9776aa327c9582d7b
| 25,787
|
import fnmatch
import click
def _match_pattern(what, items, pattern):
"""
Match given pattern against given items.
Parameters
----------
what: str
Describes what is filterd.
items: Iterable[str]
Items to be filtered
include_pattern: str
Comma separated items which should be included. Can contain glob patterns.
"""
result = set()
for part in pattern.split(","):
found = set(fnmatch.filter(items, part.strip()))
if not found:
raise click.UsageError(
"Could not find {what} {part}".format(what=what, part=part)
)
result |= found
return result
|
9688e8677d2206876d93e36abe17a336ea2be92a
| 25,788
|
def move_row_to_bottom(i, arrayIn):
    """
    Move row *i* of a 2-D array to the bottom, shifting every row below it
    up by one. With 5 rows and i=1 (0-based), the resulting row order is
    rows 0, 2, 3, 4, 1 of the input. Returns a new array; the input is not
    modified.
    """
    arrayOut = arrayIn.copy()
    numRows = arrayOut.shape[0]
    # Shift rows i+1..end up by one, then place row i at the bottom.
    arrayOut[i:numRows - 1] = arrayIn[i + 1:numRows]
    arrayOut[numRows - 1] = arrayIn[i]
    return arrayOut
|
890d77cf619eb59473dce5e4cd5f14f2aa1f78cf
| 25,790
|
def n_gal(zc, params):
    """
    Number density of detected HI galaxies in a given redshift bin, n ~ Mpc^-3.

    Currently a constant placeholder; *zc* and *params* are unused.
    """
    return 1.0e-4
|
c4ed3f99e646f92c971ae7e46b8880504a16a099
| 25,791
|
def get_shipping_voucher_discount(voucher, total_price, shipping_price):
    """Return the discount a shipping-type voucher grants on *shipping_price*.

    The voucher's minimum-spend validation runs first and may raise.
    """
    voucher.validate_min_amount_spent(total_price)
    discount = voucher.get_discount_amount_for(shipping_price)
    return discount
|
be3538f219d219d0ec7c4d343d0dee5a62573bfc
| 25,792
|
def _define_tabledict_keys(header, fields, separator):
"""
Define the keys for the tabledict dictionary.
Note: this function is only used by parse_table_from_file().
:param header: header string.
:param fields: header content string.
:param separator: separator character (char).
:return: tabledict (dictionary), keylist (ordered list with dictionary key names).
"""
tabledict = {}
keylist = []
if not header:
# get the dictionary keys from the header of the file
for key in fields:
# first line defines the header, whose elements will be used as dictionary keys
if key == '':
continue
if key.endswith('\n'):
key = key[:-1]
tabledict[key] = []
keylist.append(key)
else:
# get the dictionary keys from the provided header
keys = header.split(separator)
for key in keys:
if key == '':
continue
if key.endswith('\n'):
key = key[:-1]
tabledict[key] = []
keylist.append(key)
return tabledict, keylist
|
6c41aa138597ca5b0915df0409381ea3caa17d94
| 25,793
|
import math
def _calc_degrees(opposite, adjacent):
"""
Calculates angle in degrees with inverse tangens based on length of opposite and adjacent sides
"""
return math.degrees(math.atan(abs(opposite) / abs(adjacent)))
|
28b0a49194ad2d95f377960b349cde03a762049d
| 25,795
|
def read_gene_subsets2(fns):
    """Read gene names from one or more txt files. Only unique gene names
    are kept per file (first whitespace-separated token of each line).

    :param fns: iterable of file paths
    :return: list with one list of unique gene names per input file
    """
    names = []
    for fn in fns:
        name = []
        # 'with' guarantees the handle is closed (the original leaked it).
        with open(fn, 'r') as fh:
            for line in fh:
                try:
                    name.append(line.strip().split()[0])
                except IndexError:
                    # blank lines split to an empty list; just skip them
                    pass
        # include only uniq ids
        names.append(list(set(name)))
    return names
|
6230d155cf9efb6bde2204983591f20be9a0a4f0
| 25,797
|
def pv_efficiency(eta_PVref, beta, NOCT, NOCT_ref, NOCT_sol, T_amb, I):
    """
    Calculates time resolved PV efficiency [-]
    :param eta_PVref: Reference PV efficiency under NOCT [-]
    :param beta: Temperature coefficient [-]
    :param NOCT: Nominal operating cell temperature [deg C]
    :param NOCT_ref: Reference temperature [deg C]
    :param NOCT_sol: Reference irradiance [W/m2]
    :param T_amb: Ambient temperature [deg C]
    :param I: Irradiance on panel [W/m2]. 8760 time series
    :return: Time resolved PV efficiency [-], 8760 entries
    """
    horizon = len(T_amb)
    # Loop-invariant slope of cell temperature vs irradiance.
    slope = (NOCT - NOCT_ref) / NOCT_sol
    etapv = [0.0] * horizon
    for t in range(horizon):
        cell_temp = T_amb[t] + slope * I[t]
        etapv[t] = eta_PVref * (1 - beta * (cell_temp - 25))
    return etapv
|
7b9c34811c0a17d734f1b707789fa188ab15014b
| 25,798
|
def is_tonghao(num1, num2):
    """Return True when num1 and num2 share the same sign.

    Zero counts as neither positive nor negative, so any zero input
    yields False.
    """
    if num1 > 0:
        return num2 > 0
    if num1 < 0:
        return num2 < 0
    return False
|
abca78a01970feec3138c336535421bcb9774cbd
| 25,799
|
def getSectionByName(sections, name, key="name", default=None):
    """Return the first section whose *key* entry equals *name*, else *default*."""
    return next((s for s in sections if name == s.get(key)), default)
|
0bcdb02956fbc97ed63851ae33c0ffc138d3c9b4
| 25,801
|
def ip_exists(ip_address, client):
    """Determine whether an IP address already exists in the database.

    Args:
        ip_address: IP address string to look up
        client: pymongo client used to connect to the mongodb database
    Return:
        bool indicating whether the IP address exists in the database
    """
    match = client.db.user.find_one({'ipAddress': ip_address})
    return bool(match)
|
91ef98dbda81cb7272ec5641e80d5176cb0c3af6
| 25,805
|
from typing import List
def ibo1_to_bio(sequence_label: List[str]) -> List[str]:
    """
    Convert an IBO1-format label sequence to BIO format.

    In IBO1, "I-Label" marks the start of a span and continues until an "O"
    appears; "B-Label" is only used when several spans of the same label are
    adjacent, in which case every span after the first starts with "B-Label".
    Plain BIO input ("B-Label" followed by "I-Label") is also accepted as-is.

    Example input:
        "I-L1 I-L1 O I-L1 I-L1 I-L2 I-L2 O I-L1 I-L1 B-L1 I-L1 O B-L I-L"
    contains these spans:
        "[I-L1 I-L1] O [I-L1 I-L1] [I-L2 I-L2] O [I-L1 I-L1] [B-L1 I-L1] O [B-L I-L]"

    :param sequence_label: labels in IBO1 (or BIO) format
    :return: the same sequence rewritten in BIO format
    """
    bio: List[str] = list()
    # Two-state machine: outside any span (idel_state) or inside one (span_state).
    idel_state = 0
    span_state = 1
    state = idel_state
    for i, label in enumerate(sequence_label):
        if state == idel_state:
            if label == "O":
                state = idel_state
                bio.append(label)
            elif label[0] == "I":
                state = span_state
                # An I-label opening a span becomes a B-label in BIO.
                b_label = "B" + label[1:]
                bio.append(b_label)
            elif label[0] == "B":
                # Already BIO-style annotation; accept as-is.
                state = span_state
                bio.append(label)
        elif state == span_state:
            if label == "O":
                state = idel_state
                bio.append(label)
            elif label[0] == "I":
                if bio[-1][1:] == label[1:]:
                    # Same label as the previous token (I-label1 I-label1):
                    # the span continues, append unchanged.
                    state = span_state
                    bio.append(label)
                else:
                    # Different label (I-label1 I-label2): a new span starts,
                    # so rewrite the opener as B-label.
                    state = span_state
                    b_label = "B" + label[1:]
                    bio.append(b_label)
            elif label[0] == "B":
                # Explicit new span (I-label1 B-label1); keep as-is.
                state = span_state
                bio.append(label)
        else:
            raise RuntimeError(f"非法的状态: {state}")
    return bio
|
3d7b870a570e8cedea3f7a8e91faaa197d41a17b
| 25,806
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.