content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def get_field_id(node):
    """Return the ``field_id`` entry of *node* converted to a string.

    Raises KeyError if the node has no ``field_id`` key.
    """
    field_id = node["field_id"]
    return str(field_id)
|
5765200f26ed76e032830b1fb1360331e238aec4
| 88,596
|
import itertools
def _df_multigroupby_aggregate(pv, func=lambda x:x):
    """
    Generate a flattened structure from multigroupby result
    Parameters
    ----------
    pv: dataFrame, dict like structure
        result from :func:`_df_multigroupby`
    func: callable
        reduce the data according to this function (default: identity)
    Returns
    -------
    seq: sequence
        flattened sequence of keys and value
    """
    def aggregate(a, b=()):
        # Walk the (key, value) pairs; ``b`` carries the parent key(s)
        # accumulated on the way down so leaves can be emitted as flat tuples.
        data = []
        for k, v in a:
            if type(v) in (list, tuple,):
                # Nested grouping level: recurse, prefixing with this key.
                data.append(aggregate(v, b=(k,)))
            else:
                # Leaf: prefix keys + this key + the reduced value.
                data.append(b + (k, func(v)))
        return data
    # chain(*...) flattens exactly one level of nesting, turning the list of
    # per-group lists into a single flat sequence of tuples.
    return list(itertools.chain(*aggregate(pv)))
|
408c4b0bc5214f36a7375cfe9b31e77aeb3f959d
| 88,602
|
def byteToRatio(byteValue):
    """Interpolates a byte value [0-255] to a [0, 1] ratio.

    :param byteValue: numeric value expected in the range 0-255
    :return: float in [0, 1] (values outside 0-255 scale proportionally)
    """
    # Removed the stray trailing semicolon; Python needs no terminator.
    return byteValue / 255.0
|
4acdca0782e65795a0a395a8e2b47f4d4048a996
| 88,605
|
def separate_values_to_two_lines(table_as_list: list, column_width: int):
    """
    If there are two values of the one statistic in different units in one line,
    replace this line with two lines, each containing value in one unit.

    The list is updated in place and also returned, so callers relying on
    either behaviour keep working.
    """
    expanded = []
    for line in table_as_list:
        if " / " in line:
            # Keep the label column, emit one line per unit value.
            value_parts = line[column_width:].split(" / ")
            expanded.append(line[:column_width] + value_parts[0])
            expanded.append(line[:column_width] + value_parts[1])
        else:
            expanded.append(line)
    # The previous version mutated the list while iterating it with
    # remove/insert; ``list.remove`` deletes the FIRST equal element, which
    # corrupts the output when duplicate lines exist. Build a new list and
    # copy it back instead.
    table_as_list[:] = expanded
    return table_as_list
|
f565897a940ca6dd3d46ba8df3ce0de63c7ff375
| 88,607
|
from typing import Dict
from typing import Any
def str_any_dict(value: Dict[str, Any]) -> Dict[str, Any]:
    """Transforms a dictionary into a dictionary mapping strings to anything.

    Keys are coerced with :func:`str`; values pass through unchanged (the old
    ``v if v is not None else None`` expression was a no-op).
    """
    return {str(key): val for key, val in value.items()}
|
cbd0b5565d03d46757a7dfb98e56ed75a5a89e34
| 88,615
|
def _removed_items(items, index, return_for_invalid_index):
"""
Return removed items for a given list and index, suppressing IndexError.
This is used by the __setitem__ and __delitem__ implementations to
get the "removed" part of the event.
Note that this deliberately suppresses any IndexError arising from
an out-of-range integer index. A suitable IndexError will be re-raised
when the actual __delitem__ or __setitem__ operation is performed.
Parameters
----------
items : list
The list being operated on.
index : integer or slice
Index of items to remove or replace.
return_for_invalid_index : any
Object to return for an invalid index.
Returns
-------
removed_items : list
List containing the removed items.
"""
if isinstance(index, slice):
return items[index]
else:
try:
return [items[index]]
except IndexError:
return return_for_invalid_index
|
2cb88a8bd332f718fc1c1777765b2a772d9ab00c
| 88,617
|
import torch
def configure_optimizers(model, train_config):
    """
    from karpathy/minGPT
    This long function is unfortunately doing something very simple and is being very defensive:
    We are separating out all parameters of the model into two buckets: those that will experience
    weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
    We are then returning the PyTorch optimizer object.

    :param model: torch.nn.Module whose parameters are partitioned
    :param train_config: object with ``weight_decay``, ``lr`` and ``betas``
    :return: configured torch.optim.AdamW optimizer
    """
    # separate out all parameters to those that will and won't experience regularizing weight decay
    decay = set()
    no_decay = set()
    # NOTE: a one-element tuple needs a trailing comma — ``(torch.nn.Linear)``
    # was just the bare class. isinstance() happened to accept it, but adding
    # a second module type later would have broken the check silently.
    whitelist_weight_modules = (torch.nn.Linear,)
    blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)  # add denorm here
    for mn, m in model.named_modules():
        for pn, p in m.named_parameters():
            fpn = '%s.%s' % (mn, pn) if mn else pn  # full param name
            if "ValueHead" in fpn:  # no decay for value head layers
                no_decay.add(fpn)
                # Skip the generic rules: without this ``continue`` a Linear
                # weight inside the value head also landed in ``decay`` and
                # tripped the intersection assertion below.
                continue
            pn_type = pn.split(".")[-1]
            if pn_type == 'bias':
                # all biases will not be decayed
                no_decay.add(fpn)
            elif pn_type == 'weight' and isinstance(m, whitelist_weight_modules):
                # weights of whitelist modules will be weight decayed
                decay.add(fpn)
            elif pn_type == 'weight' and isinstance(m, blacklist_weight_modules):
                # weights of blacklist modules will NOT be weight decayed
                no_decay.add(fpn)
    # validate that we considered every parameter
    param_dict = {pn: p for pn, p in model.named_parameters()}
    inter_params = decay & no_decay
    union_params = decay | no_decay
    assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
    assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
        % (str(param_dict.keys() - union_params), )
    # create the pytorch optimizer object
    optim_groups = [
        {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay},
        {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
    ]
    optimizer = torch.optim.AdamW(optim_groups, lr=train_config.lr, betas=train_config.betas)
    return optimizer
|
2a1db45dcde2f3d2fbfe8176eaedd96ac9efb9df
| 88,620
|
def resize_with_aspect_ratio(img, max_size=2800):
    """Resize an image so its longer edge equals ``max_size`` pixels,
    preserving the aspect ratio.

    Args:
        img (PIL.Image): image object to be resized.
        max_size (int, optional): target size of the longer edge in pixels.
            Defaults to 2800.

    Returns:
        PIL.Image: the resized image object.

    Note: images whose longer edge is below ``max_size`` are scaled UP as
    well — the scale factor is not capped at 1.
    """
    width, height = img.size
    scale = min(max_size / width, max_size / height)
    new_size = (int(width * scale), int(height * scale))
    return img.resize(new_size)
|
d8342ca127a386a76c1efbab14ed6f2de49a55cb
| 88,628
|
def intersection(list_a, list_b):
    """Return the intersection of two lists (maintaining the item order of
    the first list, with duplicates removed).

    Membership tests use ``in`` on the raw lists, so items need not be
    hashable.
    """
    result = []
    for candidate in list_a:
        if candidate not in list_b:
            continue
        if candidate in result:
            continue
        result.append(candidate)
    return result
|
9e13c7c6e34a06864a545d7e7f73d6bd24d6be0a
| 88,630
|
def bubble_sort(arr):
    """
    Sort a list of integers in place using the bubble sort algorithm.

    :param arr: list to sort (mutated in place)
    :return: the same list, sorted ascending
    """
    n = len(arr)
    for i in range(n - 1):
        swapped = False
        for j in range(n - i - 1):
            if arr[j] > arr[j + 1]:
                # Pythonic tuple swap replaces the manual tmp variable.
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:
            # A full pass with no swaps means the list is already sorted;
            # stopping early makes best-case input O(n).
            break
    return arr
|
d9b88a4e4b9c0cfcc372397973c448828802a05f
| 88,633
|
def mock_response_iterator(mock_response_list):
    """Build a callable that returns the next entry of *mock_response_list*
    on each invocation (raises StopIteration when exhausted)."""
    responses = iter(mock_response_list)

    def _response_iterator(request, context):
        return next(responses)

    return _response_iterator
|
f87bdf62ca965d3b3d678bdd8b7e898938d86384
| 88,638
|
def make_policy(url, method, query_filter=None, post_filter=None,
                allowed=True):
    """
    Create a policy dictionary for the given resource and method.
    :param str url: the resource URL to grant or deny access to
    :param str method: the HTTP method to allow or deny
    :param dict query_filter: specific GET parameter names to require or allow
    :param dict post_filter: POST parameter names to require or allow
    :param bool allowed: whether this request is allowed
    """
    policy = {'url': url, 'method': method, 'allow': allowed}
    # Falsy filters (None, {}) default to empty dicts.
    policy['query_filter'] = query_filter if query_filter else {}
    policy['post_filter'] = post_filter if post_filter else {}
    return policy
|
6740b560642631ac986a050a5709567b18747a25
| 88,643
|
def close(session_attrs, fulfillment_state, message):
    """
    Build a Lex 'Close' dialog-action response carrying the given session
    attributes, fulfillment state and plain-text message.
    """
    dialog_action = {
        'type': 'Close',
        'fulfillmentState': fulfillment_state,
        'message': {'contentType': 'PlainText', 'content': message},
    }
    return {'sessionAttributes': session_attrs, 'dialogAction': dialog_action}
|
ac32dab8bb0d80704ffc84e2e3323f0c7bc34072
| 88,645
|
def is_there_duplicate_id2(df) -> bool:
    """Checks if there are duplicated ID2 values in the provided dataframe.

    :param df: pandas.DataFrame with an "ID2" column
    :return: True when at least two rows share an ID2 value
    """
    # Fewer unique values than rows means at least one duplicate.
    # ``unique()`` (unlike ``nunique()``) keeps NaN, preserving the original
    # row-count comparison semantics.  The if/else returning True/False was
    # redundant — return the comparison directly.
    return len(df["ID2"].unique()) != len(df)
|
dbfa175f76bec1665b3438202edb78174a94478f
| 88,650
|
import re
def parse_show_lacp_interface(raw_result):
    """
    Parse the 'show lacp interface' command raw output.
    :param str raw_result: vtysh raw result string.
    :rtype: dict
    :return: The parsed result of the show lacp interface command in a \
        dictionary of the form:
     ::
        {
            'lag_id': '100',
            'local_port_id': '17'
            'remote_port_id': '0'
            'local_port_priority': '1'
            'remote_port_priority': '0'
            'local_key': '100'
            'remote_key': '0'
            'local_state': {
                'active': True,
                'short_time': False,
                'collecting': False,
                'state_expired': False,
                'passive': False,
                'long_timeout': True,
                'distributing': False,
                'aggregable': True,
                'in_sync': False,
                'neighbor_state': True,
                'individual': False,
                'out_sync': True
            },
            'remote_state': {
                'active': False,
                'short_time': False,
                'collecting': False,
                'state_expired': False,
                'passive': True,
                'long_timeout': True,
                'distributing': False,
                'aggregable': True,
                'in_sync': False,
                'neighbor_state': False,
                'individual': False,
                'out_sync': True
            },
            'local_system_id': '70:72:cf:52:54:84',
            'remote_system_id': '00:00:00:00:00:00',
            'local_system_priority': '65534',
            'remote_system_priority': '0'
        }
    """
    # One big regex matching the whole table: each "Name | local | remote"
    # row is captured into a local_*/remote_* named group.  All groups are
    # optional (trailing '?'), so missing columns yield None/'' rather than
    # failing the whole match.
    lacp_re = (
        r'Aggregate-name\s*:\s*[lag]*(?P<lag_id>\w*)?[\s \S]*'
        r'Port-id\s*\|\s*(?P<local_port_id>\d*)?\s*\|'
        r'\s*(?P<remote_port_id>\d*)?\s+'
        r'Port-priority\s*\|\s*(?P<local_port_priority>\d*)?\s*\|'
        r'\s*(?P<remote_port_priority>\d*)?\s+'
        r'Key\s*\|\s*(?P<local_key>\d*)?\s*\|'
        r'\s*(?P<remote_key>\d*)?\s+'
        # The State columns are flag strings drawn from the LACP flag
        # alphabet decoded in the loop below.
        r'State\s*\|\s*(?P<local_state>[APFISLNOCDXE]*)?\s*\|'
        r'\s*(?P<remote_state>[APFISLNOCDXE]*)?\s+'
        r'System-id\s*\|\s*'
        r'(?P<local_system_id>([0-9A-Fa-f]{2}[:-]){5}[0-9A-Fa-f]{2})?\s*\|'
        r'\s*(?P<remote_system_id>([0-9A-Fa-f]{2}[:-]){5}[0-9A-Fa-f]{2})?\s+'
        r'System-priority\s*\|\s*(?P<local_system_priority>\d*)?\s*\|'
        r'\s*(?P<remote_system_priority>\d*)?\s+'
    )
    re_result = re.search(lacp_re, raw_result)
    # The output is expected to always match; a non-match means the CLI
    # format changed and should fail loudly.
    assert re_result
    result = re_result.groupdict()
    # Expand each single-letter LACP state flag into a named boolean.
    for state in ['local_state', 'remote_state']:
        tmp_dict = {
            'active': 'A' in result[state],
            'short_time': 'S' in result[state],
            'collecting': 'C' in result[state],
            'state_expired': 'X' in result[state],
            'passive': 'P' in result[state],
            'long_timeout': 'L' in result[state],
            'distributing': 'D' in result[state],
            'aggregable': 'F' in result[state],
            'in_sync': 'N' in result[state],
            'neighbor_state': 'E' in result[state],
            'individual': 'I' in result[state],
            'out_sync': 'O' in result[state]
        }
        result[state] = tmp_dict
    return result
|
a093c0912742c0bc7e538c8ec59757b370cb24de
| 88,662
|
def build_new_activity_notification(course_name, activity_name, activity_type):
    """
    Compose the notification text for a newly added course activity.

    :param course_name: name of the course the activity is added to.
    :type course_name: str
    :param activity_name: name of the new activity.
    :type activity_name: str
    :param activity_type: type of the activity (e.g. quiz or assignment).
    :type activity_type: str
    :return: the message that will be sent.
    :rtype: str
    """
    return f'For the course {course_name} the {activity_type} {activity_name} is added.'
|
42384beca4194f4f5f1cf995547b942dd157a124
| 88,663
|
import math
import locale
def num(num):
"""Make numbers pretty with comma separators."""
if math.isnan(num):
num = 0
return locale.format_string("%d", num, grouping=True)
|
e52f1fb585ec0140812e6d6dec6ad930b54180a7
| 88,666
|
import torch
def occupancy_iou(gt, pred):
    """Compute mean occupancy IoU of (batched) torch tensors.

    :param gt: ground-truth occupancy grid, last three dims are spatial
    :param pred: predicted occupancy grid, same shape as ``gt``
    :return: scalar tensor, IoU averaged over the batch; a sample where both
        grids are empty counts as IoU 1.
    """
    intersection = ((gt == pred) & (gt > 0)).sum((-1, -2, -3)).float()
    union = gt.sum((-1, -2, -3)) + pred.sum((-1, -2, -3)) - intersection
    # Take the empty-sample mask BEFORE patching union.  The old code did
    # ``union[union == 0] = 1`` first, so the subsequent
    # ``intersection[union == 0] = 1`` saw an all-False mask and empty
    # samples scored 0 instead of the intended 1.
    empty = union == 0
    union[empty] = 1
    intersection[empty] = 1
    return torch.mean(intersection / union)
|
138792f8c4382d2584d48cda0f8ece6f84c63952
| 88,667
|
def parse_blocks(log_file):
    """Parse a log file and return the block numbers that were
    marked as processed.

    Each log line is expected to start with a two-token date-time prefix;
    lines whose remainder starts with 'processed block' contribute their
    trailing integer to the result.
    """
    processed = []
    with open(log_file, 'r') as handle:
        for raw_line in handle:
            # Drop the two leading date-time tokens.
            message = ' '.join(raw_line.split()[2:])
            if message.startswith('processed block'):
                processed.append(int(message.split()[-1]))
    return processed
|
8837d391dddc827c7eb5c6ad6a3d2a06ea79a5fe
| 88,668
|
from typing import Union
def _make_bool(val: Union[str, bool]) -> bool:
"""Converts a string with true/True/1 values to bool, false otherwise."""
return val in {True, "true", "True", "1"}
|
141f69fb38a1f17008f3185a98e55073438558a2
| 88,670
|
import re
def get_number_in_string(line, pattern=r'\ done\ processing\ event\ \#(\d+)\,'):
    """
    Extract an integer from the given string using a capturing regex.
    E.g. file eventLoopHeartBeat.txt contains
       done processing event #20166959, run #276689 22807 events read so far <<<===
    for which this function returns 20166959 as an int.
    :param line: line from a file (string).
    :param pattern: reg ex pattern with one capture group (raw string).
    :return: extracted number (int) or None when nothing matched.
    """
    found = re.search(pattern, line)
    if not found:
        return None
    try:
        return int(found.group(1))
    except (TypeError, ValueError):
        return None
|
ffb323b664fe873dcf2cf75267a0ec3e3390cedf
| 88,671
|
def remove_repetition_of_nonterminals_from_productions(grammar_in_text: str):
    """Merge rules sharing the same left-hand nonterminal using '|'.
    For example:
    grammar: S -> a S b
             S -> a b
    grammar after function execution: S -> a S b | a b
    """
    merged = {}
    for rule in grammar_in_text.splitlines():
        # Skip anything that is not a production rule.
        if "->" not in rule:
            continue
        head, body = rule.split(" -> ")
        if head in merged:
            merged[head] = merged[head] + " | " + body
        else:
            merged[head] = body
    # Dicts preserve insertion order, so heads come out in first-seen order.
    return '\n'.join(f'{head} -> {body}' for head, body in merged.items())
|
043269edc329c909f87b6f1d283bfbc54aa6a2c7
| 88,672
|
def flatten_list(root):
    """Flatten the given list.

    All non-list elements are aggregated to the root level, in the same
    order as they appear in the original nested structure.  A non-iterable
    argument is treated as a single element.

    Parameters
    ----------
    root : collections.Iterable[any]
        The list to be flattened.

    Returns
    -------
    list[any]
    """
    try:
        # Reversed so that pop() from the end yields items in order.
        pending = list(reversed(root))
    except TypeError:
        # Not a sequence: wrap the single element.
        pending = [root]
    flat = []
    while pending:
        node = pending.pop()
        if isinstance(node, list):
            pending.extend(reversed(node))
        else:
            flat.append(node)
    return flat
|
fa7aea26d8052b5b438d0607a309e5ee42ce12f4
| 88,674
|
def repeat(string, n):
    """
    Description
    ----------
    Repeat a string 'n' number of times.
    Parameters
    ----------
    string : str - string to repeat
    n : int - number of times to repeat
    Returns
    ----------
    str - string consisting of param string repeated 'n' times
    Example
    ----------
    >>> repeat('abc', 3)
    'abcabcabc'
    """
    if not isinstance(string, str):
        raise TypeError("param 'string' must be a string")
    if not isinstance(n, int):
        raise TypeError("param 'n' must be an integer: 0 <= n <= sys.maxsize")
    if n < 0:
        raise ValueError("param 'n' must be in range 0 <= n <= sys.maxsize")
    # str * int is linear in the result size; the old concatenation loop
    # rebuilt the string every iteration (quadratic).
    return string * n
|
7edef4df6df14fd7b9087771b000c026e1715689
| 88,675
|
from functools import reduce
def myadd(mynumbers: list) -> int:
    """
    Sum the even numbers in a list.
    # Input:
    mynumbers: list, a list of numbers provided by the user
    # Returns:
    int: the sum of the even integers in the list (0 when there are none)
    # Functionality:
    Filters the list down to even numbers and sums them.  The previous
    implementation used ``reduce`` without an initializer, which raised
    TypeError whenever the list contained no even numbers; ``sum`` returns
    0 in that case.
    """
    return sum(number for number in mynumbers if number % 2 == 0)
|
8cceb9901a978e746a4dae223d5fb5a102be47ba
| 88,677
|
def get_len(csv_path):
    """
    Count the data rows of the CSV file at *csv_path*.
    :param csv_path: path to the CSV file
    :return: number of rows, excluding the header line
    """
    with open(csv_path, mode='r') as handle:
        handle.readline()  # skip the header row
        return sum(1 for _ in handle)
|
5042d17a326ce45114f9c219f6625cea066011fc
| 88,678
|
def has_unresected_pair(resected_idx, unresected_imgs, img_adjacency):
    """Return True if the image at *resected_idx* has a match (in either
    direction of the adjacency matrix) to at least one unresected image."""
    return any(
        img_adjacency[resected_idx][idx] == 1 or img_adjacency[idx][resected_idx] == 1
        for idx in unresected_imgs
    )
|
036d9eee5fe9e1db3fb270bef56da7ce2f6614b5
| 88,679
|
def mock_called_some_args(mock, *args, **kwargs):
    """Convenience helper for mock assertions.

    :param mock: the mock to check
    :type mock: mock.Mock
    :returns: True if at least one recorded call of `mock` contains all of
        `args` (as positional arguments, any position) and all of `kwargs`
        (with equal values).
    :rtype: bool
    """
    for call_args, call_kwargs in mock.call_args_list:
        args_ok = all(arg in call_args for arg in args)
        kwargs_ok = all(
            key in call_kwargs and call_kwargs[key] == val
            for key, val in kwargs.items()
        )
        if args_ok and kwargs_ok:
            return True
    return False
|
1eab4a0da2b322770ba0bab2da3a3eed60fa9827
| 88,686
|
from typing import Dict
def get_metadata(detections: Dict) -> Dict:
    """
    Extract image dimensions and request id from a detections payload.
    """
    image = detections["image"]
    return {
        "image_width": image["width"],
        "image_height": image["height"],
        "requestId": detections["requestId"],
    }
|
5f3a4fefb830c3e5afa3fd40864a72e887b72975
| 88,689
|
def validator(c_dict):
    """Takes a chess board configuration and determines whether or not
    it is valid.
    Args:
        c_dict: A dictionary representing a chess board configuration,
            mapping square keys (rank digit then file letter, e.g. "1a")
            to piece names (e.g. "wking").
    Returns:
        A boolean indicating whether or not the configuration is valid.
    """
    white_types = ["wking", "wqueen", "wbishop", "wknight", "wrook", "wpawn"]
    black_types = ["bking", "bqueen", "bbishop", "bknight", "brook", "bpawn"]
    # At most 32 pieces fit on a board.
    if len(c_dict) > 32:
        return False
    for key in c_dict:
        # A square is invalid when EITHER coordinate is out of range.  The
        # previous ``and`` rejected a square only when BOTH coordinates were
        # bad, so positions like "9a" or "1z" slipped through.
        if (not 1 <= int(key[0]) <= 8) or (not "a" <= key[1] <= "h"):
            return False
    white_count = dict.fromkeys(white_types, 0)
    black_count = dict.fromkeys(black_types, 0)
    for value in c_dict.values():
        if value not in white_types and value not in black_types:
            return False
        if value[0] == "w":
            white_count[value] += 1
        if value[0] == "b":
            black_count[value] += 1
    # Exactly one king per side at most; pawns capped at eight.
    if white_count["wking"] > 1 or black_count["bking"] > 1:
        return False
    if white_count["wpawn"] > 8 or black_count["bpawn"] > 8:
        return False
    w_sum = 0
    b_sum = 0
    for value in white_count.values():
        w_sum += value
    for value in black_count.values():
        b_sum += value
    # No side may field more than 16 pieces.
    if w_sum > 16 or b_sum > 16:
        return False
    return True
|
ea76ca1379aebb660df5c7c3bdade927a87ce047
| 88,695
|
import torch
def calc_planar(a_coord: torch.Tensor, b_coord: torch.Tensor,
                c_coord: torch.Tensor) -> torch.Tensor:
    """
    Calculate the planar angle (radians) at b between the rays b->a and
    b->c, element-wise over batched coordinate tensors.
    """
    ba = a_coord - b_coord
    bc = c_coord - b_coord
    dot = (ba * bc).sum(-1)
    norm_product = ba.norm(dim=-1) * bc.norm(dim=-1)
    return torch.acos(dot / norm_product)
|
c3e4a18cf58ce7f22fc0879383b11c51099df470
| 88,699
|
def user_input(prompt_string='input', ch='> '):
    """Prompt the user and return the entered line.

    - prompt_string: string to display when asking for input
    - ch: string appended to the main prompt_string

    Returns '' (after printing a newline) when the user sends EOF or
    interrupts with Ctrl-C.
    """
    full_prompt = prompt_string + ch
    try:
        return input(full_prompt)
    except (EOFError, KeyboardInterrupt):
        print()
        return ''
|
00c27a121d9087408df824f00a879c367638adc3
| 88,704
|
def parse(resp):
    """Decode an HTTP response to JSON.

    Raises RuntimeError when the body is not valid JSON, or when the decoded
    payload carries an error indicator key.
    """
    try:
        payload = resp.json()
    except ValueError as error:
        raise RuntimeError('Decoding JSON has failed') from error
    error_keys = ('statusCode', 'status-code', 'error')
    if any(key in payload for key in error_keys):
        # 'error' seems to appear with RPC responses only.
        raise RuntimeError(payload)
    return payload
|
aa93e4337550670a1542c750233bc42525dfd94f
| 88,705
|
def dicesum(throw):
    """
    Return the sum of all dice in a throw; each element is coerced to int.
    """
    total = 0
    for die in throw:
        total += int(die)
    return total
|
a1284649b18a5fbb67f2b9b68e755437feaa3375
| 88,708
|
def _preprocess_text(text):
"""
Prepare a string for syntaxic treatment (encode to utf-8, remove
leading/trailing whitespace, etc...).
"""
return text.encode('utf-8').strip(' \t\r\n').lower()
|
110ed1107bef4f6458b1acc1851a3f376a6047ef
| 88,710
|
def error(state, desire):
    """
    Computes the instantaneous reference tracking-error vector
    (desired value minus current state).
    """
    tracking_error = desire - state
    return tracking_error
|
7e2f97916b2f85651cc14ffb55e93c208097cd9d
| 88,718
|
from pathlib import Path
def hash_from_path(path: Path) -> str:
    """Returns hash from a given path.

    Simply removes the (last) extension and folder structure, leaving the hash.

    Args:
        path: path to get hash from
    Returns:
        Hash reference for file.
    """
    # ``Path.stem`` is exactly the name with its final suffix removed —
    # the same result as the old ``with_suffix('').name`` round-trip.
    return path.stem
|
89ab2383f042fb31bd27adc759241b0eae2032a0
| 88,721
|
def check_ciphers(fw, service):
    """
    Check which SSH ciphers are already set for a service and return them.

    Parameters
    ----------
    fw : Firewall
        A PanDevice for firewall.
    service : str
        SSH service name to query.

    Returns
    -------
    list
        Tags of the ciphers already set on the firewall.
    """
    print('\nChecking {} ciphers...\n'.format(service))
    base_xpath = ("/config/devices/entry[@name='localhost.localdomain']"
                  "/deviceconfig/system/ssh/ciphers/{}".format(service))
    results = fw.xapi.get(xpath=base_xpath)
    xml_list = results.findall('./result/{}/'.format(service))
    return [element.tag for element in xml_list]
|
5c12348bce4866d270ae4e6d8b54ad52fb7c18cf
| 88,722
|
def _make_help_call(target, esc, lspace, next_input=None):
"""Prepares a pinfo(2)/psearch call from a target name and the escape
(i.e. ? or ??)"""
method = 'pinfo2' if esc == '??' \
else 'psearch' if '*' in target \
else 'pinfo'
arg = " ".join([method, target])
if next_input is None:
return '%sget_ipython().magic(%r)' % (lspace, arg)
else:
return '%sget_ipython().set_next_input(%r);get_ipython().magic(%r)' % \
(lspace, next_input, arg)
|
5922736b1a817a6b4f5ac64d6e504fd81cfbdfbc
| 88,724
|
def calc_gene_length(file_path):
    """Read a FASTA file and return the sequence length.

    Assumes a typical one-line header for the FASTA file.

    **Parameters**
    file_path : str
        Path to FASTA file.
    **Returns**
    int
        Length of the gene sequence (line breaks excluded).
    """
    with open(file_path) as fasta:
        fasta.readline()  # skip the one-line FASTA header
        sequence = fasta.read().replace('\n', '')
    return len(sequence)
|
f95504a4aebd8b4a81373aaa7a342a98a5579068
| 88,726
|
def num_executors(spark):
    """Get the number of executors configured for Jupyter.

    Returns:
        Number of configured executors for Jupyter (int).
    """
    conf = spark.sparkContext._conf
    return int(conf.get("spark.executor.instances"))
|
d12704ed4d2fd148567eac227c090855a1a8e516
| 88,728
|
async def async_create_cloudhook(cloud):
    """Create a cloudhook by POSTing to the cloud's create URL with the
    account's id token as authorization."""
    session = cloud.hass.helpers.aiohttp_client.async_get_clientsession()
    headers = {'authorization': cloud.id_token}
    return await session.post(cloud.cloudhook_create_url, headers=headers)
|
b45a6896f8b56cfd2c123e7dbd30a91e0d973a39
| 88,729
|
import typing
def _retrieve_json(json_path: typing.List[str], obj: typing.Dict[str, typing.Any]) -> typing.Any:
"""Retrieve object from nested dicts."""
if json_path:
return _retrieve_json(json_path[1:], obj[json_path[0]])
else:
return obj
|
466db64716b6ba74028098c0e8812cd73cebf9d6
| 88,731
|
def get_literals(cnf):
    """Extract the literal names from a CNF formula.

    Parameters
    ----------
    cnf : list[set[(string, bool)]]
        The CNF from which to extract the literals.

    Returns
    -------
    set[str]
        Set of the literal names appearing in the CNF.
    """
    return {literal[0] for clause in cnf for literal in clause}
|
8f4cbf70bc823858df17e03530e0d0b861b64918
| 88,733
|
def list_towers(plugin):
    """
    Lists all the registered towers. The given information comes from memory,
    so it is summarized.

    Args:
        plugin (:obj:`Plugin`): this plugin.

    Returns:
        :obj:`dict`: a dictionary containing the registered towers data.
    """
    summaries = []
    for tower_id, tower in plugin.wt_client.towers.items():
        # Copy everything except the raw appointment lists, which are
        # replaced below by their locators only.
        info = {
            key: val
            for key, val in tower.to_dict().items()
            if key not in ("pending_appointments", "invalid_appointments")
        }
        info["pending_appointments"] = [
            appointment.get("locator") for appointment, _ in tower.pending_appointments
        ]
        info["invalid_appointments"] = [
            appointment.get("locator") for appointment, _ in tower.invalid_appointments
        ]
        summaries.append({"id": tower_id, **info})
    return {"towers": summaries}
|
daa2d0131264aa62efa02e62eea8bc1476f2630c
| 88,737
|
def memory2int(memory):
    """Internal helper to convert a Kubernetes memory amount to an integer.

    Supports the binary suffixes Ki/Mi/Gi; a plain number string is returned
    unchanged as an int.

    :param memory: string like "1Ki", "2Mi", "3Gi" or "128"
    :return: amount in bytes (int)
    """
    suffixes = {"Ki": 1024, "Mi": 1024 ** 2, "Gi": 1024 ** 3}
    for suffix, multiplier in suffixes.items():
        if memory.endswith(suffix):
            return int(memory[:-2]) * multiplier
    # No recognised suffix: the old code unconditionally chopped the last
    # two characters off, turning "128" into int("1") == 1.
    return int(memory)
|
f9d8355084d60bea846941a2d01203f7a1bce1de
| 88,739
|
def add_res(func, t, f):
    """Increment t or f by func bool: [t+1, f] when truthy, [t, f+1] otherwise."""
    if func:
        return [t + 1, f]
    return [t, f + 1]
|
454ab72d56a0af16cf2ef1688adc83ff5b53fc7d
| 88,740
|
def is_started_with(src, find, ignore_case=True):
    """Verifies if a string content starts with a specific string or not.

    This is identical to ``str.startswith``, except that it includes
    the ``ignore_case`` parameter.

    Parameters
    ----------
    src : str
        Current line content.
    find : str
        Prefix to look for.
    ignore_case : bool, optional
        Case sensitive or not. The default value is ``True``.

    Returns
    -------
    bool
        ``True`` if the src string starts with the pattern.
    """
    if not ignore_case:
        return src.startswith(find)
    return src.lower().startswith(find.lower())
|
34a7e61b296a19d1cd813d073a7148dac8e9e14e
| 88,743
|
def compressArrays(mask, *arrays):
    """Compress every input array with the supplied mask via a.compress(mask),
    returning the results as a list in the same order as the inputs.
    """
    compressed = []
    for arr in arrays:
        compressed.append(arr.compress(mask))
    return compressed
|
4c3cb7376e61e422ab1ea80db7e5820080885deb
| 88,744
|
def batch_flatten(tensor):
    """Flattens a tensor except for the batch (first) dimension."""
    batch_size = tensor.size(0)
    return tensor.view(batch_size, -1)
|
396e3f3113003bcd80dc1a8dec351f66f679cdee
| 88,746
|
def aln_resnums_inserts(record, aln, offset):
    """Return two lists: residue numbers for model columns; inserts.

    Walks an alignment string where uppercase = match, lowercase = insert
    and '-' = deletion.  ``offset`` shifts the reported residue numbers;
    ``del_len`` counts deletions so far so insert/match numbering stays in
    sequence coordinates.  NOTE(review): ``record`` is unused here —
    presumably kept for interface compatibility with callers.
    """
    aln_resnums = []
    aln_inserts = []
    in_insert = False
    del_len = 0
    curr_ins_start = None
    for i, c in enumerate(aln):
        if c.islower():
            if not in_insert:
                # Start of a new insert region
                curr_ins_start = offset + i + 1 - del_len
                in_insert = True
            continue
        if in_insert:
            # End of the current insert region
            aln_inserts.append((curr_ins_start, offset + i - del_len))
            in_insert = False
        if c.isupper():
            # Match position
            aln_resnums.append((c, offset + i - del_len + 1))
        elif c == '-':
            # Deletion position
            aln_resnums.append(('-', None))
            del_len += 1
        else:
            raise ValueError("Unexpected character '%s'" % c)
    # NOTE(review): an insert region that runs to the very end of ``aln``
    # is never appended to aln_inserts — confirm whether trailing inserts
    # can occur in practice.
    return aln_resnums, aln_inserts
|
d00782137c328d641fcfa5fbf1ae3413185a0dfc
| 88,747
|
def R11_2_d11(R11, SRM_ratio=4.04367):
    """
    Convert Ratio to Delta notation.
    Default SRM_ratio is NIST951 11B/10B
    """
    relative_ratio = R11 / SRM_ratio
    return (relative_ratio - 1) * 1000
|
0e18a90e3a82279895b7472c9edf9646d95748a7
| 88,748
|
def get_orgname(recIDstr: str) -> str:
    """
    Split a faster header by whitespace and '_' and return the orgname:
    the token after the first underscore of the first whitespace-separated
    field.
    """
    first_field = recIDstr.split()[0]
    return first_field.split('_')[1]
|
248e1318dbba50b62455098043fe6ec8e5db942c
| 88,750
|
import ast
def augop_to_str(op):
    """
    Convert an augmented-assignment AST operator node to its operator string;
    raises KeyError for unsupported operators.
    """
    return {
        ast.Add: "+=",
        ast.Mult: "*=",
        ast.Div: "/=",
        ast.Sub: "-=",
    }[type(op)]
|
5c9dda249cbe4358d717500ac96fae34a0e2d69e
| 88,753
|
def bonferroni_correction(scaffoldgraph, crit):
    """Return the Bonferroni-corrected significance level for each hierarchy.

    Parameters
    ----------
    scaffoldgraph : ScaffoldGraph
        A ScaffoldGraph object to query.
    crit : float
        Critical significance value to correct at each scaffold hierarchy.

    Returns
    -------
    dict
        {hierarchy: corrected critical value} for each scaffold hierarchy.
    """
    corrected = {}
    for hierarchy, size in scaffoldgraph.get_hierarchy_sizes().items():
        corrected[hierarchy] = crit / size
    return corrected
|
03ddb5e0b4d3f407d5c00793dca90d5ae9f08e8e
| 88,759
|
def beat_strength(notes):
    """
    Score notes by where they lie relative to 1, 1/2, 1/4 notes...
    This corresponds to Temperley's HPR 2.

    Args:
        notes: the notes to consider (from the ChordSpan); each must have a
            ``start`` attribute.

    Returns:
        int: the total score obtained (3/2/1 per note on a 32/16/8
        boundary, 0 otherwise).
    """
    score = 0
    for note in notes:
        start = note.start
        if start % 32 == 0:
            score += 3
        elif start % 16 == 0:
            score += 2
        elif start % 8 == 0:
            score += 1
    return score
|
48b3e14275bbf3b472f8568321acb8e5faace55f
| 88,763
|
import io
import wave
def raw_bytes_to_wav(data: bytes, frame_rate: int, channels: int, sample_width: int) -> bytes:
    """
    Convert raw PCM bytes to wav bytes (with the initial 44 bytes header).

    :param data: raw PCM sample bytes
    :param frame_rate: sample rate in Hz
    :param channels: number of audio channels
    :param sample_width: bytes per sample
    :return: complete WAV file contents as bytes
    """
    out = io.BytesIO()
    # The context manager guarantees the writer is closed (finalising the
    # WAV header) even if writeframes raises mid-way.
    with wave.open(out, "wb") as wf:
        wf.setnchannels(channels)
        wf.setsampwidth(sample_width)
        wf.setframerate(frame_rate)
        wf.writeframes(data)
    return out.getvalue()
|
bfda3c142083f97f7d892a6b1401deea1b9ab015
| 88,766
|
def parse_score(input_score):
    """Parse the score provided by the player into a tuple of (bulls, cows).

    Parameters:
        input_score (str): comma-separated score entered by the user.

    Returns:
        tuple(int): tuple of the parsed integer values.
    """
    parts = input_score.strip().split(',')
    return tuple(int(part.strip()) for part in parts)
|
c60538f23a8f2ac3f8f61338db5abd0c21303c9b
| 88,767
|
def format_multiple_values(value):
    """Reformat a multi-valued key value for PDS3 labels.

    A comma-separated value is rewritten as a brace-wrapped block with one
    entry per line (31-space indent); single values are returned unchanged.

    :param value: PDS3 key value
    :type value: str
    :return: PDS3 key value reformatted
    :rtype: str
    """
    if ',' not in value:
        return value
    indent = ' ' * 31
    joined = "".join(f"{indent}{item},\n" for item in value.split(','))
    # Drop the trailing ",\n" of the last entry, then close the block.
    return "{\n" + joined[:-2] + '\n' + indent + "}\n"
|
f52279b25fd36adf77317e284649e78a79d1d4d2
| 88,770
|
def get_wanted_position(prefix='', rconn=None):
    """Return wanted Telescope RA, DEC as two floats in degrees.

    :param prefix: key prefix used when reading from the connection
    :param rconn: redis-like connection providing ``get`` — assumed to
        return bytes or None; required
    :return: (ra, dec) tuple of floats, or None on any failure
    """
    if rconn is None:
        return None
    try:
        wanted_ra = float(rconn.get(prefix + 'wanted_ra').decode('utf-8'))
        wanted_dec = float(rconn.get(prefix + 'wanted_dec').decode('utf-8'))
    except Exception:
        # The original bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; Exception is broad enough for missing keys (None ->
        # AttributeError), decode errors and float conversion failures.
        return None
    return wanted_ra, wanted_dec
|
27a7eeeeeda503898a50e1df8aa37e736d0c616a
| 88,780
|
def percentile(data):
    """Linearly rescale an iterable onto [0, 1] using its own min/max.

    Args:
        data (iterable[float]): like (1, 10, 100, 50, 100, 50)
    Returns:
        list[float]: like (0.01, 0.1, 1.0, 0.5, 1.0, 0.5)

    Note: raises ZeroDivisionError when all values are equal (min == max).
    """
    lo = min(data)
    hi = max(data)
    span = hi - lo
    return [(value - lo) / span for value in data]
|
efffb52adaaebfd429237ca3ca339500bce00b96
| 88,781
|
import requests
def _make_http_response(code=200):
    """A helper that builds a requests.Response with the given status code."""
    resp = requests.Response()
    resp.status_code = code
    return resp
|
10841ddb4e22fcd352a9925a6e206f1da03aec7d
| 88,786
|
import torch
def normalize_point_batch_to_sphere(pc: torch.Tensor, NCHW=True):
    """
    Normalize a batch of point clouds to the unit sphere.

    :param pc: [B, N, 3] or [B, 3, N]
    :param NCHW: if True, treat the second dimension as the channel dimension
    :return:
        pc: normalized point clouds, same shape as input
        centroid: [B, 1, 3] or [B, 3, 1] center of the point clouds
        furthest_distance: [B, 1, 1] scale of the point clouds
    """
    if NCHW:
        point_dim, channel_dim = 2, 1
    else:
        point_dim, channel_dim = 1, 2
    centroid = torch.mean(pc, dim=point_dim, keepdim=True)
    centered = pc - centroid
    # Per-point Euclidean distance from the centroid, then the max over points.
    distances = torch.sqrt(torch.sum(centered ** 2, dim=channel_dim, keepdim=True))
    furthest_distance, _ = torch.max(distances, dim=point_dim, keepdim=True)
    normalized = centered / furthest_distance
    return normalized, centroid, furthest_distance
|
5be7aee1886fd9a66d0cc5bd6373a3baf5935690
| 88,791
|
def determine_result_id(event):
    """
    This function determines the result of an event.

    The branch ORDER is load-bearing: earlier checks (offside, foul, goal)
    take precedence over the generic accurate/not-accurate flags.

    Args:
        event (pd.Series): Wyscout event Series — assumed to carry the
            flag/tag columns referenced below (offside, goal, accurate, ...).
    Returns:
        int: result of the action (0 = fail, 1 = success, 2 = offside,
        3 = own goal)
    """
    if event["offside"] == 1:
        return 2
    elif event["type_id"] == 2:  # foul
        return 1
    elif event["goal"]:  # goal
        return 1
    elif event["own_goal"]:  # own goal
        return 3
    elif event["subtype_id"] in [100, 33, 35]:  # no goal, so 0
        return 0
    elif event["accurate"]:
        return 1
    elif event["not_accurate"]:
        return 0
    elif (
        event["interception"] or event["clearance"] or event["subtype_id"] == 71
    ):  # interception or clearance always success
        return 1
    elif event["type_id"] == 9:  # keeper save always success
        return 1
    else:
        # no idea, assume it was successful
        return 1
|
25635ccaef8b8b38eb83e20c0e56030ae6a83e41
| 88,804
|
def map_range(
    value: float, in_min: float, in_max: float, out_min: float, out_max: float
) -> float:
    """Map a value in one range to another.

    Raises ValueError when either range is non-positive or *value* falls
    outside [in_min, in_max].
    """
    in_span = in_max - in_min
    out_span = out_max - out_min
    if in_span <= 0.0:
        raise ValueError("invalid input range")
    if out_span <= 0.0:
        raise ValueError("invalid output range")
    if not in_min <= value <= in_max:
        raise ValueError("input value out of range")
    return (value - in_min) * out_span / in_span + out_min
|
01e05906e32d070a39b202bce1bf3d6446286179
| 88,805
|
def data_stack_elem(index=None):
    """Symbolic accessor for the data stack.

    :param index: optional stack slot index
    :return: tuple ``('D', index)`` identifying the stack element
    """
    return ('D', index)
|
ef38923362f42e6716f7fbdb81521a1e0b47acdc
| 88,812
|
def SqlServerAuditConfig(sql_messages, bucket=None):
    """Generate the SQL Server audit configuration for an instance.

    Args:
        sql_messages: module, the messages module that should be used.
        bucket: string, the GCS bucket name (optional).

    Returns:
        sql_messages.SqlServerAuditConfig object.
    """
    return sql_messages.SqlServerAuditConfig(bucket=bucket)
|
a7acd25eb093cf4c2fcbc4834f0c480bf0a4e306
| 88,815
|
def putere(numar):
    """Return True if ``numar`` is a power of two, otherwise False.

    Uses the bit trick ``n & (n - 1) == 0`` (clearing the lowest set bit):
    a power of two has exactly one set bit. The explicit ``numar > 0`` guard
    rejects zero and negative inputs; the previous implementation counted
    the '1' digits of the binary string representation, which misclassified
    negatives such as -2 (formatted as '-10') as powers of two.
    """
    return numar > 0 and (numar & (numar - 1)) == 0
|
489a9c51f417b618ef4c1585a9235b23b6aa29b9
| 88,816
|
def lat_dict_to_list(dct: dict) -> list:
    """Convert a lattice-parameter dict to the 6-vector [a, b, c, alpha, beta, gamma]."""
    order = ("a", "b", "c", "alpha", "beta", "gamma")
    return [dct[key] for key in order]
|
eb1582f92b68def1956d3bf94b0756bec9b2a31e
| 88,818
|
def fit_format(guess):
    """
    Check whether the guess fits the expected format (exactly one letter).

    :param guess: str, the user's guess
    :return: int, 1 if ``guess`` is exactly one alphabetic character
        (pass), 0 otherwise (reject)
    """
    is_single_letter = len(guess) == 1 and guess.isalpha()
    return 1 if is_single_letter else 0
|
a8198b9f0c1cc2609665ea8b8c281919d1470e27
| 88,829
|
def _validate_kv(kv):
"""
check for malformed data on split
Args:
kv (list): List of key value pair
Returns:
bool: True if list contained expected pair and False otherwise
"""
if len(kv) == 2 and "" not in kv:
return True
return False
|
1deb8a66ba436bc230e144bcc0654f69595d75e4
| 88,830
|
import torch
from typing import Optional
def cross_entropy_loss(
    probabilities: torch.Tensor,
    target: torch.Tensor,
    pad_idx: Optional[int] = None,
    reduction: Optional[str] = "mean",
    eps: float = 1e-7,
) -> torch.Tensor:
    """Calculate cross entropy loss over padded multi-label targets.

    :param probabilities: [batch size; n classes] batch with class
        probabilities (NOT raw logits -- ``log`` is applied directly to the
        gathered values)
    :param target: [batch size; max classes] batch with padded class labels
    :param pad_idx: id of the pad label; gathered values at pad positions
        are replaced with 1 so they contribute zero loss (log(1) == 0)
    :param reduction: how to reduce a batch of losses: "mean", "sum", or
        ``None`` for no reduction
    :param eps: small value to avoid ``log(0)``
    :return: loss
    :raises ValueError: for an unknown ``reduction``
    """
    # [batch size; max classes] -- probability assigned to each target label.
    gathered = torch.gather(probabilities, 1, target)
    if pad_idx is not None:
        # masked_fill avoids mutating via advanced indexing; pad slots
        # become 1, i.e. zero loss.
        gathered = gathered.masked_fill(target == pad_idx, 1)
    batch_loss = -(gathered + eps).log().sum(-1)
    if reduction is None:
        return batch_loss
    if reduction == "mean":
        return batch_loss.mean()
    if reduction == "sum":
        return batch_loss.sum()
    raise ValueError(f"Unknown reduction: {reduction}")
|
06d98f3e6df8e7994b7aee929fa8ca440b634171
| 88,833
|
def tokenize_text(tokenizer, text, return_sentence=False):
    """Tokenise ``text`` with the given NLTK tokenizer.

    :param tokenizer: NLTK tokenizer object (must expose ``tokenize``)
    :param text: str, string to tokenise
    :param return_sentence: bool, optional output selector:
        True  -- return the tokens joined by single spaces as one string
        False -- return the list of tokens (default)
    :return: list of tokens, or a space-joined string of them
    """
    tokens = tokenizer.tokenize(text)
    return " ".join(tokens) if return_sentence else tokens
|
863f9e19b891baed40a6858c338ef2084e3398c2
| 88,834
|
def fake_user(**kwargs):
    """Fixture returning a user dict; given keys override the defaults."""
    defaults = {
        "id": 43,
        "name": "bob the test man",
        "discriminator": 1337,
        "avatar_hash": None,
        "roles": (666,),
        "in_guild": True,
    }
    # Caller-supplied keys win over the defaults.
    return {**defaults, **kwargs}
|
de688eca8b90b94fd90b7f7a1c8fb8ebbc545283
| 88,835
|
def get_countries(market):
    """
    Get a comma-separated, alphabetically sorted list of the countries the
    Market serves.

    The previous implementation sorted the de-duplicated names but then
    re-wrapped the sorted list in ``set()`` before joining, which discarded
    the ordering (and its ``key=lambda country: country`` sort key was a
    no-op).
    """
    unique_countries = sorted({str(country) for country in market.countries_served.all()})
    return ", ".join(unique_countries)
|
9ed65c7244a171fb288f834042f7c2da86f7b9d5
| 88,838
|
def convert_angle(angle: int):
    """Normalize an angle into the range [0, 360).

    Args:
        angle (int): any angle in degrees (may be negative)

    Returns:
        int: the equivalent angle in [0, 360)
    """
    # Python's % already yields a non-negative result for negative
    # operands, so no pre-adjustment is needed.
    return angle % 360
|
939ddd9f6f9b4607e6169e79f73398ed8dc3f2e8
| 88,839
|
def GetGOSlims(infile):
    """Parse map2slim.pl output into a map of GO identifiers to slim categories.

    Input is the output of Chris Mungall's map2slim.pl: lines of the form
    ``<go> => <slim ids> // <parents>``. Lines starting with "part_of" are
    skipped.

    :param infile: iterable of lines
    :return: dict mapping a GO identifier to a list of slim GO identifiers

    Note: the previous version stored ``filter(...)`` objects as values,
    which on Python 3 are one-shot lazy iterators rather than lists; this
    version materializes proper lists. The dead ``len(goslims) == 0`` check
    (``str.split`` never returns an empty list) has been dropped.
    """
    go2go = {}
    for line in infile:
        if line.startswith("part_of"):
            continue
        mapped, parents = line.split("//")
        go, goslims = mapped.split("=>")
        # Strip whitespace and drop the empty fragments produced by
        # consecutive spaces in the slim-id list.
        go2go[go.strip()] = [s.strip() for s in goslims.split(" ") if s.strip()]
    return go2go
|
693b640686747c3b59e6e482b6017ccd1f2ab7f8
| 88,845
|
def as_geometry_list(geometry, singletype, multitype):
    """Flatten a geometry, multi-geometry, or (nested) list of them into a
    flat list of single geometries.

    Parameters
    ----------
    geometry : singletype, multitype, or list
        Object to be converted.
    singletype : type
        Class of a single geometry (e.g. Polygon).
    multitype : type
        Class of the corresponding multi-geometry (e.g. MultiPolygon).

    Returns
    -------
    list
        Flat list of ``singletype`` instances.

    Raises
    ------
    TypeError
        If ``geometry`` is none of the accepted types.
    """
    if isinstance(geometry, singletype):
        return [geometry]
    if isinstance(geometry, multitype):
        # A multi-geometry iterates over its single parts.
        return list(geometry)
    if isinstance(geometry, list):
        flattened = []
        for element in geometry:
            flattened.extend(as_geometry_list(element, singletype, multitype))
        return flattened
    raise TypeError(f'Expected {singletype} or {multitype}. Got '
                    f'"{type(geometry)}"')
|
0e28a55caeb273453fe2dcb53d8f77755bf8bc33
| 88,847
|
def _parsed_typed_object(paragraph):
"""
Parses a typed-object description like ``name (type) -- description``.
:param docutils.nodes.paragraph paragraph:
:return: :class:`dict` containing the name, type, and description
as strings or an empty :class:`dict`
:rtype: dict
"""
type_map = {
'str': 'string',
'string': 'string',
'int': 'number',
'integer': 'number',
'double': 'number',
'float': 'number',
'object': 'object',
'dict': 'object',
'array': 'array',
'list': 'array',
'bool': 'boolean',
}
name = paragraph[0].astext()
if paragraph[1].astext() == ' (':
t = type_map.get(paragraph[2].astext()) or 'string'
desc_start = 5
else:
t = 'string'
desc_start = 2
description = '\n\n'.join(n.astext().replace('\n', ' ')
for n in paragraph[desc_start:])
return {
'name': name,
'type': t,
'description': description,
}
|
1c1dc8c5bf73018af991fc550fcf142021b861a3
| 88,848
|
def read_expected_output(test):
    """Read the expected stdout output for Test ``test`` from ``test.output``.

    :param test: object whose ``output`` attribute names the file to read
    :return: list of lines (newlines preserved), or an empty list if the
        file cannot be opened
    """
    # A context manager guarantees the handle is closed even if reading
    # fails partway through; the previous version leaked the handle if
    # readlines() raised.
    try:
        with open(test.output) as outputfile:
            return outputfile.readlines()
    except IOError:
        return []
|
c87576f732947ace0464b6b18ce1b41ea160616b
| 88,851
|
def depth2meters(dimg):
    """
    Convert raw RealSense SR300 depth values to meters.

    Inputs
    ------
    dimg (ndarray): raw depth values from the SR300

    Outputs
    -------
    res (ndarray): depth in meters, same dimensions as input
    """
    # SR300 depth-unit scale factor (raw units -> meters).
    RAW_TO_METERS = 0.00012498664727900177
    return RAW_TO_METERS * dimg
|
3a90204ce921d025885706af01c6cafc250f8c3e
| 88,853
|
def _for_id(sid):
"""Return a string specifying the object id for logger messages."""
return " for '{0}'".format(sid)
|
2e479c2c50c1b44e204e1522d58e2b3976fb86f1
| 88,854
|
def _intervalOverlap(a, b):
""" Return overlap between two intervals
Args:
a (list or tuple): first interval
b (list or tuple): second interval
Returns:
float: overlap between the intervals
Example:
>>> _intervalOverlap( [0,2], [0,1])
1
"""
return max(0, min(a[1], b[1]) - max(a[0], b[0]))
|
ccdf41e5a2fb63fbb1635a3e43579cf0daed7665
| 88,859
|
def _normalizePath(path):
"""Normalize a path by resolving '.' and '..' path elements."""
# Special case for the root path.
if path == u'/':
return path
new_segments = []
prefix = u''
if path.startswith('/'):
prefix = u'/'
path = path[1:]
for segment in path.split(u'/'):
if segment == u'.':
continue
if segment == u'..':
new_segments.pop() # raises IndexError if there is nothing to pop
continue
if not segment:
raise ValueError('path must not contain empty segments: %s'
% path)
new_segments.append(segment)
return prefix + u'/'.join(new_segments)
|
d4241cd6eac033c3cda023da5cac134b4ae5fe4e
| 88,866
|
def export_stage(stage, file_path):
    """
    Export the given USD stage to ``file_path``.

    Passing a different file extension switches between serialization
    formats.

    :param stage: Usd.Stage
    :param file_path: str destination path
    :return: result of ``stage.Export``
    """
    result = stage.Export(file_path)
    return result
|
3d40ae96b8ea7406bd847e97df78fc37ca1e8d2e
| 88,870
|
def select_roi(cur, roi_id):
    """Fetch a single ROI row by its ID.

    Args:
        cur: Connection's cursor.
        roi_id: The ID of the ROI.

    Returns:
        The matching row, or None when no ROI has that ID.
    """
    query = "SELECT * FROM rois WHERE id = ?"
    cur.execute(query, (roi_id,))
    return cur.fetchone()
|
96814bd0d01fc020a197a34c6d17fb3970049f20
| 88,873
|
def is_header(file):
    """Return True if the given file is a header, i.e. neither a directory
    nor the module.modulemap file."""
    return not (file.is_dir() or file.name == 'module.modulemap')
|
4eec4a4a86508ad89ae8246630131a77b6c8ea9f
| 88,877
|
import re
def _parse_keyvals(keyvals):
"""Parse the key=val pairs from the newline-separated string.
:param keyvals: Newline-separated string with key=val pairs
:rtype: Dict of key=val pairs.
"""
re_keyval = re.compile(r'([\w_]+) \s* = \s* (.*)', re.VERBOSE)
keyvalout = {}
for keyval in keyvals:
m = re.search(re_keyval, keyval.strip())
if m:
key, val = m.groups()
keyvalout[key] = val
return keyvalout
|
4e2f5b48274ac5b3176c754ece4b80b2e2964dc6
| 88,878
|
def _map_tensor(functions, tensors):
"""
Apply the composition of all functions to all given tensors. If a tensor
is None, it remains as None.
:param functions: iterable collection of functions. Each must take a
tensor and return a tensor of the same size. The first function is
applied first.
:param tensors: iterable collection of tensors.
:return: tuple of tensors with identical shapes to input.
"""
new_tensors = []
for tensor in tensors:
if tensor is None:
new_tensors.append(None)
else:
for fn in functions:
tensor = fn(tensor)
new_tensors.append(tensor)
return tuple(new_tensors)
|
88748acf9048d50e5c1c4f33151ecaeb8c737deb
| 88,882
|
def board_people(state, goal):
    """Board every passenger who can and should be boarded on this floor.

    A passenger boards when their service is part of the goal, they wait on
    the lift's current floor, and they are neither already boarded nor
    already served.

    :return: list of ('board', passenger, floor) actions
    """
    actions = []
    for passenger, floor in state.origin.items():
        wanted = passenger in goal.served and goal.served[passenger]
        if (wanted and state.lift_at == floor
                and not state.boarded[passenger]
                and not state.served[passenger]):
            actions.append(('board', passenger, state.lift_at))
    return actions
|
878607e4c19688399d6cecfe161de9a91dc4ae5f
| 88,883
|
def _user_serializer(user):
"""Returns a dictionary representing a user, for a JSON response"""
return {
"username": user.username,
"name": user.get_full_name(),
}
|
29a9299168888ab41d8a481753e01ebcfcf40081
| 88,885
|
def cre_sw(swup_toa, swup_toa_clr):
    """Cloudy-sky TOA net shortwave radiative flux into the atmosphere.

    Computed as the clear-sky upward flux minus the all-sky upward flux.
    """
    return swup_toa_clr - swup_toa
|
ecef93235bb984301b64b03ae471fe2832f566a4
| 88,886
|
def get_manifest(drs_id, version, connection):
    """
    Retrieve the filenames, sizes and checksums of a dataset.

    Raises ValueError if more than one dataset matches the given drs_id and
    version on a search without replicas. The connection should be either
    distrib=True or connected to a suitable ESGF search interface.

    :param drs_id: string containing the DRS identifier without version
    :param version: the version as a string or int
    :return: dict mapping filename to its checksum_type, checksum and size
    """
    version_str = str(version) if isinstance(version, int) else version
    context = connection.new_context(drs_id=drs_id, version=version_str)
    results = context.search()
    if len(results) > 1:
        raise ValueError("Search for dataset %s.v%s returns multiple hits" %
                         (drs_id, version_str))
    file_context = results[0].file_context()
    return {
        f.filename: {
            'checksum_type': f.checksum_type,
            'checksum': f.checksum,
            'size': f.size,
        }
        for f in file_context.search()
    }
|
9f7c9c77b350881a8d1ece7018dc1d19db426bc7
| 88,887
|
def divide(n1, n2):
    """Return the quotient of ``n1`` divided by ``n2`` (true division)."""
    quotient = n1 / n2
    return quotient
|
7be7cae60ef7de1c9c66467507005c8d3df5e8e0
| 88,889
|
import struct
def get_array_of_string(num, length, data):
    """Read a prefix of ``data`` as an array of fixed-width strings.

    Parameters
    ----------
    num : int
        Number of entries in the array
    length : int
        Length (in bytes) of each string in the array
    data : bytes
        4C binary file contents

    Returns
    -------
    bytes
        Remaining (truncated) 4C binary data
    list
        List of whitespace-stripped strings decoded from the prefix
    """
    # Pre-compile the fixed-width format once and read in place.
    entry = struct.Struct('{}s'.format(length))
    strings = []
    offset = 0
    for _ in range(num):
        raw = entry.unpack_from(data, offset)[0]
        strings.append(raw.decode().strip())
        offset += length
    return data[offset:], strings
|
073722515cc4797e4039ad6b478582be9c15bf0c
| 88,892
|
def protein_coding_gene_ids(annotation):
    """Return the gene_ids of protein-coding gene rows in a GTF table.

    Keeps rows whose ``type`` column is 'gene' and whose ``gene_type``
    column is 'protein_coding'.
    """
    is_gene = annotation['type'] == 'gene'
    is_protein_coding = annotation['gene_type'] == 'protein_coding'
    return annotation.loc[is_gene & is_protein_coding, 'gene_id']
|
e603ef8d3a02d5fd98256d6d922ee8e9e01fe4f3
| 88,893
|
def create_multiple_queries_statement(condition='should'):
    """Build an empty Elasticsearch bool-query skeleton.

    >>> create_multiple_queries_statement()
    {'query': {'bool': {'should': []}}}
    >>> create_multiple_queries_statement(condition='must')
    {'query': {'bool': {'must': []}}}
    """
    bool_clause = {condition: []}
    return {'query': {'bool': bool_clause}}
|
1e8741be84512b40f5c618871d942a770675743e
| 88,894
|
def mkey(layer, argument_name):
    """Build the key used in the NeuralNet matrices dict.

    Arguments:
        layer {int} -- layer number
        argument_name {str} -- one of the KEY_* dictionary keys in neural_net

    Returns:
        str -- key of the form ``'layer<N> <argument_name>'``
    """
    return f'layer{layer} {argument_name}'
|
d774cf8e238053d99ca683a9b85dce49d24a0c99
| 88,896
|
def _wind_height_adjust(uz, zw):
"""Wind speed at 2 m height based on full logarithmic profile (Eq. 33)
Parameters
----------
uz : ee.Image or ee.Number
Wind speed at measurement height [m s-1].
zw : ee.Image or ee.Number
Wind measurement height [m].
Returns
-------
ee.Image or ee.Number
Wind speed at 2 m height [m s-1].
Notes
-----
u2 = uz * 4.87 / log(67.8 * zw - 5.42)
"""
return uz.multiply(4.87).divide(zw.multiply(67.8).subtract(5.42).log())
|
13532101dc54f4af3f2fe7f104859a35af621e91
| 88,897
|
import re
def url_encode_non_ascii(href):
    """Percent-encode the non-ASCII characters (\\x80-\\xFF) of ``href``."""
    def _percent_encode(match):
        # Lower-case two-digit hex of the character's code point.
        return '%%%02x' % ord(match.group(0))
    return re.sub('[\x80-\xFF]', _percent_encode, href)
|
7e7731a54704f2298112526a937973811d92bebb
| 88,903
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.