| content (string, 35–416k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
|---|---|---|
import os
def generate_doxygen_report(branch_name, src_dir):
"""
    Given a branch name and the project's source directory, generate a Doxygen
    documentation-coverage summary and return the documented/documentable ratio.
"""
doc_summary_file = f"doc-summary-{branch_name}.info"
    # Run coverxygen to produce the lcov-style coverage report
coverxygen_cmd = f"python3 -m coverxygen --xml-dir {src_dir}/docs/xml --src-dir {src_dir} --output doc-coverage-{branch_name}.info --scope all --kind all"
os.system(coverxygen_cmd)
    # Run coverxygen again to produce the summary report
coverxygen_cmd = f"python3 -m coverxygen --xml-dir {src_dir}/docs/xml --src-dir {src_dir} --output {doc_summary_file} --scope all --kind all --format summary"
os.system(coverxygen_cmd)
curr_branch_doc_ratio = 0.0
summary_last_line_tokens = []
# Read last summary line and extract the numbers from the fraction
with open(doc_summary_file, "r") as summary_file:
read_last_line = summary_file.readlines()[-1]
summary_last_line_tokens = (
read_last_line.rstrip().split(" ")[-1][1:-1].split("/")
)
curr_branch_doc_ratio = float(summary_last_line_tokens[0]) / float(
summary_last_line_tokens[1]
)
print(f"Branch {branch_name} ratio {curr_branch_doc_ratio}")
return curr_branch_doc_ratio
|
843b1d81b22e8f75e1062654c6e3cf59fc1f297e
| 15,832
|
def get_new_lp_file(test_nr):
""" Get name of new LP file """
return "test/{0}-new.lp".format(test_nr)
|
f7527a053a640d080f3aecd9db24623d5563b250
| 15,833
|
def tokenize(tokenizer, data, max_length = 128):
"""
Iterate over the data and tokenize it. Sequences longer than max_length are trimmed.
:param tokenizer: tokenizer to use for tokenization
    :param data: list of sentences
    :param max_length: maximum number of tokens to keep per sentence
    :return: a list of the entire tokenized data
"""
tokenized_data = []
for sent in data:
tokens = tokenizer.encode(sent, add_special_tokens=True)
        # truncate to max_length tokens (BERT itself caps sequences at 512)
tokenized_data.append(tokens[:max_length])
return tokenized_data
|
30b3786c1299bc42cd2698eae83ce1c6bdc3cfbe
| 15,834
|
import torch
def empty_like(input_, dtype):
"""Wrapper of `torch.empty_like`.
Parameters
----------
input_ : DTensor
Input dense tensor.
dtype : data-type
Data type of output.
"""
return torch.empty_like(input_._data, dtype=dtype)
|
6f2a8b5392cb854fd7bb782fe8de0766653bb3b5
| 15,835
|
import logging
def find_log_path(lg):
"""
Find the file paths of the FileHandlers.
"""
out = []
for h in lg.handlers:
if isinstance(h, logging.FileHandler):
out.append(h.baseFilename)
return out
|
efab0cb7eafd0491e1365224dc83f816c7bb1b51
| 15,836
|
import requests
def send_proms_pingback(pingback_target_uri, payload, mimetype='text/turtle'):
"""
Generates and posts a PROMS pingback message
:param pingback_target_uri: a URI, to where the pingback is sent
:param payload: an RDF file, in one of the allowed formats and conformant with the PROMS pingback message spec
:param mimetype: the mimetype of the RDF file being sent
    :return: a list [success_flag, response content or error message]
"""
headers = {'Content-Type': mimetype}
# send the post
try:
r = requests.post(pingback_target_uri, data=payload, headers=headers)
result = (r.status_code == 201)
if result:
return [True, r.content]
else:
return [False, r.content]
except Exception as e:
print(str(e))
return [False, str(e)]
|
09ecff1835352f08e7fc5f4bce545585798e688c
| 15,838
|
import re
def expand_zoo_variables(custom_env, string_to_process):
"""
    Expand zoo environment variables in a string.
    Search the string for '%KEY%' patterns and replace each occurrence with the
    corresponding value from the dictionary, for every key in the dictionary.
    :param custom_env: dictionary of zoo environment variables
    :param string_to_process: string in which to expand the variables
    :return: the string with all known variables expanded
    """
    result = string_to_process
for i in re.finditer("%([^%]+)%", string_to_process):
env_key = i.group(1)
to_replace = i.group(0)
if env_key in custom_env:
value = custom_env[env_key]
result = result.replace(to_replace, value)
return result
|
14064b112f3c46f4c22f3959aeff5cd6d02e3634
| 15,839
|
import torch
def estimate_grad_moments(xloader, network, criterion, steps=None):
"""Estimates mean/sd of gradients without any training steps inbetween - this gives the total_XXX kind of estimators which are only evaled once per epoch on the whole dataset"""
with torch.set_grad_enabled(True):
network.eval()
for step, (arch_inputs, arch_targets) in enumerate(xloader):
if steps is not None and step >= steps:
break
arch_targets = arch_targets.cuda(non_blocking=True)
# prediction
_, logits = network(arch_inputs.cuda(non_blocking=True))
arch_loss = criterion(logits, arch_targets)
arch_loss.backward()
mean_grads = [p.grad.detach().clone() for p in network.parameters() if p.grad is not None]
network.zero_grad()
second_central_moment = []
for step, (arch_inputs, arch_targets) in enumerate(xloader):
if steps is not None and step >= steps:
break
arch_targets = arch_targets.cuda(non_blocking=True)
# prediction
_, logits = network(arch_inputs.cuda(non_blocking=True))
arch_loss = criterion(logits, arch_targets)
arch_loss.backward()
for i, (g, mean_g) in enumerate(zip([p.grad.detach().clone() for p in network.parameters() if p.grad is not None], mean_grads)):
if step == 0:
second_central_moment.append(torch.pow(g-mean_g, 2))
else:
second_central_moment[i] = second_central_moment[i] + torch.pow(g-mean_g, 2)
network.zero_grad()
total_steps = len(xloader) if steps is None else steps
for g in second_central_moment:
g.multiply_(1/total_steps)
network.train()
network.zero_grad()
return mean_grads, second_central_moment
|
7bb57bd5db63270c162e6c3801bba39f2bfeba4b
| 15,841
|
import random
def generate_fake_ohlc_data(data_df):
"""
Generates fake ohlc data for each row of twitter sentiment dataframe. Used for testing
without creating database or making api calls to finnhub.
Args:
        data_df: Dataframe containing time and twitter sentiment scores for each currency
Returns:
Identical twitter sentiment dataframe with additional columns for fake ohlc data
"""
data_df.loc[0, 'Close'] = random.random()*0.001 + 1
data_df.loc[0, 'Open'] = random.random()*0.001 + 1
data_df.loc[0, 'High'] = data_df.loc[0, 'Open'] + random.random()*0.001 if data_df.loc[0,
'Open'] > data_df.loc[0, 'Close'] else data_df.loc[0, 'Close'] + random.random()*0.001
data_df.loc[0, 'Low'] = data_df.loc[0, 'Open'] - random.random()*0.001 if data_df.loc[0,
'Open'] < data_df.loc[0, 'Close'] else data_df.loc[0, 'Close'] - random.random()*0.001
data_df.loc[0, 'Volume'] = random.randrange(10, 100, 1)
for i in range(1, len(data_df)):
data_df.loc[i, 'Open'] = data_df.loc[i - 1,
'Close'] + (random.random() - 0.5)*0.001
data_df.loc[i, 'Close'] = data_df.loc[i, 'Open'] + \
(random.random() - 0.5)*0.001
data_df.loc[i, 'Volume'] = random.randrange(10, 100, 1)
if data_df.loc[i, 'Open'] > data_df.loc[i, 'Close']:
data_df.loc[i, 'High'] = data_df.loc[i,
'Open'] + random.random()*0.001
data_df.loc[i, 'Low'] = data_df.loc[i,
'Close'] - random.random()*0.001
else:
data_df.loc[i, 'High'] = data_df.loc[i,
'Close'] + random.random()*0.001
data_df.loc[i, 'Low'] = data_df.loc[i,
'Open'] - random.random()*0.001
return data_df
|
5b6608765a7a8300a17c9cda3a4dab3a2c18327b
| 15,842
|
def home():
"""List all available api routes."""
return (
f"Welcome to Honolulu, Hawaii's Climate Home Page:<br/>"
f"Available Routes:<br/>"
f"<br/>"
f"List of Precipitation Data with Corresponding Dates:<br/>"
f"/api/v1.0/precipitation<br/>"
f"<br/>"
f"List of Stations:<br/>"
f"/api/v1.0/stations<br/>"
f"<br/>"
f"List of Temperature Observations with Corresponding Dates:<br/>"
f"/api/v1.0/tobs<br/>"
f"<br/>"
f"List of Minimum/Average/Maximum Temperatures for a Given Start Date ('yyyy-mm-dd'):<br/>"
f"/api/v1.0/<start><br/>"
f"<br/>"
f"List of Minimum/Average/Maximum Temperatures for a Given Start-End Range ('yyyy-mm-dd'/'yyyy-mm-dd'):<br/>"
f"/api/v1.0/<start>/<end>"
)
|
08cb0c5ae57d3dd672459aef018c7de9d1cbab56
| 15,843
|
def sign(x):
"""
Sign function
"""
return 1 if x >= 0 else -1
|
20d85cf36d183c96e75fa3b795bf7f05f558e3b8
| 15,844
|
def is_selection_within_a_line(view):
"""Returns true if the selection is within single line, but not zero-width."""
selection = view.sel()
if len(selection) == 0:
# Null selection
return False
# selection_coverage will be the region that covers all selections.
selection_coverage = selection[0]
all_cursors = True # Whether the selections consists entirely of cursors
for region in selection:
# Expand selection_coverage to include this region.
selection_coverage = selection_coverage.cover(region)
all_cursors = all_cursors and region.empty()
selection_within_one_line = (len(view.lines(selection_coverage)) == 1)
return selection_within_one_line and not all_cursors
|
441a964140db24040a3180975f9f7fd29a2299c2
| 15,845
|
import sys
def _echo_args(string):
"""cross-platform args for echoing a string"""
if sys.platform.startswith("win"):
return ["cmd", "/c", "echo", string]
else:
return ["echo", string]
|
23c7e29cdfb33034e8d988d2d3b09cda027149c5
| 15,846
|
def dict_hangman(num_of_tries):
"""
    Returns the "photo" (ASCII drawing) of the hangman.
:param num_of_tries: the user's number of guessing
:type num_of_tries: int
:return: the photo of the hangman
:rtype: string
"""
    HANGMAN_PHOTOS = {
'1': """ x-------x""",
'2':
"""
x-------x
|
|
|
|
|""",
'3': """
x-------x
| |
| 0
|
|
|""",
'4': """
x-------x
| |
| 0
| |
|
|""",
'5': """
x-------x
| |
| 0
| /|\\
|
|""",
'6': """
x-------x
| |
| 0
| /|\\
| /
|""",
'7':"""
x-------x
| |
| 0
| /|\\
| / \\
|"""
}
    return HANGMAN_PHOTOS[str(num_of_tries)]
|
3864d3072fa0fe9fea6c7e02733e466669335c80
| 15,847
|
def build_void(lines, start, end):
"""
Builds a void from the given parameters.
Parameters
----------
lines : `list` of `str`
The lines of the section.
start : `int`
The starting index of the void.
end : `int`
The void's last line's index +1.
Returns
-------
void : `None`
"""
return None
|
25cad6592033f34a67a65be9d1c1b1c507d33ddd
| 15,849
|
import numpy as np
def random_complex_matrix(matrix_size):
"""
Generate a random, square, complex matrix of size `matrix_size`.
"""
return (np.random.rand(matrix_size, matrix_size)
+ 1j * np.random.rand(matrix_size, matrix_size))
|
4f8f486ebe53b87115631783696715dff6fae92d
| 15,850
|
def split_warnings_errors(output: str):
"""
    Splits the given string into warning and error messages, based on a leading 'W' or 'E'.
    Messages that start with neither are returned as "other".
The output of a certain pack can both include:
- Fail msgs
- Fail msgs and warnings msgs
- Passed msgs
- Passed msgs and warnings msgs
- warning msgs
Args:
output(str): string which contains messages from linters.
return:
        list of error messages, list of warning messages, list of all other (unclassified) messages
"""
output_lst = output.split('\n')
# Warnings and errors lists currently relevant for XSOAR Linter
warnings_list = []
error_list = []
# Others list is relevant for mypy and flake8.
other_msg_list = []
for msg in output_lst:
# 'W:' for python2 xsoar linter
# 'W[0-9]' for python3 xsoar linter
if (msg.startswith('W') and msg[1].isdigit()) or 'W:' in msg or 'W90' in msg:
warnings_list.append(msg)
elif (msg.startswith('E') and msg[1].isdigit()) or 'E:' in msg or 'E90' in msg:
error_list.append(msg)
else:
other_msg_list.append(msg)
return error_list, warnings_list, other_msg_list
|
aaf0ea05f5d32247f210ae1696ea58824629d075
| 15,851
|
def _get_type_and_value(entry):
"""Parse dmidecode entry and return key/value pair"""
r = {}
for l in entry.split('\n'):
s = l.split(':')
if len(s) != 2:
continue
r[s[0].strip()] = s[1].strip()
return r
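A minimal usage sketch with a hypothetical dmidecode-style entry (the input string is illustrative):
entry = "Manufacturer: ExampleCorp\nProduct Name: Widget 9000\nUUID"
print(_get_type_and_value(entry))
# {'Manufacturer': 'ExampleCorp', 'Product Name': 'Widget 9000'}  -- the 'UUID' line is skipped (no single ':')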
|
e6dd2068f10085c2dac233f1f71512e5874c5adc
| 15,855
|
def _replace_strings(obj, old, new, inplace=False):
"""
Recursively replaces all strings in the given object.
This function is specifically meant to help with saving and loading
of config dictionaries and should not be considered a general tool.
"""
if not inplace:
obj = obj.copy()
if isinstance(obj, dict):
obj_keys = obj.keys()
elif isinstance(obj, list):
obj_keys = range(len(obj))
else:
raise TypeError('Object must be either a dict or a list.')
for key in obj_keys:
if isinstance(obj[key], str):
obj[key] = obj[key].replace(old, new)
elif isinstance(obj[key], dict) or isinstance(obj[key], list):
obj[key] = _replace_strings(obj[key], old, new)
return obj
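A small usage sketch on a hypothetical config dict, e.g. swapping a path prefix before saving or after loading:
config = {'data_dir': '/old/root/data', 'files': ['/old/root/a.csv', 42]}
print(_replace_strings(config, '/old/root', '/new/root'))
# {'data_dir': '/new/root/data', 'files': ['/new/root/a.csv', 42]}  -- non-string leaves pass through unchanged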
|
3f7661a53ab8cbb836eee68b1bb3d1df9e73c4a5
| 15,857
|
def hello_world() -> str:
""" Say something! """
return "Hello World!"
|
22deba02b863355d150653caf744a65950a2fec5
| 15,858
|
def range_geometric_row(number, d, r=1.1):
"""Returns a list of numbers with a certain relation to each other.
The function divides one number into a list of d numbers [n0, n1, ...], such
that their sum is number and the relation between the numbers is defined
with n1 = n0 / r, n2 = n1 / r, n3 = n2 / r, ...
"""
if r <= 0:
raise ValueError("r must be > 0")
n0 = number / ((1 - (1 / r)**d) / (1 - 1 / r))
numbers = [n0]
for i in range(d - 1):
numbers.append(numbers[-1] / r)
return numbers
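A worked example with illustrative numbers; with r=2 each entry is half the previous one and the entries sum to number:
print(range_geometric_row(7, 3, r=2))  # [4.0, 2.0, 1.0] -> 4 + 2 + 1 == 7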
|
92e7f9f1b85011323cf5e90002d8f9151ae17e0e
| 15,859
|
import hashlib
def getIdHash(id):
""" Return md5 prefix based on id value"""
m = hashlib.new('md5')
m.update(id.encode('utf8'))
hexdigest = m.hexdigest()
return hexdigest[:5]
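A quick usage sketch; the md5 digest of "test" is 098f6bcd4621d373cade4e832627b4f6, so the returned prefix is its first five hex characters:
print(getIdHash("test"))  # 098f6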
|
a9e8d67fae494cd2eaac41b6258be69ed10b667a
| 15,860
|
def reverse_and_collapse(in_str: str) -> str:
"""
Solution: Iterate backwards and copy chars to a new string if they don't equal the last char of that new string.
Time: O(n) -> Iterate every char
Space: O(n) -> Construct another string with max size = length of input string
"""
if not in_str:
raise ValueError('input string must not be empty')
out_str = ''
# Get indexes in reverse
for i in range(len(in_str)-1, -1, -1):
# If out_str last char doesn't match this one, add it to the end of out_str
if not out_str or out_str[-1] != in_str[i]:
out_str += in_str[i]
return out_str
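A minimal usage sketch showing the reverse-and-collapse behaviour on an illustrative input:
print(reverse_and_collapse("aabbc"))  # 'cba' -- reversed 'cbbaa' with adjacent duplicates collapsed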
|
4d8c9224420a3bbe73ef6347cccaae0e20441943
| 15,861
|
def exists(_env):
"""Always activate this tool."""
return True
|
62ba86e515af9da467e7e3425e692c589b8c7fd8
| 15,862
|
import os
def create_id2amendment_info(path, tag):
"""Searches for JSON files in this repo and returns
a map of amendment id ==> (`tag`, dir, amendment filepath)
where `tag` is typically the shard name
"""
d = {}
for triple in os.walk(path):
root, files = triple[0], triple[2]
for filename in files:
if filename.endswith('.json'):
# trim its file extension
                amendment_id = filename[:-5]
d[amendment_id] = (tag, root, os.path.join(root, filename))
return d
|
376e001ef0ddce1ad418514f3a49a1abf7a907d9
| 15,865
|
def norm_layer_name(name):
"""Some heuristics to normalize a layer name from multiple sources.
For example, some depictions of VGG-16 use use upper case; others
use lower case. Some use hyphens; others use underscores. These
heuristics are by no means complete, but they increase the
likelihood that layer names from multiple sources will align.
"""
return name.upper().replace('_', '-')
|
ec1d387010282178616c2e7a63b765ceea6078bc
| 15,866
|
def square_dist(x, y, dim=1):
"""
"""
return (x-y).pow(2).sum(dim=dim)
|
f42f1964b7da0ef4cafe5e7db475bf8f429377bc
| 15,868
|
def S_inverse_values(_data_list, _infinity_value='inf'):
"""
    Returns data samples where values are inversely proportional.
"""
i_data = []
ds = len(_data_list)
for i in range(ds):
        if _data_list[i] == 0:
            i_data.append(_infinity_value)
        # elif (not if) so that zero values handled above don't also reach the division below
        elif _data_list[i] == _infinity_value:
            i_data.append(0.0)
        else:
            i_data.append(1/_data_list[i])
return i_data
|
97a140b1cb3649c7036758eaaeb7c21bcc8da06d
| 15,869
|
def findPayload(message, type):
"""
Find a payload/part that matches a type as closely as possible and decode it
properly.
Parameters
----------
message : email.message.Message
The message to search.
type : str
A MIME type.
Returns
-------
str
The payload as a string.
"""
charset = message.get_charset() or "utf-8"
if message.is_multipart():
for k in message.walk():
contenttype = k.get_content_type()
if contenttype == type:
return k.get_payload(decode=True).decode(charset), contenttype
for k in message.walk():
contenttype = k.get_content_type()
if k.get_content_type() == message.get_default_type():
return k.get_payload(decode=True).decode(charset), contenttype
return message.get_payload(decode=True).decode(charset), message.get_content_type()
|
265159546f1b0d2a6cc065e2c467528ea74048ef
| 15,870
|
import csv
def calculate_number_of_synthetic_data_to_mix(original_data_file,
target_ratio):
"""Calculate the number of negative samples that need to be added to
achieve the target ratio of negative samples.
Args:
original_data_file: path to the original data file
target_ratio: the target ratio of negative samples
Returns:
The number of negative samples needed to achieve the target ratio.
"""
total_number_of_samples = 0
original_negative_sample_count = 0
with open(original_data_file) as tsv_file:
read_tsv = csv.reader(tsv_file, delimiter="\t")
for line in read_tsv:
if int(line[-1]) == 0:
original_negative_sample_count += 1
total_number_of_samples += 1
return int(
(original_negative_sample_count - total_number_of_samples * target_ratio)
/ (target_ratio - 1))
|
eb83cdbb9af39715f16e412f8ae799222a2a232f
| 15,871
|
def GenerateId_base10(hash_sha512, input_file):
"""
Implement asset id of input file for base10 format
@param hash_sha512: string to encode
@param input_file: input file to encode
"""
    string_id = int(hash_sha512.encode().hex(), 16)  # Python 3 equivalent of .encode("hex_codec")
return string_id
|
82512598623ca4cefe1abf535b556d175fe1549e
| 15,874
|
import json
def global_weight(criterion, report):
"""
Formula:
Global Weight = Criterion W value / Criterion Count
For example:
W Value = 1
Criterion Count = 5
Global Weight = 1 / 5 = 0.2
"""
criterion_count = criterion['parent']['count']
data = report.criterion_compare[str(criterion['parent']['id'])]
criterion_index = 0
columns = filter(lambda x: x != 'criterion_0',
data['main_table'][0])
# get column index from matris
for index, column in enumerate(columns):
if 'criterion_%s' % criterion['id'] == column:
criterion_index = index
break
w_value = data['w'][criterion_index]
return json.dumps(round(w_value / criterion_count, 4))
|
02ce4873fa93556e5d7f86037819c0962b0ad207
| 15,875
|
def pickChromosomes(selection='A'):
"""
Selects the chromosomes you want to overlap.
:param selection: A(autosomes), AS (Auto+Sex), or ASM (Auto+Sex+M)
:return: chrList
"""
chrList = [
'1', '2', '3', '4', '5',
'6', '7', '8', '9', '10',
'11', '12', '13', '14', '15',
'16', '17', '18', '19', '20',
'21', '22'
]
if selection == "AS":
chrList.append("x")
chrList.append("y")
if selection == "ASM":
chrList.append("x")
chrList.append("y")
chrList.append("m")
return chrList
|
40b1922a6995ad0d5ead8c282b945c152ecbf0be
| 15,878
|
def get_contact_info_keys(status_update):
"""Returns the contact info method keys (email, sms)
used to send a notification for a status update if the notification
exists. Returns [] if there is no notification
"""
if hasattr(status_update, 'notification'):
return list(status_update.notification.contact_info.keys())
else:
return []
|
020a9742df99cd65be1433165823c4f364009d85
| 15,879
|
async def _wtforms_base_form_validate(self, extra_validators=None):
"""
Validates the form by calling `validate` on each field.
:param extra_validators:
If provided, is a dict mapping field names to a sequence of
callables which will be passed as extra validators to the field's
`validate` method.
Returns `True` if no errors occur.
"""
self._errors = None
success = True
for name, field in self._fields.items():
if extra_validators is not None and name in extra_validators:
extra = extra_validators[name]
else:
extra = tuple()
# Added an await here
if not await field.validate(self, extra):
success = False
return success
|
378366444278c4409bc5164b143ee8971af53bf1
| 15,880
|
def binary_search(input_list, number, min_idx, max_idx):
"""
Find the index for a given value (number) by searching in a sorted array
Time complexity: O(log2(n))
Space Complexity: O(1)
Args:
- input_list(array): sorted array of numbers to be searched in
- number(int): number to be searched for
Returns:
- position(int): reuturns array index for the given number
returns -1 when the number was not found
"""
# corner case for case when provided min_idx is higher than provided max_idx
if max_idx < min_idx:
return -1
# binary search
while min_idx <= max_idx:
mid = (min_idx + max_idx) // 2
# Check if x is present at mid
if input_list[mid] == number:
return mid
# If the guess was too low, set min to be one larger than the guess
if input_list[mid] < number:
min_idx = mid + 1
# If the guess was too high, set max to be one smaller than the guess
else:
max_idx = mid - 1
# if we got here, the number was not found
return -1
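A short usage sketch (the list must already be sorted; the values are illustrative):
data = [1, 3, 5, 7, 9, 11]
print(binary_search(data, 7, 0, len(data) - 1))   # 3
print(binary_search(data, 4, 0, len(data) - 1))   # -1 (not present)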
|
20358c096c529937d57503285150a90a37f62dd1
| 15,882
|
def get_goal_status(goal):
"""
1 = Success
2 = Rejected
3 = Locked
"""
print(goal.is_closed)
if goal.is_closed:
if goal.was_accomplished:
return 1
else:
return 2
return 3
|
64e742899a3a6484855f0543a7184db24c0c396f
| 15,883
|
import torch
def _calculate_ece(logits, labels, n_bins=10):
"""
Calculates the Expected Calibration Error of a model.
(This isn't necessary for temperature scaling, just a cool metric).
The input to this loss is the logits of a model, NOT the softmax scores.
This divides the confidence outputs into equally-sized interval bins.
In each bin, we compute the confidence gap:
bin_gap = | avg_confidence_in_bin - accuracy_in_bin |
We then return a weighted average of the gaps, based on the number
of samples in each bin
See: Naeini, Mahdi Pakdaman, Gregory F. Cooper, and Milos Hauskrecht.
"Obtaining Well Calibrated Probabilities Using Bayesian Binning." AAAI.
2015.
"""
bin_boundaries = torch.linspace(0, 1, n_bins + 1)
bin_lowers = bin_boundaries[:-1]
bin_uppers = bin_boundaries[1:]
softmaxes = logits
confidences, predictions = torch.max(softmaxes, 1)
accuracies = predictions.eq(labels)
ece = torch.zeros(1, device=logits.device)
for bin_lower, bin_upper in zip(bin_lowers, bin_uppers):
# Calculated |confidence - accuracy| in each bin
in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())
prop_in_bin = in_bin.float().mean()
if prop_in_bin.item() > 0:
accuracy_in_bin = accuracies[in_bin].float().mean()
avg_confidence_in_bin = confidences[in_bin].mean()
ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
return ece.item()
|
af311a47b7558b07838a38d736386147804ea109
| 15,886
|
def floor_div_mul(value, factor):
"""Fuction to get floor number."""
return (((value) // (factor))) * (factor)
|
05c9b01c297f4ec68d3c5755db550482c520dac8
| 15,887
|
def history_parser(arg):
"""
    @param: arg is a string that contains words separated by spaces
    @return: Returns two strings: the first word of arg and everything after the first space
"""
v = -1
try:
v = arg.index(' ')
except ValueError:
return None, None
first_word = arg[0:v]
remain = arg[v + 1: len(arg)]
return first_word, remain
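A minimal usage sketch with illustrative inputs:
print(history_parser("git commit -m msg"))  # ('git', 'commit -m msg')
print(history_parser("single"))             # (None, None) -- no space in the input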
|
267f0cd8ddfc0bfa9106d18341421d5d4d48ed1f
| 15,888
|
def _to_scanner(varr):
"""Transform data to scanner coordinate space by properly swapping axes
Standard vnmrj orientation - meaning rotations are 0,0,0 - is axial, with
x,y,z axes (as global gradient axes) corresponding to phase, readout, slice.
vnmrjpy defaults to handling numpy arrays of:
(receivers, phase, readout, slice/phase2, time*echo)
but arrays of
(receivers, x,y,z, time*echo)
is also desirable in some cases (for example registration in FSL flirt)
Euler angles of rotations are psi, phi, theta,
Also corrects reversed X gradient and sliceorder
Args:
data (3,4, or 5D np ndarray) -- input data to transform
procpar (path/to/file) -- Varian procpar file of data
Return:
swapped_data (np.ndarray)
"""
return varr
|
f8a8506e49722e7f5f66199ca15f0a441fb5e180
| 15,889
|
def select_clause(table_name, *fields, distinct=False):
""" Create a select from clause string for SQL.
Args:
table_name: The table to select from as string.
        *fields: The fields to select.
        distinct: If True, add the DISTINCT keyword to the clause.
Returns:
A string with the crafted clause.
"""
select = 'SELECT'
if distinct:
select += ' DISTINCT'
fields_str = ', '.join(fields) if fields else '*'
return '{} {} FROM {}'.format(select, fields_str, table_name)
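A couple of usage sketches (the table and column names are made up):
print(select_clause('users', 'id', 'name'))    # SELECT id, name FROM users
print(select_clause('users', distinct=True))   # SELECT DISTINCT * FROM users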
|
3e373d6b493f60d94afe5d0372e2b1e752f98284
| 15,890
|
import pandas as pd
import math
def removeMissing(filename):
"""Takes a file that contains missing scans and removes those rows, while providing the subject name and reason for removal."""
loaded_file = pd.read_csv(filename)
cleaned_list = []
missing_counter = 0
for row in loaded_file.index:
if math.isnan(loaded_file.iloc[row, 3]):
print("Dropping subject scan " + loaded_file.iloc[row, 0] + " because of " + loaded_file.iloc[row,1])
missing_counter = missing_counter + 1
else:
cleaned_list.append(loaded_file.iloc[row])
print("There were " + str(missing_counter) + " scans with missing data dropped.")
cleaned_df = pd.DataFrame(cleaned_list)
return cleaned_df
|
497af6cc2b15da59d0bbad5785368ad8e1cbb5d9
| 15,891
|
import json
def decode(body):
"""decode string to object"""
if not body:
return None
return json.loads(body)
|
2663a3d742b6f5e17d5b0aed876f136b30fdde1c
| 15,893
|
def _remove(s, e):
"""Removes an element from the set.
    Element must be hashable. This mutates the original set.
Args:
s: A set, as returned by `sets.make()`.
e: The element to be removed.
Returns:
The set `s` with `e` removed.
"""
s._values.pop(e)
return s
|
ab7169a69f95025323fd20e3e11e09c04da3d427
| 15,894
|
def bintodec(x):
"""Convert Binary to Decimal. Input is a string and output is a positive integer."""
num = 0
n = len(x)
for i in range(n):
num = num + 2 ** i * int(x[n - i - 1])
return num
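A quick usage sketch:
print(bintodec("1011"))   # 11  (8 + 0 + 2 + 1)
print(bintodec("0"))      # 0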
|
e83e3c34c237d5840bd024f49f3d436e6804b427
| 15,895
|
import collections
def _create_gt_string(cnv_row):
"""
Creates VCF gt string for a single-sample VCF.
"""
gt_dict = collections.OrderedDict()
gt_dict["GT"] = cnv_row.GT
gt_dict["S"] = cnv_row.S
gt_dict["NS"] = cnv_row.NS
gt_dict["LS"] = cnv_row.LS
gt_dict["LNS"] = cnv_row.LNS
gt_dict["RS"] = cnv_row.RS
gt_dict["RNS"] = cnv_row.RNS
gt_dict["GQ"] = cnv_row.GQ
gt_dict["AB"] = cnv_row.AB
gt_dict["SQ"] = cnv_row.SQ
out_string = ""
length_of_info = len(gt_dict.values())
for i, item in enumerate(gt_dict.items()):
value = str(item[1])
if (i + 1) != length_of_info:
out_string += value +":"
else:
out_string += value
return out_string
|
fd866f2ce22cb8b34608dcd6161be5d11f374c82
| 15,896
|
def generate_name_Id_map(name, map):
"""
Given a name and map, return corresponding Id. If name not in map, generate a new Id.
:param name: session or item name in dataset
:param map: existing map, a dictionary: map[name]=Id
:return: Id: allocated new Id of the corresponding name
"""
if name in map:
Id = map[name]
else:
Id = len(map.keys()) + 1
map[name] = Id
return Id
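A small usage sketch showing how new names receive incrementing Ids while known names reuse theirs (the names are illustrative):
id_map = {}
print(generate_name_Id_map("session_a", id_map))  # 1
print(generate_name_Id_map("session_b", id_map))  # 2
print(generate_name_Id_map("session_a", id_map))  # 1 (already mapped)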
|
8e86daf1a345803b280ad91f40a18eaeaa0cde5f
| 15,897
|
def _get_datasource_type(): # filepath):
"""
Determine if the input is from a local system or is an s3 bucket
Not needed now, but will need to use for cloud data access
"""
source_types = ["is2_local", "is2_s3"]
return source_types[0]
|
3b0145ed73f68607f46714e7df60b10668dd4f01
| 15,898
|
import argparse
import os
def parse_args():
"""Parse input arguments
"""
parser = argparse.ArgumentParser(__doc__)
parser.add_argument(
'--lcdc_results', type=str,
help='File containing results')
parser.add_argument(
'--tcn_results', type=str,
help='File containing results')
parser.add_argument(
'--split', type=str,
help='Test split filename')
args = parser.parse_args()
assert os.path.isfile(args.lcdc_results)
assert os.path.isfile(args.tcn_results)
assert os.path.isfile(args.split)
return args
|
9897aa524a7acae61b9b22d62cc9156a09a10d7a
| 15,899
|
def IdentityLayer(inp, inp_dim, outp_dim, vs, name="identity_layer", use_bias=True, initializer=None):
"""An identity function that takes the same parameters as the above layers."""
assert inp_dim == outp_dim, "Identity layer requires inp_dim == outp_dim."
return inp
|
eb09dd15a2db7c2901c4889eba5b6a9e577ddc1f
| 15,900
|
def format_text_to_html(text: str):
""" realiza el formato del texto pasado a un formato html"""
html_txt = text.replace("<", '<')
html_txt = html_txt.replace(">", '>').replace("\n", '<br>')
html_txt = html_txt.replace("\t", '	')
html_txt = html_txt.replace(" ", ' ')
return html_txt
|
36b21f11ffdcc6364c745e76edb339fef3f0f38c
| 15,901
|
import torch
def batch_uv_to_global_with_multi_depth(uv1, inv_affine_t, inv_cam_intrimat, inv_cam_extri_R, inv_cam_extri_T,
depths, nbones):
"""
:param uv1:
:param inv_affine_t: hm -> uv
:param inv_cam_intrimat: uv -> norm image frame
:param inv_cam_extri_R: transpose of cam_extri_R
:param inv_cam_extri_T: same as cam_extri_T
:param depths:
:param nbones:
:return:
"""
dev = uv1.device
nview_batch = inv_affine_t.shape[0]
h = int(torch.max(uv1[1]).item()) + 1
w = int(torch.max(uv1[0]).item()) + 1
depths = torch.as_tensor(depths, device=dev).view(-1,1,1)
ndepth = depths.shape[0]
# uv1 copy
coords_hm_frame = uv1.view(1, 3, h*w, 1).expand(nview_batch, -1, -1, nbones*2).contiguous().view(nview_batch, 3, -1)
# uv to image frame
inv_cam_intrimat = inv_cam_intrimat.view(nview_batch, 3, 3)
inv_affine_t = inv_affine_t.view(nview_batch, 3, 3)
synth_trans = torch.bmm(inv_cam_intrimat, inv_affine_t)
coords_img_frame = torch.bmm(synth_trans, coords_hm_frame)
# image frame to 100 depth cam frame
coords_img_frame = coords_img_frame.permute(1,0,2).contiguous().view(1, 3,-1) # (1, 3, nview*batch * h*w * 2nbones)
coords_img_frame_all_depth = coords_img_frame.expand(ndepth, -1, -1)
coords_img_frame_all_depth = torch.mul(coords_img_frame_all_depth, depths) # (ndepth, 3, nview*batch *h*w *2nbones)
# cam frame to global frame
coords_img_frame_all_depth = coords_img_frame_all_depth.view(ndepth, 3, nview_batch, -1).permute(2, 1, 0, 3)\
.contiguous().view(nview_batch, 3, -1)
inv_cam_extri_R = inv_cam_extri_R.view(-1, 3, 3)
inv_cam_extri_T = inv_cam_extri_T.view(-1, 3, 1)
coords_global = torch.bmm(inv_cam_extri_R, coords_img_frame_all_depth) + inv_cam_extri_T
coords_global = coords_global.view(nview_batch, 3, ndepth, h*w, 2*nbones)
return coords_global
|
cba0bd631abec00bbee5be99c19f66e600dae572
| 15,902
|
def predict_classification(tree, labels, test):
"""Predict the classification for a test."""
if isinstance(tree, dict):
node, node_choices = tree.popitem()
feature = node_choices[test[labels.index(node)]]
result = predict_classification(feature, labels, test)
else:
result = tree
return result
|
2aa7d7349b5f4d510acd337699c10eb038cc6809
| 15,903
|
def find_sentence(sentence_list, taggedspan, filename):
"""
Find the sentence index that corresponds with the span.
Keyword arguments:
sentence_list: list -- list of all sentences in the note (list)
taggedspan: the span of the annotation (list)
filename: the name of the file (string)
"""
sent_nrs = list()
#for each span in the list of taggedspan
for span in taggedspan:
#if the first number of the span is larger than the length of the first sentence in the sentencelist, pop the sentence and
#subtract the length of it from the span. This code uses this concept to iteratively find the sentence that matches the span.
sentence = sentence_list.copy()
amount_char = 0
sent_nr = -1
while amount_char <= int(span[0]):
amount_char += len(sentence[0])
sent_nr += 1
sentence.pop(0)
#if there are only 20 characters in a new sentence of a multi-sentence annotation annotated, it is a randomly annotated word, and hence ignored
#This code could have been better using a recursive function, but I chose against it since it won't be needed when annotations are revised (that'll eliminate multi-sentence annotations)
temp_length = int(span[1]) - int(span[0]) - len(sentence_list[sent_nr])
if temp_length > 20:
if temp_length - len(sentence_list[sent_nr+1]) > 20:
if temp_length - len(sentence_list[sent_nr+1]) - len(sentence_list[sent_nr+2]) > 20:
if temp_length - len(sentence_list[sent_nr+1]) - len(sentence_list[sent_nr+2]) -len(sentence_list[sent_nr+3]) > 20:
if temp_length - len(sentence_list[sent_nr+1]) - len(sentence_list[sent_nr+2]) -len(sentence_list[sent_nr+3]) - len(sentence_list[sent_nr + 4]) > 20:
if temp_length - len(sentence_list[sent_nr+1]) - len(sentence_list[sent_nr+2]) -len(sentence_list[sent_nr+3]) - len(sentence_list[sent_nr + 4]) - len(sentence_list[sent_nr + 5])> 20:
sent_nrs.append([sent_nr, sent_nr +1, sent_nr +2, sent_nr + 3, sent_nr + 4, sent_nr + 5, sent_nr + 6])
else:
sent_nrs.append([sent_nr, sent_nr +1, sent_nr +2, sent_nr + 3, sent_nr + 4, sent_nr + 5])
else:
sent_nrs.append([sent_nr, sent_nr +1, sent_nr +2, sent_nr + 3, sent_nr + 4])
else:
sent_nrs.append([sent_nr, sent_nr +1, sent_nr +2, sent_nr + 3])
else:
sent_nrs.append([sent_nr, sent_nr +1, sent_nr +2])
else:
sent_nrs.append([sent_nr, sent_nr +1])
else:
sent_nrs.append(sent_nr)
return sent_nrs
|
d3705523347f734ad9b1ffdfc6cc057123bdfc11
| 15,904
|
import re
def equals_or_matches(s, str_or_regex):
    """Return the regex match object if str_or_regex is a compiled pattern,
    True if s equals str_or_regex, and False otherwise.
    """
    _type = type(re.compile(''))
    if isinstance(str_or_regex, _type):
        return re.match(str_or_regex, s)
    return s == str_or_regex
|
8a508af9f3319fd95f3c5e15353013c8eeb91346
| 15,905
|
import sys
import os
def silent_execute( string, return_stderr=True):
""" Execute the given shell adding '> /dev/null' if under a posix OS
and '> nul' under windows.
"""
if sys.platform.startswith('win') or return_stderr:
return os.system(string + " > " + os.devnull)
else:
return os.system('%s >%s 2>%s' % (string, os.devnull,
os.devnull))
|
f8331734f60a32e9b7a9529e64baa8907013d64f
| 15,906
|
def _flatten_task(obj):
"""Flatten the structure of the task into a single dict
"""
data = {
'id': obj.id,
'checkpoint_id': obj.checkpoint_id,
'policy_id': obj.policy_id,
'provider_id': obj.provider_id,
'vault_id': obj.vault_id,
'vault_name': obj.vault_name,
'operation_type': obj.operation_type,
'error_mesage': obj.error_info.message,
'error_code': obj.error_info.code,
'created_at': obj.created_at,
'ended_at': obj.ended_at,
'started_at': obj.started_at,
'updated_at': obj.updated_at,
}
return data
|
f10b0db4cefad81818f3195da0cf25339c420823
| 15,907
|
async def claptext(memereview):
"""Praise people!"""
textx = await memereview.get_reply_message()
message = memereview.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await memereview.edit(
"`Lord, Mohon Balas Ke Pesan Orang Yang Ingin Anda Puji ツ`"
)
reply_text = "👏 "
reply_text += message.replace(" ", " 👏 ")
reply_text += " 👏"
await memereview.edit(reply_text)
|
ea6a9fe891c88108997082f27cae7da1be43cb84
| 15,908
|
import itertools
def prev_this_next(it):
"""
iterator to gradually return three consecutive elements of
another iterable. If at the beginning or the end of the iterable,
None is returned for corresponding elements.
"""
a, b, c = itertools.tee(it, 3)
next(c)
return zip(itertools.chain([None], a), b, itertools.chain(c, [None]))
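A minimal usage sketch; each element is yielded together with its neighbours, with None at the ends:
for prev, this, nxt in prev_this_next([1, 2, 3]):
    print(prev, this, nxt)
# None 1 2
# 1 2 3
# 2 3 None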
|
dc4416ed0b1c06502f3005df418536b9a92b4481
| 15,911
|
def parse_unity_results(output):
"""Read output from Unity and parse the results into 5-tuples:
(file, lineno, name, result, message)"""
result = []
lines = output.split('\n')
for line in lines:
if line == '':
break
parts = line.split(':', maxsplit=4)
if len(parts) == 4:
parts.append(None)
else:
parts[4] = parts[4][1:]
result.append(tuple(parts))
return result
|
488f98d69434b2abdb9200353e51c12805694297
| 15,913
|
async def looped_fetch(fetch, query):
"""using the async fetch method of a deta Base instance return the first
match of the specified query, or None if no match is found.
:param fetch: async fetch method
:param query: query object for which to fetch results
:return: value matching the query or None
"""
last = None
while True:
res = await fetch(query=query, last=last)
if res.count > 0:
return res.items[0]
if not res.last:
return None
last = res.last
|
c8cdf281521d6832d0b9f068d74d47a78a93af5e
| 15,914
|
def without_duplicates(duplicates, similar_songs):
"""Creates a list that only includes those strings which occur once in the
similar_songs list.
Args:
duplicates: Usually the list that is the return list of get_duplicates.
similar_songs: Usually the list that is the return list of
find_similar_songs.
Returns:
A list of all items that are found in the similar_songs list but not in
the duplicates list.
"""
result = []
for song in similar_songs:
if song not in duplicates:
result.append(song)
return result
|
e5c413a9395b731d8f533d4ba5eeb8c9824592a8
| 15,915
|
def airports_codes_from_city(name, airports_list, airport_type):
"""
    Finds the codes of all airports in a given city or state.
    :param name: value to match against the chosen airport field
    :param airports_list: list of all airports
    :param airport_type: the airport field to compare :param name: against - 'code', 'city' or 'state'
:return: list of airports codes
"""
temp = []
for airport in airports_list:
if name.lower() == airport[airport_type].lower():
temp.append(airport['code'])
return temp
|
995bab1eca1633c5e52cfdfe5b1a92b7738fbbb5
| 15,916
|
from tqdm import tqdm
def _get_iterator(to_iter, progress):
"""
Create an iterator.
Args:
to_iter (:py:attr:`array_like`): The list or array to iterate.
progress (:py:attr:`bool`): Show progress bar.
Returns:
:py:attr:`range` or :py:class:`tqdm.std.tqdm`: Iterator object.
"""
iterator = range(len(to_iter))
if progress:
try:
iterator = tqdm(range(len(to_iter)))
except ModuleNotFoundError:
print(
"For the progress bar, you need to have the tqdm package "
"installed. No progress bar will be shown"
)
return iterator
|
c1dd29a430d2c468e3f89536fef593b7477a04ce
| 15,917
|
def safer_getattr(object, name, default=None, getattr=getattr):
"""Getattr implementation which prevents using format on string objects.
format() is considered harmful:
http://lucumr.pocoo.org/2016/12/29/careful-with-str-format/
"""
if isinstance(object, str) and name == 'format':
raise NotImplementedError(
'Using format() on a %s is not safe.' % object.__class__.__name__)
if name.startswith('_'):
raise AttributeError(
'"{name}" is an invalid attribute name because it '
'starts with "_"'.format(name=name)
)
return getattr(object, name, default)
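A short usage sketch showing the allowed and blocked cases:
print(safer_getattr("abc", "upper")())        # ABC
print(safer_getattr({}, "missing", "n/a"))    # n/a (falls back to the default)
# safer_getattr("{}", "format")    -> raises NotImplementedError
# safer_getattr("abc", "_private") -> raises AttributeError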
|
3a0c4a2a38c5f9f46ee02ab8ddb868eb67602f09
| 15,918
|
def check_replication(service, service_replication,
warn_range, crit_range):
"""Check for sufficient replication of a service
:param service: A string representing the name of the service
this replication check is relevant to.
:param service_replication: An int representing the number of available
service instances
:param warn_range: A two tuple of integers representing the minimum and
maximum allowed replication before entering the WARNING
state.
:param crit_range: A two tuple of integers representing the minimum and
maximum allowed replication before entering the CRITICAL
state.
Note that all ranges are closed interval. If the replication is outside the
closed interval for the relevant level (e.g. warning, critical), then
the error code will change appropriately.
:returns check_result: A tuple of error code and a human readable error
message. The error codes conform to the nagios plugin api.
e.g. for an OK service
(0,
"OK lucy has 1 instance(s)")
e.g. for a CRITICAL service
(2,
"CRITICAL lucy has 0 instance(s), expected value in [1, 1e18])
"""
code, status, interval = 0, 'OK', None
if not (crit_range[0] <= service_replication <= crit_range[1]):
code, status, interval = 2, 'CRITICAL', crit_range
elif not (warn_range[0] <= service_replication <= warn_range[1]):
code, status, interval = 1, 'WARNING', warn_range
expected_message = ""
if interval is not None:
expected_message = ", expected value in {0}".format(interval)
message = "{0} {1} has {2} instance(s){3}".format(
status, service, service_replication, expected_message
)
return code, message
|
ac515bd0881431eebd0e14d0578f0bb1c07835f3
| 15,919
|
import json
def load_metadata(filename):
"""Read json from file and return json object."""
with open(filename, encoding="utf-8") as fd:
return json.load(fd)
|
9a9fbccaf4a7e64d2aef4b427a68226e2b41c181
| 15,920
|
def compare_question_answer(response, question_id, value):
"""
Return whether a question has the required ``value``.
Return a sympy boolean.
"""
answer = response[question_id]
return str(answer) == str(value)
|
c32f304807c48da873fec6c3fef9a0f2a48ebf95
| 15,921
|
def get_section_number(data):
"""Gets the section number from the given section data
Parses the given array of section data bytes and returns the section number. SI tables come in sections.
Each section is numbered and this function will return the number of the given section.
"""
return data[6]
|
5d7b9c51d614f627e3765b683216905ad124598b
| 15,922
|
import os
def match_superpmi_tool_files(full_path):
""" Match all the SuperPMI tool files that we want to copy and use.
Note that we currently only match Windows files.
"""
file_name = os.path.basename(full_path)
if file_name == "superpmi.exe" or file_name == "mcs.exe":
return True
return False
|
29191936285ee7de5ed982896b4b207fdf5dc3ba
| 15,923
|
def denumpyfy(tuple_list_dict_number):
"""A nested structure of tuples, lists, dicts and the lowest level numpy
values gets converted to an object with the same structure but all being
corresponding native python numbers.
Parameters
----------
tuple_list_dict_number : tuple, list, dict, number
The object that should be converted.
Returns
-------
tuple, list, dict, native number (float, int)
The object with the same structure but only native python numbers.
"""
if isinstance(tuple_list_dict_number, tuple):
return tuple([denumpyfy(elem) for elem in tuple_list_dict_number])
if isinstance(tuple_list_dict_number, list):
return [denumpyfy(elem) for elem in tuple_list_dict_number]
if isinstance(tuple_list_dict_number, dict):
return {denumpyfy(k): denumpyfy(tuple_list_dict_number[k])
for k in tuple_list_dict_number}
if isinstance(tuple_list_dict_number, float):
return float(tuple_list_dict_number)
if isinstance(tuple_list_dict_number, int):
return int(tuple_list_dict_number)
return tuple_list_dict_number
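A usage sketch, assuming numpy is available; np.float64 subclasses Python float, so such leaves are converted while other values pass through unchanged:
import numpy as np
nested = {"loss": np.float64(0.25), "history": [np.float64(1.0), 2]}
print(denumpyfy(nested))                 # {'loss': 0.25, 'history': [1.0, 2]}
print(type(denumpyfy(nested)["loss"]))   # <class 'float'>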
|
70558250e3875cde2c66fe6680fd6a6ace498602
| 15,924
|
def _get_nn_for_timestamp(kd_tree, X, timestep, aso_idx, k, radius):
"""Returns the nearest ASOs to the provided `aso_idx` ASO. If a `radius` is
provided then the results are all the ASOs within that given radius, otherwise
the results are the `k` nearest ASOs
:param kd_tree: The KD-tree build for the prediction timestep
:type kd_tree: sklearn.neighbors.KDTree
:param X: The numpy array of orbital predictions for each ASO for the
prediction timestep
:type X: numpy.array
:param timestep: The orbital prediction timestep that the `X` array
represents
:type timestep: int
:param aso_idx: The index in `X` of the ASO to find nearest ASOs for
:type aso_idx: int
:param k: The number of nearest ASOs to return. Not used if `radius` is passed
:type k: int
:param radius: The radius, in meters, to use in determining what is a near ASO
:type radius: float
:return: A list of tuples representing all ASOs that match the provided query
where the first value is the index in `X` of the matching ASO, the
second value is the timestep where this match occurred, and the third
value is the distance from the query ASO to the matching ASO.
:rtype: [(int, int, float)]
"""
query_point = X[aso_idx].reshape(1, -1)
if radius:
result_idxs, dists = kd_tree.query_radius(query_point,
r=radius,
return_distance=True)
else:
dists, result_idxs = kd_tree.query(query_point, k=k+1)
idx_dists = zip(result_idxs[0], dists[0])
if radius:
# Only return results that have non-zero distance
result = [(int(i), int(timestep), float(d))
for i, d in idx_dists
if d > 0]
else:
# Remove query object from results
result = [(int(i), int(timestep), float(d))
for i, d in idx_dists
if i != aso_idx]
return result
|
d2dc4f7912aafc903782c21ec374bdd4d24475bc
| 15,925
|
def get_uid():
"""Inquiry UID hack for RBAC
"""
return 'inquiry'
|
6cf38c5a3d818d393b132aef25880074d30279b6
| 15,926
|
import os
def _check_if_folder_exists(folder: str) -> None:
"""Auxiliary function. Check if folder exists and create it if necessary."""
if not os.path.isdir(folder):
os.mkdir(folder)
return None
|
6ebd0782129170c9e4267a3e618754d689d5a063
| 15,927
|
def get_value_or_404(json: dict, key: str):
""" If a key is present in a dict, it returns the value of key else None."""
try:
return json[key]
except BaseException:
# print(f'{key} not found')
return None
|
781f8e9de99863e9e172c4c1ca24d5203d28f127
| 15,928
|
def _poynting(field):
"""Computes poynting vector from the field vector"""
tmp1 = (field[0].real * field[1].real + field[0].imag * field[1].imag)
tmp2 = (field[2].real * field[3].real + field[2].imag * field[3].imag)
return tmp1-tmp2
|
55c334b5c2e5df87d13ad0c000e5e599d6e8b948
| 15,929
|
import socket
def tcp_port_reachable(addr, port, timeout=5):
"""
Return 'True' if we could establish a TCP connection with the given
addr:port tuple and 'False' otherwise.
Use the optional third argument to determine the timeout for the connect()
call.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(timeout)
try:
s.connect((addr, port))
s.shutdown(socket.SHUT_RDWR)
return True
except:
return False
|
22c530cdbccf6c19ffe60b5e1904e797f7d059ea
| 15,930
|
def scale_wind_speed(spd, scale_factor: float):
"""
Scales wind speed by the scale_factor
:param spd: Series or data frame or a single value of wind speed to scale
    :param scale_factor: Scaling factor in decimal; the returned value is the wind speed multiplied by
        scale_factor, e.g. a factor of 0.8 gives 0.8 times the wind speed and 1.2 gives 1.2 times the wind speed
:return: Series or data frame with scaled wind speeds
"""
return spd * scale_factor
|
a55b76aee3e3ab2718db2714b6be0915c24e1631
| 15,931
|
import shutil
def verbose_copy(src, dst):
"""复制文件(属性和内容均一致)"""
print("copying {!r} to {!r}".format(src, dst))
return shutil.copy2(src, dst)
|
0ba12f296ba45c22c31c8a23d4cc5355424dd719
| 15,932
|
def transform_dict(img):
"""
Take a raster data source and return a dictionary with geotranform values
and keys that make sense.
Parameters
----------
img : gdal.datasource
The image datasource from which the GeoTransform will be retrieved.
Returns
-------
dict
A dict with the geotransform values labeled.
"""
geotrans = img.GetGeoTransform()
ret_dict = {
'originX': geotrans[0],
'pixWidth': geotrans[1],
'rotation1': geotrans[2],
'originY': geotrans[3],
'rotation2': geotrans[4],
'pixHeight': geotrans[5],
}
return ret_dict
|
8817028adfce28ae7f7ae787d4256d52fee095bc
| 15,933
|
def delta(evt):
"""Modified delta to work with all platforms."""
if evt.num == 5 or evt.delta < 0:
return -1
return 1
|
fdb12d51c6bc25ef5b9848559dcdd26a52e5fe71
| 15,934
|
def path_parts(path):
"""Returns a list of all the prefixes for this document's [snoop.data.digests.full_path][].
This is set on the Digest as field `path-parts` to create path buckets in Elasticsearch.
"""
elements = path.split('/')[1:]
result = []
prev = None
for e in elements:
if prev:
prev = prev + '/' + e
else:
prev = '/' + e
result.append(prev)
return result
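A short usage sketch illustrating the prefix buckets produced for one path (the path is illustrative):
print(path_parts("/reports/2020/q1.pdf"))
# ['/reports', '/reports/2020', '/reports/2020/q1.pdf']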
|
13897228c62ebb258f7300ba2c95c02cd848fa2f
| 15,935
|
def read_list(filename):
"""
Read file line by line, ignoring lines starting with '#'
Parameters
----------
filename : str or pathlib.Path
Returns
-------
list
"""
with open(filename, 'r') as f:
lines = f.readlines()
return [line.rstrip('\n') for line in lines if not line.startswith('#')]
|
e4957425f1c2ff99e9e16ad8fe22e57ffa6e38a9
| 15,936
|
def norm_factory(v_min, v_max):
"""Creates a normalisation function for scores"""
v_range = v_max - v_min
def norm(val):
return (val - v_min) / v_range
return norm
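A minimal usage sketch (the bounds are illustrative):
norm = norm_factory(10, 20)
print(norm(15))   # 0.5
print(norm(10))   # 0.0
print(norm(20))   # 1.0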
|
4bd88c92785043c739c105715a7382c8d53efaba
| 15,938
|
import pickle
def read_pkl_file(fpath):
""" """
with open(fpath, "rb") as f:
return pickle.load(f)
|
cbf2d9e2f6cad1de5e26308f142baa15d0e3a4c3
| 15,939
|
def tokenize(text, re_, ignore):
"""tokenize an email template"""
tokens = []
pos = 0
match = re_.match(text)
while pos < len(text):
typ = match.lastgroup
if typ in ignore:
pos = max(match.end(), pos + 1)
elif typ == 'BODY':
tok = text[pos:]
tokens.append((typ, tok))
break
else:
tok = match.group(typ).strip()
tokens.append((typ, tok))
pos = match.end()
match = re_.match(text, pos)
return tokens
|
728b266c507268a7bbc6c086f0c4d7aa50bbdf8e
| 15,940
|
import pathlib
import os
def _get_auth_directory() -> pathlib.Path:
"""Find the first valid and specified AWS credentials directory."""
if creds_path := os.environ.get("AWS_SHARED_CREDENTIALS_FILE"):
return pathlib.Path(creds_path).expanduser().parent.absolute()
if config_path := os.environ.get("AWS_CONFIG_FILE"):
return pathlib.Path(config_path).expanduser().parent.absolute()
return pathlib.Path("~/.aws").expanduser().absolute()
|
8918b1e116675caa90ed82ba20d690a88492108b
| 15,941
|
def get_trunc_minute_time(obstime):
"""Truncate obstime to nearest minute"""
    return (int(obstime) // 60) * 60  # integer division so the result truncates to the start of the minute
|
1a1a6ba47573442f0e98ca9aeaa8a5506e7ab081
| 15,942
|
import subprocess
def version():
"""Generate version using oorb script."""
cmd = ['./build-tools/compute-version.sh']
return subprocess.check_output(cmd, cwd='oorb').decode().strip()
|
ae557209fddbb9bddd5f49cd04c86dc670fbe65c
| 15,943
|
def process_proc_output(proc, print_output=True):
"""Print output of process line by line. Returns the whole output."""
def _print(s):
if print_output:
print(s)
lines = []
    for line in iter(proc.stdout.readline, b''):
        line = line.decode()  # stdout yields bytes here; decode so the lines can be joined into a string
        _print('| %s' % line.rstrip())
        lines.append(line)
    return ''.join(lines)
|
5af5d3355a7d588806120da625894fcbe93bdca0
| 15,944
|
async def get_owner(group_id: int, bot):
""" 获取群主 QQ 号
"""
group_info = await bot._get_group_info(group_id=group_id)
return group_info['owner_id']
|
448eb13df73c0e12619a61dbafd47fef29a51ef8
| 15,946
|
def make_subtitle(rho_rms_aurora, rho_rms_emtf,
phi_rms_aurora, phi_rms_emtf,
matlab_or_fortran, ttl_str=""):
"""
Parameters
----------
rho_rms_aurora: float
rho_rms for aurora data differenced against a model. comes from compute_rms
rho_rms_emtf:
rho_rms for emtf data differenced against a model. comes from compute_rms
phi_rms_aurora:
phi_rms for aurora data differenced against a model. comes from compute_rms
phi_rms_emtf:
phi_rms for emtf data differenced against a model. comes from compute_rms
matlab_or_fortran: str
"matlab" or "fortran". A specifer for the version of emtf.
ttl_str: str
string onto which we add the subtitle
Returns
-------
ttl_str: str
Figure title with subtitle
"""
ttl_str += (
f"\n rho rms_aurora {rho_rms_aurora:.1f} rms_{matlab_or_fortran}"
f" {rho_rms_emtf:.1f}"
)
ttl_str += (
f"\n phi rms_aurora {phi_rms_aurora:.1f} rms_{matlab_or_fortran}"
f" {phi_rms_emtf:.1f}"
)
return ttl_str
|
7569e658785e571a4dcd4428e76a13b8b10e3327
| 15,947
|
from typing import List
def find_substring_by_pattern(
strings: List[str], starts_with: str, ends_before: str
) -> str:
"""
search for a first occurrence of a given pattern in a string list
>>> some_strings = ["one", "two", "three"]
>>> find_substring_by_pattern(some_strings, "t", "o")
'tw'
>>> find_substring_by_pattern(some_strings, "four", "five")
Traceback (most recent call last):
...
ValueError: pattern four.*five not found
:param strings: a list of strings where the pattern is searched for
:param starts_with: the first letters of a pattern
:param ends_before: a substring which marks the beginning of something different
:returns: a pattern which starts with ``starts_with`` and ends before ``ends_before``
"""
for package_name in strings:
starting_index = package_name.find(starts_with)
if starting_index >= 0:
ending_index = package_name.find(ends_before)
return package_name[starting_index:ending_index]
raise ValueError(f"pattern {starts_with}.*{ends_before} not found")
|
4bc0abe6fcdbf81350b575dd9834b9c646fda81e
| 15,948
|
def progress_level_percentage(percentage):
""" Progess percentage util.
- 0-33 red
- 34-66 yellow
- 67-100 green
"""
_percentage = int(percentage)
if 0 < _percentage <= 33:
level = 'danger'
elif 34 <= _percentage <= 66:
level = 'warning'
elif _percentage >= 67:
level = 'success'
else:
level = 'none'
return level
|
15a8078946211606d36374e7c3c16e41481613a4
| 15,949
|
from typing import Callable
def curry_explicit(function: Callable, arity: int) -> Callable:
"""
:param function: function to curry
:param arity: int positive value of wanted function arity
:return: curried function
"""
if arity < 0:
raise ValueError("Arity can not be negative number")
def curried_function(arg=None) -> Callable:
if arity == 0:
return function()
if arity == 1:
return function(arg)
return curry_explicit(lambda *args: function(arg, *args), arity - 1)
return curried_function
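A small usage sketch showing a three-argument function applied one argument at a time:
def add3(a, b, c):
    return a + b + c
curried = curry_explicit(add3, 3)
print(curried(1)(2)(3))   # 6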
|
bc3ce60239c56e5e177606ae777a3ee17f005b1d
| 15,951
|
def normalize_data(df):
"""
Normalizes the data by subtracting the mean and dividing by the max - min.
:param df: the dataframe that we are normalizing
:return: the normalized dataframe
"""
df_normalized = (df - df.mean()) / (df.max() - df.min())
return df_normalized
|
13adfc79876f989d6983f74e57c7372c9dc00000
| 15,952
|
def rdg(edges, gtype):
"""Reduce graph given type."""
if gtype == "reftype":
return edges[(edges.etype == "EVAL_TYPE") | (edges.etype == "REF")]
if gtype == "ast":
return edges[(edges.etype == "AST")]
if gtype == "pdg":
return edges[(edges.etype == "REACHING_DEF") | (edges.etype == "CDG")]
if gtype == "cfgcdg":
return edges[(edges.etype == "CFG") | (edges.etype == "CDG")]
if gtype == "all":
return edges[
(edges.etype == "REACHING_DEF")
| (edges.etype == "CDG")
| (edges.etype == "AST")
| (edges.etype == "EVAL_TYPE")
| (edges.etype == "REF")
]
|
20fbcd1c73eb9aa3e97c8e0c0eb1a9da180177fa
| 15,953
|
def create_custom_backbone(model_name, num_classes):
"""
    Load a custom network.
    model_name: name of the network, e.g. "ClassNet"
    num_classes: number of network outputs
    """
    # look up the network class by its name and instantiate it
method = eval(model_name)
return method(num_classes)
|
f7c7f4073f9b3bc4c884f2fd12d3e40098e2a4d7
| 15,954
|
def filter_packets_by_filter_list(packets, filter_list):
"""
:param packets: Packet list
:param filter_list: Filters with respect to packet field
:type filter_list: list of pyshark_filter_util.PySharkFilter
:return: Filtered packets as list
"""
filtered_packets = [packet for packet in packets
if all(single_filter.apply_filter_to_packet(packet) for single_filter in filter_list)]
return filtered_packets
|
61106178dca039498c4fec1239bd7d251b69f812
| 15,955
|