content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def fftmod(out):
    """Apply the FFT modulation pattern to an array.

    Multiplies every other row and every other column by -1 and then
    negates the whole array, which is equivalent to scaling element
    (i, j) by (-1)**(i + j + 1) — an N/2 circular shift (+/- pi
    modulation) of the FFT output.

    Args:
        out (array_like): Input to the FFT (expects a `.copy()` method
            and numpy-style fancy indexing; presumably a numpy array).

    Returns:
        A new array with the modulation applied; the input is not modified.
    """
    modulated = out.copy()
    modulated *= -1
    modulated[..., ::2, :] *= -1
    modulated[..., :, ::2] *= -1
    return modulated
|
d54fccc09baaf4e48d701090323f9c1f85c7a0bb
| 69,266
|
def split_seqid(seqid):
    """Return the last value of a pipe-delimited NCBI sequence ID.

    Takes index -2 of the split because NCBI IDs conventionally end
    with a trailing '|' (so -1 is an empty string). IDs without any
    '|' are returned unchanged.
    """
    if '|' in seqid:
        return seqid.split('|')[-2]
    return seqid
|
70d1024b9ea4faec951ee99d00b0743afdaee1cc
| 69,269
|
import pathlib
def get_file_extension(filename):
    """Reliably determine the relevant file extension of an archive.

    Only the last two suffixes are considered, and a suffix counts only
    when it is 3-4 characters long and purely alphabetic after the dot
    (this filters out version fragments like ".212").

    >>> get_file_extension("/tmp/foo_v3.8.2_linux_amd64.tgz")
    '.tgz'
    >>> get_file_extension("/tmp/foo_v1.2.3_darwin.1.212.tar.gz")
    '.tar.gz'
    >>> get_file_extension("/tmp/2020.12.31_windows.1.007.zip")
    '.zip'
    """
    candidates = pathlib.Path(filename).suffixes[-2:]
    return ''.join(
        suffix for suffix in candidates
        if 3 <= len(suffix) <= 4 and suffix[1:].isalpha()
    )
|
0687a75e4a9f93d4fbb302946ea103dd550108a5
| 69,271
|
import re
def get_new_filename(filename: str) -> str:
    """get_new_filename is system agnostic (3A, 3B1, 3B2).
    Gets an alyx compatible filename from any spikeglx ephys file.

    :param filename: Name of an ephys file
    :return: New name for ephys file
    :raises ValueError: if the filename has fewer than 3 dot-separated
        parts or no ``_g<n>_t<n>`` tag in its first part.
    """
    root = "_spikeglx_ephysData"
    parts = filename.split('.')
    if len(parts) < 3:
        # The `fr'...(unknown)...'` literals here were corrupted placeholders;
        # embed the offending filename so the error is actionable.
        raise ValueError(f'unrecognized filename "{filename}"')
    pattern = r'.*(?P<gt>_g\d+_t\d+)'
    match = re.match(pattern, parts[0])
    if not match:  # py 3.8
        raise ValueError(f'unrecognized filename "{filename}"')
    return '.'.join([root + match.group(1), *parts[1:]])
|
d123e53f6ad72c25485c55e5e5a9e1f23e5c7501
| 69,273
|
def hasTemplate(s):
    """Return True when *s* contains both '{' and '}' template markers."""
    return all(marker in s for marker in ('{', '}'))
|
ab2f68c0e5b77ab4336a8814eb686b2743c2f6e1
| 69,276
|
def id_prefix(tag_name):
    """Return the id attribute prefix for the tag name.

    Unknown tag names yield the string "None" (str() of the missing
    lookup) — callers appear to rely on always getting a string back.
    """
    prefixes = {
        "mml:math": "m",
        "disp-formula": "equ",
        "fig": "fig",
        "table-wrap": "table",
        "media": "video",
    }
    return str(prefixes.get(tag_name))
|
0b9698cd0626371c38ac6ddd220318ff3660a7cf
| 69,279
|
def count_parameters(model):
    """Count TensorFlow model parameters.

    Sums, over every trainable variable, the product of that variable's
    shape dimensions.

    Parameters
    ----------
    model: TensorFlow model (anything exposing `trainable_variables`
        whose items have `get_shape()`)

    Returns
    -------
    int
        Total number of trainable parameters.
    """
    total = 0
    for variable in model.trainable_variables:
        n_elements = 1
        for dim in variable.get_shape():
            n_elements *= dim
        total += n_elements
    return int(total)
|
a696e8ceaf975a61e3228dffc11ee1c14b891c84
| 69,282
|
import math
def ppmi_similarity(target_word, context_word, co_occurrences, word_id, word_frequency, count):
    """Compute positive PMI: max(0, log2(P(w1,w2) / (P(w1) * P(w2)))).

    :param target_word: the target word
    :param context_word: the context word
    :param co_occurrences: nested mapping id -> id -> co-occurrence count
    :param word_id: word -> wordID mapping used in co_occurrences
    :param word_frequency: word -> frequency mapping for the corpus
    :param count: total number of word occurrences in the corpus
    :return: the PPMI value, or 0 when the pair never co-occurs
    """
    tid = word_id[target_word]
    cid = word_id[context_word]
    target_count = word_frequency[target_word]
    context_count = word_frequency[context_word]
    if cid not in co_occurrences[tid]:
        return 0
    p_pair_given_target = co_occurrences[tid][cid] / target_count
    p_target = target_count / count
    p_context = context_count / count
    pmi = math.log2(p_pair_given_target / (p_target * p_context))
    return pmi if pmi >= 0 else 0
|
2b25181b0d9be852e41757260836161d7f531a0b
| 69,284
|
def save_list(input_list, file_name):
    """Write each item of *input_list* on its own line in *file_name*.

    The file is overwritten. Returns None.
    """
    with open(file_name, 'w') as handle:
        handle.writelines('{0}\n'.format(entry) for entry in input_list)
    return None
|
22593dcd0003d01c3cccf0faf7c1b690ea7f4cd3
| 69,287
|
import json
def end(event, context):
    """
    Second Lambda function. Triggered by the SQS.

    Parses the JSON body of the first SQS record and logs its 'data'
    field; always returns an empty string.

    :param event: AWS event data (the SQS payload)
    :param context: AWS function's context (unused)
    :return: ''
    """
    print(f'sqs event: {event}')
    first_record = event['Records'][0]
    body = json.loads(first_record['body'])
    print(f"sqs parsed body: {body['data']}")
    return ''
|
96f5a38dca3dceaa7597fb262b6a56b12fdc5f71
| 69,290
|
import math
def score(x: float, y: float) -> int:
    """Calculate the dart score from its landing coordinates.

    Concentric rings of radius 1, 5 and 10 score 10, 5 and 1 points;
    anything further out scores 0.

    :param x: float - x coordinate of a dart.
    :param y: float - y coordinate of a dart.
    :return: int - score.
    """
    radius = math.sqrt(x ** 2 + y ** 2)
    for limit, points in ((1, 10), (5, 5), (10, 1)):
        if radius <= limit:
            return points
    return 0
|
8f8f4ba74ab6570ecc92af2f4265d47f1f7c6197
| 69,292
|
def assert_trigger(library, session, protocol):
    """Assert software or hardware trigger.

    Thin wrapper over the viAssertTrigger function of the VISA library.

    Parameters
    ----------
    library : ctypes.WinDLL or ctypes.CDLL
        ctypes wrapped library.
    session : VISASession
        Unique logical identifier to a session.
    protocol : constants.TriggerProtocol
        Trigger protocol to use during assertion.

    Returns
    -------
    constants.StatusCode
        Return value of the library call.
    """
    status = library.viAssertTrigger(session, protocol)
    return status
|
65def6f6c1df3d7eff84c02550f8a4f6e623b1a7
| 69,294
|
def reverse_string(string, result=None):
    """Reverse a string using recursion.

    Builds the reversed characters into *result* (an accumulator list,
    created on the first call) and joins them once every character has
    been consumed. Deliberately avoids slicing (string[::-1]) and
    iteration.

    :param string: the string to reverse
    :param result: internal accumulator; callers should omit it
    :raises TypeError: if *string* is not a str
    """
    if not isinstance(string, str):
        raise TypeError("Must be a string")
    if result is None:
        result = []
    remaining = len(string) - len(result)
    if remaining == 0:
        return ''.join(result)
    # Take characters from the tail towards the head.
    result.append(string[remaining - 1])
    return reverse_string(string, result)
|
24783afc43bfed473d4105ddec9f24e7afc31f61
| 69,297
|
import click
from typing import Any
def get_click_supplied_value(ctx: click.core.Context, param_name: str) -> Any:
    """Find the value passed to Click, in priority order:

    #1 - a parameter passed on the command line (ctx.params)
    #2 - a config value passed through @click_config_file (ctx.default_map)
    #3 - None

    Click does not appear to expose this lookup directly, hence the
    manual fallback chain.
    """
    params = ctx.params
    if param_name in params:
        return params[param_name]
    defaults = ctx.default_map
    if defaults:
        return defaults.get(param_name)
    return None
|
b8d10589f9cd00cb52d3d9634dd6bad4a16217ec
| 69,300
|
def numericCols(df):
    """Return the column labels of *df* that hold numeric dtypes
    (16/32/64-bit ints and floats)."""
    numeric_dtypes = ['int16', 'int32', 'int64',
                      'float16', 'float32', 'float64']
    return df.select_dtypes(include=numeric_dtypes).columns
|
3da3034136efc7ce1e521b662acffe9b248ef94d
| 69,304
|
def filter_matches_distance(matches, dist_threshold):
    """
    Filter matched features from two images by the Lowe ratio test.

    Arguments:
        matches -- iterable of (best, second_best) match pairs, each with
                   a `.distance` attribute
        dist_threshold -- maximum allowed relative distance between the
                          best matches, (0.0, 1.0)

    Returns:
        list of best matches satisfying the ratio threshold
    """
    return [best for best, second in matches
            if best.distance <= dist_threshold * second.distance]
|
a43e364a6239dc7246a5d0da59acdfbf3bc89409
| 69,305
|
def GetCrProbs(rates, pixarea, exptime):
    """
    Calculate expected numbers of CR hits per pixel.

    Parameters
    ----------
    rates: tuple of float
        CR hit rates (hits/cm^2/s).
    pixarea: float
        Pixel area (cm^2).
    exptime: float
        Exposure time (s).

    Returns
    -------
    probs: tuple of float
        rate * pixarea * exptime for each element of `rates`.
    """
    exposure_factor = pixarea * exptime  # cm^2 s
    return tuple(rate * exposure_factor for rate in rates)
|
0e584ca0801bf023615180c4f32c49a396526468
| 69,312
|
def guess_metal(ase_obj):
    """
    Make an educated guess of the metallic compound character.

    Returns True when none of the structure's chemical symbols appears
    in the fixed set of non-metallic elements.
    """
    non_metallic_atoms = {
        'H', 'He',
        'Be', 'B', 'C', 'N', 'O', 'F', 'Ne',
        'Si', 'P', 'S', 'Cl', 'Ar',
        'Ge', 'As', 'Se', 'Br', 'Kr',
        'Sb', 'Te', 'I', 'Xe',
        'Po', 'At', 'Rn',
        'Og'
    }
    present = set(ase_obj.get_chemical_symbols())
    return present.isdisjoint(non_metallic_atoms)
|
33da43c90ba2feac9436a99de2ace84e35b4a458
| 69,316
|
def get_lines_from_file(file_path: str) -> list:
    """Read a UTF-8 text file and return its lines (newlines retained).

    :param str file_path: The path to the file.
    :Returns: A list of lines (each a `str`).
    """
    with open(file_path, encoding="utf-8") as handle:
        lines = handle.readlines()
    return lines
|
487a9c07bf2d46e84a3be2e1aed91e5df0a8179b
| 69,317
|
def fqcn(cls):
    """Return the fully qualified class name ("module.ClassName")."""
    return '{0}.{1}'.format(cls.__module__, cls.__name__)
|
b62b4fc5e9f0a6a046a443554f1fca4054711d93
| 69,319
|
import re
def to_hex_ascii(value):
    """Convert an integer value to a C-style hex byte listing.

    Produces comma-separated '0xNN' tokens (newline inserted before
    every 8th byte) followed by a trailing ',\\n', together with the
    number of bytes emitted.
    """
    text = '{:02X}'.format(value)
    # Keep an even number of hex digits so they pair into bytes.
    if len(text) % 2:
        text = '0' + text
    pairs = [text[i:i + 2] for i in range(0, len(text), 2)]
    tokens = []
    for idx, pair in enumerate(pairs):
        token = '0x' + pair
        if idx != 0 and idx % 8 == 0:
            token = '\n' + token
        tokens.append(token)
    return ', '.join(tokens) + ',\n', len(tokens)
|
8868aecb273ef71cfb80069a40ddcdccd674685d
| 69,324
|
from typing import Counter
def tweetsByDate(tweets):
    """
    Group received Tweet objects by calendar date (derived from their
    createdAt timestamp), returning a Counter of date-string -> count.
    """
    dates = (str(tweet.createdAt.date()) for tweet in tweets)
    return Counter(dates)
|
111a6654aa5204fb5af577dbed5d6f5e5664878a
| 69,325
|
def load_tests(loader, standard_tests, pattern):
    """Prevents test discovery in the mixin modules.

    Mixin test cases can't be run directly; they need a concrete test
    class to perform their setup. Returning the already-loaded tests
    unchanged stops unittest from discovering anything new here, while
    still allowing discovery in classes that inherit from the mixins.

    Args:
        loader: A unit test loader instance. Unused.
        standard_tests: Already loaded test cases.
        pattern: Test method name pattern. Unused.

    Returns:
        Already loaded standard_tests.
    """
    del loader, pattern  # Unused.
    return standard_tests
|
d6784ad9699e4d84826f89815996bb351af2e35e
| 69,327
|
def time_minutes_to_string(time: int) -> str:
    """
    Convert a number of minutes after 00:00 into "HH:MM" form.

    :param time: The number of minutes between 'time' and 00:00
    :return: A string of the form "HH:MM" representing a time of day
    """
    hours = int(time / 60)
    minutes = time % 60
    return "{0:0=2d}:{1:0=2d}".format(hours, minutes)
|
b2754e7aa747ae5bad6d580d625583e1722d101c
| 69,332
|
def _find_pypeit_block(lines, group):
"""
Find the PypeIt group block
Args:
lines (:obj:`list`):
List of file lines
group (:obj:`str`):
Name of group to parse
Returns:
int, int: Starting,ending line of the block; -1 if not present
"""
start = -1
end = -1
for i, l in enumerate(lines):
entries = l.split()
if start < 0 and entries[0] == group and entries[1] == 'read':
start = i+1
continue
if entries[0] == group and entries[1] == 'end':
end = i
continue
if start >= 0 and end >= 0:
break
return start, end
|
2c8df14c9f08ce25aab6ab7b9d1e930e2183e5af
| 69,336
|
from pathlib import Path
from typing import Tuple
from typing import Set
def packages_find(workspace_path: Path, tracing: str = "") -> Tuple[Path, ...]:
    """Find the various packages within a ROS2 workspace.

    A package directory is any child of <workspace>/src that contains a
    package.xml or a CMakeLists.txt. The result is sorted and de-duplicated.
    """
    if tracing:
        print(f"{tracing}=>packages_find({workspace_path})")
    src_path: Path = workspace_path / "src"
    assert src_path.is_dir(), f"{src_path} is not a directory"
    found: Set[Path] = set()
    for marker in ("*/package.xml", "*/CMakeLists.txt"):
        for marker_path in src_path.glob(marker):
            found.add(marker_path.parent)
    final_packages: Tuple[Path, ...] = tuple(sorted(found))
    if tracing:
        print(f"{tracing}<=packages_find({workspace_path})=>{final_packages}")
    return tuple(final_packages)
|
6f59433ab0cf60e843a197b50c95a54853cc3061
| 69,340
|
def pad(batch, fill=0):
    """Pad a mini-batch of sequence samples to this batch's maximum length.

    :param batch: list of list
    :param fill: word index to pad with, default 0.
    :return: the same batch list, with short samples padded in place
    """
    longest = max(len(sample) for sample in batch)
    for idx, sample in enumerate(batch):
        shortfall = longest - len(sample)
        if shortfall:
            batch[idx] = sample + [fill] * shortfall
    return batch
|
b1e17bdb2c289944379c98458f7dfe5cd5e2a13f
| 69,342
|
def pool_targets(config):
    """Comma-delimited list of pool_target section names.

    @returns: str built from the 'pool_target' key of each entry in
        config.pool_config
    """
    names = (section['pool_target'] for section in config.pool_config)
    return ', '.join(names)
|
6ce24127a712b9488d55295853b9fc50fa1d7725
| 69,344
|
def fetch_available_volumes(ec2, filters=None):
    """
    Generator of available EBS volumes.

    :param ec2: EC2 resource
    :type ec2: boto3.resources.factory.ec2.ServiceResource
    :param filters: Optional list of filters; the caller's list is NOT
        modified (the previous implementation appended to it in place).
    :type filters: None|list
    :returns: volumes collection
    :rtype: boto3.resources.collection.ec2.volumesCollection
    """
    # Work on a copy so the caller's filter list is left untouched.
    filters = list(filters) if filters is not None else []
    # Only volumes in the 'available' state.
    # Ref: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html
    filters.append({
        'Name': 'status',
        'Values': ['available'],
    })
    return ec2.volumes.filter(
        Filters=filters
    )
|
97576e74cfebf45e70943573637503e9427f8758
| 69,347
|
import json
def get_cached_variants(cache, read_id):
    """Try to get variants for a read we've seen before.

    Useful for ONT reads, where retrieving the many variants per read
    is slow. Returns the decoded JSON payload, or None when the cache
    is absent or has no entry for this read.
    """
    key = f"variants.{read_id}"
    if not cache or not cache.exists(key):
        return None
    return json.loads(cache.get(key))
|
f632ffd3eec2831d0fb9af42cf18b786ec87db51
| 69,348
|
def get_categories(cat_file):
    """Read plasmid categories from a whitespace-delimited file.

    Each line's first field is the key and second field the category.
    """
    categories = {}
    with open(cat_file, 'r') as handle:
        for raw_line in handle:
            fields = raw_line.strip().split()
            categories[fields[0]] = fields[1]
    return categories
|
2b03c548977bb6d89450da4d0bb2a02e1cdbeb03
| 69,349
|
import random
def mutate_bytes(inset, mutation_chance=2):
    """Mutate the instruction set by replacing whole bytes.

    Each byte has a mutation_chance-in-100 chance of being replaced by
    a fresh random byte.
    """
    mutated = []
    for byte in inset:
        if random.randrange(100) < mutation_chance:
            mutated.append(random.randrange(256))
        else:
            mutated.append(byte)
    return mutated
|
2f6c3933b1a49cfaac769a8f108277ade795ca89
| 69,350
|
def get_class_name(class_or_instance) -> str:
    """Return the qualified name of a class.

    Accepts either a class (its own __qualname__ is used) or an
    instance (falls back to the instance's type).
    """
    qualname = getattr(class_or_instance, '__qualname__', None)
    if qualname is None:
        # No __qualname__ attribute: we're dealing with an instance.
        qualname = type(class_or_instance).__qualname__
    return qualname
|
8918b2200dbce200b3f560e7730e474acdae8584
| 69,353
|
def _overlap_ratio(box1, box2):
"""Computes overlap ratio of two bounding boxes.
Args:
box1: (x, y, width, height).
box2: (x, y, width, height).
Returns:
float, represents overlap ratio between given boxes.
"""
def _area(box):
_, _, width, height = box
area = width * height
assert area >= 0
return area
def _intersection_area(box1, box2):
x1, y1, width1, height1 = box1
x2, y2, width2, height2 = box2
x = max(x1, x2)
y = max(y1, y2)
width = max(min(x1 + width1, x2 + width2) - x, 0)
height = max(min(y1 + height1, y2 + height2) - y, 0)
area = width * height
assert area >= 0
return area
intersection_area = _intersection_area(box1, box2)
union_area = _area(box1) + _area(box2) - intersection_area
assert union_area >= 0
if union_area > 0:
return float(intersection_area) / float(union_area)
return 1.0
|
1fb3c75eeb99d4ce568fed8243096edb1f2313f8
| 69,354
|
from datetime import datetime
def datetime_to_discord(time: datetime, format: str = "f") -> str:
    """Convert a datetime object to a Discord timestamp markup string
    of the form "<t:UNIX_SECONDS:STYLE>"."""
    unix_seconds = int(time.timestamp())
    return "<t:" + str(unix_seconds) + ":" + format + ">"
|
a428141f8afa4a81c04a01d10eb5f46d1dd0e071
| 69,356
|
def corners_to_box(x0, y0, x1, y1):
    """Convert two corners (x0, y0, x1, y1) to (x, y, width, height).

    The corners may be given in any order; width/height are inclusive
    (a single-pixel box has width 1).
    """
    left, right = (x0, x1) if x0 <= x1 else (x1, x0)
    top, bottom = (y0, y1) if y0 <= y1 else (y1, y0)
    return left, top, right - left + 1, bottom - top + 1
|
1ff85fd719f182698a7610266b06d255547f767a
| 69,359
|
def get_package_breadcrumbs(package_tree, name, version):
    """
    Take an `npm ls` JSON tree and look up the paths to the given
    dependency (name and version).

    Returns an array of paths, where a path is the list of
    'name@version' strings leading to the dependency in the tree.
    A match's own sub-dependencies are not descended into.
    """
    def walk(dependencies, path):
        # Recursively collect paths; called once per dependency level.
        found = []
        for dep_name, dep in dependencies.items():
            if dep_name == name and dep.get('version') == version:
                found.append(path)
            elif 'dependencies' in dep:
                crumb = '{}@{}'.format(dep_name, dep['version'])
                found.extend(walk(dep['dependencies'], path + [crumb]))
        return found
    return walk(package_tree['dependencies'], [])
|
e9c18dac3ca2237de9d1e85d983d4ed372ba65bd
| 69,361
|
def get_DFG_name(job_name):
    """Return the DFG name: the second dash-separated field of the job name."""
    fields = job_name.split('-')
    return fields[1]
|
c87983bfdfee4c90844e52bd5979ccb32cc047d6
| 69,362
|
def split_subview_sep(string):
    """
    Split the comma-separated subview separators string.

    --subview-sep ' - ,.' -> [' - ', '.']
    """
    separators = string.split(",")
    return separators
|
f5a2eec9ed5df636cb97b71a5895eda148960ed9
| 69,363
|
def zeller(config, n):
    """
    Split the input config into n pieces as done by Zeller's original
    reference implementation: iteratively slice off 1/n-th of the
    original, then 1/(n-1)-th of the remainder, and so on (integer
    division throughout).

    :param config: The configuration to split.
    :param n: The number of sets the configuration will be split up to.
    :return: List of the split sets.
    """
    pieces = []
    start = 0
    # Remaining divisor counts down from n to 1.
    for divisor in range(n, 0, -1):
        size = (len(config) - start) // divisor
        pieces.append(config[start:start + size])
        start += size
    return pieces
|
c5f7023f1bae5f1eac75b992fa372d9360ae2f2b
| 69,368
|
def bound(value, bound1, bound2):
    """
    Clamp *value* into the interval spanned by bound1 and bound2
    (given in either order): returns value when inside, otherwise the
    nearer bound.
    """
    low, high = min(bound1, bound2), max(bound1, bound2)
    return max(low, min(value, high))
|
2b3a8c474c8ef10ca15dd9ad8cec0f50726565fd
| 69,370
|
def sanitize_metric_name(name: str) -> str:
    """Sanitize a scope/variable name for TensorFlow and MLflow.

    Variables auto-created while building layers can contain characters
    (notably ':', as in 'my_layer/kernel:0') that both TF scope-name and
    MLflow metric-name validators reject; replacing ':' with '-' makes
    the name valid for both.
    """
    return name.replace(":", "-")
|
6a54703fdd6ac3158d4bb28e60a897e378444874
| 69,375
|
def key_name(tpl):
    """Return the .name of the first item in the tuple."""
    first_item = tpl[0]
    return first_item.name
|
146680341487eb0a33afacc2c8ebe8f3b050f9c5
| 69,376
|
import csv
def csvtolist(inputstr):
    """Convert a single CSV-formatted string into a flat list of fields
    (leading whitespace after commas is stripped)."""
    rows = csv.reader([inputstr], skipinitialspace=True)
    fields = []
    for row in rows:
        fields.extend(row)
    return fields
|
0f1b9173c886709e813999d4174a74cc2efbe33e
| 69,378
|
def cost(graph, e):
    """
    Return the cost of an edge on a graph.

    Parameters
    ----------
    graph : a networkx graph.
    e : an edge on the graph.

    Returns
    -------
    The 'weight' attribute of edge *e* in *graph*.
    """
    edge_data = graph.edges[e]
    return edge_data['weight']
|
af41eb667d2ea586b523fd31c72d4e377ffbfa04
| 69,384
|
def check_source_place_presence(net):
    """
    Check whether the Petri net has a unique source place (a place with
    no incoming arcs).

    Parameters
    -------------
    net
        Petri net

    Returns
    -------------
    place
        The unique source place, or None when there are zero or several.
    """
    sources = [place for place in net.places if len(place.in_arcs) == 0]
    return sources[0] if len(sources) == 1 else None
|
2c890e765a419ca1c89a5e6d0079affb791bb2ea
| 69,386
|
def InvalidParsedHotlistRefsNames(parsed_hotlist_refs, user_hotlist_pbs):
    """Find and return all names without a corresponding hotlist so named.

    Args:
      parsed_hotlist_refs: a list of ParsedHotlistRef objects
      user_hotlist_pbs: the hotlist protobuf objects of all hotlists
          belonging to the user

    Returns:
      a list of invalid names; the empty list if none are found
    """
    known_names = {hotlist.name for hotlist in user_hotlist_pbs}
    return [ref.hotlist_name for ref in parsed_hotlist_refs
            if ref.hotlist_name not in known_names]
|
a02edb1c66a1b28b0b73d8555f22d9335b102185
| 69,389
|
def calculate(user_input):
    """Evaluate a user-entered formula ('^' is treated as power).

    Args:
        user_input (str): Formula the user entered.

    Returns:
        The evaluated result, or the string 'Please check your syntax'
        when evaluation fails.

    Warning:
        This uses eval() and therefore executes arbitrary Python —
        only ever feed it trusted input.
    """
    try:
        # NOTE(security): eval() on user input allows code execution; kept
        # because this is the calculator's core, but do not expose it to
        # untrusted callers.
        return eval(user_input.replace('^', "**"))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as syntax errors.
        return 'Please check your syntax'
|
6d251cf0db7c4fa52724013027c5742414b06f14
| 69,392
|
def preprocess_sentence(start_sign, end_sign, sentence):
    """
    Wrap a sentence with start and end markers.

    Args:
        start_sign: start-of-sentence token
        end_sign: end-of-sentence token
        sentence: the sentence to process

    Returns:
        "<start_sign> <sentence> <end_sign>" as a single
        space-separated string
    """
    return ' '.join((start_sign, sentence, end_sign))
|
50412735484e4d2219e098edcb4f481db7c1071d
| 69,395
|
def detach_and_delete_iam_policy(config, iam):
    """Detach a policy from the Redshift role and delete it.

    Best-effort: any failure is printed rather than raised, in which
    case None is returned.

    Args:
        config: a ConfigParser object with an 'IAM_ROLE' section
            holding NAME, POLICY_NAME and POLICY_ARN
        iam: a boto3 client object for the AWS IAM service

    Returns:
        dict with the AWS delete_policy response, or None on error
    """
    try:
        role_cfg = config['IAM_ROLE']
        print("Detaching policy: ", role_cfg['POLICY_NAME'])
        iam.detach_role_policy(RoleName=role_cfg['NAME'],
                               PolicyArn=role_cfg['POLICY_ARN'])
        print("Deleting policy: ", role_cfg['POLICY_NAME'])
        return iam.delete_policy(PolicyArn=role_cfg['POLICY_ARN'])
    except Exception as e:
        print(e)
|
35920a537049759d592e69af9010ea96b9e68db2
| 69,398
|
def toSize(toPad, size):
    """
    Pad a string with spaces until it reaches a given length.

    Arguments:
        toPad - the string to pad
        size - the destination length

    Return:
        the string padded to length *size* (strings already longer than
        *size* are returned unchanged)
    """
    # str.ljust already space-pads up to *size* and leaves longer strings
    # alone, so the previous manual `toPad + " " * (size - len(toPad))`
    # step was redundant double padding.
    return toPad.ljust(size, " ")
|
119658ac387216debd5bbfa1ff39ff09ba1db3a5
| 69,403
|
import itertools
def create_discrete_actions(num_buttons, max_buttons_down):
    """
    Return the list of available actions for a discrete action space
    with num_buttons buttons of which at most max_buttons_down may be
    pressed at once. The no-op (all zeros) action is excluded.

    Parameters:
        num_buttons (int): Number of buttons available
        max_buttons_down (int): How many buttons can be pressed
            down at once.

    Returns:
        actions (List of Lists): The available button combinations.
    """
    actions = []
    for combo in itertools.product((0, 1), repeat=num_buttons):
        pressed = sum(combo)
        if 0 < pressed <= max_buttons_down:
            actions.append(list(combo))
    return actions
|
12b6ccd0efa327fddb52b8b45b663d9abb7a2457
| 69,407
|
def sortListsInDict(data, reverse=False):
    """Recursively walk a structure and sort the lists it contains.

    Lists of dicts are sorted by their 'name' key (only when every
    element has one); lists of strings are sorted directly; any other
    list — and any non-dict, non-list value — is returned unchanged.

    Args:
        data(dict): data dictionary (or any nested value)
        reverse: sort descending when True (Default value = False)

    Returns:
        dict -- the structure with lists sorted
    """
    if isinstance(data, dict):
        return {key: sortListsInDict(value, reverse) for key, value in data.items()}
    if not isinstance(data, list) or not data:
        return data
    first = data[0]
    if isinstance(first, dict) and all('name' in entry for entry in data):
        return sorted(data, key=lambda entry: entry['name'], reverse=reverse)
    if isinstance(first, str):
        return sorted(data, reverse=reverse)
    return data
|
6890cf234ec5eaed784215972fcdf2661bbdd188
| 69,409
|
def _partition(comparables, lo, hi):
    """Return index upon partitioning the array pivoting on the first element
    Arguments:
    comparables -- an array of which the elements can be compared
    lo -- lower bound of indices
    hi -- higher bound of indices
    After partition,
    elements to the left of pivot are no larger than the pivot;
    elements to the right of pivot are no smaller than the pivot.
    comparables[lo:j] <= pivot
    comparables[j+1:hi] >= pivot
    This design choice is essential for Nlog(N) performance for
    duplicate keys.
    """
    # Pivot value is comparables[lo]; scan the rest of [lo+1, hi) with
    # two converging pointers.
    i = lo + 1
    j = hi - 1
    while i < j:
        # Advance i past elements strictly smaller than the pivot.
        while i < hi and comparables[i] < comparables[lo]: i += 1
        # Retreat j past elements strictly larger than the pivot.
        while j > lo and comparables[j] > comparables[lo]: j -= 1
        if i < j:
            # Both pointers stopped on out-of-place elements: swap them.
            comparables[i], comparables[j] = comparables[j], comparables[i]
            # Stepping past equal elements spreads duplicates across both
            # halves, avoiding quadratic behavior on duplicate keys.
            i += 1 # essential for duplicate keys
            j -= 1 # essential for duplicate keys
    # Move the pivot into its final position j and report it.
    comparables[lo], comparables[j] = comparables[j], comparables[lo]
    return j
|
efdb16c4987d7c85b0e7f793a46c8403d11c7763
| 69,412
|
import re
def alphanumeric_split(time_string):
    """Split a string into its leading number and following alpha text.

    Whitespace is removed and the string lowercased first; the string is
    then split at digit runs, the first piece converted to float and the
    second returned as-is (e.g. '10 Min' -> (10.0, 'min')).
    """
    cleaned = str(time_string).replace(" ", "").lower()
    parts = [piece for piece in re.split(r'(\d+)', cleaned) if piece]
    return float(parts[0]), str(parts[1])
|
1149b871b482ef0603966fa7ce0522a499c4996d
| 69,414
|
def build_resilient_url(host, port):
    """
    Build the base url to a resilient instance, defaulting the scheme
    to https:// when the host does not already start with "http".

    :param host: host name
    :param port: port
    :return: base url
    """
    base = host if host.lower().startswith("http") else "https://" + host
    return "{0}:{1}".format(base, port)
|
1eca8af16cd20f6c7fc3185fe76d15a9fa328a3b
| 69,420
|
import json
def find_osd_by_id(osd_id, mon_node):
    """Find an OSD by its id using 'ceph osd find {id}'.

    Args:
        osd_id: osd id
        mon_node: mon node used to execute ceph commands

    Returns:
        osd_info parsed from the command's JSON output
    """
    stdout, _stderr = mon_node.exec_command(
        cmd=f"ceph osd find {osd_id} --format json",
        sudo=True,
    )
    raw = stdout.read().decode()
    return json.loads(raw)
|
754fdce0f465a8947ccb76e22770623370ba3429
| 69,423
|
def time2int(time_str: str) -> int:
    """Transform a time string like '01:57:00' to the int 157 (HHMM)."""
    hours = int(time_str[:2])
    minutes = int(time_str[3:5])
    return hours * 100 + minutes
|
219e646a7788a1bfd2c19352049324f3a0a03cd1
| 69,430
|
def identity(x):
    """
    The identity function: return the argument unchanged.
    Not to be confused with the id() builtin.

    >>> identity('foo')
    'foo'
    """
    return x
|
914f6432f202712f159e16ed40f0b4c9c458acca
| 69,431
|
def register_decorator(register):
    """Build a decorator factory bound to *register*.

    Parameters
    ----------
    register : dict
        The register into which decorated callables are stored.

    Returns
    -------
    func
        A factory: calling it with a name returns a decorator that
        stores the decorated callable under that name (and sets the
        callable's `.name` attribute).
    """
    def named_registrar(name):
        """Bind *name* and return the actual registering decorator."""
        def attach(function):
            register[name] = function
            function.name = name
            return function
        return attach
    return named_registrar
|
cdc3764ed26c7453e5679c3914e17ec4872c3a05
| 69,432
|
from typing import Iterable
from typing import Tuple
def atomtypes_atomnums_to_atoms(
    atomtypes: Iterable[str], atomnums: Iterable[int]
) -> Tuple[str, ...]:
    """Expand (atom name, count) pairs into a flat tuple of atom names.

    Parameters
    ------------
    atomtypes: list
        atom names
    atomnums: list
        atom numbers (repetition count per name)

    Examples
    --------
    >>> test_nums = [2, 3, 2, 1]
    >>> test_elements = ['Si', 'Ag', 'H', 'Si']
    >>> atomtypes_atomnums_to_atoms(test_elements, test_nums)
    ('Si', 'Si', 'Ag', 'Ag', 'Ag', 'H', 'H', 'Si')
    """
    expanded = []
    for element, repeat in zip(atomtypes, atomnums):
        expanded.extend([element] * repeat)
    return tuple(expanded)
|
a32c2afcaa1712c672072fc9c530f7ed44317ca7
| 69,441
|
def get_id_attribute_name(self):
    """
    Retrieve the name of the attribute considered to be the identifier
    of the current entity, by delegating to the entity class's
    get_id() for reliability.

    :rtype: String
    :return: The name of the identifier attribute of the current entity.
    """
    return self.__class__.get_id()
|
22d6f015074734e8668bc069c89c1b2408472519
| 69,443
|
def dotProduct(vector1, vector2):
    """Return the dot product of two equal-length vectors,
    or None when the lengths differ."""
    if len(vector1) != len(vector2):
        return None
    return sum(a * b for a, b in zip(vector1, vector2))
|
b45a06f51c4836e55a0708a82578fba6356317bb
| 69,445
|
def repr_fcall(fname, args, kwargs):
    """Nice string representation for a function call.

    Renders ``fname(arg1, arg2, kw1=v1, ...)`` with positional args
    repr()'d and keyword args as name=repr(value).
    """
    pieces = [repr(a) for a in args]
    pieces += ['%s=%r' % item for item in kwargs.items()]
    # Joining all pieces at once fixes the previous bug where no ', '
    # separated the last positional arg from the first keyword arg.
    return '%s(%s)' % (fname, ', '.join(pieces))
|
6ed771d3782cb06b28d1689a7d07d639a825a04b
| 69,448
|
import math
def get_p2l_dis(point_x: int, point_y: int, line_x1: int, line_y1: int, line_x2: int, line_y2: int) -> float:
    """
    Shortest distance from point P to the line through L1 and L2.

    Uses the implicit line form a*x + b*y + c = 0 derived from the two
    line points, then |a*px + b*py + c| / sqrt(a^2 + b^2).

    :param point_x: X coordinate of P (coordinates are ints in this project)
    :param point_y: Y coordinate of P
    :param line_x1: X coordinate of line point L1
    :param line_y1: Y coordinate of line point L1
    :param line_x2: X coordinate of line point L2
    :param line_y2: Y coordinate of line point L2
    :return: the distance as a float
    """
    a = line_y2 - line_y1
    b = line_x1 - line_x2
    c = line_x2 * line_y1 - line_x1 * line_y2
    numerator = math.fabs(a * point_x + b * point_y + c)
    denominator = math.pow(a * a + b * b, 0.5)
    return numerator / denominator
|
45fad4fe399981ae82ffe3824f7d4774936eedf4
| 69,449
|
import base64
def hexstring_to_b64(hexstring, as_str=False):
    """
    Convert a hex string to its base64 representation.

    :param str hexstring: A hex string as a `str` object.
    :param bool as_str: Whether to return the result as a `str` object.
    :return: The base64 representation as a bytestring, or as a `str`
        when *as_str* is true.
    """
    encoded = base64.b64encode(bytes.fromhex(hexstring))
    return encoded.decode() if as_str else encoded
|
071f8bb94ca92c6de1713a9ddae6399a6619c1e0
| 69,450
|
def parse_direction(item):
    """Parse a direction token into (rotation char, int step count)."""
    rotation, steps = item[0], item[1:]
    return rotation, int(steps)
|
09b55dc4e7a817d4058e5cbf7cd6238870ac76ac
| 69,451
|
import pkg_resources
def _parse_version_requirement(pkg_name):
    """Parse a version requirement from requirements.txt.

    Returns the first parsed version that meets the >= requirement.

    Parameters:
        pkg_name (string): The string package name to search for.

    Returns:
        The string version or None if no >= version requirement can be
        parsed.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the previous version left the open() result to be collected by GC.
    with open('requirements.txt') as requirements_file:
        for line in requirements_file:
            if line.startswith(pkg_name):
                # Assume that we only have one version spec to deal with.
                version_specs = pkg_resources.Requirement.parse(line).specs
                for requirement_type, version in version_specs:
                    if requirement_type == '>=':
                        return version
|
1c9bdb10d5fe401887ec9c72272355581dd24566
| 69,454
|
def list_to_lines(data):
    """Join a list into newline-separated lines; empty/None yields ''."""
    if not data:
        return ''
    return '\n'.join(data)
|
3cab4e53d46402cf45500b9b623857a5c064cd1a
| 69,465
|
def getOptionalAttribute(element, attribute):
    """Return the value of *attribute* on a DOM *element* without
    raising when the attribute is absent.

    :param element: the parent element that (might) contain the attribute
    :param attribute: the optional attribute to return the value of
    :return: the attribute's value, or None when it is not present
    """
    if not element.hasAttribute(attribute):
        return None
    return element.attributes[attribute].value
|
9662d697c80740d109acd9494e903c3efe791439
| 69,466
|
def num_digits(number):
    """
    Returns number of digits in an integer.
    The sign is not a digit, so num_digits(-42) == 2; the original
    implementation counted the '-' character for negative input.
    :param number: Integer
    :return: Number of digits
    """
    return len(str(abs(number)))
|
c026468131e019f731012216e73b9c17f4d3bafd
| 69,469
|
def user_from_face_id(face_id: str) -> str:
    """Extract the user name (text before the first underscore) from a face ID."""
    name, _, _rest = face_id.partition("_")
    return name.capitalize()
|
c49cc881b33699775338a0a772276702874029f3
| 69,470
|
import csv
def csv_to_dict(csv_path: str) -> dict:
    """
    Read a two-column CSV and return a dict mapping the first column to
    the second, with both sides lower-cased and stripped.  The first row
    is treated as a header and skipped.
    """
    with open(csv_path, 'r') as handle:
        rows = csv.reader(handle, delimiter=',')
        next(rows)  # discard the header row
        return {row[0].lower().strip(): row[1].lower().strip() for row in rows}
|
616150c0c30f63f1df2edfee25c6dd5e27fcc296
| 69,472
|
def hex_to_rgb(hex):
    """
    Map a hex string like "00ff00" to a list of r, g, b integer values.
    """
    red = int(hex[0:2], 16)
    green = int(hex[2:4], 16)
    blue = int(hex[4:6], 16)
    return [red, green, blue]
|
a94c98258605753343af07a9f4c8665a7629bd76
| 69,478
|
def PrintToConsole(message):
    """Write *message* to stdout and return the constant status code 1."""
    print(message)
    return 1
|
c15ce065f945b1d6d6707cab3fa589c10f8130cb
| 69,479
|
def unescape(val, maxLength=0):
    """Unquotes several HTML-quoted characters in a string.
    :param val: The value to be unescaped.
    :type val: str
    :param maxLength: Cut-off after maxLength characters.
            A value of 0 means "unlimited". (default)
    :type maxLength: int
    :returns: The unquoted string.
    :rtype: str
    """
    # "&amp;" must be decoded last so freshly produced "&" characters
    # cannot combine with following text into new entities.
    val = (
        str(val)
        .replace("&lt;", "<")
        .replace("&gt;", ">")
        .replace("&quot;", '"')
        .replace("&#39;", "'")
        .replace("&amp;", "&")  # missing in the original implementation
    )
    if maxLength > 0:
        return val[0:maxLength]
    return val
|
b420d6c33680a625242170c8c7ccabf1dc9f9ab9
| 69,480
|
def get_mvector_as_list(input_mvector):
    """
    Convert an MVector into a plain python list of its components.
    Args:
        input_mvector
    Return:
        out_list: [x, y, z]
    """
    return [input_mvector.x, input_mvector.y, input_mvector.z]
|
78704adddf2e6b77f2ee47b342ece4f301425985
| 69,482
|
def is_package_info_doc(document_name):
    """Return True when *document_name* represents a package-info.java file."""
    return "package-info" == document_name
|
66a46d9acf9f8a3a32033703329d7026b87f61f1
| 69,488
|
def verse(bottle):
    """Sing a verse"""
    plural = '' if bottle == 1 else 's'
    if bottle == 1:
        closing = 'No more bottles of beer on the wall!'
    else:
        remaining_plural = 's' if bottle > 2 else ''
        closing = '{} bottle{} of beer on the wall!\n'.format(bottle - 1, remaining_plural)
    lines = [
        '{} bottle{} of beer on the wall,'.format(bottle, plural),
        '{} bottle{} of beer,'.format(bottle, plural),
        'Take one down, pass it around,',
        closing,
    ]
    return '\n'.join(lines)
|
7c8456c0f51a336f4eb48038b8a653db382653b2
| 69,490
|
def create_response(code, method="GET", body="", additional_headers=None):
    """
    Build an HTTP response dictionary with permissive CORS headers.
    :param code: HTTP response code
    :param method: HTTP method advertised in Access-Control-Allow-Methods
    :param body: HTTP response body
    :param additional_headers: extra headers merged over the defaults
    :return: HTTP response dictionary
    """
    response_headers = {
        "Access-Control-Allow-Headers": "Content-Type",
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": method,
    }
    response_headers.update(additional_headers or {})
    return {"statusCode": code, "headers": response_headers, "body": body}
|
b09b4ab68aa2ac3c52f4cf3ea91af81ef5692159
| 69,491
|
from bs4 import BeautifulSoup
import requests
def get_soup(url):
    """Download *url* and parse the response body into a BeautifulSoup tree (lxml)."""
    response = requests.get(url)
    return BeautifulSoup(response.content, 'lxml')
|
c77b69132b6a2c537f3d0f976e210c3714a369ef
| 69,492
|
def Thrust_ship(thrust, motor_isp, mass_flow):
    """Calculates thrust from the rocket equation: Thrust = Isp * g0 * massflow
    Args:
        thrust: Ignored.  The original signature accepted it but overwrote
            it immediately; kept only for backward compatibility with
            existing callers.
        motor_isp (float): Efficiency constant for the motor.
        mass_flow (float): Total mass flow of exhaust.
    Returns:
        thrust (float): Thrust force produced by the rocket.
    """
    STANDARD_GRAVITY = 9.80665  # m/s^2, standard gravitational acceleration
    return motor_isp * STANDARD_GRAVITY * mass_flow
|
d37dc17c3462fc2081aacd27f2679885267e83c7
| 69,493
|
def _iam_ident_to_email(ident):
    """Given IAM identity returns email address or None."""
    if ident.startswith('user:'):
        return ident[len('user:'):]
    if ident.startswith('serviceAccount:'):
        return ident[len('serviceAccount:'):]
    return None
|
ada4ce18470d67d2311ccd6536d261a464ebe8b9
| 69,494
|
def load_vocab(fname):
    """
    load word dict
    Args:
        fname (str): filename of the vocab file (whitespace-separated
            "word index" lines; extra tokens on a line are ignored)
    Returns:
        dict: word dict mapping word -> integer index
    """
    with open(fname, "r") as vocab_file:
        tokenized = (line.split() for line in vocab_file)
        return {tokens[0]: int(tokens[1]) for tokens in tokenized}
|
16c6dfd865ffafce5c4886114121dee3146e9731
| 69,495
|
def get_meta(self, table_name):
    """
    Return a sequence comprising the lines of code necessary
    to construct the inner Meta class for the model
    corresponding to the given database table name.
    """
    # Normalise the table name to a lower-case string before embedding it;
    # presumably for a Django-style model's db_table value — TODO confirm.
    table_name = str(table_name).lower()
    # Each element is one literal source line (newlines and indentation
    # included) so the caller can emit them verbatim into generated code.
    # The exact whitespace inside these literals is part of the output.
    return ['\n',
            ' class Meta:\n',
            '\t db_table = %r\n' % table_name,
            '\n',
            '\n']
|
627f2ac1b1c72b29b832c4b52a238c659c54eaf1
| 69,497
|
def is_alive(thread):
    """Safely report whether *thread* is alive; a None/falsy thread yields False."""
    return bool(thread) and thread.is_alive()
|
9665f09b0c661d7ef0b65704ab8f501fff081cdd
| 69,498
|
def dist(xv, yv):
    """Return Manhattan distance between two points."""
    total = 0
    for a, b in zip(xv, yv):
        total += abs(a - b)
    return total
|
8a3aad33dc9615ef9b4780e81f98ab67c626f793
| 69,503
|
def calc_percent(part, whole):
    """Utility method for getting percentage of part out of whole
    Args:
        part (:obj:`int` or :obj:`float`)
        whole (:obj:`int` or :obj:`float`)
    Returns:
        :obj:`float`
    """
    # Either operand being zero short-circuits to 0.0 (also avoids
    # division by zero).
    if part == 0 or whole == 0:
        return 0.0
    return float(part) / float(whole) * 100
|
f6d0ff2e970af605ab77e1352370e76b06ec371c
| 69,504
|
def _create_ad_group(client, customer_id, campaign_resource_name):
    """Creates an ad group for the remarketing campaign.
    Args:
        client: An initialized GoogleAds client.
        customer_id: The Google Ads customer ID.
        campaign_resource_name: The resource name of the target campaign.
    Returns:
        The string resource name of the newly created ad group.
    """
    service = client.get_service("AdGroupService")
    # Build the mutate operation describing the new, enabled ad group.
    operation = client.get_type("AdGroupOperation")
    new_group = operation.create
    new_group.name = "Dynamic remarketing ad group"
    new_group.campaign = campaign_resource_name
    new_group.status = (
        client.get_type("AdGroupStatusEnum").AdGroupStatus.ENABLED
    )
    # Send the mutate request and extract the created resource name.
    response = service.mutate_ad_groups(
        customer_id=customer_id, operations=[operation]
    )
    return response.results[0].resource_name
    # [END add_merchant_center_dynamic_remarketing_campaign_1]
|
773961e01303dd4cb0829feaef5f355094f37fdb
| 69,505
|
from typing import Dict
from typing import Any
def empty_intent() -> Dict[str, Any]:
    """Get intent structure."""
    blank_intent = {"name": "", "confidence": 0}
    return {"text": "", "intent": blank_intent, "entities": []}
|
56911962dff5e2a23f4ba9ca6b02b2ec34a6b76e
| 69,508
|
def int_func(word: str) -> str:
    """
    Returns a word with the first letter capitalized.
    Slicing (`word[:1]`) keeps the empty string safe, where the original
    `word[0]` raised IndexError.  The rest of the word is left unchanged.
    >>> int_func('text')
    'Text'
    >>> int_func('')
    ''
    """
    return word[:1].upper() + word[1:]
|
b32d80e8960471b5364e44c6c6cf78a6b6d1f434
| 69,509
|
def lower_case(doc):
    """Return a list of each token's lowercase form (its `lower_` attribute)."""
    return [token.lower_ for token in doc]
|
25cf40a6e3f39b3306667688508d498181a2fb82
| 69,511
|
def selected_index(view):
    """Return the selected integer `index` (row) in the view.
    If no index is selected return -1
    `view` must be in single selection mode.
    """
    selection = view.selectedIndexes()
    assert len(selection) < 2, "View must be in single selection mode"
    return selection[0].row() if selection else -1
|
62c04dec2a6807103eaa2e4e1e3e4cd8b8a0e26b
| 69,515
|
import re
def yugioh_card_in_string(string, cards_json, card_id_regex, card_name_regex):
    """Given a string, find a yugioh card and return that it.
    An ID match is tried first, then a case-insensitive name match; None is
    returned only when neither regex matches the string at all.
    """
    id_match = re.search(card_id_regex, string)
    if id_match:
        wanted_id = int(id_match.group(0))
        for card in cards_json:
            if card["id"] == wanted_id:
                return card
        assert False, "Should be unreachable"
    name_match = re.search(card_name_regex, string)
    if name_match:
        wanted_name = name_match.group(0).lower()
        for card in cards_json:
            if card["name"].lower() == wanted_name:
                return card
        assert False, "Should be unreachable"
    return None
|
d32c08374bd761f9e1ce257040f49c7a9f5cc3c1
| 69,516
|
from typing import List
def __parseString(array: List, index: int):
    """Parse string at given index in array.
    Return the string, or None when it is empty, the index is out of
    bounds, or the element does not support len()."""
    try:
        value = array[index]
        return value if len(value) else None
    except (ValueError, TypeError, IndexError):
        return None
|
fb303158b7e0ad754032771f45a656a73f7a0a3c
| 69,519
|
import csv
def TsvToData(filePath):
    """Read a .tsv file and return its data and labels, XY split in a tuple.
    Column 0 holds comma-separated integers, column 1 the label string.
    """
    with open(filePath, 'r') as tsv_file:
        rows = list(csv.reader(tsv_file, delimiter='\t'))
    data = [[int(token) for token in row[0].split(',')] for row in rows]
    labels = [row[1] for row in rows]
    return (data, labels)
|
0540bd2bac2b0db89a230283ddd20e6c8109ec5e
| 69,524
|
def vect3_divide(v1, f):
    """
    Divides vector v1 by scalar f.
    v1 (3-tuple): 3d vector
    f (float): Scalar
    return (3-tuple): 3d vector
    """
    x, y, z = v1[0], v1[1], v1[2]
    return (x / f, y / f, z / f)
|
998b7c020cdb387564ae17de697a31736ed5cada
| 69,528
|
def keep_file(filepath):
    """Decide if we keep the filepath, solely by exclusion of end of path
    This is primarily to avoid keeping .pyc files
    >>> keep_file('/foo.pyc')
    False
    """
    # str.endswith accepts a tuple, so one call covers every ignored ending.
    ignored_endings = ('.pyc',)
    return not filepath.endswith(ignored_endings)
|
5f6821aff3cf2344bad4dafe985c6d9503d3d50b
| 69,530
|
def sqrt(number):
    """
    Calculate the floored square root of a number
    Uses binary search over [0, number] in O(log n).  The original
    halving scheme returned wrong answers for most non-square inputs
    (e.g. -1 for 3 and for 5).
    Args:
        number(int): Number to find the floored squared root
    Returns:
        int: Floored Square Root, or None for None/negative input
    """
    # Check None before the comparison: `None < 0` raises TypeError,
    # so the original's `number < 0 or number is None` could never
    # actually reach the None test.
    if number is None or number < 0:
        return None
    low, high = 0, number
    while low <= high:
        mid = (low + high) // 2
        square = mid * mid
        if square == number:
            return mid
        if square < number:
            low = mid + 1
        else:
            high = mid - 1
    # On exit, `high` is the largest value whose square is <= number.
    return high
|
d4235023cfbc56aa59d3709a3039914b1e808fb7
| 69,533
|
def _fonts_header_pointers_size(n, address_size):
    """Returns the size of the pointers to font headers for the given number of fonts."""
    size = n * address_size
    remainder = size % 16
    if remainder:
        # Round up to the next 16-byte boundary (also keeps a multiple of 8).
        size += 16 - remainder
    return size
|
9cb80f92e9a9e999c65b84be50eadace557655b5
| 69,537
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.