content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def gcd(a,b):
    """ Return the greatest common divisor.

    E.g. gcd(8,12)=4

    Iterative Euclid's algorithm; unlike the original recursion it also
    handles b == 0 (gcd(a, 0) == a) instead of raising ZeroDivisionError.
    """
    while b:
        a, b = b, a % b
    return a
def find_middle(arr):
    """
    Gets the middle of an array.

    For odd lengths this is the true middle element; for even lengths it
    is the upper of the two middle elements (index len(arr) // 2).

    :param arr: array
    :return: (middle element, middle index)
    """
    # Both branches of the original reduced to index len(arr) // 2, but the
    # float test `middle % 2 != 0` misclassified lengths that are multiples
    # of 2 but not 4 (e.g. len 6 returned index 2 instead of 3).
    mid = len(arr) // 2
    return arr[mid], mid
def clamp(lower: int, value: int, upper: int) -> int:
    """Clamp *value* to the inclusive range [lower, upper]."""
    bounded_below = max(value, lower)
    return min(bounded_below, upper)
import torch
from typing import get_args
def create_optim(enc, parameters, **kwargs):
    """Initialise optimisers.

    Args:
        enc (string): type of optimiser - either 'SGD' or 'Adam'.
        parameters (iterable): parameters to be optimised.
        **kwargs: candidate optimiser options; keys the chosen optimiser
            does not accept are silently dropped.
    Returns:
        An instance of torch.optim.
    Raises:
        ValueError if enc is not either of 'SGD' or 'Adam'.
    """
    import inspect
    if enc == 'SGD':
        optim = torch.optim.SGD
    elif enc == 'Adam':
        optim = torch.optim.Adam
    else:
        raise ValueError("Optim {} is not supported. "
                         "Only supports 'SGD' and 'Adam' for now.".format(enc))
    # The original called typing.get_args on the optimiser class, which
    # returns () for a plain class and therefore discarded every kwarg.
    # inspect.signature gives the actual accepted parameter names.
    accepted = inspect.signature(optim).parameters
    kwargs = {key: kwargs[key] for key in accepted if key in kwargs}
    return optim(parameters, **kwargs)
def is_valid_vlan(vlan):
    """Return True when *vlan* converts to an integer in 1..4094."""
    try:
        return 0 < int(vlan) < 4095
    except (ValueError, TypeError):
        return False
from typing import Any
from typing import Iterable
from typing import Sequence
from typing import Mapping
def seq(x: Any) -> Iterable:
    """Transforms x into an iterable sequence. Maps are
    transformed into iterations of (k,v) pairs.

    Note: strings are Sequences and so pass through unchanged.

    Raises:
        TypeError: when x is neither a Sequence nor a Mapping. The
        original used ``assert False``, which vanishes under ``-O``.
    """
    if isinstance(x, Sequence):
        return x
    elif isinstance(x, Mapping):
        return x.items()
    else:
        raise TypeError(f"cannot interpret {type(x).__name__!r} as a sequence")
def e_matrix(distance_matrix):
    """Compute the E matrix from a distance matrix.

    Elementwise: -(d^2)/2, per Eq. 9.20 in Legendre & Legendre 1998.
    """
    return -0.5 * distance_matrix * distance_matrix
def mac_byte_mask(mask_bytes=0):
    """Return a MAC address mask string with the first *mask_bytes* octets
    set to ff and the remainder to 00."""
    assert mask_bytes <= 6
    octets = ['ff'] * mask_bytes + ['00'] * (6 - mask_bytes)
    return ':'.join(octets)
def tcl_delta_filename(curef, fvver, tvver, filename, original=True):
    """
    Generate compatible filenames for deltas, if needed.

    :param curef: PRD of the phone variant to check.
    :type curef: str
    :param fvver: Initial software version.
    :type fvver: str
    :param tvver: Target software version.
    :type tvver: str
    :param filename: File name from download URL, passed through if not changing filename.
    :type filename: str
    :param original: If we'll download the file with its original filename. Default is True.
    :type original: bool
    """
    if original:
        return filename
    prdver = curef.split("-")[1]
    return "JSU_PRD-{0}-{1}to{2}.zip".format(prdver, fvver, tvver)
def cleanForIRI(string):
    """Clean a string for use as an IRI: keep alphanumerics plus the
    unreserved punctuation - . _ ~ and drop everything else."""
    unreserved = {"-", ".", "_", "~"}
    return "".join(ch for ch in string if ch.isalnum() or ch in unreserved)
def line_edit_val(le_wg, default_val):
    """Get a QLineEdit field value as a float.

    Parameters
    ----------
    le_wg : QtGui.QLineEdit
        QtGui.QLineEdit widget
    default_val : float
        Value returned when the widget text is not a valid float

    Returns
    -------
    float
        the parsed value, or default_val
    """
    raw_text = le_wg.text()
    try:
        return float(raw_text)
    except ValueError:
        return default_val
from typing import List
from typing import Iterator
import itertools
def flatten(x: List[List]) -> Iterator:
    """
    Lazily flatten a list of lists by one level.

    Args:
        x: List of lists of elements
    Yields:
        Each element of each sub-list, in order.
    """
    for sub in x:
        yield from sub
def _retrieve_bucket_name(bucket_url_or_name: str) -> str:
"""Returns bucket name retrieved from URL.
Args:
bucket_url_or_name: The name or url of the storage bucket.
"""
return bucket_url_or_name.split('/')[-1] | 74f57752c4bdc175cdd42aea2bfa2f67984da0a3 | 98,661 |
def like_cdid(xycell):
    """
    Return True when the cell's .value _appears_ to be a CDID code: a
    four-character upper-cased alphanumeric string.
    """
    value = xycell.value
    return (isinstance(value, str)
            and len(value) == 4
            and value.isalnum()
            and value.isupper())
import unicodedata
def normalise(s):
    # type: (str) -> str
    """Return the NFKD (compatibility-decomposed) form of *s*."""
    return unicodedata.normalize("NFKD", s)
def get_parent_resource(project):
    """Return the parent resource path for *project*."""
    return 'projects/{}'.format(project)
def posofend(str1, str2):
    """Return the position immediately _after_ the end of the first
    occurrence of str2 in str1, or -1 when str2 is absent.
    """
    start = str1.find(str2)
    if start == -1:
        return -1
    return start + len(str2)
def max_norm() -> float:
    """Gradient clipping max norm."""
    clip_norm = 1.0
    return clip_norm
def _HasDefaultRepr(obj):
"""Returns True if obj has default __repr__ and __str__ methods."""
try:
d = obj.__class__.__dict__
return '__str__' not in d and '__repr__' not in d
except AttributeError:
return False | 34f8515f4c5d25da45fa849aa1fc90beb831dfb7 | 98,680 |
import json
import codecs
def parse_json(filename):
    """Parse data from filename and return a list of boats.

    The country code is the first three characters of the third '/'
    component of the path; it is stamped onto every entry. Returns []
    (after printing a diagnostic) when the file is not valid JSON.
    """
    country = filename.split("/")[2][0:3]
    try:
        rms = json.load(codecs.open(filename, "r", "utf-8-sig"), strict=False)
    except json.decoder.JSONDecodeError as e:
        # Report the actual file name; the original printed a hard-coded
        # "(unknown)" placeholder, making the diagnostic useless.
        print(f"Error parsing file: {filename}, error: {e}")
        return []
    data = rms["rms"]
    for item in data:
        item["country"] = country
    return data
def contagem(pai,sub_pergaminho):
    """
    Count how many entries of sub_pergaminho contain pai.

    :param pai: value searched for via the ``in`` operator
    :param sub_pergaminho: sequence of entries to scan
    :return: int number of entries containing pai
    """
    return sum(1 for trecho in sub_pergaminho if pai in trecho)
def _get_ordered_params(link):
"""Get a list of parameters sorted by parameter names."""
name_param_pairs = list(link.namedparams())
ordered_name_param_pairs = sorted(name_param_pairs, key=lambda x: x[0])
return [x[1] for x in ordered_name_param_pairs] | ffb42541b005c048f7bcb507bb4733712e2db5ac | 98,684 |
def pop(interp, stackname):
    """
    POP stackname

    Removes and returns the most recently PUSHed member of the stack
    stored in the variable named ``stackname``.
    """
    stack = interp.get_variable(stackname)
    return stack.pop()
def conv_outdim(i_dim, k, padding=0, stride=1, dilation=1):
    """Return the output size along one axis after a convolution."""
    effective_kernel = dilation * (k - 1) + 1
    return int((i_dim + 2 * padding - effective_kernel) / stride + 1)
def get_problem_name(base_name, was_reversed=False, was_copy=False):
    """Construct a problem name from base and reversed/copy options.

    Inverse of `parse_problem_name`.

    Args:
        base_name: base problem name. Should not end in "_rev" or "_copy"
        was_reversed: if the problem is to be reversed
        was_copy: if the problem is to be copied
    Returns:
        string name consistent with use with `parse_problem_name`.
    Raises:
        ValueError if `base_name` ends with "_rev" or "_copy"
    """
    for suffix in ("_rev", "_copy"):
        if base_name.endswith(suffix):
            raise ValueError("`base_name` cannot end in '_rev' or '_copy'")
    name = base_name
    if was_copy:
        name += "_copy"
    if was_reversed:
        name += "_rev"
    return name
def remove_namespace_from_string(name):
    """
    Removes namespace from given string. Does not matter if the given name is a short or long one
    :param name: str
    :return: str
    """
    # str.split always returns at least one element (''.split('.') == ['']),
    # so the original's empty-list guard was unreachable; taking the last
    # component alone reproduces its behavior for every input.
    return name.split('.')[-1]
import math
def sieve(pos, limit=5000000):
    """Return the pos-th prime (1-indexed) below ``limit``.

    Sieve of Eratosthenes. ``limit`` keeps the original arbitrary default
    but is now a parameter so small calls stay cheap.

    Fixes two defects in the original:
    * the flag list was one element shorter than ``limit`` (IndexError
      risk, and the value ``limit - 1`` was silently dropped);
    * the outer loop stopped short of sqrt(limit), so perfect squares
      such as 25 were reported as prime.
    """
    flags = [True] * limit
    flags[0] = flags[1] = False
    for candidate in range(2, math.isqrt(limit - 1) + 1):
        if flags[candidate]:
            for multiple in range(candidate * candidate, limit, candidate):
                flags[multiple] = False
    return [n for n, is_prime in enumerate(flags) if is_prime][pos - 1]
def snspectralist(fname, logffname=None):
    """
    List all the spectra files associated with a phosim instance catalog:
    the sixth whitespace token of every line mentioning 'spectra_file'.
    """
    spectra = []
    with open(fname, 'r') as catalog:
        for line in catalog:
            if 'spectra_file' not in line:
                continue
            spectra.append(line.split()[5])
    return spectra
def transpose_to_new_key(measures, key):
    """
    Translate all notes from their current key into the new key.

    Args:
        measures (List[List[AnalyzedNote]]): Song notes grouped by measure
        key (music21.key.Key): The key signature context.
    Returns:
        List[List[AnalyzedNote]]: transposed notes grouped by their
        corresponding measures.
    """
    return [[note.in_new_key(key) for note in measure] for measure in measures]
import re
def cpu_share_to_cores(share):
    """
    Convert a CPU share to a fraction of cores.

    Plain numbers pass through ("2" -> 2.0); an "m"/"M" suffix means
    millicores ("500m" -> 0.5).

    Raises:
        ValueError: for an unrecognised unit suffix. The original
        silently fell off the end and returned None in that case.
    """
    text = str(share)
    number = float(re.findall(r"[0-9\.]+|$", text)[0])
    unit = re.findall(r"[a-zA-Z]+|$", text)[0]
    if unit == "":
        return number
    if unit.lower() == "m":
        return number / 1000.0
    raise ValueError(f"Unrecognised CPU share unit: {unit!r}")
def get_suit_of_card(position_of_card, deck):
    """Return the suit name of the card at the given position in the deck.

    Each card is indexed as deck[pos][0] == suit integer 0-3. The original
    only handled 0 and 1 and implicitly returned None for the other suits.
    """
    # NOTE(review): 2 -> Diamonds / 3 -> Clubs follows the common
    # Spades/Hearts/Diamonds/Clubs ordering; confirm against the deck
    # builder if a different encoding is used.
    suit_names = {0: "Spades", 1: "Hearts", 2: "Diamonds", 3: "Clubs"}
    return suit_names.get(deck[position_of_card][0])
import json
def load_dict(file_name):
    """Load and return a dictionary stored as JSON in *file_name*."""
    with open(file_name) as json_file:
        return json.load(json_file)
def dist(v1, v2):
    """
    Root-mean-squared-error between vector 1 and vector 2.
    """
    n = len(v1)
    total = 0.0
    for i in range(n):
        diff = v1[i] - v2[i]
        total += diff * diff
    return (total / n) ** 0.5
def parse_llil(context, llil_list):
    """ Render a list of LLIL instruction records as cleanly formatted
    display text for the GUI; 'None' stands in for missing entries or an
    empty list. """
    if not llil_list:
        return 'None'
    rendered_lines = []
    for llil in llil_list:
        if llil is None:
            rendered_lines.append('None')
            continue
        # Prefer dereferenced tokens when the instruction provides them.
        tokens = llil.deref_tokens if hasattr(llil, 'deref_tokens') else llil.tokens
        body = ''.join(context.escape(str(token)) for token in tokens)
        rendered_lines.append("{}: ".format(llil.instr_index) + body)
    return context.newline.join(rendered_lines).strip(context.newline)
def _get_inter_grid(hw_isol, r_coarse, ngrid, ncell_grad=2):
"""
Find the half-width and r_fine for an intermediate grid that guarantees
that particles within the central hw_isol box will *not* be ghosts of the
particles outside the box (i.e. they are completely excluded from that
calculation)
hw_isol - half-width from centre which is excluded from outer region
r_coarse - force-splitting scale of the coarse grid
ngrid - number of grid cells (per dimension)
[ncell_grad=2] - 5 point gradient uses 2 cells either side, so we need this
buffer to avoid periodicity
returns box_hw, dx, r_B
box_hw - half width of box
dx - size of single cell
r_B - hw in which force is calculated via this grid (outside is ghosts)
Illustration of one corner of the intermediate force grid:
-->| hw_isol
---------->| hw_isol + r_coarse
------------------>| hw_isol + 2*r_coarse
-------------------->| box_hw := hw_isol + 2*r_coarse + ncell_grad * dx / 2
A A B B B B C C C C .
A A B B B B C C C C .
B B B B B B C C C C .
B B B B B B C C C C .
B B B B B B C C C C .
B B B B B B C C C C .
C C C C C C C C C C .
C C C C C C C C C . .
C C C C C C C C . . .
C C C C C C C . . . .
. . . . . . . . . . .
where dx is given by box_hw / (0.5 * ngrid)
Zone . - empty cells just there as a buffer (for 5-point gradient)
Zone C - contains 'ghost' particles for correct force on B, but whose force calculated elsewhere
Zone B - Forces calculated here, but are also the ghosts for particles in C
Zone A - Forces calculated here and only here
"""
ncell_grad = 2
r_A = hw_isol # particles which will *only* have force calculations from short+med+long
r_B = r_A + r_coarse # Zone B - these particles have short+med+long forces, but they are also the ghosts of particles in C, so they will be used outside too
r_C = r_B + r_coarse # Zone C - these particles are just here as 'ghosts' to get the force on B correct
hw = ngrid * r_C / (ngrid - ncell_grad) # need an extra buffer to account for periodicity when doing displacement
dx = 2 * hw / ngrid
if hw>=0.5:
raise Exception('Isolated box %.3f times bigger than periodic box (should be smaller)'%(hw*2))
return hw, dx, r_B | bf7b790948e071bb04ba4b1ab352f1608a8e85d9 | 98,720 |
def heaviside(x):
    """Heaviside step function: 0 for x < 0, 0.5 at x == 0, 1 for x > 0."""
    if x == 0:
        return 0.5
    return 1. if x > 0 else 0.
def _get_hostname(url):
"""
Find the hostname in a url.
Assume url can take these forms. The () means optional.:
1: (http(s)://)hostname
2: (http(s)://)hostname:port
3: (http(s)://)hostname:port/path
"""
if url.find('http://') == 0:
url = url.replace('http://', '')
if url.find('https://') == 0:
url = url.replace('https://', '')
hostname = url.split('/')[0]
#is a user-defined port specified?
port_parts = url.split(':')
if len(port_parts) > 1:
hostname = port_parts[0]
return hostname | 2b28742df9e6a45fb450424dae7a812d5f901b01 | 98,727 |
import math
def get_vehicle_max_steering_angle(vehicle_info):
    """
    Get the maximum steering angle of a carla vehicle

    :param vehicle_info: the vehicle info
    :type vehicle_info: carla_ros_bridge.CarlaEgoVehicleInfo
    :return: maximum steering angle [radians]
    :rtype: float64
    """
    # 70 degrees is the default max steering angle of a car
    max_steering_angle = math.radians(70)
    # get max steering angle (use smallest non-zero value of all wheels);
    # the original tested wheel.steer_angle twice in nested ifs.
    for wheel in vehicle_info.wheels:
        if wheel.steer_angle and wheel.steer_angle < max_steering_angle:
            max_steering_angle = wheel.steer_angle
    return max_steering_angle
def find_terminals(grammar):
    """
    For a given grammar, return a set of the terminal symbols: every
    symbol on a right-hand side that is not itself a production key.

    :param grammar: The grammar (mapping of non-terminal -> productions).
    :return: set of terminal symbols.
    """
    return {
        symbol
        for productions in grammar.values()
        for alternative in productions
        for symbol in alternative
        if symbol not in grammar
    }
def cygwin_to_windows_path(path):
    """Turn /cygdrive/c/foo into c:/foo; non-cygwin paths pass through
    unchanged.
    """
    prefix = '/cygdrive/'
    if not path.startswith(prefix):
        return path
    rest = path[len(prefix):]
    return rest[:1] + ':' + rest[1:]
def trailing_zero_bits(i):
    """Returns the number of trailing zero bits in i.

    Raises:
        ValueError: if i == 0 (the original spun forever on that input).
    """
    if not i:
        raise ValueError("trailing_zero_bits is undefined for 0")
    # i & -i isolates the lowest set bit; its bit_length gives the
    # 1-based position of that bit, hence the count of zeros below it.
    return (i & -i).bit_length() - 1
import signal
def translate_exit_code(exit_code):
    """ @brief Check exit code and convert it to a human readable string
        @param exit_code The exit code from a program
        @return tuple of (human readable description, success flag).
                None means timeout (treated as success), 0 is success,
                negative codes name the terminating signal, positive
                codes are ordinary failures.
    """
    if exit_code is None:
        return 'Process timed out', True
    if exit_code == 0:
        return 'Process exited with code 0', True
    if exit_code < 0:
        signal_name = signal.Signals(abs(exit_code)).name
        return 'Process exited with Signal ' + signal_name, False
    return 'Process exited with a non-zero exit code ' + str(exit_code), False
from typing import Iterable
def get_components_of_message(data: str) -> Iterable[str]:
    """Split a comma-separated packet into its three components:
    (origin, destination, message)."""
    origin, destination, message = data.split(',')
    return origin, destination, message
def flatten_list(arg) -> list:
    """Flatten one level of nesting: list items are spliced in, every
    other item is kept as-is. (Only the top level is flattened, not
    deeper nesting.)"""
    flattened = []
    for item in arg:
        if isinstance(item, list):
            flattened += item
        else:
            flattened += [item]
    return flattened
def to_int(value: str) -> int:
    """Parse *value* as a base-10 integer."""
    return int(value)
def tag_if(self, condition, name):
    """
    Tag the current state of the stack when *condition* is truthy;
    otherwise return a fresh object wrapping the current solid unchanged.

    :param condition: decides whether the tagging operation runs.
    :param name: the name to use for the tagging.
    """
    return self.tag(name) if condition else self.newObject([self.findSolid()])
def services_by_user(account, days_back, user_arn, user_name):
    """ Returns query for eventsource (service) / actions performed by user.

    Returns a (query_string, results_key) tuple: an Athena SQL query over
    the `behold` CloudTrail table plus the S3 key prefix where the
    results should land.

    NOTE(review): account/user_arn/days_back are interpolated straight
    into the SQL literal, so this assumes trusted (non-user-supplied)
    inputs -- confirm at the call sites.
    """
    # The literal's embedded newlines/indentation are part of the query text.
    query_string = f"""SELECT DISTINCT eventsource, eventname FROM behold
    WHERE account = '{account}'
    AND (useridentity.arn = '{user_arn}')
    AND from_iso8601_timestamp(eventtime) > date_add('day', -{days_back}, now())
    ORDER BY eventsource, eventname;"""
    return (query_string, f"athena_results/services_by_user/{account}/{user_name}")
from typing import Dict
from typing import Any
def is_existing_emulator_provided(launcher_args: Dict[str, Any]) -> bool:
    """Returns True when the adb, emulator-console and grpc ports are all
    present and truthy in *launcher_args*."""
    required_keys = ('adb_port', 'emulator_console_port', 'grpc_port')
    return all(launcher_args.get(key) for key in required_keys)
def simpleplanet(armageddon):
    """
    Build a simplified planet (g=0, Cl=0, alpha=0, exponential atmosphere)
    used to compare the solver against the analytical solution and
    against solve_ivp (Scipy's solver).
    """
    config = dict(atmos_func='exponential', Cd=1., Ch=0.1, Q=1e7, Cl=0,
                  alpha=0, Rp=1e10, g=0, H=8000., rho0=1.2)
    return armageddon.Planet(**config)
def chunker(seq, size):
    """
    Lazily yield consecutive slices of *seq* containing at most *size*
    elements each.

    :param seq: any sliceable sequence
    :param size: maximum chunk length
    """
    for start in range(0, len(seq), size):
        yield seq[start:start + size]
def pg_array_escape(tok):
    """
    Escape a token for inclusion in a Postgres array literal:
    backslashes and double-quotes are escaped, then the whole token is
    wrapped in double quotes.
    """
    escaped = str(tok).replace('\\', '\\\\').replace('"', '\\\\"')
    return '"{}"'.format(escaped)
def get_sec(time_str):
    """Convert an H:M:S string to seconds (seconds may be fractional)."""
    hours, minutes, seconds = time_str.split(':')
    return 3600 * int(hours) + 60 * int(minutes) + float(seconds)
import math
def direction(a, b) -> float:
    """Return the angle (degrees) from point a toward point b, with the
    y axis inverted (screen coordinates: y grows downward)."""
    run = b[0] - a[0]
    rise = a[1] - b[1]
    return math.degrees(math.atan2(rise, run))
def _get_arg(key, from_kwargs, default):
"""Remove value from kwargs and return it, else return default value"""
if key in from_kwargs:
arg = from_kwargs[key]
del from_kwargs[key]
return arg
return default | 8bf388b507158fddc3e9d1e605b20e374b3f00b5 | 98,799 |
def index_of_in(a, b):
    """
    For each element of b, the index of its first occurrence in a,
    or -1 when absent.

    a = [1, 2, 3, 4]
    b = [2, 4, 5, 6]
    index_of_in(a, b) == [1, 3, -1, -1]
    """
    haystack = list(a)

    def first_index(item):
        return haystack.index(item) if item in haystack else -1

    return [first_index(item) for item in b]
def convert_minutes_to_mins_secs(minutes_in):
    """Convert a (possibly fractional) minute count to an MM:SS string."""
    whole_minutes = int(minutes_in)
    leftover_seconds = int((minutes_in - whole_minutes) * 60)
    return "{:0>2d}:{:0>2d}".format(whole_minutes, leftover_seconds)
def searchMovieByTitle(movies, title):
    """
    Return the first movie whose title contains *title* as a
    case-insensitive substring, or None when nothing matches. Keeping it
    simple, as this is just a demo!
    """
    needle = title.lower()
    for movie in movies.values():
        if needle in movie['title'].lower():
            return movie
    return None
def encode(s_plaintext: str, bit_length: int) -> list[int]:
"""Encodes bytes to integers mod p.
Example
if n = 24, k = n / 8 = 3
z[0] = (summation from i = 0 to i = k)m[i]*(2^(8*i))
where m[i] is the ith message byte
Args:
s_plaintext: String text to be encoded
bit_length: bit length of the prime number
Returns:
A list of encoded integers
"""
byte_array = bytearray(s_plaintext, 'utf-16')
# z is the array of integers mod p
z = []
# each encoded integer will be a linear combination of k message bytes
# k must be the number of bits in the prime divided by 8 because each
# message byte is 8 bits long
k = bit_length // 8
# j marks the jth encoded integer
# j will start at 0 but make it -k because j will be incremented during first iteration
j = -1 * k
# num is the summation of the message bytes
# num = 0
# i iterates through byte array
for idx in range(len(byte_array)):
# if i is divisible by k, start a new encoded integer
if idx % k == 0:
j += k
# num = 0
z.append(0)
# add the byte multiplied by 2 raised to a multiple of 8
z[j // k] += byte_array[idx] * (2 ** (8 * (idx % k)))
return z | 8a9b6cee5669742afa641ae8503ff2aa6788ae0f | 98,815 |
def with_metaclass(mcls):
    """ 2.7 and 3.5+ compatible metaclass decorator.

    Python 2 uses a __metaclass__ class attribute while Python 3 uses a
    class-level keyword argument, so the portable route is to rebuild the
    decorated class by calling the metaclass directly.
    """
    # http://stackoverflow.com/questions/22409430/portable-meta-class-between-python2-and-python3
    def decorator(cls):
        body = vars(cls).copy()
        # clean out class body: these slots are auto-created per class and
        # must not be fed back into the metaclass constructor
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        return mcls(cls.__name__, cls.__bases__, body)
    return decorator
def _profile_tag_from_conditions(conditions):
"""
Given a list of conditions, return the profile tag of the
device rule if there is one
"""
for c in conditions:
if c['kind'] == 'device':
return c['profile_tag']
return None | ca9a640ecfe52714ba901eeaf952e7c18a5a4210 | 98,819 |
def read_lines_from_tar_file(tar_file):
    """
    Read the (file-like) tar member, decode it as UTF-8 and return its
    lines.
    """
    return tar_file.read().decode('utf-8').splitlines()
def revrange(a,b):
    """
    Returns the tuple (b-1, b-2, ..., a).

    Note that this tuple is the reverse of tuple(range(a,b)).

    Parameter a: the "start" of the range
    Precondition: a is an int <= b

    Parameter b: the "end" of the range
    Precondition: b is an int >= a
    """
    assert type(a) == int and a <= b
    assert type(b) == int
    # The original built the forward range, reversed it, then re-selected
    # the result through a loop that never changed anything; a descending
    # range expresses the same tuple directly (empty when a == b).
    return tuple(range(b - 1, a - 1, -1))
def list_between(begin, end, r):
    """Returns the list of players from begin to end (inclusive),
    wrapping around past r.nPlayers when end precedes begin."""
    if begin > end:
        return list(range(begin, r.nPlayers)) + list(range(end + 1))
    return list(range(begin, end + 1))
import pickle
def load_exp(fname):
    """
    Load a pickled experiment from the specified file.
    """
    with open(fname, 'rb') as handle:
        return pickle.load(handle)
def list_bucket_with_prefix(s3_client, bucket, prefix):
    """ Paginated listing of contents within an S3 bucket prefix.

    Loops over list_objects_v2 pages via ContinuationToken until the
    service stops returning NextContinuationToken, accumulating all keys.

    NOTE(review): response['Contents'] is accessed unconditionally; a page
    with no contents (e.g. an empty prefix) would raise KeyError here --
    confirm that callers only use prefixes known to be non-empty.
    """
    token = None
    more_keys = True
    keys = []
    while more_keys:
        kwargs = {
            'Bucket': bucket,
            'Prefix': prefix,
        }
        if token is not None:
            kwargs['ContinuationToken'] = token
        response = s3_client.list_objects_v2(
            **kwargs
        )
        # A missing NextContinuationToken marks the final page.
        token = response.get('NextContinuationToken', None)
        more_keys = (token is not None)
        keys.extend([content['Key'] for content in response['Contents']])
    return keys
def naive_pass_if_low(player, lowest_to_keep):
    """Strategy logic: pass (True) only when the player's current card
    ranks below the threshold; keep it (False) otherwise.

    Arguments:
        player -- self-reference to the player using the strategy
        lowest_to_keep {int} -- lowest rank value worth keeping
    """
    return player.current_card.rank.value < lowest_to_keep
def calc_tree_depth(n_features, max_depth=5):
    """Calculate tree depth.

    Args:
        n_features (int): Number of features
        max_depth (int, optional): Cap on the returned depth. Defaults to 5.
            (An earlier docstring claimed 15, contradicting the signature.)
    Returns:
        int: Final depth tree, capped at max_depth
    """
    # Designed using balls-in-bins probability. See the paper for details.
    m = float(n_features)
    depth = 0
    expected_empty = m  # the number of unique attributes not selected so far
    while expected_empty > m / 2:  # repeat until we have less than half the attributes being empty
        expected_empty = m * ((m - 1) / m) ** depth
        depth += 1
    # the above was only for half the numerical attributes. now add half the categorical attributes
    return min(max_depth, depth)
def k_half_mode(alpha, beta, gamma):
    """Return the wavenumber at which the transfer function is
    suppressed by a factor of one half."""
    half = 0.5
    return 1. / alpha * (half ** (-1. / gamma) - 1.) ** (1. / beta)
import logging
def _setup_logger() -> logging.Logger:
    """Setup logging.

    Returns a module-named logger that writes INFO-and-above records to
    stderr with a timestamped format; propagation is disabled so ancestor
    loggers do not duplicate the output.

    NOTE(review): each call attaches a new StreamHandler to the same named
    logger, so calling this more than once duplicates every record --
    confirm it is invoked only once per process.
    """
    log_format = '[%(asctime)s][%(levelname)s] %(message)s'
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(log_format))
    stream_handler.setLevel(logging.INFO)
    custom_logger = logging.getLogger(__name__)
    custom_logger.addHandler(stream_handler)
    custom_logger.setLevel(logging.INFO)
    custom_logger.propagate = False
    return custom_logger
import re
def is_ok_words(text: str) -> bool:
    """Check whether anything other than allowed redaction words remains.

    :param text: A string to check
    :returns: True when residual (non-allowed) text remains after
        stripping the allowed words; False when the text consists only
        of allowed words/whitespace.
    """
    text = " ".join(text.strip().split())
    # Alternatives must be ordered longest-first: regex alternation is
    # first-match, so the original's leading "re" consumed the start of
    # "redacted" and left "dacted" behind; its trailing "|" also created
    # an always-matching empty alternative.
    allowed = (
        "redacted +and +publicly +filed",
        "name +redacted",
        "confidential",
        "privileged?",
        "redacted",
        "redacte",
        "redact",
        "redac",
        "reda",
        "red",
        "re",
    )
    text = re.sub(
        "|".join(allowed),
        "",
        text,
        flags=re.IGNORECASE | re.MULTILINE,
    )
    # Ignore whitespace left between removed words.
    return len(text.strip()) > 0
from typing import Sequence
from typing import Mapping
def _build_query_to_hit_index_mapping(
    hit_query_sequence: str,
    hit_sequence: str,
    indices_hit: Sequence[int],
    indices_query: Sequence[int],
    original_query_sequence: str) -> Mapping[int, int]:
  """Gets mapping from indices in original query sequence to indices in the hit.

  hit_query_sequence and hit_sequence are two aligned sequences containing gap
  characters. hit_query_sequence contains only the part of the original query
  sequence that matched the hit. When interpreting the indices from the .hhr, we
  need to correct for this to recover a mapping from original query sequence to
  the hit sequence.

  Args:
    hit_query_sequence: The portion of the query sequence that is in the .hhr
      hit
    hit_sequence: The portion of the hit sequence that is in the .hhr
    indices_hit: The indices for each aminoacid relative to the hit sequence
    indices_query: The indices for each aminoacid relative to the original query
      sequence
    original_query_sequence: String describing the original query sequence.

  Returns:
    Dictionary with indices in the original query sequence as keys and indices
    in the hit sequence as values.
  """
  # If the hit is empty (no aligned residues), return empty mapping
  if not hit_query_sequence:
    return {}
  # Remove gaps and find the offset of hit.query relative to original query.
  hhsearch_query_sequence = hit_query_sequence.replace('-', '')
  hit_sequence = hit_sequence.replace('-', '')
  hhsearch_query_offset = original_query_sequence.find(hhsearch_query_sequence)
  # Index of -1 used for gap characters. Subtract the min index ignoring gaps,
  # so both index lists become zero-based within the aligned region.
  min_idx = min(x for x in indices_hit if x > -1)
  fixed_indices_hit = [
      x - min_idx if x > -1 else -1 for x in indices_hit
  ]
  min_idx = min(x for x in indices_query if x > -1)
  fixed_indices_query = [x - min_idx if x > -1 else -1 for x in indices_query]
  # Zip the corrected indices, ignore case where both seqs have gap characters.
  mapping = {}
  for q_i, q_t in zip(fixed_indices_query, fixed_indices_hit):
    if q_t != -1 and q_i != -1:
      # Skip pairs whose corrected positions fall outside either sequence.
      if (q_t >= len(hit_sequence) or
          q_i + hhsearch_query_offset >= len(original_query_sequence)):
        continue
      mapping[q_i + hhsearch_query_offset] = q_t
  return mapping
import torch
def indices_to_dense_vector(indices, size, indices_value=1., default_value=0):
    """
    Build a dense float 1-D tensor of length *size* filled with
    *default_value*, with the positions in *indices* set to *indices_value*.
    This mirrors tf.sparse_to_dense but is safe for unordered indices and
    dynamic sizes.

    :param indices: 1d Tensor of integer positions to overwrite.
    :param size: scalar (integer) length of the output tensor.
    :param indices_value: value written at the given indices.
    :param default_value: value written everywhere else.
    :return: dense float Tensor of shape [size].
    """
    result = torch.full((size,), float(default_value))
    result[indices] = indices_value
    return result
import glob
def LabelPaths(label, src_suffix):
  """Compute single proto file source/destination paths from a Bazel proto label.

  Args:
    label: Bazel source proto label string; must start with '@envoy_api//'.
    src_suffix: suffix string to append to source path.

  Returns:
    source, destination path tuple. The source indicates where in the Bazel
    cache the protoxform.py artifact with src_suffix can be found. The
    destination is a provisional path in the Envoy source tree for copying the
    contents of source when run in fix mode.

  NOTE(review): relies on at least one matching artifact existing under
  bazel-bin (glob()[0] raises IndexError otherwise) -- assumed to run only
  after the corresponding Bazel aspect has produced its outputs.
  """
  assert (label.startswith('@envoy_api//'))
  # '@envoy_api//pkg:name' -> 'pkg/name'
  proto_file_canonical = label[len('@envoy_api//'):].replace(':', '/')
  # We use ** glob matching here to deal with the fact that we have something
  # like
  # bazel-bin/external/envoy_api/envoy/admin/v2alpha/pkg/envoy/admin/v2alpha/certs.proto.proto
  # and we don't want to have to do a nested loop and slow bazel query to
  # recover the canonical package part of the path.
  # While we may have reformatted the file multiple times due to the transitive
  # dependencies in the aspect above, they all look the same. So, just pick an
  # arbitrary match and we're done.
  glob_pattern = 'bazel-bin/external/envoy_api/**/%s.%s' % (proto_file_canonical, src_suffix)
  src = glob.glob(glob_pattern, recursive=True)[0]
  dst = 'api/%s' % proto_file_canonical
  return src, dst
def OneLett_to_ThrLett(resi, cap = 'standard', suppress_alert = True):
    """
    Usage: OneLett_to_ThrLett(resi, cap = 'standard',
                              suppress_alert = True)

    Convert a one-letter amino acid code to the three-letter code.

    :param resi: one-letter amino acid code (uppercase).
    :param cap: 'standard' capitalizes only the first letter; 'all'
        capitalizes all letters (as in a PDB file). Any other value is
        ignored with a printed warning.
    :param suppress_alert: if False, print a message when the one-letter
        code is not recognized.
    :return: three-letter code, or ``resi`` unchanged if not recognized.
    """
    # NOTE(review): 'C' maps to 'Cyx' (not the conventional 'Cys') —
    # presumably the AMBER name for disulfide-bonded cysteine; kept as-is.
    one_to_three = {
        'A': 'Ala', 'C': 'Cyx', 'D': 'Asp', 'E': 'Glu', 'F': 'Phe',
        'G': 'Gly', 'H': 'His', 'I': 'Ile', 'K': 'Lys', 'L': 'Leu',
        'M': 'Met', 'N': 'Asn', 'P': 'Pro', 'Q': 'Gln', 'R': 'Arg',
        'S': 'Ser', 'T': 'Thr', 'V': 'Val', 'W': 'Trp', 'Y': 'Tyr',
    }
    if resi in one_to_three:
        res3 = one_to_three[resi]
    else:
        if not suppress_alert:
            print(resi, "not recognized as residue. Returning", resi)
        res3 = resi
    if cap == 'all':
        res3 = res3.upper()
    elif cap != 'standard':
        print('Ignoring invalid option for cap:', cap)
    return res3
def depth_sw_interface_Glover1959(x, Q, K, rho_f, rho_s):
    """
    Depth of the fresh-salt water interface in a coastal aquifer,
    following Glover (1959).

    :param x: distance inland from the shoreline.
    :param Q: freshwater outflow per unit length of coastline.
    :param K: hydraulic conductivity.
    :param rho_f: freshwater density.
    :param rho_s: saltwater density.
    :return: interface depth at distance x.
    """
    # Relative density difference between salt and fresh water.
    ratio = (rho_s - rho_f) / rho_f
    depth_squared = 2 * Q / (ratio * K) * x + Q**2 / (ratio**2 * K**2)
    return depth_squared**0.5
def biot(h, d, k):
    """
    Dimensionless Biot number.

    .. math:: Bi = \\frac{h\\, d}{k}

    Parameters
    ----------
    h : float
        Convective heat transfer coefficient [W/(m²⋅K)]
    d : float
        Characteristic length or dimension [m]
    k : float
        Thermal conductivity [W/(m⋅K)]

    Returns
    -------
    bi : float
        Biot number [-]

    Example
    -------
    >>> biot(4.63, 0.001, 3.84)
    0.0012057

    References
    ----------
    Daizo Kunii and Octave Levenspiel. Fluidization Engineering.
    Butterworth-Heinemann, 2nd edition, 1991.
    """
    return (h * d) / k
def clean_null_values(dic):
    """Return a copy of ``dic`` without the entries whose value is None."""
    cleaned = {}
    for key, value in dic.items():
        if value is not None:
            cleaned[key] = value
    return cleaned
import re
def clean_sheet_dir_name(name):
    """
    Clean up / sanitize sample sheet directory name.

    :param name: String
    :return: String
    """
    # Keep only word characters, whitespace and hyphens, trim the ends,
    # then collapse each whitespace run into a single underscore.
    kept = re.sub(r'[^\w\s-]', '', name).strip()
    return re.sub(r'[\s]+', '_', kept)
def _bbox_crop(image, bbox):
    """Crop PIL.Image according to bbox.

    Args:
        image(PIL.Image): image to crop
        bbox(iterable): iterable with 4 elements — presumably
            (x, y, width, height); TODO confirm against callers.

    Returns:
        Cropped image.
    """
    img_w, img_h = image.size
    # Half-size of the crop window: 75% of the larger bbox dimension.
    half = int(max(bbox[2], bbox[3]) * 0.75)
    # Center of the bounding box.
    cx = int((2 * bbox[0] + bbox[2]) / 2)
    cy = int((2 * bbox[1] + bbox[3]) / 2)
    # Clamp the crop window to the image bounds.
    left = max(0, cx - half)
    right = min(img_w, cx + half)
    top = max(0, cy - half)
    bottom = min(img_h, cy + half)
    return image.crop([left, top, right, bottom])
def sma(prices, n=20):
    """ Calculate simple moving average of one stock using rolling mean

    Params:
        @prices: price data of stock with dates as index
        @n: number of days in smoothing period (typically 20)
    Return:
        @moving_avg: Moving average price of the stock
    """
    # The first n-1 entries are NaN, as the window is not yet full there.
    return prices.rolling(window=n).mean()
import math
def _NextHighestSquare(n):
"""Takes an integer and returns the next highest square number.
Args:
n (int): Any integer.
Returns:
(int): The nearest square number larger than n. For example, input 1 returns
4, input 4 returns 9, input 10 returns 16.
"""
return int(math.pow(math.floor(math.sqrt(n) + 1), 2)) | afec8c707e811b07e2d9e5d3fc2a8d22e25bbce4 | 98,890 |
def get_schema_from_path(path):
    """Match table from path: the second-to-last '/'-separated component."""
    parts = path.split('/')
    return parts[-2]
def usage(err=''):
""" Prints the Usage() statement for the program """
m = '%s\n' %err
m += ' Default usage is to get Branch info from SalesForce.\n'
m += ' '
m += ' sfTaskBranch -cl do (load TBs from MODs)\n'
m += ' or\n'
m += ' sfTaskBranch -cs -p all do|3600 (check status of all or last 3600 secs) \n'
m += ' or\n'
m += ' sfTaskBranch -cba do (show status of all approvers) \n'
m += ' or\n'
m += ' sfTaskBranch -cu -pr 3600 (check action required on BAs modified since secs) \n'
m += ' or\n'
m += ' sfTaskBranch -cg blast4_michel (get info on specific branch)\n'
m += ' or\n'
m += ' sfTaskBranch -cdel -pba do (delete ALL Branches and approvals, you are warned) \n'
return m | f9a057b913f5c6d67a8f7812a800bf31cfadd288 | 98,894 |
def get_network_size(netmask):
    """Get cidr size of network from netmask, returned as a string.

    The prefix length is the position of the last set bit, i.e. the bit
    string with trailing zeros stripped (matches CIDR for contiguous masks).
    """
    bits = ''.join(bin(int(octet))[2:].zfill(8) for octet in netmask.split('.'))
    return str(len(bits.rstrip('0')))
def color_code(color_number):
    """Generate an ANSI escape sequence with the given color number or description string."""
    return '\033[{}m'.format(color_number)
from typing import TextIO
from typing import Dict
from typing import List
from typing import Optional
def parse_record_log(file: TextIO) -> Dict[str, List[Dict[str, Optional[str]]]]:
    """
    Parse record.log from Doozer into a dict.

    Each line has the form ``type|key=value|key2=value2|...``. The result is
    keyed by the type of operation performed; the values are lists of dicts,
    one per recorded operation of that type. An entry without '=' maps its
    key to None; entries with an empty key are dropped.
    """
    result: Dict[str, List[Dict[str, Optional[str]]]] = {}
    for line in file:
        fields = line.rstrip().split("|")
        record_type = fields[0]  # renamed: previously shadowed builtin `type`
        record: Dict[str, Optional[str]] = {}
        for entry in fields[1:]:
            # partition splits on the first '=' only, so values may contain '='.
            key, sep, value = entry.partition("=")
            if not key:  # skip malformed entries with an empty attribute name
                continue
            record[key] = value if sep else None
        result.setdefault(record_type, []).append(record)
    return result
def topological_order(graph):
    """Topological sorting by maintaining indegree (Kahn's algorithm).

    :param graph: directed graph in listlist format, cannot be listdict
    :returns: list of vertices in order
    :complexity: `O(|V|+|E|)`
    """
    n = len(graph)
    # Count incoming arcs for every vertex.
    indegree = [0] * n
    for neighbors in graph:
        for v in neighbors:
            indegree[v] += 1
    # Stack of vertices with no remaining incoming arc.
    ready = [v for v in range(n) if indegree[v] == 0]
    order = []
    while ready:
        v = ready.pop()
        order.append(v)
        for w in graph[v]:
            indegree[w] -= 1
            if indegree[w] == 0:
                ready.append(w)
    return order
def vector_orientation (x, y):
    """Assigns an orientation to a 2D vector according to the Spanish CTE compass rose.

    Cardinal sectors (N/E/S/W) use inclusive bounds; intercardinal sectors
    (NE/SE/SW/NW) use strict bounds, matching the original comparisons.
    """
    # (name, x_min, x_max, y_min, y_max, bounds_inclusive)
    sectors = (
        ("North",     -0.3826,  0.3826,  0.9238,  1.0,    True),
        ("Northeast",  0.3826,  0.8660,  0.5000,  0.9238, False),
        ("East",       0.8660,  1.0,    -0.3583,  0.5000, True),
        ("Southeast",  0.3090,  0.9335, -0.9510, -0.3583, False),
        ("South",     -0.3090,  0.3090, -1.0,    -0.9510, True),
        ("Southwest", -0.9335, -0.3090, -0.9510, -0.3583, False),
        ("West",      -1.0,    -0.8660, -0.3583,  0.5000, True),
        ("Northwest", -0.8660, -0.3826,  0.5000,  0.9238, False),
    )
    for name, x0, x1, y0, y1, inclusive in sectors:
        if inclusive:
            if x0 <= x <= x1 and y0 <= y <= y1:
                return name
        elif x0 < x < x1 and y0 < y < y1:
            return name
    return "No orientation"
def pick_fields(obj, fields):
    """
    Returns a dict mapping each name in ``fields`` to that attribute of ``obj``.
    """
    picked = {}
    for field in fields:
        picked[field] = getattr(obj, field)
    return picked
def remove_final_whitespace(string):
    """
    Return a copy of *string* with trailing whitespace removed from each line.
    """
    trimmed = [line.rstrip() for line in string.split('\n')]
    return '\n'.join(trimmed)
def create_exclude_set(exclude_file):
    """
    Create a list of interactions to be excluded from analysis,
    e.g. if they are part of the positive set.

    :param exclude_file: path to a tab-separated file with two protein
        identifiers per line; blank or malformed lines are skipped.
    :return: sorted list of unique (p1, p2) tuples, each sorted internally.
    """
    # Collect pairs directly into a set of sorted tuples; the original built
    # an intermediate list of lists and then re-sorted/deduplicated it, and
    # crashed (IndexError) on blank trailing lines.
    pairs = set()
    with open(exclude_file, 'r') as fin:
        for line in fin:
            fields = line.split('\t')
            if len(fields) < 2:
                continue
            p1, p2 = fields[0].strip(), fields[1].strip()
            pairs.add(tuple(sorted((p1, p2))))
    return sorted(pairs)
import time
def annotate_heatmap(im, std, mask, ann_format='time',
                     textcolors=("black", "white"), threshold=None,
                     **textkw):
    """ A function to annotate a heatmap.

    Args:
        im: The AxesImage to be labeled
        std: A 2D numpy array of shape (N, M) containing the standard deviations to be used to annotate the heatmap
        mask: A 2D python array containing Boolean values indicating whether each single cell contains data or not
        ann_format: The format of the annotations inside the heatmap. This should be either 'time' or 'speed'
            (default: 'time')
        textcolors: A pair of colors. The first is used for values below a threshold, the second for those above
            (default: ("black", "white"))
        threshold: Value in data units according to which the colors from textcolors are applied.
            If None (the default) uses the middle of the colormap as separation (default: None)
        **textkw: All other arguments are forwarded to each call to `text` used to create the text labels
    """
    data = im.get_array()
    # Normalize the threshold into the image's color range; default to the
    # middle of the colormap.
    if threshold is not None:
        threshold = im.norm(threshold)
    else:
        threshold = im.norm(data.max()) / 2.
    # Center the labels by default, but let callers override via textkw.
    text_kwargs = dict(horizontalalignment="center",
                       verticalalignment="center")
    text_kwargs.update(textkw)
    if ann_format != 'time' and ann_format != 'speed':
        raise ValueError('Unknown format type.')
    texts = []
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            # Skip cells flagged as holding no data.
            if mask[i][j]:
                continue
            # Pick the text color depending on which side of the threshold
            # the cell value falls.
            text_kwargs.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])
            if ann_format == 'time':
                label = '{}\n+/-{:4.0f}s'.format(
                    time.strftime("%H:%M:%S",
                                  time.gmtime(float(data[i, j]))),
                    std[i, j])
            else:  # 'speed'
                label = '{:6.3f}it/s\n+/-\n{:6.3f}it/s'.format(data[i, j],
                                                               std[i, j])
            texts.append(im.axes.text(j, i, label, **text_kwargs))
    return texts
from pathlib import Path
def traces_path(app):
    """Returns the path to the traces.json for a given app name."""
    # Two levels up from this module, then into the fixtures tree.
    tests_dir = Path(__file__).parent.parent
    relative = Path("fixtures") / "aws_trace_analyzer" / app / "traces.json"
    return (tests_dir / relative).resolve()
def split(str, delimiters, joiner=None):
    """Split a string into pieces by a set of delimiter characters. The
    resulting list is delimited by joiner, or the original delimiter if
    joiner is not specified.

    Examples:
    >>> split('192.168.0.45', '.')
    ['192', '.', '168', '.', '0', '.', '45']
    >>> split('terry@wayforward.net', '@.')
    ['terry', '@', 'wayforward', '.', 'net']
    >>> split('terry@wayforward.net', '@.', '.')
    ['terry', '.', 'wayforward', '.', 'net']
    """
    # NOTE(review): the parameter name `str` shadows the builtin; kept to
    # preserve the public signature for keyword callers.
    pieces = []
    current = ''
    for ch in str:
        if ch not in delimiters:
            current += ch
            continue
        pieces.append(current)
        current = ''
        # A falsy joiner (None or '') keeps the original delimiter.
        pieces.append(joiner if joiner else ch)
    pieces.append(current)
    return pieces
def control_plane_ingress_ip(k8s_client):
    """Return the Control Plane Ingress IP from Kubernetes service"""
    svc = k8s_client.read_namespaced_service(
        name="ingress-nginx-control-plane-controller",
        namespace="metalk8s-ingress",
    )
    ip = svc.spec.load_balancer_ip
    if ip:
        return ip
    # Fall back to the first external IP when no load-balancer IP is set.
    return svc.spec.external_i_ps[0]
def find_note_index(scale, search_note):
    """ Given a scale, find the index of a particular note

    :param scale: sequence whose items are either a note name (str) or a
        list of enharmonically equivalent note names.
    :param search_note: note name to look for.
    :return: index of the first match, or None if the note is not found.
    """
    for i, note in enumerate(scale):
        # Deal with situations where we have a list of enharmonic
        # equivalents, as well as just a single note as an str.
        # isinstance replaces the previous `type(note) == list` checks.
        if isinstance(note, list):
            if search_note in note:
                return i
        elif isinstance(note, str):
            if search_note == note:
                return i
    return None
def qa_filter(
    mhm_df,
    has_genus=False,
    min_larvae_count=-9999,
    has_photos=False,
    is_container=False,
):
    """
    Filter a cleaned and flagged mosquito habitat mapper DataFrame on:
    - `Has Genus`: the entry has an identified genus
    - `Min Larvae Count`: minimum larvae count needed for an entry
    - `Has Photos`: the entry contains valid photo entries
    - `Is Container`: the entry's watersource was a container

    Parameters
    ----------
    has_genus : bool, default=False
        If True, only entries with an identified genus will be returned.
    min_larvae_count : int, default=-9999
        Only entries with a larvae count >= this value are included.
    has_photos : bool, default=False
        If True, only entries with recorded photos will be returned.
    is_container : bool, default=False
        If True, only entries with containers will be returned.

    Returns
    -------
    pd.DataFrame
        A copy of the DataFrame with all requested filters applied.
    """
    # Build one boolean mask; conjunction of filters is order-independent.
    keep = mhm_df["mhm_LarvaeCount"] >= min_larvae_count
    if has_genus:
        keep &= mhm_df["mhm_HasGenus"] == 1
    if has_photos:
        keep &= mhm_df["mhm_PhotoBitDecimal"] > 0
    if is_container:
        keep &= mhm_df["mhm_IsWaterSourceContainer"] == 1
    return mhm_df[keep]
def format_changes(github_config, owner, repo, prs, markdown=False):
    """Format the list of prs in either text or markdown"""
    lines = []
    for pr in prs:
        number = f"#{pr.number}"
        if markdown:
            # Turn the bare number into a markdown link to the PR.
            link = f"{github_config.base_url}/{owner}/{repo}/pull/{pr.number}"
            number = f"[{number}]({link})"
        lines.append(f"- {pr.title} {number}")
    return lines
def FilterBuildStatuses(build_statuses):
  """We only want to process passing 'normal' builds for stats.

  Args:
    build_statuses: List of Cidb result dictionary. 'stages' are not needed.

  Returns:
    List of all build statuses that weren't removed.
  """
  # Ignore tryserver, release branches, branch builders, chrome waterfall, etc.
  allowed_waterfalls = frozenset(('chromeos', 'chromiumos'))
  kept = []
  for status in build_statuses:
    if status['waterfall'] in allowed_waterfalls:
      kept.append(status)
  return kept
def passageLinks(passages, sec0Type, sec0, sec1, tillLevel):
    """Provide navigation links for passages,

    as links to sections of level 0, 1 and 2 (books, chapters and verses).
    If `sec0` is not given, only a list of sec0 links is produced.
    If `sec0` is given, but `sec1` not, a list of links for sec1s within the
    given `sec0` is produced.
    If both `sec0` and `sec1` are given, the sec1 entry is focused.

    NOTE(review): `tillLevel` is accepted but unused here — presumably kept
    for interface compatibility with callers; confirm before removing.
    """

    def anchor(css, value, selected):
        # Selected entries get the ' focus' class (note the resulting
        # double space inside the class attribute, kept for compatibility).
        focus = ' focus' if selected else ''
        return f'<a href="#" class="{css} {focus}">{value}</a>'

    sec0_links = [anchor('s0nav', s0, str(s0) == str(sec0))
                  for s0 in passages[0]]
    sec1_links = []
    if sec0:
        sec1_links = [anchor('s1nav', s1, str(s1) == str(sec1))
                      for s1 in passages[1]]
    header = (
        f'<div class="sline"><span><span id="s0total"></span>'
        f' <span class="s0total">{sec0Type}s</span></span>'
    )
    return (
        header
        + ''.join(sec0_links)
        + '</div><div class="sline">'
        + ''.join(sec1_links)
        + '</div>'
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.