| content (string, 35 to 416k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k) |
|---|---|---|
def AbsorbeMinterme(self, minterme):
""" Retourne True si self est vrai lorque minterme est vrai """
if self.Terminal:
return self.TjsVrai
elif self.Ord in minterme:
return self.Hi.AbsorbeMinterme(minterme)
else:
return self.Lo.AbsorbeMinterme(minterme)
|
981bb64a83f57285f7787903dfba0ec36a9365f0
| 26,366
|
import numpy
def bearing_from_north(gdf, shift=1):
"""
Computes the bearing from north along a series of points.
Parameters
----------
gdf : geopandas.GeoDataFrame
        A geodataframe of points.
    shift : int, optional
        Number of rows by which the coordinates are shifted when
        differencing consecutive points (default 1).
Returns
-------
bearing : numpy.array
"""
HALF_PI = numpy.pi * 0.5
TWO_PI = 2 * numpy.pi
x = gdf['geometry'].x - gdf['geometry'].x.shift(periods=shift)
y = gdf['geometry'].y - gdf['geometry'].y.shift(periods=shift)
NE = (x >= 0) & (y >= 0)
SE = (x >= 0) & (y < 0)
SW = (x < 0) & (y < 0)
NW = (x < 0) & (y >= 0)
bearing = numpy.arctan(y / x).abs()
directions = [NE, SE, SW, NW]
corrections = [
HALF_PI - bearing,
HALF_PI + bearing,
TWO_PI - HALF_PI - bearing,
TWO_PI - HALF_PI + bearing,
]
for direction, correction in zip(directions, corrections):
bearing = numpy.where(direction, correction, bearing)
return bearing
|
74062cc51ee4ca190fe03ce80fba60fa9d6464fe
| 26,367
|
def char_to_int_label(char):
"""
    Converts a character to an integer label.
    :param char: a single letter or digit character
    :return: integer label; letters map to 0-25 and digits map to 26-35
    """
    # ASCII code point of the lower-cased character
    index = int(ord(char.lower()))
    # letters (code point > 60) get shifted down so that 'a' maps to 0
if index > 60:
index = index - 75
return index - 22
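# Illustrative check (added example; the characters are assumed sample inputs):
# under this scheme letters map to 0-25 and digits map to 26-35.
assert char_to_int_label('a') == 0
assert char_to_int_label('Z') == 25
assert char_to_int_label('0') == 26
assert char_to_int_label('9') == 35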
|
3f366547d2cf9617e6192223d79867612c0bf447
| 26,368
|
def _full_kind(details):
"""
Determine the full kind (including a group if applicable) for some failure
details.
:see: ``v1.Status.details``
"""
kind = details[u"kind"]
if details.get(u"group") is not None:
kind += u"." + details[u"group"]
return kind
|
5545b15c5aa5798ca4f993bbcf49252fe3141ac9
| 26,369
|
def get_duplex_direct_network_config(context, iface, config, sysctl_ifname):
"""
Disable dad on the specified interface for duplex-direct config
"""
new_pre_up = "sysctl -wq net.ipv6.conf.%s.accept_dad=0" % sysctl_ifname
old_pre_up = config['options'].get('pre_up')
if old_pre_up:
new_pre_up = "%s ; %s" % (old_pre_up, new_pre_up)
options = {'pre_up': new_pre_up}
config['options'].update(options)
return config
|
9eed3c4c8cdaf4edcfd693f4bdfa13f611dea26f
| 26,370
|
def crop_2d(array, array_e, x_start=0, x_end=-1, y_start=0, y_end=-1):
"""
Crop the data (:py:attr:`array`) with some given start and stop point.
Args:
array (:py:attr:`array_like`): The intensity map collected by the 2-D
detector.
array_e (:py:attr:`array_like`): Uncertainty map collected by the 2-D
detector.
x_start (:py:attr:`int`): Start point in x-axis.
x_end (:py:attr:`int`): End point in x-axis.
y_start (:py:attr:`int`): Start point in y-axis.
y_end (:py:attr:`int`): End point in y-axis.
Returns:
        :py:attr:`array_like`: A cropped intensity map or, when ``array_e``
        is given, a tuple of the cropped intensity and uncertainty maps.
"""
cropped_array = array[y_start:y_end, x_start:x_end]
if array_e is not None:
cropped_error = array_e[y_start:y_end, x_start:x_end]
return cropped_array, cropped_error
return cropped_array
|
4b77b0fa42c429d1f178ea728036ac7130dee629
| 26,373
|
import builtins
def len(objet): #FR !
"""
    objet may be a string or a list.
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Returns the length of that string or list.
"""
return builtins.len(objet)
|
71c936b40950727a57aa2268508de5fc3d78b731
| 26,374
|
import random
def get_framesets(cls, maximum=10, pattern=None):
""" Gather FrameSet objects from either Frames or Bars.
If `pattern` is set to a compiled regex pattern,
return all FrameSets matching the pattern.
Otherwise, return up to `maximum` random FrameSets.
"""
frametypes = set()
framenames = cls.names()
    if pattern is None:
        # Cap maximum so we do not loop forever when there are fewer
        # distinct FrameSets than requested.
        maximum = min(maximum, len(framenames))
        while len(frametypes) < maximum:
            frametypes.add(cls.get_by_name(random.choice(framenames)))
else:
frametypes.update(
cls.get_by_name(s)
for s in framenames
if pattern.search(s) is not None
)
return frametypes
|
f6f094b67243685f352ea624e2bf45b62693e38d
| 26,375
|
import re
def access_token(request):
"""Read the access token from the request, or None"""
header = request.headers.get('Authorization')
if header:
match = re.match(r'Bearer\s+([^\s]+)', header, re.I)
if match:
return match.group(1)
# Get the access token from parameters.
token = request.params.get('access_token')
if token:
return token
if getattr(request, 'content_type', None) == 'application/json':
try:
json_body = request.json_body
except ValueError:
json_body = None
if isinstance(json_body, dict):
# Get the access token from the JSON request body.
token = json_body.get('access_token')
if token and isinstance(token, str):
return token
return None
|
76ce91dede7d02cbd6cc8dd587b21fe9ab770611
| 26,376
|
def wordvector_distance(indices, class_wordvector_distances):
"""Get the distance of two class word vectors, given a pre-computed distance matrix. This can be used to determine
edge weights between two batch graph nodes.
Args:
indices: the indices of the two classes. In the graph structure, the first index corresponds to the origin
vertex and the second index corresponds to the destination vertex. Tensor of shape `[2]`.
class_wordvector_distances: pre-computed class word vector distances for all the classes in the dataset. The
matrix is symmetrical.
Returns:
the distances between the word vectors of the classes specified in `indices`.
"""
return class_wordvector_distances[indices[0]][indices[1]]
|
8e0c7e3c2318b20af41c24f8940c85bb40340f16
| 26,377
|
def psi2(ctx, z):
"""Shortcut for psi(2,z) (the tetragamma function)"""
return ctx.psi(2, z)
|
68b3ade0f3844cf67c57b37b95744ffc09b46e52
| 26,378
|
def remove_char(string,
iterable):
"""
Return str without given elements from the iterable. More convenient than
chaining the built-in replace methods.
Parameters
    ----------
string: str
String from which the characters from the iterable are removed.
iterable: str, list, tuple, set
Iterable with characters that are removed from the string.
Returns
-------
str
Without elements from the iterable.
"""
for i in iterable:
string = string.replace(i, "")
return string
|
a7106236fc15adf7b7aa4cdc7e4f3b6b86e6a889
| 26,379
|
def battery_level():
"""
Reads battery level from /sys/class/power_supply/BAT0/capacity
"""
with open("/sys/class/power_supply/BAT0/capacity", "r") as f:
return int(f.read())
|
d923a30bbc4ad7ebcf87936dc4576681e8b51600
| 26,380
|
def fillna(cp, cp0):
"""
This fills in conditional probability with count 0
if the word does not exist in the training set
"""
    if cp is None:
        return cp0
    else:
        return cp
|
bc62d73fd34c0e186966d47d669441c624d1be0e
| 26,381
|
def parse_command(cmd_str):
"""
    The line has one word for the command and n pairs that map to key, value (separator is space).
:param cmd_str: string with name of command and pairs of params and values
:return: cmd : str (name of the command)
cmd_par: dictionary {par_name: str(par_value)} with the parameters for the command
"""
split_cmd = cmd_str.split(' ')
assert (len(split_cmd) % 2)
cmd_par = {split_cmd[i]: split_cmd[i + 1] for i in range(1, len(split_cmd), 2)}
cmd = split_cmd[0]
return cmd, cmd_par
|
ac48d05bcd88c7eb5e04cedeb26c5d5278bbc3bd
| 26,382
|
import os
def split_spec(spec):
"""Given a spec, returns a (directory_name, spec_name) tuple.
The spec is allowed to be in the form 'directory:name', or just the implicit 'directory'.
"""
try:
path, name = spec.split(':', 1)
except ValueError:
path, name = spec, ''
return path, name or os.path.basename(path)
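# Illustrative usage sketch (added; the paths are assumed examples): a spec may
# carry an explicit name after ':' or fall back to the directory's basename.
assert split_spec('plugins/foo:bar') == ('plugins/foo', 'bar')
assert split_spec('plugins/foo') == ('plugins/foo', 'foo')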
|
d07b133188644a7f879951407ff4c659d2c8452b
| 26,384
|
def central_slice(k):
"""Return central slice objects (last 2 dimensions)."""
if k < 1:
return ..., slice(None), slice(None)
return ..., slice(k, -k), slice(k, -k)
|
a69a18adf07c9e841f58328c1869e44bd0de24e2
| 26,385
|
from typing import List
from typing import Tuple
def concatenate_shapes(shapes: List[Tuple[int]], axis: int):
"""Concatenate shapes along axis"""
out = list(shapes[0])
out[axis] = sum(list(s)[axis] for s in shapes)
return tuple(out)
|
959a66fec11fa7d67218f2fb1b76d5bcf990d463
| 26,386
|
def average_metric(img):
""" return the value of a metric on an image """
return img.mean(axis=0).mean(axis=0)
|
d99e31124d6cd328db2e6ab781a4d1e141bb943d
| 26,387
|
def nm_index(n, m):
"""Return flat index into arrray of [n, m] pairs.
Assumes array is ordered as
[
[n, m]
for n in range(n_max+1)
for m in range(-n, n+1)
]
"""
return m + n * (n + 1)
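# Small consistency check (added example): the flat index should match the
# position of each (n, m) pair in the ordering described in the docstring.
_pairs = [(n, m) for n in range(4) for m in range(-n, n + 1)]
assert all(nm_index(n, m) == i for i, (n, m) in enumerate(_pairs))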
|
ef619161b6b44e1d3222a901c8c15b51711b6bcc
| 26,388
|
import subprocess
def item_exists(doi, wikidata_cli_executable):
"""
    Check whether an item with that DOI exists by querying for it.
"""
tmp_sparql_file = "tmp.sparql"
with open(tmp_sparql_file, "w") as output_fh:
sparql_query = f'SELECT ?jo WHERE {{?jo wdt:P356 "{doi}".}}'
output_fh.write(sparql_query)
try:
query_result = subprocess.check_output(
f"{wikidata_cli_executable} sparql "
f"{tmp_sparql_file} -e https://query.wikidata.org/sparql".split()
)
except subprocess.CalledProcessError:
return False
    # If this string is returned, the item does not exist
    return "no result found" not in str(query_result)
|
074f45adce6de20a787077a3f6070554acf27eb1
| 26,390
|
def GetModSaveMetaDataFileName () -> str:
"""
Get the file name of every mod save meta data file.
"""
return "Meta_Data.json"
|
68af6a2a4428242aedf7f1b313322d8a4e148e78
| 26,391
|
def insertionsort(x):
"""
    input: insertion sort takes in a list or numpy array
output: returns the list or array in sorted order
notes: use insertion sort algorithm -- adapted from pseudocode in Cormen textbook
"""
assign = 0
cond = 0
for j in range(1, len(x)):
key = x[j]
i = j-1
assign += 2
while i >= 0 and x[i] > key:
cond += 2
x[i+1] = x[i]
i = i-1
assign += 2
x[i+1] = key
assign += 1
return [x, cond, assign]
|
fcb84128a907f6b457363a81f4ce4cc2f92d9d11
| 26,392
|
def support(transactions, itemsets):
"""Returns the percentages of transactions that contain the itemsets.
Parameters
----------
transactions : list of list
itemsets : list of frozenset
Returns
-------
dict
Key of each item is the itemset and the value is the itemset's support
"""
counts = {}
for itemset in itemsets:
counts[itemset] = 0
for transaction in transactions:
for itemset in itemsets:
if itemset.issubset(transaction):
counts[itemset] += 1
supports = {}
total_transactions = len(transactions)
for itemset, count in counts.items():
supports[itemset] = count / total_transactions
return supports
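# Illustrative example (added; the transactions and itemsets are assumed values):
_transactions = [['beer', 'chips'], ['beer', 'salsa'], ['chips']]
_itemsets = [frozenset(['beer']), frozenset(['beer', 'chips'])]
_supports = support(_transactions, _itemsets)
assert _supports[frozenset(['beer'])] == 2 / 3
assert _supports[frozenset(['beer', 'chips'])] == 1 / 3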
|
f6d9751b2560dd8ac636dac965d912595ccb9831
| 26,394
|
import functools
def compose(*functions):
"""
Compose multiple functions into a single function
Create a function that calls a series of functions, passing the output of
one function as the input of the next.
See https://mathieularose.com/function-composition-in-python/
"""
return functools.reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
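# Illustrative usage (added; the lambdas are assumed examples): functions are
# applied right to left, so the rightmost function runs first.
_double_then_inc = compose(lambda x: x + 1, lambda x: x * 2)
assert _double_then_inc(3) == 7  # (3 * 2) + 1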
|
32190bf758f4e7198fff96b684001bf50229b4c2
| 26,395
|
import os
def local_unit():
"""Local unit ID"""
return os.environ['JUJU_UNIT_NAME']
|
9986be7e00d19cdde96ad7d3e31be0b502864315
| 26,396
|
def swapGroups(grpLabels, edgeList, grpInfo, distFunc, distThresh):
""" iterate over the edges instead of the objects """
newGrpLabels = {}
for (i, j) in edgeList:
(iGrp, iGrpVec, iScore) = grpLabels[i]
(jGrp, jGrpVec, jScore) = grpLabels[j]
if (iGrp != jGrp) & (iGrp > 0) & (jGrp > 0): # not already in the same group
newIgrp = iGrp
newJgrp = jGrp
iGrpMeanVec = grpInfo[iGrp][2].v0()
jGrpMeanVec = grpInfo[jGrp][2].v0()
distItoImean = distFunc(iGrpVec, iGrpMeanVec)
distItoJmean = distFunc(iGrpVec, jGrpMeanVec)
distJtoImean = distFunc(jGrpVec, iGrpMeanVec)
distJtoJmean = distFunc(jGrpVec, jGrpMeanVec)
if distItoImean > distThresh:
distItoImean = distThresh
newIgrp = 0
if distJtoJmean > distThresh:
newJgrp = 0
distJtoJmean = distThresh
if distItoJmean < distItoImean: # swap i
newIgrp = jGrp
distItoImean = distItoJmean
if distJtoImean < distJtoJmean: # swap j
newJgrp = iGrp
distJtoJmean = distJtoImean
iVals = newGrpLabels.get(i, (iGrp, iGrpVec, distThresh * 20))
if distItoImean < iVals[2]:
newGrpLabels[i] = (newIgrp, iGrpVec, distItoImean)
jVals = newGrpLabels.get(j, (jGrp, jGrpVec, distThresh * 20))
if distJtoJmean < jVals[2]:
newGrpLabels[j] = (newJgrp, jGrpVec, distJtoJmean)
return newGrpLabels
|
846d31eae135a3985415720d90e962cdb30e6ddf
| 26,397
|
def extended_GCD(a, b):
"""
The extended Euclidean algorithm computes the greatest common divisor and the Bézout
coefficients s, t.
Returns (remainder, (s, t))
"""
(r, rP) = (a, b)
(s, sP) = (1, 0)
(t, tP) = (0, 1)
while rP != 0:
q = r // rP
(r, rP) = (rP, r - q * rP)
(s, sP) = (sP, s - q * sP)
(t, tP) = (tP, t - q * tP)
return (r, (s, t))
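# Illustrative check (added; the operands are assumed example values):
# gcd(240, 46) is 2 and the returned coefficients satisfy the Bezout identity.
_g, (_s, _t) = extended_GCD(240, 46)
assert _g == 2 and 240 * _s + 46 * _t == _g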
|
ca963ab6fff79e8cd375d4ce6a866cf28fd94f3b
| 26,400
|
import argparse
def parse_arguments():
"""Parse command line arguments
Use environment variables as default if passed.
Returns: argument objects with flags as attributes
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--config_loc", help="Location of the configuration file", required=True
)
parser.add_argument("--node_size", help="Size of nodes", required=False)
parser.add_argument(
"--label_font_size", help="Size of font for text labels", required=False
)
parser.add_argument(
"--text_angle", help="Angle to rotate text (counterclockwise)", required=False
)
parser.add_argument(
"--image_width", help="width of image in inches", required=False
)
parser.add_argument(
"--image_height", help="height of image in inches", required=False
)
parser.add_argument(
"--nodesequence",
help='Show node sequences numbers? "true" or "false"?',
required=False,
)
parser.add_argument(
"--outfile", help="Path for the output image file", required=True
)
known_args, pipeline_args = parser.parse_known_args()
return known_args, pipeline_args
|
7ae1cb658dde58e31bd66502dcf45267428ce166
| 26,401
|
def find_digits_in_str(string: str) -> str:
"""Find digits in a given string.
Args:
string: str, input string with the desired digits
Returns:
digits: str, found in the given string
"""
return "".join(x for x in string if x.isdigit())
|
7b9e824f8100d6289a8ed135b50e10d3b3046ed1
| 26,402
|
import pickle
def load_q_table():
"""Reload a saved q_table"""
    with open("q_table.pickle", "rb") as load_file:
        return pickle.load(load_file)
|
c69b0e002d1ba5ddd5ec1c33ee1fc0f65d60d392
| 26,403
|
from pathlib import Path
def exists( fileName:str ) -> bool:
"""
Check whether the given fileName exists
"""
fname = fileName + ".pkl"
my_file = Path(fname)
return my_file.exists()
|
5173fbe936564c67f9f32b33ba0a7f85bb172ec8
| 26,404
|
def _format_defaults(value):
"""
Format value to CLI syntax.
Example usage: if default value of a parameter is ['gene'], the description
in CLI would be::
(default: ['gene'])
After using this function the description is tailored to CLI usage::
(default: gene)
"""
if isinstance(value, list):
return ' '.join(value)
else:
return value
|
e665ba6cf28ccb03db5a3ee69721bf03dfa4b41e
| 26,405
|
def bit_neighbors(bit):
""" get the neighboring bits to a set of bits """
bitboard = (bit & 0xFEFEFEFEFEFEFEFE) >> 1
bitboard |= (bit & 0x7F7F7F7F7F7F7F7F) << 1
bitboard |= (bit & 0xFFFFFFFFFFFFFF00) >> 8
bitboard |= (bit & 0x00FFFFFFFFFFFFFF) << 8
return bitboard
|
37cb385be05edbf567b99a5509ab53031e4154f3
| 26,406
|
import ast
def parseTypeFromString(value):
"""
Parse a string representation of a variable into a true, typed, python variable
"""
return ast.literal_eval(value)
|
2a7664af015a60a9070e3090772c73af4ef76fb5
| 26,409
|
import re
def remove_tabs(html_str: str):
"""
remove all tabs from the html_str
"""
html_str = re.sub(r'\t', '', html_str)
return html_str
|
ba4a85878bece977c34aa5587384ca4ec6d68e99
| 26,413
|
def number_to_digits(number, base):
"""Convert a positive number to its digit representation in base."""
digits = []
while number > 0:
digits.insert(0, number % base)
number = number // base
return digits
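# Illustrative checks (added; the numbers and bases are assumed examples):
assert number_to_digits(13, 2) == [1, 1, 0, 1]
assert number_to_digits(255, 16) == [15, 15]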
|
d04091511cbf2c1a86a315239321612b60a27a2d
| 26,414
|
def select_bboxes(selection_bbox: dict, page_bboxes: list, tolerance: int = 10) -> list:
"""
Filter the characters bboxes of the document page according to their x/y values.
The result only includes the characters that are inside the selection bbox.
:param selection_bbox: Bounding box used to select the characters bboxes.
:param page_bboxes: Bounding boxes of the characters in the document page.
:param tolerance: Tolerance for the coordinates values.
:return: Selected characters bboxes.
"""
selected_char_bboxes = [
char_bbox
for char_bbox in page_bboxes
if int(selection_bbox["x0"]) - tolerance <= char_bbox["x0"]
and int(selection_bbox["x1"]) + tolerance >= char_bbox["x1"]
and int(selection_bbox["y0"]) - tolerance <= char_bbox["y0"]
and int(selection_bbox["y1"]) + tolerance >= char_bbox["y1"]
]
return selected_char_bboxes
|
6b3a091aafc0b2af7045e964daf02237ae18f1f4
| 26,415
|
def merge(data, delimiter=","):
"""
Merge rows with an equal starting index from an array of CSV data rows.
Args:
data (list): input list of string rows or row values as a list to merge.
delimiter (str): delimiter of the CSV format to use for value splitting.
Returns:
merged list of rows in string format.
"""
data_merged = []
row_merged = []
# Register an empty field.
id = None
for row in data:
# Convert our string row into a list of strings if it's not already one.
values = row.split(delimiter) if type(row) is str else row
# Assign a value if this is the first run.
if not id:
id = values[0]
row_merged.append(id)
# If our identifier does not match up with the last, append the row and reset.
if values[0] != id:
data_merged.append(row_merged)
row_merged = []
id = values[0]
row_merged.append(id)
# Begin iteration over values skipping our identifier.
for value in values[1:]:
row_merged.append(value)
# If this is the last row append it.
if row == data[-1]:
data_merged.append(row_merged)
return data_merged
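# Illustrative example (added; the CSV rows are assumed values): rows sharing
# the leading identifier "1" are merged into a single row.
assert merge(["1,a", "1,b", "2,c"]) == [["1", "a", "b"], ["2", "c"]]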
|
121f3b5d3057b7e8aa639fad9063fff82e7033ab
| 26,417
|
def sum_ratios_to_percentage(ratios):
"""Sums ratios and converts to two-decimal rounded percentage"""
return round(sum(ratios) * 100, 2)
|
2341436755fbf67559b164919bbab275d9a935ea
| 26,418
|
import re
def _remove_whitespace(string):
"""Return a version of the input string with whitespace removed"""
whitespace_re = re.compile(r"\s+")
return whitespace_re.sub('', string)
|
0da9083b1f4d4e4c8cb4a375b2e70cd3b70a4564
| 26,422
|
def count(s, value):
"""
    For-loop version of traversal.
    :param s: linked list to traverse
    :param value: value to search for
    :return: number of occurrences of value
"""
total = 0
for elem in s:
if elem == value:
total = total + 1
return total
|
bbe0ff7d00943a45c912d02f8e6e52748f9725e3
| 26,424
|
def _split_attrs(attrs, second_part_keys=None):
"""Split `attrs` dictionary into two parts:
* Dict with keys that are not in `second_part_keys`
* Remainder dict with keys in `second_part_keys`
"""
    dict_1 = {k: v for k, v in attrs.items() if k not in second_part_keys}
    dict_2 = {k: v for k, v in attrs.items() if k in second_part_keys}
return dict_1, dict_2
|
ed19608ec7bc5bf5b40a0a1315e952e7af103dc7
| 26,425
|
from urllib.parse import urlparse, urlencode, parse_qs, unquote
def merge_url_query(url, _doseq=True, **kwargs):
"""Add (new) query params to url, overwriting existing query parameters.
Note: Python 3.5+ only
Args:
url (str): A URL, e.g. 'http://stackoverflow.com/search?q=question'
        _doseq (bool): Whether to urlencode using doseq
kwargs (dict): Query parameters to add to url, e.g. {'q': ['a', 'b']}
Returns:
str: Modified URL, e.g. 'http://stackoverflow.com/search?q=a&q=b'
"""
parsed = urlparse(unquote(url))
return parsed._replace(
query=urlencode({**parse_qs(parsed.query), **kwargs}, doseq=_doseq)
).geturl()
|
fad16c498b9aa8fcac10cbcd995166a5f443a59d
| 26,427
|
from typing import Iterable
from typing import Callable
def count_nice_words(all_words: Iterable[str], checker: Callable[[str], bool]) -> int:
"""
Count the number of words which are nice, using the given nice-word-checking function.
"""
return sum(1 for word in all_words if checker(word))
|
53d7de35aad33d159c985acadad1a68fbb5ef11e
| 26,428
|
def _clean_dna(dna_seq):
"""
    This function removes any character from the DNA string that is not
    A, G, C, or T (case-insensitive).
    :param dna_seq: input DNA sequence (string)
    :return: lowercase DNA sequence containing only the characters a, g, c, t
"""
return ''.join(c for c in dna_seq.lower() if c in 'agct')
|
0e16ddded6d9b7aa465ee5cecfbcb8a94e267a03
| 26,429
|
def getAngleStatus(angle):
""" returns status based on the angle. Status codes:
0 - straight (-10 to +10)
1 - left or down (less than -10)
2 - right or up (greater than 10)
"""
if angle < -10:
return 1
elif angle > 10:
return 2
return 0
|
235c988bc285be4fbfbb3f3113030b79585c1976
| 26,430
|
import contextlib
import tempfile
import glob
import os
import shutil
def make_sample_fn(model, **sample_kwargs):
"""Create a sample function from a model with better error reporting.
The returned generator generates samples from the model, printing an error if
something went wrong.
Args:
model: A `CmdStanModel`.
**sample_kwargs: Arguments to pass to `CmdStanModel.sample`.
Returns:
sample_fn: A generator with signature
`(output_dir, **kwargs) -> cmdstanpy.CmdStanMCMC`.
`output_base_dir` specifies the base directory to use for the temporary
directory created to hold Stan's outputs.
"""
@contextlib.contextmanager
def _sample_fn(output_dir=None, **kwargs):
"""The sample function."""
# Error reporting isn't great in CmdStanPy yet
# (https://github.com/stan-dev/cmdstanpy/issues/22), so we do a little work
# to intercept the console output and print it if there's an issue.
#
# We use a context manager because CmdStanPy lazily loads quantities from
# the output directory, and since we're in control of deleting it, we need
# to have a mechanism to keep it around until the user is done with it.
if output_dir is None:
output_dir = tempfile.mkdtemp()
keep_outputs = False
else:
keep_outputs = True
final_kwargs = sample_kwargs.copy()
final_kwargs.update(kwargs)
try:
yield model.sample(output_dir=output_dir, **final_kwargs)
except RuntimeError as e:
for console_filename in glob.glob(os.path.join(output_dir, '*.txt')):
with open(console_filename, 'r') as f:
print(console_filename)
print(f.read())
raise e
finally:
if not keep_outputs:
shutil.rmtree(output_dir)
return _sample_fn
|
b60a340266b7fed4aeb912c6b011155e0d3f5077
| 26,431
|
import random
def generate_random_words(num_words: int) -> str:
"""
This method generates a bunch of random words
:param num_words: The number of random words to be generated
:return: A bunch of random words
"""
alpha_nums = [chr(x + 65) for x in range(26)] + [chr(x + 97) for x in range(26)] + [str(x) for x in range(10)]
return " ".join(["".join([random.choice(alpha_nums)
for _ in range(int(random.random() * 10) + 2)])
for _ in range(num_words)])
|
b6e526cab07a0a5b72abc81e03dc8f7c0bb0b49b
| 26,432
|
import time
def calc_delay_til_next_tick(seconds: float) -> float:
"""Calculate the delay to next tick."""
now: float = time.time()
current_tick: int = int(now // seconds)
delay_til_next_tick: float = (current_tick + 1) * seconds - now
return delay_til_next_tick
|
a524646c013b0450c21c793d10b1df40e3dc1100
| 26,433
|
def get_direct_prop(obj, fields):
""" Gets a model property of an object """
# shouldn't happen, but whatever
if len(fields) == 0:
return obj
# if we have a single field to get
elif len(fields) == 1:
field = fields[0]
# we may have a display getter
try:
display_getter = 'get_{}_display'.format(field)
if hasattr(obj, display_getter):
return getattr(obj, display_getter)()
        except Exception:
            pass
        # if we do not, return the field itself
return getattr(obj, field)
# else go recursively
else:
return get_direct_prop(getattr(obj, fields[0]), fields[1:])
|
1e5a8833f51732d81289e7c9a341b52bac3403a3
| 26,434
|
def usleep():
""" Delay execution in microseconds"""
    raise NotImplementedError()
|
a593f80bcc25415d20ba5aa7d61047479365744d
| 26,436
|
def trace_down(all_bags, bag):
"""
For the input bag "bag", trace down the path along all of its bags that it
can hold and count them.
"""
n = 0 # number of children seen at this level
if len(bag.can_hold) == 0:
return n
for bag_type, n_can_hold in bag.can_hold.items():
child_bag = all_bags[bag_type]
for i in range(n_can_hold):
n += 1 # count once for the child at this level
n += trace_down(
all_bags, child_bag
) # counts for all children at lower levels
return n
|
0eec32f12536d88da6232eb925b95d9bc8fb1a8a
| 26,437
|
def custom_tuple(tup):
""" customize tuple to have comma separated numbers """
tuple_string = "("
for itup in tup:
tuple_string += "{:,d}".format(itup) + ", "
if len(tup) == 1:
return tuple_string[:-2] + ",)"
return tuple_string[:-2] + ")"
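# Illustrative usage (added; values assumed), including the single-element case:
assert custom_tuple((1234567, 89)) == "(1,234,567, 89)"
assert custom_tuple((1234567,)) == "(1,234,567,)"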
|
1067de91a2438fe74498f7ad06d64b480f95c39c
| 26,438
|
import requests
import hashlib
from datetime import datetime
def get_feed_hash(url):
""" Returns hash of a feed and timestamp """
fetched = requests.get(url)
    m = hashlib.sha256()
m.update(fetched.text.encode())
text_hash = m.hexdigest()
ts = int(datetime.utcnow().timestamp())
return text_hash, ts
|
b813ffaaed3bb80d82b87633555b18f823b90ab5
| 26,439
|
import itertools
def nxos_decode(password):
""" See https://networkengineering.stackexchange.com/questions/27987/tacacs-implentation-server-key-error """
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
lowercase = uppercase.lower()
decoded_password = list()
cipher = [3, 22, 4, 5, 18, 0, 21, 5, 18, 3, 10, 5, 16, 22, 4, 16, 24, 17, 12, 5, 21, 18, 5, 22, 19, 7]
for character, key in zip(password, itertools.cycle(cipher)):
if character in lowercase:
decoded = ord(character) - 97 - key
if decoded < 0:
decoded += 26
decoded += 97
decoded_password.append(chr(decoded))
elif character in uppercase:
decoded = ord(character) - 65 - key
if decoded < 0:
decoded += 26
decoded += 65
decoded_password.append(chr(decoded))
else:
decoded_password.append(character)
return ''.join(decoded_password)
|
e1d06f1ce08a05133320c27ee9bf58a75b23b90a
| 26,440
|
def read_description():
"""Read README.md and CHANGELOG.md."""
try:
with open("README.md") as r:
description = "\n"
description += r.read()
with open("CHANGELOG.md") as c:
description += "\n"
description += c.read()
return description
except Exception:
return '''
Modeling and simulation of proton-exchange membrane fuel cells (PEMFC) may work as a powerful tool in the research &
development of renewable energy sources. The Open-Source PEMFC Simulation Tool (OPEM) is a modeling tool for
evaluating the performance of proton exchange membrane fuel cells. This package is a combination of models
(static/dynamic) that predict the optimum operating parameters of PEMFC. OPEM contained generic models that
will accept as input, not only values of the operating variables such as anode and cathode feed gas, pressure
and compositions, cell temperature and current density, but also cell parameters including the active area and
membrane thickness. In addition, some of the different models of PEMFC that have been proposed in the OPEM,
just focus on one particular FC stack, and some others take into account a part or all auxiliaries such as
reformers. OPEM is a platform for collaborative development of PEMFC models.'''
|
5f3d27de6175a5e9e3ba76ea9d65e6579e97dd32
| 26,443
|
from typing import Dict
def _calculate_mass(dimensions: Dict[str, float]) -> float:
"""Calculate and return a significantly large mass for a structure."""
# This calculation is arbitrary and can be modified if needed.
return max(
round(dimensions['x'] * dimensions['y'] * dimensions['z'] * 125),
1
)
|
8a3d4c3289fd897ecb42964b06435d699c3e4b63
| 26,444
|
from pathlib import Path
def security_result(name):
"""Load a security result file from the sample directory, and return the content"""
filename = Path(__file__).parent / 'security' / '{name}.out'.format(name=name)
with filename.open() as f:
return f.read()
|
1c804fd0a711376135013a21031e26578317d728
| 26,445
|
def get_string_commas_num( num ):
"""
This is the secret-sauce of formatting integers as strings with commas for every 3 digits. For example, ``1234`` becomes "1,234". I copied code from `this location`_.
:param int num: input number.
:returns: the nicely formatted output :py:class:`string <str>` representing an input number.
:type: str
.. _`this location`: https://intellipaat.com/community/2447/how-to-print-number-with-commas-as-thousands-separators
"""
return "%s" % f"{num:,d}"
|
4f49f4bb755ff012b3ca9bbe039919348a52285d
| 26,446
|
def capitalize(name):
"""Capitalize"""
return name[0].upper() + name[1:]
|
234f3c2ec4fe877df81f5797cd81d29b606ada91
| 26,447
|
import time
def time_mock():
"""Return original time result, as freezegun mocks it specifically."""
return time.time()
|
e66dd90afa0e6ce74a81c729898be30be8b51ded
| 26,449
|
def long_division_cycle(dividend: int, divisor: int, prev=None) -> int:
""" Returns the length of the cycle of repeating digits when dividing dividend by divisor """
prev = prev or []
while True:
if dividend in prev:
return len(prev) - prev.index(dividend)
prev.append(dividend)
dividend = dividend % divisor * 10
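# Illustrative checks (added; the dividend/divisor pairs are assumed examples):
# 1/7 = 0.142857... repeats with period 6, while 1/6 = 0.1666... has period 1.
assert long_division_cycle(1, 7) == 6
assert long_division_cycle(1, 6) == 1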
|
4d4f6ace07cceead148e578d7356ddf01a568b1d
| 26,451
|
def uniprot_to_pdb(pdb_filename):
"""
    Creates a dictionary from a PDB_LIST file, with UniProt IDs as keys and lists of PDB IDs as values.
:return: uniprot_domain_dict
"""
uniprot_domain_dict = {}
with open(pdb_filename) as pdbs:
pdbs.readline()
for line in pdbs:
line = line.strip().split('\t')
uniprot_id = line[0]
if len(line) > 2:
pdb_id = line[2].split(';')
else:
pdb_id = []
uniprot_domain_dict[uniprot_id] = pdb_id
return uniprot_domain_dict
|
b3f390eb883498e8bb0d80fe22fa5449295de1f3
| 26,453
|
def subplots(fig, nrows, ncols, nsubs, share):
"""Create a list of axes with correct ticks set to visible.
Mirrors matplotlib.pyplot.subplots, though not exactly, so see the source
code for tips.
"""
axes = []
for i in range(nsubs):
# add_subplot is 1-indexed
i += 1
axis = fig.add_subplot(nrows, ncols, i)
axes.append(axis)
# true if there are plots below this one (visually)
if share and i + ncols <= nsubs:
for l in axis.get_xticklabels():
l.set_visible(False)
# true if not the first column
if share and i % ncols != 1:
for l in axis.get_yticklabels():
l.set_visible(False)
return axes
|
0ae21be24a798f991c79154f8d4d1b5989b4e795
| 26,454
|
import json
def deserialize(data):
"""
Deserialize the information given in C{data}, back to its original pythonic form.
@param data: the data to deserialize.
@return the python object contained in the serialized data.
"""
return json.loads(data)
|
686b39b9fd58999ace064b222cbedf202ca2edce
| 26,455
|
def modify_range(val):
"""
Modify value from range 0,1 -> -1,1 and preserve ratio
:param val:
:return: value in rage -1,1
"""
return (val * 2) - 1
|
d3604c6ed682483e1e7276b31aece4eda325ed19
| 26,456
|
def sanitize_price(price_str):
"""
Normalise a price string and convert to float value.
:param price_str: Price string to normalise.
:return: Float price value.
"""
if price_str is None:
return None
price_str = price_str.strip('$ \t\n\r')
price_str = price_str.replace(',', '')
return float(price_str)
|
84af7c25c19bcbef690cc19554f01e6e284446f2
| 26,457
|
def line_intersect(a_p1, a_p2, b_p1, b_p2, tolerance = 0.001):
"""
Finds the intersection between two lines a and b defined by their respective endpoints p1 and p2
"""
# Check if lines intersect
if a_p1[0] > b_p1[0] and a_p1[0] > b_p2[0] and a_p2[0] > b_p1[0] and a_p2[0] > b_p2[0]: return False
if a_p1[0] < b_p1[0] and a_p1[0] < b_p2[0] and a_p2[0] < b_p1[0] and a_p2[0] < b_p2[0]: return False
if a_p1[1] > b_p1[1] and a_p1[1] > b_p2[1] and a_p2[1] > b_p1[1] and a_p2[1] > b_p2[1]: return False
if a_p1[1] < b_p1[1] and a_p1[1] < b_p2[1] and a_p2[1] < b_p1[1] and a_p2[1] < b_p2[1]: return False
# Get diffs along each axis
x_diff = (a_p1[0] - a_p2[0], b_p1[0] - b_p2[0])
y_diff = (a_p1[1] - a_p2[1], b_p1[1] - b_p2[1])
def det(a, b):
return a[0] * b[1] - a[1] * b[0]
# Find the intersection
div = det(x_diff, y_diff)
if div == 0: return False
d = (det(*(a_p1, a_p2)), det(*(b_p1, b_p2)))
x = det(d, x_diff) / div
y = det(d, y_diff) / div
# Check if intersection exceeds the segments
if x < min(a_p1[0], a_p2[0]) - tolerance: return False
if x > max(a_p1[0], a_p2[0]) + tolerance: return False
if y < min(a_p1[1], a_p2[1]) - tolerance: return False
if y > max(a_p1[1], a_p2[1]) + tolerance: return False
if x < min(b_p1[0], b_p2[0]) - tolerance: return False
if x > max(b_p1[0], b_p2[0]) + tolerance: return False
if y < min(b_p1[1], b_p2[1]) - tolerance: return False
if y > max(b_p1[1], b_p2[1]) + tolerance: return False
return x, y
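# Illustrative usage (added; the coordinates are assumed examples): crossing
# diagonals intersect at (1, 1); non-intersecting parallel segments give False.
assert line_intersect((0, 0), (2, 2), (0, 2), (2, 0)) == (1.0, 1.0)
assert line_intersect((0, 0), (1, 0), (0, 1), (1, 1)) is False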
|
bc6a6cf28a2351e0727274130de0053abd65c257
| 26,459
|
def _get_number_of_column_label(label):
"""
This function returns a number which corresponds to the label.
Example : 'A' -> 1 , 'Z' -> 26 , 'AA' -> 27 , 'BA' -> 53
Args :
label : Type-str
Denotes the label given to the column by sheets
Returns :
num : Type-int
Denotes the numbering of columns(1-indexed)
"""
num = 0
power_of_26 = 1
for i in range(len(label)-1,-1,-1):
value = ord(label[i]) - ord('A') + 1
num += power_of_26*value
power_of_26 = 26*power_of_26
return num
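# Runnable versions of the docstring examples (added for illustration):
assert _get_number_of_column_label('A') == 1
assert _get_number_of_column_label('Z') == 26
assert _get_number_of_column_label('AA') == 27
assert _get_number_of_column_label('BA') == 53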
|
578dd51f9ac397b079a8b8490837aeedf1000285
| 26,460
|
def model_flow_validation_kind_id_post(kind, id): # noqa: E501
"""validation of all kinds of objects
validation of all kinds of objects # noqa: E501
:param kind: what kind of object to validate
:type kind: str
:param id: id of the object to be validated
:type id: str
:rtype: object
"""
return 'do some magic!'
|
291f33e0ec01a63642e3ff211742238f6175a0fb
| 26,461
|
def get_fieldname_mapping():
"""Dictionary of fieldnames to modify."""
# It is easier to maintain fieldname mappings in a single location
new_names = {
'playerID': 'player_id',
'yearID': 'year',
'teamID': 'team_id',
'lgID': 'lg_id',
'2B': 'double',
'3B': 'triple',
'BAOpp': 'ba_opp',
'IPouts': 'ip_outs',
'InnOuts': 'inn_outs',
'franchID': 'franch_id',
'divID': 'div_id',
'Ghome': 'g_home',
'DivWin': 'div_win',
'WCWin': 'wc_win',
'LgWin': 'lg_win',
'WSWin': 'ws_win',
'teamIDBR': 'team_id_br',
'teamIDlahman45': 'team_id_lahman45',
'teamIDretro': 'team_id_retro',
'birthYear': 'birth_year',
'birthMonth': 'birth_month',
'birthDay': 'birth_day',
'birthCountry': 'birth_country',
'birthState': 'birth_state',
'birthCity': 'birth_city',
'deathYear': 'death_year',
'deathMonth': 'death_month',
'deathDay': 'death_day',
'deathCountry': 'death_country',
'deathState': 'death_state',
'deathCity': 'death_city',
'nameFirst': 'name_first',
'nameLast': 'name_last',
'nameGiven': 'name_given',
'finalGame': 'final_game',
'retroID': 'retro_id',
'bbrefID': 'bb_ref_id',
'park.key': 'park_key',
'park.name': 'park_name',
'park.alias': 'park_alias'
}
return new_names
|
cdfa8d06b37f9285790c3cf116aa0783c2b1f573
| 26,462
|
def get_recommendations_user_filtred_reduce(data):
"""
reduce for the get_recommendations_user_filtred function
"""
item, scores = data
ssim = 0
ssim_x_score = 0
for sim, sim_x_score in scores:
ssim += sim
ssim_x_score += sim_x_score
return (item, ssim, ssim_x_score)
|
c3abf750b2e73389cd1d146396a791eaa9e666d2
| 26,463
|
def base10_to_base16_alph_num(base10_no):
"""Convert base-10 integer to base-16 hexadecimal system.
This function provides a utility to write pdb/psf files such that it can
add many more than 9999 atoms and 999 residues.
Parameters
----------
base10_no: int
The integer to convert to base-16 hexadecimal system
Returns
-------
str
The converted base-16 system string
See Also
--------
mbuild.conversion._to_base: Helper function to perform a base-n conversion
"""
return hex(int(base10_no))[2:]
|
c73c170779735838b4f73fd9dd06ea444dac6ecf
| 26,465
|
def foo():
"""Do some stuff"""
return 1
|
b69fe45b8cf07c251e209ebefdf467e8a9a4c367
| 26,466
|
def remote_branch(stdin_first_line):
"""
Reads the name of the remote git branch from runtime parameters.
In the pre-push.py hook the name of the remote branch is passed as the $1 parameter.
:param stdin_first_line the first line of the standard input
>>> remote_branch("refs/heads/master a9d45baccd631601087a75a6605909c16bbfdbca refs/heads/master 67b6dc7a5e256ae590d305a766e627258b164899")
'master'
>>> remote_branch("refs/heads/master a9d45baccd631601087a75a6605909c16bbfdbca refs/heads/hot-fix 67b6dc7a5e256ae590d305a766e627258b164899")
'hot-fix'
"""
stdin_parts = stdin_first_line.split(" ")
remote = stdin_parts[2]
remote_parts = remote.split("/")
return remote_parts[-1]
|
c227bea23b19c0c3a9196003e9caf5e13d031730
| 26,467
|
def parse_prefixes(root):
"""Returns a dictionary of unit of measure prefixes."""
prefixes = {}
for node in root.find("{*}prefixSet"):
name = node.find('{*}name').text
prefixes[name] = dict(
symbol = node.find('{*}symbol').text,
multiplier = node.find('{*}multiplier').text,
)
common_name = node.find('{*}commonName')
if common_name is not None:
prefixes[name]['common_name'] = common_name.text
return prefixes
|
ee62160bf43f7096c6ee37f20bea78afad7522c7
| 26,468
|
import requests
import json
def get_role_description_from_galaxy_response(response: requests.Response) -> str:
"""
Extract description for a role from galaxy response
Args:
response (requests.Response): response from ansible galaxy (json body)
Returns:
str: description
"""
# Parse json response
try:
data = json.loads(response.text)
    except Exception:
data = {}
# description is in .data.repository.description
return data.get("data", {}).get("repository", {}).get("description", None)
|
a092da43d6f003b88f33e705da7f3e88341f08b7
| 26,471
|
def camera_name_from_calibrator(calibrator):
"""
Returns the name of the camera associated with the given camera calibrator.
"""
return calibrator.split('/')[2]
|
8f35c4987f02f5eb62101ac2c9af2edcb8a4746a
| 26,472
|
def urldecode(url):
"""Decode %7B/%7D to {}."""
return url.replace('%7B', '{').replace('%7D', '}')
|
91e2f969e59bc68004e1696434b5b0329012342f
| 26,473
|
def randpath(randstr, tmpdir):
"""Return a randomly generated nonexistant path"""
def _gen_randpath():
return tmpdir / randstr()
return _gen_randpath
|
8e3a5ed3e9efd6c3adfa084a3b2473d36fcec31d
| 26,474
|
def host(url):
"""Функция парсинга URl"""
result = [url]
if url is None:
return ""
if "@" in url:
result[0] = result[0].split("@")[1]
if "http" in url:
result[0] = result[0].split("/")[2]
if ":" in url:
result[0] = result[0].split(":")[0]
if "." in result[0]:
return result[0].split("/")[0]
return ""
|
09406ddcae52c25de7a3faf7d01c8692fbc4364e
| 26,475
|
def gearinches(chainring, cassette_range, wheel_diameter_inch=26.5):
"""Convert gear ratios into gearinches"""
return chainring / cassette_range * wheel_diameter_inch
|
2f5e63713e17632a7f5f90607b1d2087e433b657
| 26,476
|
import re
def remove_spaces_lines(text):
"""
Normalize text
Remove & Replace unnessary characters
Parameter argument:
text: a string (e.g. '....
New York N.Y is a city...')
Return:
text: a string (New York N.Y is a city.)
"""
    text = re.sub(r'[\n\s\t_]+', ' ', str(text))
return text
|
e15a8bdf8989ad29d602702b5f7b483c217a4c5c
| 26,477
|
def parse_gtf_info_field(info_str):
""" Parse gtf info string into a dictionary
Args:
info_str (str): info field from gtf
Return:
{key, value for field in info}
"""
d = {}
for pair in info_str.split('; '):
key, value = pair.split(' ')
d[key] = value.strip('"')
return d
|
bfb94655afabea3674105884d4d9b4da7eb861f3
| 26,478
|
import click
def token_path_option(
long="--token-path",
short="-tp",
name="token_path",
required=False,
help_message="The path where the authentication token will be stored. For a normal use-case, this should not be needed.",
):
"""
token path option standard definition.
Use as decorator for commands.
"""
return click.option(
long,
short,
name,
required=required,
type=str,
help=help_message,
)
|
9ca42b305b1ca310b4f5ced380db7b4206cc21a8
| 26,479
|
def jogadaJogador(tab, escolhasimbolo):
"""
    Asks the player where they want to play, and also checks some invalid positions (because someone has already played there).
"""
i = int((input("Onde deseja jogar? (1-9) ")))
if (i < 1) or (i > 9):
print("Posição inválida. Escolha outra!")
return jogadaJogador(tab,escolhasimbolo)
elif (tab[i] != " "):
print("Essa posição já foi escolhida. Escolha outra!")
return jogadaJogador(tab,escolhasimbolo)
else:
return i
|
86df4bcbe27ef3440945f6fca659cb71c7799fbb
| 26,480
|
def getCapitalChargeFactor(interestRate, economicLifetime):
""" Computes and returns capital charge factor (inverse of annuity factor) """
CCF = 1 / interestRate - 1 / (pow(1 + interestRate, economicLifetime) * interestRate)
CCF = CCF.fillna(economicLifetime)
return CCF
|
7edf9002e0586624216e93d79cadf56027c228ad
| 26,481
|
def interp_cubic(x):
"""
src: http://stackoverflow.com/questions/1146281/cubic-curve-smooth-interpolation-in-c-sharp
"""
return (x * x) * (3.0 - (2.0 * x))
|
8cdad355acde03c5a99f0f34b56d5fda50c0b2f8
| 26,482
|
def average_position(pos1=(0.0, 0.0, 0.0), pos2=(0.0, 0.0, 0.0), weight=0.5):
"""
Returns the average of the two given positions. You can weight between 0 (first input) or 1 (second_input)
:param pos1: tuple, first input position
:param pos2: tuple, second input position
:param weight: float, amount to weight between the two input positions
:return: tuple
"""
return (
pos1[0] + ((pos2[0] - pos1[0]) * weight),
pos1[1] + ((pos2[1] - pos1[1]) * weight),
pos1[2] + ((pos2[2] - pos1[2]) * weight)
)
|
9d2fe664267fd89ad89013fa98c6118760fab0d3
| 26,483
|
def encode_complex(data, complex_names):
""" Encodes complex data to having arbitrary complex field names.
Encodes complex `data` to have the real and imaginary field names
given in `complex_numbers`. This is needed because the field names
have to be set so that it can be written to an HDF5 file with the
right field names (HDF5 doesn't have a native complex type, so
H5T_COMPOUND have to be used).
Parameters
----------
data : arraylike
The data to encode as a complex type with the desired real and
imaginary part field names.
complex_names : tuple of 2 str
``tuple`` of the names to use (in order) for the real and
imaginary fields.
Returns
-------
d : encoded data
`data` encoded into having the specified field names for the
real and imaginary parts.
See Also
--------
decode_complex
"""
# Grab the dtype name, and convert it to the right non-complex type
# if it isn't already one.
dtype_name = data.dtype.name
if dtype_name[0:7] == 'complex':
dtype_name = 'float' + str(int(float(dtype_name[7:])/2))
# Create the new version of the data with the right field names for
# the real and complex parts. This is easy to do with putting the
# right dtype in the view function.
return data.view([(complex_names[0], dtype_name),
(complex_names[1], dtype_name)])
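# Illustrative usage sketch (added; the array values and field names are
# assumed examples): view a complex128 array as a structured array whose
# fields hold the real and imaginary parts.
import numpy as np
_z = np.array([1 + 2j, 3 - 4j])
_encoded = encode_complex(_z, ('real', 'imag'))
assert _encoded['real'][0] == 1.0 and _encoded['imag'][1] == -4.0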
|
ed32626096d799fb0b5c678b01df4dc7e0645239
| 26,484
|
def extract_simplices(simplex_tree):
"""Extract simplices from a gudhi simplex tree.
Parameters
----------
simplex_tree: gudhi simplex tree
Returns
-------
simplices: List of dictionaries, one per dimension d. The size of the dictionary
is the number of d-simplices. The dictionary's keys are sets (of size d
+ 1) of the 0-simplices that constitute the d-simplices. The
dictionary's values are the indexes of the simplices in the boundary
and Laplacian matrices.
"""
simplices = [dict() for _ in range(simplex_tree.dimension()+1)]
for simplex, _ in simplex_tree.get_skeleton(simplex_tree.dimension()):
k = len(simplex)
simplices[k-1][frozenset(simplex)] = len(simplices[k-1])
return simplices
|
70c2dc9bf660c217449ecaeee24f15b5bfadacf7
| 26,485
|
def make_year_labels(years, yearly_data, format='%d (%s)'):
"""
Combine years with corresponding yearly data and return list of labels.
>>> make_year_labels([2005, 2006], [18, 29])
['2005 (18)', '2006 (29)']
>>> make_year_labels([2006, 2007], ['good', 'bad'], '%d was %s')
['2006 was good', '2007 was bad']
"""
return [format % (y, c) for (y, c) in zip(years, yearly_data)]
|
f543250b066765e211a5057ff21ed12d082b2f62
| 26,486
|
def _getbundleitems(bundle, nametoindexmap, itemsdict):
"""Get a list of the items in a bundle"""
bundleitems = []
descriptions = bundle['descriptions']
for i in range(len(descriptions)):
key = str(i)
value = descriptions[key]['value']
if value in nametoindexmap:
bundleitems.append(itemsdict[nametoindexmap[value]])
return bundleitems
|
443bc51dccd42bf06ecb602de15faa941a0c67f7
| 26,487
|
def make_a_tweet(revision, message, url):
"""
Generate a valid tweet using the info passed in.
"""
return revision + ': ' + message + ' | ' + url
|
19409f4f76c3ef7bdd09478f3a9fe1b1d8ed7017
| 26,488
|
import argparse
def parse_args():
"""Import command-line arguments"""
argparser = argparse.ArgumentParser()
argparser.add_argument("--width",type=int,default=32)
argparser.add_argument("--start",type=str,default="ripple-carry")
argparser.add_argument("--transforms",type=str,default="",
help="example: _LF@6_LF@4")
argparser.add_argument("--top_module",type=str,default="adder")
argparser.add_argument("--mapping",type=str,default="behavioral")
argparser.add_argument("--hdl_root",type=str,default=".")
return argparser.parse_args()
|
6f223c54c5c4509472573fc869ebc244672c96c2
| 26,489
|
def find_rank_type(ranks):
"""Find and return the rank type of the 3 ranks given
Rank type results:
1: no particularly interesting rank order, i.e. High Card
2: pair rank
4: straight
5: three of a kind
"""
ranks.sort()
if ranks[0] == ranks[1] == ranks[2]:
return 5
elif ranks[1] == ranks[2] or ranks[0] == ranks[1] or ranks[0] == ranks[2]:
return 2
elif (ranks[0] + 1) == ranks[1] and (ranks[1] + 1) == ranks[2]:
return 4
elif 14 in ranks and 2 in ranks and 3 in ranks:
return 4
else:
return 1
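# Illustrative checks (added; the rank triples are assumed examples, with 14
# standing for an ace):
assert find_rank_type([7, 7, 7]) == 5    # three of a kind
assert find_rank_type([9, 2, 9]) == 2    # pair
assert find_rank_type([4, 6, 5]) == 4    # straight
assert find_rank_type([14, 2, 3]) == 4   # ace-low straight
assert find_rank_type([2, 9, 13]) == 1   # high card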
|
656c93cfeebdebeba8341d87530c87f1155933bb
| 26,492
|
def function_(x):
"""
    Example function for this calculus topic:
f(x) = 3 * x^2 - 4 * x
"""
return 3 * x ** 2 - 4 * x
|
ab18e60c4d0c31993f1651d4792b4f017348c4be
| 26,493
|
def get_host_finding_details_hr(host_finding_detail):
"""
Prepare human readable json for "risksense-get-host-finding-detail" command.
Including basic details of host finding.
:param host_finding_detail: host finding details from response
:return: List of dict
"""
return [{
'Title': host_finding_detail.get('title', ''),
'Host Name': host_finding_detail.get('host', {}).get('hostName', ''),
'Ip Address': host_finding_detail.get('host', {}).get('ipAddress', ''),
'Source': host_finding_detail.get('source', ''),
'Network': host_finding_detail.get('network', {}).get('name', ''),
'Risk Rating': host_finding_detail.get('riskRating', '')
}, {}]
|
29c70762ff844d11e140fb9a9489a6706ad8c99b
| 26,495
|
def subtype(blocks, ratio):
"""Computes the blocks in a subtype"""
t = sum(ratio)
parts = []
parts.append(round(blocks * ratio[0]/t))
parts.append(round(blocks * ratio[1]/t))
parts.append(round(blocks * ratio[2]/t))
return parts
|
293983f2a7c14c586a96460134e4cb2530f5222f
| 26,497
|