content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def issue_statuses(metric, last_measurement) -> list[dict]:
    """Return the issue statuses of the last measurement that belong to the metric."""
    wanted_ids = metric.get("issue_ids", [])
    statuses = last_measurement.get("issue_status", [])
    return [entry for entry in statuses if entry["issue_id"] in wanted_ids]
|
23d2434727fa1b7a2e06fb621347270efd0627fc
| 12,050
|
def TriStrip(p):
    """Deprecated compatibility shim: delegate to ``p.triStrip()``.

    TriStrip is no longer a type; please use the triStrip() method of
    the Polygon type instead.
    """
    return p.triStrip()
|
6e1f3019acfc1c18deba69d11f3d27ead9d07ca6
| 12,051
|
def heroUnit(
        headline="",
        tagline="",
        buttonStyle="primary",
        buttonText="",
        buttonHref="#"):
    """Generate a heroUnit - TBS style.

    **Key Arguments:**
        - ``headline`` -- the headline text
        - ``tagline`` -- the tagline text for below the headline
        - ``buttonStyle`` -- the style of the button to be used
        - ``buttonText`` -- the text for the button
        - ``buttonHref`` -- the anchor link for the button

    **Return:**
        - ``heroUnit`` -- the heroUnit HTML string
    """
    # Use an explicit mapping instead of ``% locals()``: locals() silently
    # picks up every local name, so adding any helper variable would have
    # changed (or broken) the substitution.
    return """
    <div class="hero-unit" id=" ">
        <h1>%(headline)s</h1>
        <p>%(tagline)s</p>
        <p>
            <a href="%(buttonHref)s" class="btn btn-%(buttonStyle)s btn-large">
                %(buttonText)s
            </a>
        </p>
    </div>""" % {
        "headline": headline,
        "tagline": tagline,
        "buttonStyle": buttonStyle,
        "buttonText": buttonText,
        "buttonHref": buttonHref,
    }
|
42826a5d63023b1a87062205255f80a2c85dd0f6
| 12,052
|
def direction_finder(direction, exchange):
    """
    Uses the 'src' attribute of an "img" tag to find the direction of flow.
    In the data source small arrow images are used to show flow direction.
    """
    flow_by_arrow = {
        "/uflujpot.nsf/f90.gif": 1,    # flow from Argentina
        "/uflujpot.nsf/f270.gif": -1,  # flow to Argentina
    }
    flow = flow_by_arrow.get(direction)
    if flow is None:
        raise ValueError('Flow direction for {} cannot be determined, got {}'.format(exchange, direction))
    return flow
|
076f80729e34c472a8a12aac15007ed3b2a82eea
| 12,054
|
def is_valid_msisdn(msisdn):
    """A very basic msisdn validation check.

    Returns True when ``msisdn`` is a 12-character string starting with '+'.
    Uses startswith() rather than msisdn[0] so the empty string returns
    False instead of raising IndexError.
    """
    return msisdn.startswith("+") and len(msisdn) == 12
|
5a8865182f0ff21eafcd836e5ccc9497bd097f93
| 12,055
|
def which_fib(lst, n):
    """Return the 1-based position of ``n`` in the sequence ``lst``."""
    zero_based = lst.index(n)
    return zero_based + 1
|
5019442188151a9feb0bccbbcff1b4fcacb3a5c1
| 12,056
|
def load_text_validation_min(value):
    """Load text_validation_min: passthrough, the value is returned unchanged."""
    return value
|
777ef7221cd6f0153964fc1fe1d277bc514ed41c
| 12,057
|
def build_lines_data(lines) -> bytes:
    """
    Build the byte string to send to the server from the given lines.

    Each line is framed with a 4-digit hex length prefix (line length plus
    the 4 prefix bytes and the trailing newline), and the stream ends with
    the b"0000" terminator packet.

    :param lines: iterable of byte strings
    :return bytes: framed payload
    """
    chunks = []
    for payload in lines:
        chunks.append(f"{len(payload) + 5:04x}".encode())
        chunks.append(payload)
        chunks.append(b"\n")
    chunks.append(b"0000")
    return b"".join(chunks)
|
f5fd900c606b2e44454bbc15cab61e2b34809fab
| 12,058
|
def find_folder_on_path(path, target='.git', **kwargs):
    """
    Given a path, find the repo root by walking up the tree.

    The root is the first folder (starting from ``path`` itself, then each
    parent) that directly contains a ``target`` entry (default '.git').

    # Parameters
    path:pathlib.Path
        - the path to start searching from.
    target:str
        - name of the folder whose parent is considered the root.
        - Default - '.git'

    # Return
    The folder containing ``target`` if found, otherwise None.
    """
    for candidate in (path, *path.parents):
        if (candidate / target).exists():
            return candidate
    return None
|
317a061210bca432d128a82a8cc7e27b6aa4feee
| 12,059
|
def var_order(bdd):
    """Return a `dict` that maps each variable of ``bdd`` to its level.

    @rtype: `dict(str: int)`
    """
    levels = {}
    for name in bdd.vars:
        levels[name] = bdd.level_of_var(name)
    return levels
|
d9f526be705918bfff43d105750ce97d29e15e52
| 12,060
|
def get_camera_serial_and_firmware(console_text):
    """
    Scrape console text for serial and firmware info of connected RealSense devices.

    Args:
        console_text (str): console output listing the connected devices.

    Returns:
        list[dict]: one {"serial": ..., "firmware": ...} entry per device line.
    """
    devices = []
    for line in console_text.split("\n"):
        if "serial" not in line or "firmware" not in line:
            continue
        serial = None
        firmware = None
        for field in line.split(","):
            # Two comma-separated fields mention "serial"; skip the
            # "update"-related one.
            if "serial" in field and "update" not in field:
                serial = field.split(" ")[-1]
            elif "firmware" in field:
                firmware = field.split(" ")[-1]
        devices.append({"serial": serial, "firmware": firmware})
    return devices
|
9426522f3815d7e73c12b5081c6308733836528d
| 12,063
|
def is_abstract_method(method):
    """Return True when ``method`` is marked with @abstractmethod.

    @type: method: object
    @param: A method object.
    """
    flag = getattr(method, '__isabstractmethod__', False)
    return flag
|
09c83f409ec4d9fa1e00d6b60d81f78cb8c8dbd3
| 12,064
|
def _get_config_parameter(config, section, parameter_name, default_value):
"""
Get the parameter if present in the configuration otherwise returns default value.
:param config the configuration parser
:param section the name of the section
:param parameter_name: the name of the parameter
:param default_value: the default to propose the user
:return:
"""
return config.get(section, parameter_name) if config.has_option(section, parameter_name) else default_value
|
2431c37eab3396b79f4f9e649a7cc25fc27208dc
| 12,065
|
def make_bool(x_var):
    """Return True when ``x_var`` is positive, otherwise False."""
    return x_var > 0
|
ce2f4d51d6126df7723aa90b7b717d613feaad2f
| 12,068
|
def as_escaped_unicode_literal(
        text: str
) -> str:
    """Convert the given ``text`` into a string of escaped Unicode hexadecimal.

    Args:
        text (:obj:`str`): The string to convert.

    :rtype:
        :obj:`str`
        A string with each character of the given ``text`` converted
        into an escaped Python literal.

    Example:
        >>> as_escaped_unicode_literal('1.★ 🛑')
        '\\\\x31\\\\x2e\\\\u2605\\\\x20\\\\U0001f6d1'
    """
    pieces = []
    for ch in text:
        digits = format(ord(ch), 'x')
        # Pick the shortest escape form that can hold the code point.
        if len(digits) <= 2:
            pieces.append('\\x{:0>2}'.format(digits))
        elif len(digits) <= 4:
            pieces.append('\\u{:0>4}'.format(digits))
        else:
            pieces.append('\\U{:0>8}'.format(digits))
    return ''.join(pieces)
|
2fc72bb8875023d86561552e9c64e55f5a99dfc1
| 12,069
|
def compute_top_minus_index_spread(mean_returns,
                                   market_index):
    """
    Compute the difference between the mean returns of the top quantile
    and the market index.

    Parameters
    ----------
    mean_returns : pd.DataFrame
        DataFrame of mean period-wise returns by quantile.
        MultiIndex containing date and quantile.
        See mean_return_by_quantile.
    market_index: pd.DataFrame

    Returns
    -------
    mean_return_difference : pd.Series
    """
    # The top quantile is the largest label on the first index level.
    top_quantile = mean_returns.index.levels[0].max()
    top_returns = mean_returns.xs(top_quantile, level='factor_quantile')
    return top_returns - market_index
|
8da2a1c0d316bd78f5f6efdc93dfdac6ee81a2e7
| 12,070
|
def pytest_exception_interact(node, call, report):
    """
    Set a different exit code on uncaught exceptions.

    Ordinary assertion failures are passed through unchanged; any other
    exception records the configured exit code in the module-level
    ``unhandled_exit``.
    """
    global unhandled_exit
    exc_type = call.excinfo._excinfo[0]
    if exc_type == AssertionError:
        return report
    unhandled_exit = node.config.getoption('--unhandled-exc-exit-code')
|
0206a848c9324fe40fe1875e18a28f1b0e8f9def
| 12,072
|
def get_counter(data, base):
    """Decode the counter and position of a PointRoi entry.

    See setCounters() / getCounters() methods in IJ source, ij/gui/PointRoi.java.

    :param data: indexable byte sequence
    :param base: offset of the 4-byte entry in ``data``
    :return: (counter, position) tuple
    """
    # Byte 0 (data[base]) is not part of either value; the original read
    # it into an unused local, which has been removed.
    b1 = data[base + 1]
    b2 = data[base + 2]
    b3 = data[base + 3]
    counter = b3
    position = (b1 << 8) + b2
    return counter, position
|
fa960648edce84a465be3b588b265e14d817067d
| 12,073
|
import sys
import multiprocessing
def _is_spawning_semantics():
"""True if we are on spawning semantics."""
return sys.platform == 'win32' or \
multiprocessing.get_start_method(allow_none=True) == 'spawn'
|
6c69639577e5ef8874d1c8b415901d7f1fa65f97
| 12,074
|
import glob
def read_corpus():
    """Read the raw parallel corpus.

    Scans ./corpus/文言文翻译/* and pairs each source file with its
    translation file (same name with the "翻译" suffix).

    Returns:
        (src, tgt): lists of stripped source and target lines.
    """
    src = []
    tgt = []
    data_path = glob.glob("./corpus/文言文翻译/*")
    for p in data_path:
        # NOTE: paths are split on "/", so POSIX separators are assumed.
        # Renamed from ``dir`` to avoid shadowing the builtin.
        folder = "/".join(p.split("/")[:-1])
        name = p.split("/")[-1]
        if "翻译" not in name:
            continue
        # Found a translation file; the matching source file has the
        # same name without the trailing two characters ("翻译").
        tgt_name = name
        src_name = name[:-2]
        with open(folder + "/" + src_name) as fs:
            for line in fs.readlines():
                src.append(line.strip("\n").strip())
        with open(folder + "/" + tgt_name) as ft:
            for line in ft.readlines():
                tgt.append(line.strip("\n").strip())
    return src, tgt
|
cc225268c90be6fbfca59f36b81d2520a65c6f95
| 12,076
|
def M(*args, **kwargs) -> dict:
    """Join objects into one dict.

    Later arguments win; ``kwargs`` override everything.

    :param args: dicts are merged directly; other objects contribute
        their attribute dict (``vars(arg)``).
    :return: the merged dict.
    """
    result = {}
    for arg in args:
        if isinstance(arg, dict):
            result.update(arg)
        elif hasattr(arg, "__dict__"):
            # vars(arg) is the object's attribute mapping; the previous
            # dict(arg) call raised TypeError for plain objects.
            result.update(vars(arg))
    return result | kwargs
|
87cd7bfecc746f70e56b39239d66a63a93fe9a1c
| 12,078
|
from typing import Any
def _substitute_none_val(data: Any):
"""Trakt represents null-value as {}. Change it to None."""
if data == {}:
return None
if isinstance(data, list):
data = [_substitute_none_val(v) for v in data]
if isinstance(data, dict):
data = {k: _substitute_none_val(v) for k, v in data.items()}
return data
|
3995f49272117fce66afbf1087e8e4a2240b2392
| 12,079
|
def rivers_with_station(stations):
    """For a list of MonitoringStation objects, return the set of rivers that have a monitoring station."""
    return {station.river for station in stations}
|
a9af1a64acf0b5dbc89cecad83c3f654e77e74a8
| 12,080
|
def calculate_tax(income):
"""Implement the code required to make this function work.
Write a function `calculate_tax` that receives a number (`income`) and
calculates how much of Federal taxes is due,
according to the following table:
| Income | Tax Percentage |
| ------------- | ------------- |
| <= $50,000 | 15% |
| <= $75,000 | 25% |
| <= $100,000 | 30% |
| > $100,000 | 35% |
Example:
income = 30000 # $30,000 is less than $50,000
calculate_tax(income) # $30,000 * 0.15 = 4500 = $4,500
income = 80000 # $80,000 is more than $75,000 but less than $80,000
calculate_tax(income) # $80,000 * 0.25 = 20000 = $20,000
income = 210000 # $210,000 is more than $100,000
calculate_tax(income) # $210,000 * 0.35 = 73500 = $73,500
"""
tax = 0
if income <= 50000:
tax += (income *.15)
return tax
elif income >= 50000 and income <= 75000:
tax += (income * .25)
return tax
elif income >= 75000 and income <= 100000:
tax += (income * .30)
return tax
elif income >= 100000:
tax += (income * .35)
return tax
|
fce9ad9ac9d88b1821a1772b6cc5bb9259ba7ae5
| 12,081
|
import collections
def frequency(input_list):
    """
    Find the occurrences of elements in a list.
    Works with non-numeric elements as well.

    :param input_list: list or tuple
    >>> frequency([1, 2, 2, 3, 3, 3, 4, 4, 4, 4])
    Counter({4: 4, 3: 3, 2: 2, 1: 1})
    """
    counts = collections.Counter()
    counts.update(input_list)
    return counts
|
417dcd493da11f72609e4c4aa3c4edfb35e921aa
| 12,083
|
def array_to_dict(result_array, regions):
    """Convert an array ordered by region into a dict keyed by region.

    Arguments
    ---------
    result_array : array
        Results in region order (same order as ``regions``)
    regions : list
        List with all regions

    Returns
    --------
    result_dict : dict
        region -> value
    """
    return {region: result_array[position]
            for position, region in enumerate(regions)}
|
263e69179983eec525bba9f79f1ed02539e1d647
| 12,084
|
import os
import torch
import re
def get_visible_devices_global_ids():
    """Return the global indices of the visible CUDA devices.

    Falls back to all devices reported by torch when CUDA_VISIBLE_DEVICES
    is not set.
    """
    raw = os.environ.get("CUDA_VISIBLE_DEVICES")
    if raw is None:
        return list(range(torch.cuda.device_count()))
    # The variable may be "; "- or ", "-separated.
    return [int(token) for token in re.split('; |, ', raw)]
|
c6ca4d2c2eadbef1f4353ac70937cce34734aa03
| 12,085
|
def reverse(start, end):
    """Reverse the linked-list nodes strictly between ``start`` and ``end``.

    Rewires the links of the nodes following ``start`` (up to but not
    including ``end``) to point backwards, then reconnects ``start`` to
    the new head of that span.

    :param start: node just before the span to reverse
    :param end: node terminating the span (left untouched)
    :return: the node that used to follow ``start`` (now the span's tail)
    """
    old_head = start.next
    previous = end
    node = start.next
    while node != end:
        # Standard pointer reversal: remember the successor, flip the
        # link, then advance.
        following = node.next
        node.next = previous
        previous = node
        node = following
    start.next = previous
    return old_head
|
a68b32868a6b94bfc856624a2fbebb8d9c8d639a
| 12,086
|
def string_from_uuid(uuid_obj):
    """Returns standard hexadecimal string for uuid; same as str(uuid_obj).
    arguments:
       uuid_obj (uuid.UUID object): the uuid which is required in hexadecimal string format
    returns:
       string (36 characters: 32 lowercase hexadecimals and 4 hyphens)
    """
    return str(uuid_obj)
|
903f3b257854900f2b534658dbfa4e504812c67b
| 12,087
|
def create_setWeight(col: str, weight: str, lemex: str):
    """
    :return str: format of 'setweight(col, weight, lemex)' \n
    Check postgresql text search setweight function to see the syntax
    """
    return "setweight({}, '{}', '{}')".format(col, weight, lemex)
|
965b25a5ccb8f3c68ecc3742a512dd7c28dac0a4
| 12,091
|
import torch
def quat_from_xyz(xyz):
    """
    Construct 3D rotation from the imaginary component.

    The real part ``w`` is appended as the last component of the result.

    NOTE(review): w is computed as 1 - ||xyz||, not the usual
    sqrt(1 - ||xyz||^2) for a unit quaternion — confirm this linear form
    is intended.
    """
    w = (1.0 - xyz.norm()).unsqueeze(-1)
    # Guard: the norm must not exceed 1, otherwise w would be negative.
    assert bool((w >= 0).all()), "xyz has its norm greater than 1"
    return torch.cat([xyz, w], dim=-1)
|
df62a104cb2a983095576cc71241869d94c0efd9
| 12,092
|
from typing import List
from typing import Tuple
from typing import Union
def read_expected_file(path: str) -> List[Tuple[Union[str, float], ...]]:
    """Read and parse a resource file of whitespace-separated (label, value) rows."""
    converters = (str, float)
    with open(path, "r") as handle:
        return [
            tuple(conv(field) for conv, field in zip(converters, line.split()))
            for line in handle
        ]
|
1d5228edffd8fa2486b41187f4a10b24209e52fc
| 12,094
|
def no_space_conjunction(tag1, tag2):
    """True when the two tags are one number ('N') and one letter ('L'), in either order."""
    return (tag1, tag2) in (('L', 'N'), ('N', 'L'))
|
ec3fa6c2aa0f54cc2cd2f92676d932880d572506
| 12,095
|
def get_package_requirements(requirement_type: str):
    """
    Return the package names whose requirement tags include ``requirement_type``.

    The table maps each pip requirement string to the extras categories
    it belongs to.
    """
    requirements = {
        "numpy": ["core"],
        "requests": ["core"],
        "pytest": ["test"],
        "sphinx-rtd-theme>=0.5.0": ["test"],
        "imageio": ["encoders-image-tfhub"],
        "scikit-image": ["encoders-image-tfhub"],
        "soundfile": ["encoders-audio-tfhub"],
        "librosa": ["audio-encoder", "encoders-audio-tfhub"],
        "tensorflow": ["encoders-text-tfhub", "encoders-audio-tfhub", "encoders-image-tfhub", "encoders-text-tf-transformers", "encoders-text-tfhub-windows"],
        "tensorflow_hub": ["encoders-text-tfhub", "encoders-audio-tfhub", "encoders-image-tfhub", "encoders-text-tfhub-windows"],
        "tensorflow_text": ["encoders-text-tfhub"],
        "tf-models-official": ["encoders-text-tfhub", "encoders-text-tfhub-windows"],
        "bert-for-tf2": ["encoders-text-tfhub", "encoders-text-tfhub-windows"],
        "sentence-transformers": ["encoders-text-sentence-transformers"],
        "torch>=1.6.0": ["encoders-audio-pytorch", "encoders-text-torch-transformers", "encoders-text-sentence-transformers"],
        "fairseq": ["encoders-audio-pytorch"],
        "transformers": ["encoders-text-torch-transformers", "encoders-text-tf-transformers"],
        "moviepy": ["encoders-video"],
        "opencv-python": ["encoders-video"]
    }
    return [package for package, tags in requirements.items()
            if requirement_type in tags]
|
4c3607de3accb145eb7855915dedc61bbba41a90
| 12,096
|
from functools import reduce
def find_common_set_of_column_names(dfs):
    """Return a sorted list of the column names common to all data frames."""
    column_sets = [set(df.columns) for df in dfs]
    return sorted(reduce(set.intersection, column_sets))
|
77cf55ace97fbab3ad2e32c3656f5df883b2db7b
| 12,097
|
def get_long_description():
    """
    :return: The description for the plugin type.
    """
    description = """Infrastructure plugins use different techniques to identify the
remote operating system, HTTP daemon, Web Application Firewalls, remote users
and any other information that is related to the target web application but
is not in its source code.
"""
    return description
|
cbf895c9312d9336c33f9afffb7d1113e30ea093
| 12,098
|
def navigation(request):
    """Fetch data required to render the navigation menu.

    The main menu contains the list of user categories to choose from;
    they are copied from the request when the attribute is present.
    """
    context = {}
    _missing = object()
    # Sentinel getattr so that an explicit None attribute is still copied,
    # exactly like the original hasattr() check.
    user_categories = getattr(request, "user_categories", _missing)
    if user_categories is not _missing:
        context["user_categories"] = user_categories
    return context
|
5641d35f47d4ae6c7045db81ceaaebae12b541d9
| 12,099
|
def append_filename(filename, impute_dates, moving_average):
    """! Creates consistent file names for all output.

    A moving-average suffix takes precedence over the all-dates suffix.
    """
    if moving_average > 0:
        return f"{filename}_ma{moving_average}"
    if impute_dates:
        return f"{filename}_all_dates"
    return filename
|
3d4137ac023ed1470e20a78626803b29e9c14a0f
| 12,100
|
from typing import Optional
from typing import Union
from typing import Dict
def fix_level_type(
    level_type: Optional[Union[str, Dict]],
    engine: str = "eccodes",
) -> Optional[Union[str, Dict]]:
    """
    Convert a level-type shorthand into a dict of GRIB keys.

    Unrecognized strings, dicts, and None are passed through unchanged.

    Notes
    -----
    ``typeOfFirstFixedSurface`` is not working in the cfgrib engine, so
    "pl" maps to ``typeOfLevel`` there instead.
    """
    if level_type is None or isinstance(level_type, Dict):
        return level_type
    if level_type == "pl":
        if engine == "cfgrib":
            return {"typeOfLevel": "isobaricInhPa"}
        return {"typeOfFirstFixedSurface": 100}
    if level_type == "sfc":
        return {"typeOfLevel": "sfc"}
    if level_type == "ml":
        # "typeOfSecondFixedSurface": 255 intentionally omitted (as before).
        return {"typeOfFirstFixedSurface": 131}
    return level_type
|
e9a5a608554fa09d6445b03abc790cb787d06f27
| 12,103
|
def clean_up_whitespace_in_template_output(text):
    """Remove some excess whitespace from using Django templates for YAML."""
    kept = []
    for raw_line in text.split("\n"):
        # Truly empty lines are likely intentional, so keep them.
        if not raw_line:
            kept.append("")
            continue
        # A line that is only trailing whitespace is likely an artifact of
        # template tag formatting, so drop it entirely.
        stripped = raw_line.rstrip()
        if stripped:
            kept.append(stripped)
    cleaned = "\n".join(kept)
    return cleaned if cleaned.endswith("\n") else cleaned + "\n"
|
8428e5575f0a731d79ae796d3f20408e33a344e6
| 12,105
|
def _validate_spotlight_mode(the_sicd):
"""
Validate the spotlight mode situation.
Parameters
----------
the_sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
Returns
-------
bool
"""
if the_sicd.CollectionInfo is None or the_sicd.CollectionInfo.RadarMode is None \
or the_sicd.CollectionInfo.RadarMode.ModeType is None:
return True
if the_sicd.Grid is None or the_sicd.Grid.TimeCOAPoly is None:
return True
if the_sicd.CollectionInfo.RadarMode.ModeType == 'SPOTLIGHT' and \
the_sicd.Grid.TimeCOAPoly.Coefs.shape != (1, 1):
the_sicd.log_validity_error(
'CollectionInfo.RadarMode.ModeType is SPOTLIGHT,\n'
'but the Grid.TimeCOAPoly is not scalar - {}.\n'
'This cannot be valid.'.format(the_sicd.Grid.TimeCOAPoly.Coefs))
return False
elif the_sicd.Grid.TimeCOAPoly.Coefs.shape == (1, 1) and \
the_sicd.CollectionInfo.RadarMode.ModeType != 'SPOTLIGHT':
the_sicd.log_validity_warning(
'The Grid.TimeCOAPoly is scalar,\n'
'but the CollectionInfo.RadarMode.ModeType is not SPOTLIGHT - {}.\n'
'This is likely not valid.'.format(the_sicd.CollectionInfo.RadarMode.ModeType))
return True
return True
|
3b72e99e7f6e1bae17fbdced72cfaf741a4d1974
| 12,106
|
def mixed_radix_to_base_10(x, b):
    r"""Convert the `mixed radix`_ integer with digits `x` and bases `b` to base 10.

    Args:
        x (list): a list of digits ordered by increasing place values
        b (list): a list of bases corresponding to the digits

    Examples:
        Generally, the base 10 representation of the mixed radix number
        :math:`x_n\ldots x_1` where :math:`x_i` is a digit in place value
        :math:`i` with base :math:`b_i` is

        .. math::
            \sum_{i=1}^nx_i\prod_{j=i+1}^nb_j = x_n + b_nx_{n-1} + b_nb_{n-1}x_{n-2} + \cdots + b_n\cdots b_2x_1

        Convert 111 with bases :math:`(b_1,b_2,b_3)=(2,3,4)` to base 10:

        >>> mixed_radix_to_base_10([1,1,1], [2,3,4])
        17

    .. _mixed radix:
        https://en.wikipedia.org/wiki/Mixed_radix
    """
    # Horner-style accumulation: multiply by the next base, add the digit.
    total = x[0]
    for place in range(1, len(x)):
        total = total * b[place] + x[place]
    return total
|
a821ca5ee4a720a9c445c98b2bcd2905bd6d87cb
| 12,107
|
def _pprint_dict(seq, _nest_lvl=0):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather then calling this directly.
"""
fmt = u"{%s}"
pairs = []
pfmt = u"%s: %s"
for k, v in seq.items():
pairs.append(pfmt % (repr(k), repr(v)))
return fmt % ", ".join(pairs)
|
8d48fde0030785139c29b9739a4e056e2ffd176e
| 12,108
|
def apply_mask_v2(mask: str, address: str) -> str:
    """
    Overlay ``mask`` onto ``address``: where the mask character is "0" the
    address character is kept; any other mask character replaces it.

    DOCTEST
    >>> apply_mask_v2(mask='000000000000000000000000000000X1001X', address='000000000000000000000000000000101010')
    '000000000000000000000000000000X1101X'
    """
    merged = [address[pos] if bit == "0" else bit
              for pos, bit in enumerate(mask)]
    return "".join(merged)
|
1865b59317ed394bea4ea658f1acfd4277521b59
| 12,109
|
def _first_true_index(bools):
"""
Given a pandas Series of bools, returns the index of the first occurrence
of `True`. **Index-based, NOT location-based**
I.e., say x = pd.Series({0: False, 2: False, 4: True}), then
_first_true_index(x) will return index 4, not the positional index 2.
If no value in `bools` is True, returns -1.
"""
if not bools.any():
return -1
i_true = bools.idxmax()
return i_true
|
4d0cc2d2d3f53f5c73653aeafce7b6128a99d204
| 12,110
|
def sum_fibs(limit):
    """Sum the even Fibonacci numbers strictly below ``limit``.

    Args:
        limit (int): exclusive upper bound for the Fibonacci terms summed.

    Returns:
        int: the sum of even Fibonacci numbers below ``limit``.
    """
    previous, current = 0, 1
    total = 0
    while True:
        upcoming = previous + current
        if upcoming >= limit:
            break
        if upcoming % 2 == 0:
            total += upcoming
        previous, current = current, upcoming
    return total
|
7e515e46880240f670bfd47fb54ac72768aac841
| 12,111
|
def Tc(grav):
    """Calculate the gas critical temperature in °R.

    :param grav: gas specific gravity
    """
    # Empirical quadratic correlation; operation order kept identical.
    linear = 349.5 * grav
    quadratic = 74 * grav ** 2
    return 169.2 + linear - quadratic
|
3ad5f19b9868b05b77482db31080107723c3b303
| 12,112
|
def _Backward3a_T_Ps(P, s):
    """Backward equation for region 3a, T=f(P,s)

    IAPWS-IF97 backward equation: temperature [K] as a function of
    pressure P [MPa] and specific entropy s [kJ/kg·K] for region 3a.

    >>> "%.7f" % _Backward3a_T_Ps(20,3.8)
    '628.2959869'
    >>> "%.7f" % _Backward3a_T_Ps(100,4)
    '705.6880237'
    """
    # Exponent and coefficient tables for the region-3a T(P,s) backward
    # equation (33 terms). Do not edit by hand; values are transcribed
    # from the IF97 supplementary release tables.
    I=[-12, -12, -10, -10, -10, -10, -8, -8, -8, -8, -6, -6, -6, -5, -5, -5, -4, -4, -4, -2, -2, -1, -1, 0, 0, 0, 1, 2, 2, 3, 8, 8, 10]
    J=[28, 32, 4, 10, 12, 14, 5, 7, 8, 28, 2, 6, 32, 0, 14, 32, 6, 10, 36, 1, 4, 1, 6, 0, 1, 4, 0, 0, 3, 2, 0, 1, 2]
    n=[0.150042008263875e10, -0.159397258480424e12, 0.502181140217975e-3, -0.672057767855466e2, 0.145058545404456e4, -0.823889534888890e4, -0.154852214233853, 0.112305046746695e2, -0.297000213482822e2, 0.438565132635495e11, 0.137837838635464e-2, -0.297478527157462e1, 0.971777947349413e13, -0.571527767052398e-4, 0.288307949778420e5, -0.744428289262703e14, 0.128017324848921e2, -0.368275545889071e3, 0.664768904779177e16, 0.449359251958880e-1, -0.422897836099655e1, -0.240614376434179, -0.474341365254924e1, 0.724093999126110, 0.923874349695897, 0.399043655281015e1, 0.384066651868009e-1, -0.359344365571848e-2, -0.735196448821653, 0.188367048396131, 0.141064266818704e-3, -0.257418501496337e-2, 0.123220024851555e-2]
    # Reduced pressure (P* = 100 MPa) and reduced entropy (s* = 4.4 kJ/kg·K).
    Pr=P/100
    sigma=s/4.4
    suma=0
    # Polynomial sum with the offsets (+0.240, -0.703) from the IF97 form;
    # the 760 factor is the temperature scaling T* = 760 K.
    for i in range(33):
        suma+=n[i]*(Pr+0.240)**I[i]*(sigma-0.703)**J[i]
    return 760*suma
|
90e87dd41a590e4c138165d07e67416706c04aef
| 12,113
|
def sample_normalize(masked_counts, settings):
    """Normalize a UMI count table sample-wise.

    Each row (sample) is divided by the total count of its "copy-stable"
    columns — the columns whose value at the configured column-index level
    matches the configured filter values.

    Parameters
    ----------
    masked_counts : pandas.DataFrame
        Count table; its columns presumably carry a MultiIndex that
        includes the level named by settings["copyStableLevel"] —
        TODO confirm against callers.
    settings : dict
        Uses "copyStableLevel" (column-index level name) and
        "copyStableValues" (values to keep, or the string "none" to
        treat every column as copy-stable).

    Returns
    -------
    pandas.DataFrame
        ``masked_counts`` divided row-wise by the copy-stable totals.
    """
    filter_level = settings["copyStableLevel"]
    filter_values = settings["copyStableValues"]
    col = masked_counts.columns
    all_values = col.get_level_values(filter_level)
    # "none" sentinel: keep every column.
    if filter_values == "none":
        filter_values = all_values
    # Boolean mask over columns selecting the copy-stable ones.
    indexer = [c in filter_values for c in all_values]
    cns_counts = masked_counts.loc[:, indexer]
    # Per-sample totals over the copy-stable columns only.
    cns_totals = cns_counts.sum(axis=1)
    sample_normalized = masked_counts.div(cns_totals, axis=0)
    return sample_normalized
|
8d09bebe40327a6ef492a3ead4624de254b88eeb
| 12,114
|
def _parse_to_last_comment(comments):
"""Unpack to get the last comment (hence the -1) or give '' when there is none"""
return [(c[-1]['comment'] if hasattr(c, '__len__') else '') for c in comments]
|
ecde5f3d6df3278c5ac1e600241c1afd2f553a1b
| 12,116
|
def converte_int_char(inteiro):
    """Convert an integer to a character per the assignment's ASCII mapping.

    Mapping (via chr() with an offset, as required by the exercise):
    0 = '_', 1 = 'a', 2 = 'b', ..., 26 = 'z' and 27 = '.'.

    Args:
        inteiro (int): integer to convert to a character.

    Returns:
        [str]: character corresponding to the integer.
    """
    specials = {0: '_', 27: '.'}
    if inteiro in specials:
        return specials[inteiro]
    # 1..26 map onto 'a'..'z' (ASCII 97..122).
    return chr(inteiro + 96)
|
e7550dec4aa3e4a82729728befcb566e9d8315d0
| 12,117
|
def create_iterable_dataset(torch_transforms_module, pipeline_results):
    """
    Create a PyTorch iterable dataset that loads samples from pipeline results.

    :param torch_transforms_module: The imported torch.transforms module.
    :param pipeline_results: Pipeline results iterator yielding
        (image, density_map) pairs.
    :return: Dataset yielding (transformed float32 image, float32 density map) pairs.
    """
    to_tensor = torch_transforms_module.Compose([
        torch_transforms_module.ToTensor()
    ])

    class PipelineDataset:
        def __init__(self):
            self.images_and_density_maps = pipeline_results
            self.image_transform = to_tensor

        def __iter__(self):
            for image, density_map in self.images_and_density_maps:
                yield (self.image_transform(image.copy().astype("float32")),
                       density_map.copy().astype("float32"))

    return PipelineDataset()
|
1de2f8be910da07e1a63e177f59d9bf0467edbe1
| 12,118
|
def dtype(request, device):
    """Pytest fixture helper: run a test case in single and double precision."""
    precision = request.param
    return precision
|
8196d76b2edd4bd66253b675f4636b77208f7617
| 12,119
|
from io import StringIO
def download_image(session, url):
    """Download an image into an in-memory buffer.

    :param session: requests-like session object exposing ``get``.
    :param url: image URL to fetch.
    :return: binary file-like object positioned at the start of the image bytes.
    """
    # Local import keeps this fix self-contained; the module-level import
    # only brings in StringIO.
    from io import BytesIO
    dl = session.get(url, stream=True)
    # BytesIO, not StringIO: response .content is bytes, and writing bytes
    # into a StringIO raises TypeError.
    img = BytesIO()
    img.write(dl.content)
    img.seek(0)  # rewind to beginning
    return img
|
ed3ab31b0eb3419e34085585aeb742252713be51
| 12,121
|
def get_attr_info(key, convention, normalized):
    """Get information about the MMD fields.

    Input
    =====
    key: str
        MMD element to check
    convention: str
        e.g., acdd or acdd_ext
    normalized: dict
        a normalized version of the mmd_elements dict (keys are, e.g.,
        'personnel>organisation>acdd' or
        'personnel>organisation>separator')

    Returns
    =======
    required: int
        if it is required
    repetition_allowed: str ('yes' or 'no')
        if repetition is allowed
    repetition_str: str
        a longer string representation for use in the DMH
    separator: str
        sign for separating elements that can be repeated (e.g., ',' or ';')
    default:
        a default value for elements that are required but missing in the
        netcdf file
    """
    def lookup(field, default=''):
        # Swap the convention suffix for the requested metadata field.
        lookup_key = key.replace(convention, field)
        return normalized[lookup_key] if lookup_key in normalized else default

    max_occurs = lookup('maxOccurs')
    repetition_allowed = 'yes' if max_occurs not in ['0', '1'] else 'no'
    required = int(lookup('minOccurs', 0))
    separator = lookup('separator')
    default = lookup('default')
    repetition_str = lookup('repetition')
    return required, repetition_allowed, repetition_str, separator, default
|
53e6d389935fead65173c3fd66d3608daa9c7ebc
| 12,122
|
def unpad(x, pad_width):
    """
    Undo np.pad: slice off the padding described by ``pad_width``.

    ----------
    x : ndarray
        image array
    pad_width : tuple
        tuple fed to np.pad, i.e. (before, after) pairs per axis

    Returns
    -------
    ndarray
        unpadded img
    """
    # A trailing pad of 0 must become slice(before, None), since -0 would
    # produce an empty slice.
    slices = tuple(
        slice(before, -after if after else None) for before, after in pad_width
    )
    return x[slices]
|
bedab5694f287d1ccd84f4240306c9d18cb38b8a
| 12,123
|
import os
def ensurePath(root, dir_names):
    """Create a hierarchy of nested folders under ``root`` if they do not exist.

    Returns the resulting path string (each level appended with a
    trailing slash).
    """
    path = root
    for dir_name in dir_names:
        path = "{}{}/".format(path, dir_name)
        if not os.path.exists(path):
            os.makedirs(path)
    return path
|
3829c4f17e0b72c06d0130df7ef2d1b35a3dd8db
| 12,124
|
from pkg_resources import resource_filename
def get_ffxml_path():
    """
    Return the path where OpenMM ffxml forcefield files are stored in this package.

    Returns
    -------
    path : str
        The absolute path of the packaged 'ffxml' directory.
    """
    return resource_filename('openmmforcefields', 'ffxml')
|
29b9d3de56560ce6ddd690ffe944fe890c80f0ed
| 12,125
|
def toggle_doors(doors, skip):
    """Toggle every ``skip``-th door in ``doors`` in place and return it.

    A falsy entry represents a closed door and a truthy one an open door.
    If skip is 1, toggle door #1, #2, #3...; if skip is 2, toggle door
    #2, #4, #6...  Toggled doors are written back as 0 or 1.
    """
    for idx in range(skip - 1, len(doors), skip):
        doors[idx] = 0 if doors[idx] else 1
    return doors
|
7631bbc860c4bfa9a7c236a07ebfdb1092bd351c
| 12,126
|
import math
def presion_vapor_tmed(tmed):
    """
    Compute the saturation vapour pressure at the mean temperature, in kPa.

    :param tmed: mean temperature (°C)
    """
    exponent = 17.27 * tmed / (tmed + 237.3)
    return 0.6108 * math.exp(exponent)
|
968fc144037e57d688a8f0f5e19a0d7779e5bc9b
| 12,127
|
import subprocess
def dmenu(options, dmenu):
    """Run the ``dmenu`` shell command with ``options`` on stdin and return the choice."""
    proc = subprocess.Popen(dmenu,
                            shell=True,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    menu_input = "\n".join(options).encode("utf-8")
    stdout, _ = proc.communicate(menu_input)
    return stdout.decode("utf-8").strip("\n")
|
be627e15dbb62b017dee2cf707d7810d38d4854e
| 12,128
|
def count_times(func):
    """
    Count how many times each function name repeats.

    :param func: dictionary in the format (k, v): (name of brodmann area,
        list of tab-tab-separated strings whose second field is the
        function name)
    :return: dict mapping function name -> occurrence count
    """
    times = dict()
    for entries in func.values():
        for entry in entries:
            name = entry.split("\t\t")[1]
            times[name] = times.get(name, 0) + 1
    return times
|
0080eaf022f1de61cc9e4a06ffec1a87c5cc8980
| 12,131
|
def get_insert_cmd(insert_fields, table, update_cmd=None):
    """
    Create the insert command for an insertion into a PostgreSQL database,
    given the table and the fields to be inserted.

    Parameters
    ----------
    insert_fields: list of strs
        Fields of data that will be inserted
    table: str
        Name of the table being inserted into
    update_cmd:
        Update command to use on a conflict of the ID and event keys.
        If `None`, defaults to "DO NOTHING" and no update is performed.

    Returns
    -------
    (insert_cmd, template): the SQL command and the values template string.
    """
    conflict_action = "DO NOTHING" if update_cmd is None else update_cmd
    insert_str = ','.join(insert_fields)
    insert_cmd = f"INSERT INTO {table} ({insert_str}) VALUES %s ON CONFLICT (id,event) {conflict_action}"
    # These fields are stored as jsonb arrays and need an explicit cast.
    json_array_fields = {'urls', 'description_urls'}
    placeholders = [
        f"%({field})s::jsonb[]" if field in json_array_fields else f"%({field})s"
        for field in insert_fields
    ]
    template = f"({','.join(placeholders)})"
    return insert_cmd, template
|
d026911c0574bc2f8a52ede5bbb72b48d81272f8
| 12,132
|
def serialize(gtp_command, gtp_response):
    """Serialize a GTP response as a string.

    Needs the command we are responding to so we can match the sequence
    number.
    """
    status = '=' if gtp_response.success else '?'
    sequence = '' if gtp_command.sequence is None else str(gtp_command.sequence)
    return '%s%s %s\n\n' % (status, sequence, gtp_response.body)
|
32412766b3d1db92a8aae653dbf8b11a761e35b1
| 12,133
|
def list_entries():
    """Return the entries to be displayed in rofi (a single empty entry)."""
    return ['']
|
76223be8cc86d9b9592edd1849ac1a8862c4d01c
| 12,134
|
import copy
def add_missing_fields(og_layout, tab):
    """
    Return a copy of the layout with any missing fields appended.

    When nothing is missing, the original layout object itself is
    returned (no copy is kept).
    """
    working_copy = copy.deepcopy(og_layout)
    content = working_copy.get("content")
    missing_fields = tab.get_missing_fields(content)
    if not missing_fields:
        return og_layout
    tab.get_from_tabs(content)['fields'].extend(missing_fields)
    return working_copy
|
040f47606c5b89a52ed9defc4a9dbdda56c4cb2a
| 12,135
|
def next_power_2(x: int) -> int:
    """Return the smallest power of 2 greater than or equal to x.

    For x <= 1 the answer is 1 (2**0).  The bit-length trick alone gets
    x <= 0 wrong: (-1).bit_length() == 1, which would yield 2 instead
    of 1.
    """
    if x <= 1:
        return 1
    return 1 << (x - 1).bit_length()
|
f5361250c1a6ef8228adcfd0aeab478484e7239b
| 12,137
|
def security_group_exists(self, sg_id=None, name=None):
    """
    Checks if a security group already exists on this connection, by name or by ID.
    :param boto.ec2.EC2Connection self: Current connection.
    :param string sg_id: ID of the security group to check. Default : None.
    :param string name: Name of the security group to check. Default : None.
    :return: True if the security group is present, False otherwise
    :rtype: bool
    """
    if not sg_id and not name:
        # Nothing to look up.  The original fell through and implicitly
        # returned None, contradicting the documented bool return type.
        return False
    groups = self.get_all_security_groups()
    if sg_id:
        return sg_id in [sg.id for sg in groups]
    return name in [sg.name for sg in groups]
|
042cd05a550b139b3441613269f59a27ad028ea1
| 12,138
|
def get_approximate_cloud_underside(qc, z_tol=100.0):
    """
    Using 3D condensate field `qc` create a 2D array representing the
    underside height of the lowest cloudy point per column.

    Only columns whose cloud base lies within `z_tol` of the lowest
    cloud base found anywhere are kept; the rest are masked out.
    """
    # Height of the lowest cloudy (qc > 0) level in every column.
    cloud_base = qc.zt.where(qc > 0.0).min(dim="zt")
    # Keep only columns close (within z_tol) to the overall minimum base.
    near_minimum = cloud_base < z_tol + cloud_base.min()
    return cloud_base.where(near_minimum)
|
5605531095b47e9552ca00b852188f4189bf43fe
| 12,140
|
import os
def load_asins_list(folders):
    """folders is a list of strings.

    Return the ASINs found as .jpg files in the given folders, and record
    the total count in the module-level global `tot_count`.
    """
    # Collect every filename from every folder.
    filenames = []
    for folder in folders:
        for name in os.listdir(folder):
            filenames.append(name)
    # Keep only .jpg entries, stripping the extension to recover the ASIN.
    asins_list = [name.replace('.jpg', '') for name in filenames if '.jpg' in name]
    # Side effect kept from the original: publish the total as a global.
    global tot_count
    tot_count = len(asins_list)
    return asins_list
|
92b22821860c814efd4e9a33945f0d0c03044001
| 12,141
|
def to_int(x, error=0):
    """Convert argument to int, returning `error` when conversion fails."""
    try:
        result = int(x)
    except (ValueError, TypeError):
        return error
    return result
|
2c363a1d9125e396a76007d9986748b98130e1ab
| 12,142
|
def apply_format(data_frame, column_names, format_method):
    """Apply a formatting function to DataFrame columns and return it.

    For each label in `column_names` that is a string naming an existing
    column of `data_frame` (and provided `format_method` is callable),
    apply `format_method` element-wise to that column in place.  Labels
    that fail the checks are silently skipped.

    Parameters
    ----------
    data_frame : pandas.DataFrame
        DataFrame containing the data to be modified.
    column_names : list
        String labels of columns in `data_frame` to be modified.
    format_method : function
        Function applied to the values of each listed column.

    Returns
    -------
    pandas.DataFrame
        The passed-in DataFrame with the formatting changes applied.

    See Also
    --------
    pandas.apply

    Examples
    --------
    >>> data = pd.read_csv("data.csv")
    >>> data = apply_format(data, ['Wage'], money_format)
    """
    for label in column_names:
        usable = (
            isinstance(label, str)
            and label in data_frame
            and callable(format_method)
        )
        if not usable:
            continue
        data_frame.loc[:, label] = data_frame[label].apply(format_method)
    return data_frame
|
af8f09d57e1f48da79c576ae542bfc5cc6cd837b
| 12,143
|
def _GenerateInputs(emitter, registers, lanes_count, input_address, stride):
    """Build one input address register per lane.

    Lane 0 reuses `input_address`; every later lane gets a fresh general
    register holding the previous lane's address advanced by `stride`.
    Returns the list of per-lane address registers (empty for 0 lanes).
    """
    inputs = []
    previous = input_address
    for lane in range(lanes_count):
        if lane == 0:
            inputs.append(input_address)
            continue
        current = registers.GeneralRegister()
        emitter.EmitAdd(current, previous, stride)
        inputs.append(current)
        previous = current
    return inputs
|
3344826f6d36ff5a9e689e63693ba6e556ede47c
| 12,146
|
def isascii(s):
    """Return True if there are no non-ASCII characters in s, False otherwise.

    Note that, unlike the str.is* methods, the empty string yields True
    rather than False.

    >>> isascii('foo\tbar\rbaz\n')
    True
    >>> isascii('foo bar')
    True

    :param str s: The string to check for non-ASCII characters.
    """
    return all(ord(ch) < 128 for ch in s)
|
fca7863de21639818e074507e016574a957ff250
| 12,148
|
def safe_execute(default, exception, function, *args):
    """
    Inline try/except helper.

    Calls ``function(*args)`` and returns its result; when it raises
    ``exception``, returns ``default`` instead.

    Parameters
    ----------
    default : Object
        Value returned in case of failure.
    exception : Exception
        Type of exception you want to catch.
    function : function
        The function to execute.
    args
        Argument(s) of the function.

    >>> def foo(x, y): return x / y
    >>> safe_execute("What did you expect !", ZeroDivisionError, foo, 12, 0)
    'What did you expect !'
    >>> safe_execute("What did you expect !", ZeroDivisionError, foo, 12, 3)
    4.0
    """
    try:
        result = function(*args)
    except exception:
        result = default
    return result
|
aae61410c96741985cc2e03786bbc5c69ad80fa8
| 12,149
|
import time
def create_task_id():
    """
    Creates a new task ID from the current time in nanoseconds.

    Uses time.time_ns() instead of round(time.time() * 10**9): the float
    returned by time.time() cannot represent nanosecond precision, so the
    multiplication loses the low-order digits (see PEP 564).
    It is probably not too safe as a unique ID and will be refactored later.
    """
    return str(time.time_ns())
|
641b9cd2715c1ef065f0e1a8c71c09a8a880dbe2
| 12,150
|
def voltage_evq(energy, charge):
    """Usage: voltage_evq(energy, charge)

    Return the voltage V = E / Q for energy E and charge Q.
    """
    voltage = energy / charge
    return voltage
|
d8a347cd69e9a822bf8ac7c91437b60cabf2bb33
| 12,151
|
import os
import errno
def is_process_running(pid: int):
    """Check if there is a running process with `pid`.

    Signal 0 performs error checking only; no signal is actually sent.
    ESRCH means no such process.  EPERM means the process exists but we
    lack permission to signal it, so it still counts as running (the
    original re-raised in that case and misreported existing processes
    owned by other users).  Any other OSError is re-raised.
    """
    try:
        os.kill(pid, 0)
    except OSError as ex:
        if ex.errno == errno.ESRCH:
            return False
        if ex.errno == errno.EPERM:
            return True
        raise
    return True
|
95c4063beabb957e80f1d5ebec1556689c0b3330
| 12,152
|
def linear(x: float, target: float, span: float, symmetric=False) -> float:
    """Create a linearly sloped reward space.

    Args:
        x (float): Value to evaluate the reward space at.
        target (float): The value s.t. when x == target, this function
            returns 1.
        span (float): The value s.t. when x >= target + span, this
            function returns 0.
        symmetric (bool, optional): If true, the slope also applies when
            x is on the other side of target. Defaults to False.

    Returns:
        float: 1 at x == target, 0 at and beyond target + span, linearly
        interpolated in between.
    """
    if span == 0:
        # Degenerate case: the reward is an impulse at the target.
        return 1. if x == target else 0.
    delta = x - target
    if abs(delta) > abs(span):
        return 0.
    fraction = delta / span
    if fraction < 0 and not symmetric:
        return 0
    return 1 - abs(fraction)
|
b443d42ec2686830668d1287db9a8e8fda3f5df1
| 12,154
|
def parse_flask_rule(rule: str):
    """Parses a flask rule (URL), and returns an openapi compatible version of the url.

    Each ``<converter:name>`` segment becomes ``{name}``.  A plain
    ``<name>`` segment (no converter) also becomes ``{name}``; the
    original implementation only collected characters after a ``:`` and
    therefore produced ``{}`` for converter-less variables, which Flask
    allows.

    Raises ValueError when a ``<`` is never closed by ``>``.
    """
    parsed = []
    pos = 0
    length = len(rule)
    while pos < length:
        char = rule[pos]
        if char != "<":
            parsed.append(char)
            pos += 1
            continue
        # Find the matching '>' for this variable segment.
        end = rule.index(">", pos)
        inner = rule[pos + 1:end]
        # rpartition: with no ':' present, the name is the whole inner text.
        name = inner.rpartition(":")[2]
        parsed.append("{" + name + "}")
        pos = end + 1
    return "".join(parsed)
|
bb4b788887d4f3336d88e4d1d81bab04fa37f67a
| 12,156
|
def first_non_empty(items):
    """
    Return the first truthy item from the list.  If every item is falsy,
    fall back to the first item.  If there is no item at all, return the
    whole form (defaulting to []).
    """
    if not items:
        return items
    return next((item for item in items if item), items[0])
|
93dce672b5a28c094b8916c535aac2ae3622e890
| 12,159
|
def add_2_numbers(a, b):
    """
    Return the addition of the 2 numbers a and b.
    """
    total = a + b
    return total
|
fbc3a89fb16334594914ce9cdd388d240effc7ea
| 12,160
|
def determine_img_center(img, center_left=True, center_up=True, center=100):
    """
    Determine index ranges around the center of an image.

    Args:
        img: An opencv/numpy-style image, indexed [height, width, ...].
        center_left: For an odd width, push the center column to the
            left (True) or to the right (False).
        center_up: For an odd height, push the center row up (True) or
            down (False).
        center: Size of the window taken around the center; half of it
            extends to each side.

    Returns:
        A tuple of two (start, stop) pairs: the height range and the
        width range of the centered window.
    """
    height, width = img.shape[0], img.shape[1]
    if height % 2 == 0:
        center_height = height / 2
    elif center_up:
        # FIX: was `height - 1/2` (== height - 0.5) due to operator
        # precedence; the intended center row is (height - 1) / 2.
        center_height = (height - 1) / 2
    else:
        center_height = (height + 1) / 2
    if width % 2 == 0:
        center_width = width / 2
    elif center_left:
        center_width = (width - 1) / 2
    else:
        center_width = (width + 1) / 2
    half_center = center / 2
    first_dim_idx = center_height - half_center, center_height + half_center
    sec_dim_idx = center_width - half_center, center_width + half_center
    return first_dim_idx, sec_dim_idx
|
9c567a0b398d106089858553b502477b3195d7e4
| 12,162
|
def numba_update_requestable(requestable, r_ct, c_ct, c_f, c_cf, c_dt):
    """
    Zero out the `requestable` entries addressed by the parallel index
    sequences, i.e. calculates: requestable *= (1 - request) in place.
    Returns the (mutated) array.
    """
    for i0, i1, i2, i3, i4 in zip(r_ct, c_ct, c_f, c_cf, c_dt):
        requestable[i0, i1, i2, i3, i4] = 0
    return requestable
|
d1cf9c3a8c07fdfb4cab4d12733d1900e31ffd6a
| 12,163
|
def compute_bid(task, fog_node, phi_CPU=1.0, phi_RAM=1.0, phi_DISK=1.0):
    """Calculate the bid of the current fog node for the current task.

    Args:
        :param task (dataframe): Information about the current task;
            needs 'CPU', 'RAM', 'DISK' and 'valuation_coefficient'.
        :param fog_node (series): Information about the current node
            (not used by the current pricing model).
        :param phi_CPU (double): Price parameter for the node's CPU.
        :param phi_RAM (double): Price parameter for the node's RAM.
        :param phi_DISK (double): Price parameter for the node's DISK.
    Returns:
        [int]: The bid of the current fog node: the task's valuation
        coefficient minus the node's resource valuation of the task.
    """
    resource_valuation = (
        phi_CPU * task['CPU']
        + phi_RAM * task['RAM']
        + phi_DISK * task['DISK']
    )
    return task['valuation_coefficient'] - resource_valuation
|
5e4560454fd709d3e63a3c165da84ee71dd13a49
| 12,164
|
from typing import Callable
def get_piecewise_rollout(start_time: float, end_times: list, vaccination_rates: list) -> Callable:
    """
    Turn the vaccination rates and end times into a piecewise roll-out
    function of time.  Before (or at) `start_time`, and after the last
    end time, the rate is zero.
    """
    n_phases = len(vaccination_rates)

    def get_vaccination_rate(time, computed_values):
        if time <= start_time:
            return 0.
        # Number of phase boundaries already passed.
        # NOTE: `end_times < time` relies on array-like comparison
        # (e.g. a numpy array), as in the original.
        idx = sum(end_times < time)
        return vaccination_rates[idx] if idx < n_phases else 0.

    return get_vaccination_rate
|
fcea805aa563efe7cd6b2dd5b38b30d039cd7027
| 12,165
|
def analysis_kindex(analysis):
    """
    Return the analysis' keyword index ('kindex' stands for 'keyword
    index'): a dictionary of keywords by (namespace, name).  An empty
    dict is returned when the analysis has no index.
    """
    empty = {}
    return analysis.get("kindex", empty)
|
7f8c48cf0b2acdd28466827cea9ca512e30479d8
| 12,166
|
import pathlib
import os
import re
def get_long_description() -> str:
    """Return README.md with relative repository links made absolute.

    When both the GITHUB_REPOSITORY and GITHUB_SHA environment variables
    are set, relative link targets are rewritten to absolute GitHub blob
    URLs; otherwise the raw README content is returned unchanged.
    """
    readme = pathlib.Path("README.md").read_text()
    repository = os.environ.get("GITHUB_REPOSITORY")
    sha = os.environ.get("GITHUB_SHA")
    if repository is None or sha is None:
        return readme
    base_url = f"https://github.com/{repository}/blob/{sha}/"
    # Rewrite "](relative" link targets, leaving "](https..." untouched.
    return re.sub(r"]\((?!https)", "](" + base_url, readme)
|
261b653af247ee9ca5f44c4c2e76ca5efb4b11bd
| 12,167
|
from typing import Tuple
import argparse
import json
def get_args() -> Tuple[dict, str]:
    """ get_args.

    Parse the command line, load the JSON configuration file it names,
    and return the configuration.

    Return:
        Tuple[dict,str]: Args and name of file used
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("config_file", help="Name of configuration file")
    args = parser.parse_args()
    print(f"Loading experiment from: {args.config_file}\n")
    # Use a context manager so the file handle is closed promptly; the
    # original json.load(open(...)) leaked the handle.
    with open(args.config_file, "r") as config_fh:
        args_new = json.load(config_fh)
    return args_new, args.config_file
|
212ff3016a35e91c5ed4571090322b7b3606f880
| 12,168
|
def dst_main_directory(library: str) -> str:
    """
    Main directory for report files resulting from the reconciliation
    process, namespaced by library.
    """
    base = "./files"
    return f"{base}/{library}"
|
cea4ea7bcd2c37fd97302cb02185ac2090413a4d
| 12,172
|
from pathlib import Path
def find_file(search_file: str, search_path: str):
    """Search the sub-directories of `search_path` for `search_file` and
    return the path of the first match, or None when it is not found."""
    matches = Path(search_path).glob(f"**/{search_file}")
    return next(matches, None)
|
a776bb599c60d4bd532cdd05fe5e2b8b9ef5ccf0
| 12,175
|
import torch
def compute_scores(users_embeds: torch.Tensor, items_embeds: torch.Tensor, items_bias: torch.Tensor) -> torch.Tensor:
    """
    Score every item for every user: the dot product of user and item
    embeddings plus the per-item bias.

    Args:
        users_embeds(torch.Tensor): shape (batch_size, items_total, embed_dim)
        items_embeds(torch.Tensor): shape (items_total, embed_dim)
        items_bias(torch.Tensor): shape (items_total)
    Returns:
        scores(torch.Tensor): shape (batch_size, items_total)
    """
    dot_products = users_embeds.mul(items_embeds).sum(dim=-1)
    return dot_products + items_bias
|
8528963a23efef270b467ec6f039b5a8733d3b4f
| 12,176
|
def max_height(target):
    """
    Highest reachable y position for the given target area.

    Rationale: x is independent and some x velocity can be chosen for
    any initial y velocity (vy), so x is ignored.  An upward shot with
    velocity vy passes y == 0 again with velocity -vy, so the largest
    usable vy is the one that then reaches the lowest target row in a
    single step.  Its apex is 1 + 2 + ... + (vy - 1) = vy * (vy - 1) / 2.
    """
    lowest = -target[1][1]
    return lowest * (lowest - 1) // 2
|
feeba57a4abaf7359ff2bf32a6b95ebe818c2d62
| 12,177
|
def extract_fplus_include_file(code_line):
    """
    Extracts the included file path from an include statement line.

    Strips the '#include <fplus/' or '#include "' prefix, any remaining
    quotes and '>' characters, and finally the line's last character
    (its trailing newline).
    """
    stripped = code_line.replace("#include <fplus/", "")
    stripped = stripped.replace("#include \"", "")
    stripped = stripped.replace("\"", "").replace(">", "")
    # Drop the final character (the trailing newline of the code line).
    return stripped[:-1]
|
c969fcb633332b05aec93f2998065d34b2d99c2f
| 12,178
|
def merge_sort(lst):
    """Merge Sort: return a sorted list (stable, O(n log n)).

    Fixes the original merge loop, which compared with strict '<' in
    both branches: two equal front elements matched neither branch and
    the loop never terminated (e.g. merge_sort([1, 1]) hung).  Index
    cursors also replace the O(n) list.pop(0) calls.
    """
    if len(lst) <= 1:
        return lst
    mid = len(lst) // 2
    left = merge_sort(lst[:mid])
    right = merge_sort(lst[mid:])
    return _merge(left, right)


def _merge(left, right):
    """Merge two sorted lists into one sorted list (stable: ties take left first)."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
|
16bb2351efc133c11e0b6d51b7dae908260c0422
| 12,179
|
def check_def_has_docstring(def_docstring, context, is_script):
    """Exported definitions should have docstrings.
    ...all functions and classes exported by a module should also have
    docstrings. Public methods (including the __init__ constructor)
    should also have docstrings.
    """
    # Return convention (checker-style): an explicit value signals a
    # violation -- either a (offset, length) tuple or True -- while
    # falling through / returning None means the check passed or was
    # skipped.
    if is_script:
        return  # assume nothing is exported
    # `context` is the definition's source, e.g. "def name(...)"; the
    # second whitespace-separated token is the definition name.
    def_name = context.split()[1]
    if def_name.startswith('_') and not def_name.endswith('__'):
        return  # private, not exported
    if not def_docstring:
        # Missing docstring: report offset 0 and the first line's length.
        return 0, len(context.split('\n')[0])
    # NOTE(review): eval() turns the docstring source literal into its
    # string value; presumably safe because the input is a string-literal
    # token, but ast.literal_eval would be safer -- confirm before changing.
    if not eval(def_docstring).strip():
        # Docstring exists but is empty or whitespace-only.
        return True
|
d9bb7ff278102e78505d77b28eb42010d7a13338
| 12,180
|
def hello():
    """Return a greeting string."""
    greeting = "Greetings from Python!"
    return greeting
|
871175e50ee4e2d2ebd33829f1dff568cdfd0bb7
| 12,181
|
from typing import Tuple
import subprocess
import shlex
import re
def zsh_version() -> Tuple[int, ...]:
    """Try to guess zsh version, return (0, 0) on failure.

    Failure covers a missing `zsh` binary, a non-zero exit status, or
    version output without a recognizable "major.minor" number.
    """
    try:
        completed = subprocess.run(
            shlex.split('zsh --version'), check=True,
            stdout=subprocess.PIPE)
    except (FileNotFoundError, subprocess.CalledProcessError):
        return (0, 0)
    match = re.search(br'[0-9]+\.[0-9]+', completed.stdout)
    if match is None:
        return (0, 0)
    return tuple(int(part) for part in match.group(0).split(b'.'))
|
5e1ca9c55b005864d8eebff90f1bc15c74de82c0
| 12,182
|
def split_byte(x):
    """Split byte into groups of bits: (2 bits, 3 bits, 3 bits)"""
    high = x >> 6
    middle = (x >> 3) & 7
    low = x & 7
    return high, middle, low
|
7743fdf78c201dce66803ae0eb62cbdf58cccc7d
| 12,183
|
def total_histogram_diff(pixel_diff):
    """
    Sum up all histogram values of an image, weighted by their bin index.
    When used with the black/white pixel-diff image this gives the
    difference "score" of an image.
    :param pixel_diff: the black/white image containing all differences (output of imagecompare.pixel_diff function)
    :return: the total "score" of histogram values (histogram values of found differences)
    """
    total = 0
    for intensity, count in enumerate(pixel_diff.histogram()):
        total += intensity * count
    return total
|
93675cafcb82513513ddd5d078ab91e36c52de27
| 12,184
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.