content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def rescale_fidelity(fidelity, floor_fidelity, new_floor_fidelity):
    """Linearly rescale a fidelity so values are comparable across benchmarks.

    Maps 1 -> 1 and ``floor_fidelity`` (the random-guessing level) ->
    ``new_floor_fidelity``, then clamps the result to [0, 1].
    Ex: floor_fidelity=0.25, new_floor_fidelity=0.0 gives
    1 -> 1, 0.25 -> 0, 0.5 -> 0.3333.
    """
    slope = (1 - new_floor_fidelity) / (1 - floor_fidelity)
    rescaled_fidelity = slope * (fidelity - 1) + 1
    # clamp into [0, 1]
    return min(1.0, max(0.0, rescaled_fidelity))
from typing import List
from typing import Union
from typing import Optional
def linear_search(items: List, target: Union[int, str]) -> Optional[int]:
    """Return the index of the first occurrence of target in items, else None.

    Fix: use the ``value`` yielded by enumerate directly instead of
    re-indexing ``items[index]`` (the loop variable was unused).
    """
    for index, value in enumerate(items):
        if value == target:
            return index
    return None
def is_migration_initial(migration):
    """Return whether a migration is an initial migration.

    Initial migrations are those that set up an app or models for the first
    time, and should not depend on other migrations within the same app.
    Migrations on Django 1.9+ may declare themselves as explicitly initial
    (``initial = True``) or explicitly not initial (``initial = False``).

    Args:
        migration (django.db.migrations.Migration):
            The migration to check.

    Returns:
        bool:
        ``True`` if the migration appears to be an initial migration,
        ``False`` if it does not.
    """
    # NOTE: The general logic here is based on the checks done in
    # MigrationExecutor.detect_soft_applied.
    # Migration.initial was introduced in Django 1.9.
    initial = getattr(migration, 'initial', None)
    if initial is not None:
        # Bug fix: the original handled only ``initial is False`` and fell
        # through (returning None) when ``initial`` was True.
        return bool(initial)
    # No explicit declaration: a migration with any dependency within the
    # same app can't be initial.
    for dep_app_label, dep_app_name in migration.dependencies:
        if dep_app_label == migration.app_label:
            return False
    return True
def tidy_correlation(df, columns=None, keep_identity_correlations=False, abs_val_correlations=False):
    """Return pairwise feature correlations of ``df`` in tidy (long) form.

    Optionally restrict to ``columns``. With ``keep_identity_correlations``,
    self-correlations (feature vs itself) are retained. With
    ``abs_val_correlations``, an absolute-value column is added and used
    for sorting.
    """
    working = df.copy()
    if columns:
        working = working[columns]
    tidy = (working.corr()
                   .reset_index()
                   .melt(id_vars='index', var_name="Paired_Feature", value_name="Correlation")
                   .rename(columns={'index': 'Base_Feature'})
                   .sort_values(by='Correlation', ascending=False))
    if abs_val_correlations:
        tidy['Correlation_Absolute_Value'] = tidy['Correlation'].abs()
        tidy = tidy.sort_values(by='Correlation_Absolute_Value', ascending=False)
    if not keep_identity_correlations:
        tidy = tidy.query('Base_Feature != Paired_Feature')
    return tidy
def get_private_member(element, key):
    """Look up a private member of a mapping-like element.

    :param element: the requested object (indexable by string key)
    :param key: key of the private variable, without the leading underscore
    :return: the value stored under ``"_" + key``
    """
    private_key = "_" + key
    return element[private_key]
def get_wei(chain, accounts):
    """Return the wei balance for each address in ``accounts``.

    :param chain: populus chain interface (exposes ``chain.web3``)
    :param accounts: list of addresses
    :return: list of wei balances, in the same order as ``accounts``

    Cleanup: the original enumerated the list but then ignored the loop
    variable and re-indexed ``accounts[irun]``; replaced with a direct
    comprehension over the addresses.
    """
    web3 = chain.web3
    return [web3.eth.getBalance(account) for account in accounts]
def gen7CipherList(birthYear):
    """Return the sorted list of possible 7th CPR digits for a birth year.

    Input:
        birthYear, int, an integer indicating year of birth
    Output:
        list of single-character strings, ordered; empty list (with a
        printed warning) if birthYear is outside 1858-2057.
    """
    # Note: While one can speculate how CPR numbers will be extended to years
    # beyond 2057, it is currently not defined, hence the hard upper bound.
    if 1858 <= birthYear < 1900:
        return ['5', '6', '7', '8']
    if 1900 <= birthYear < 1937:
        return ['0', '1', '2', '3']
    if 1937 <= birthYear < 2000:
        return ['0', '1', '2', '3', '4', '9']
    if 2000 <= birthYear < 2037:
        return ['4', '5', '6', '7', '8', '9']
    if 2037 <= birthYear < 2057:
        return ['5', '6', '7', '8']
    # Typo fix in the warning message: "outisde" -> "outside".
    print("Warning. The birthyear", str(birthYear), "is outside the covered range from 1858 to 2057. Returning empty list.")
    return []
def find_offset(text, call):
    """Return the character offset of a function call within ``text``.

    ``call`` must expose ``lineno`` (1-based) and ``col_offset`` as on an
    ast node.
    """
    lines = text.split('\n')
    # Sum the full lines preceding the call's line; +1 per newline character.
    preceding = sum(len(line) + 1 for line in lines[:call.lineno - 1])
    return preceding + call.col_offset
def initialisethisboard(boardtype):
    """Instantiate ``boardtype`` and set up the standard starting position."""
    new_board = boardtype()
    new_board.setupnormalboard()
    return new_board
def find_legal_form(legal_forms, name):
    """Return the first legal form contained in ``name``.

    Args:
        legal_forms (list[str]): e.g. ['株式会社', '有限会社', '合同会社']
        name (str): e.g. 'TIS株式会社'
    Return:
        the first matching legal form, or '' when none is found
    """
    return next((form for form in legal_forms if form in name), '')
def _coerce_client_id(client_id):
    """Ensure the provided client ID is a byte string.

    A text string is encoded as UTF-8; anything that is neither str nor
    bytes raises TypeError.

    :param client_id: :class:`bytes` or :class:`str` instance
    """
    if isinstance(client_id, str):
        return client_id.encode('utf-8')
    if isinstance(client_id, bytes):
        return client_id
    raise TypeError('{!r} is not a valid consumer group (must be'
                    ' str or bytes)'.format(client_id))
import re
def hasReNumbers(inputString):
    """Return True if inputString contains at least one digit (regex-based)."""
    found = re.search(r'\d', inputString)
    return found is not None
import math
def bin_concat(int_1, int_2, int_1_byte=None, int_2_byte=None):
    """Concatenate two integers in binary.

    Each integer is zero-filled to a whole number of bytes -- either the
    explicit ``int_N_byte`` width, or the smallest byte count that holds its
    bits -- and the two bit strings are joined (``int_1`` on the left).

    Parameters
    ----------
    int_1 : int
        Left partition.
    int_2 : int
        Right partition.
    int_1_byte : int, optional
        Byte width for int_1; auto-sized when None (default).
    int_2_byte : int, optional
        Byte width for int_2; auto-sized when None (default).

    Returns
    -------
    int
        Concatenated value.
    """
    def _padded_bits(value, nbytes):
        bits = bin(value)[2:]
        if nbytes is None:
            nbytes = math.ceil(len(bits) / 8)
        return bits.zfill(nbytes * 8)

    combined = _padded_bits(int_1, int_1_byte) + _padded_bits(int_2, int_2_byte)
    return int(combined, base=2)
def apply_precip_ceiling(ds, ceiling):
    """Cap all precip values at ``ceiling``, uniformly across space and time.

    Parameters
    ----------
    ds : xr.Dataset
    ceiling : int or float

    Returns
    -------
    xr.Dataset
        Copy of ``ds`` where values above ``ceiling`` are replaced by it.
    """
    within_ceiling = ds <= ceiling
    return ds.where(within_ceiling, ceiling)
def first(nodes):
    """Return the first node in the given list, or None if it is empty."""
    if not nodes:
        return None
    return nodes[0]
def expectedCellId(data):
    """Return the expected cell index (``data[2]``), or -1 when invalid.

    The entry is invalid when the flag ``data[0]`` is falsy or the value
    ``data[1]`` is negative.
    """
    invalid = (not data[0]) or data[1] < 0
    return -1 if invalid else data[2]
def extract_relationtypes(urml_xml_tree):
    """Extract the allowed RST relation names and types from a URML XML file.

    Parameters
    ----------
    urml_xml_tree : lxml.etree._ElementTree
        lxml ElementTree representation of an URML XML file

    Returns
    -------
    relations : dict of (str, str)
        RST relation names as keys and relation types (either 'par' or
        'hyp') as values; ``rel`` elements without a ``type`` attribute
        are skipped.
    """
    relations = {}
    for rel in urml_xml_tree.iterfind('//header/reltypes/rel'):
        if 'type' in rel.attrib:
            relations[rel.attrib['name']] = rel.attrib['type']
    return relations
def is_palindrome(number):
    """Check whether a number reads the same forwards and backwards.

    :param number: the int to check.
    :returns: True if the number is a palindrome, else False.
    """
    digits = str(number)
    return digits == digits[::-1]
def time_float_to_text(time_float):
    """Convert a transcript time in seconds to 'H:MM:SS.ss' text."""
    hours, remainder = divmod(time_float, 3600)
    minutes, seconds = divmod(remainder, 60)
    return f'{int(hours)}:{int(minutes):02d}:{seconds:05.2f}'
from typing import Dict
from pathlib import Path
def find_csv_file(directory: str) -> Dict[str, Dict[str, str]]:
    """Locate every ``prioritization.csv`` under ``directory``.

    The two directory levels three and four above each file are assumed to
    be named ``CELLECT-<phenotype>`` and ``CELLECT-<method>``; the
    'CELLECT-' prefix (8 characters) is stripped. Results are returned as
    ``{phenotype: {method: path/to/file}}``.
    """
    file_dict: Dict[str, Dict[str, str]] = {}
    for csv_path in Path(directory).rglob('prioritization.csv'):
        (phenotype, method, __, __) = csv_path.parts[-4:]
        phenotype = phenotype[8:]  # drop the 'CELLECT-' prefix (8 chars)
        method = method[8:]
        file_dict.setdefault(phenotype, {})[method] = str(csv_path)
    return file_dict
def verbatim(self, start):
    """Find the end of a verbatim section.

    Arg:
        self (Builder): code constructor
        start (int): current position in code
    Returns:
        int: index location of end of verbatim (one before the next
        newline found at or after ``start``)
    """
    marker = self.code[start:start + 3]
    if marker != "___":
        self.syntaxerror(start, "verbatim start")
    newline_pos = self.code.find("\n", start)
    return newline_pos - 1
def _CheckTestHarnessImageEntityAndApiMessageEqual(entity, message):
    """Check whether an entity and an API message carry the same data.

    Args:
      entity: an instance of datastore_entities.TestHarnessImageMetadata.
      message: an instance of api_messages.TestHarnessImageMetadataMessage.
    Returns:
      A bool, whether the entity and message are considered equal.
    """
    # Note the asymmetric field names: entity.current_tags vs message.tags.
    pairs = (
        (entity.repo_name, message.repo_name),
        (entity.digest, message.digest),
        (entity.test_harness, message.test_harness),
        (entity.test_harness_version, message.test_harness_version),
        (entity.current_tags, message.tags),
        (entity.create_time, message.create_time),
    )
    return all(a == b for a, b in pairs)
from pathlib import Path
import gzip
def _open_xdf(filename):
    """Open an XDF file for reading, transparently handling gzip.

    Files named ``*.xdfz`` or ``*.xdf.gz`` are opened through gzip; the
    first four bytes must be the ``XDF:`` magic.

    Fixes vs. the original: the error message now actually interpolates the
    filename (the f-string had no placeholder), and the file handle is
    closed before raising so it is not leaked.
    """
    filename = Path(filename)  # convert to pathlib object
    if filename.suffix == ".xdfz" or filename.suffixes == [".xdf", ".gz"]:
        f = gzip.open(filename, "rb")
    else:
        f = open(filename, "rb")
    if f.read(4) != b"XDF:":  # magic bytes
        f.close()  # don't leak the handle on failure
        raise IOError(f"Invalid XDF file {filename!r}")
    return f
def objectsNotNone(*objs):
    """Return False if any of the given objects is None, else True.

    @param *objs: tuple of objects to check.
    """
    return all(obj is not None for obj in objs)
def checkIfErrorJSONResponse(retval):
    """Return True when ``retval`` is an instance of a class named
    ``ErrorJSONResponse`` (checked by class name, not identity)."""
    return type(retval).__name__ == "ErrorJSONResponse"
def remove_blank_lines(player_data_list):
    """Drop rows whose first element is the empty string.

    Args:
        player_data_list: player data list (each row indexable at [0])
    Returns:
        list with blank rows removed
    """
    return [row for row in player_data_list if row[0] != '']
def pretty_org_name(org_name):
    """Convert e.g. "homo-sapiens" to "Homo sapiens".

    Uppercases the first character and turns hyphens in the remainder
    into spaces.
    """
    head, tail = org_name[0], org_name[1:]
    return head.upper() + tail.replace("-", " ")
def make_subsequences(x, y, step=1, max_len=2 ** 31):
    """Create views of all prefixes (subsequences) of ``x`` with labels.

    For example with x=[1,2,3,4], y=[1,1,0,0], step=1 the result is
    ([[1],[1,2],[1,2,3],[1,2,3,4]], [1,1,0,0]). Only slices of x are
    taken, elements are not copied.

    Parameters
    ----------
    x : array [seq_length, n_features]
    y : numpy array of shape [n_samples]
        Target values. Can be string, float, int etc.
    step : int
        Step with which to subsample the sequence.
    max_len : int, default 2 ** 31
        Maximum length of each subsequence (longer ones are truncated
        at the front).

    Returns
    -------
    a, b : subsequences of x taken with the given step, and the labels
        assigned to these subsequences.
    """
    X, Y = [], []
    for end_idx in range(step - 1, len(x), step):
        begin = max(0, end_idx - max_len)
        X.append(x[begin:end_idx + 1])
        Y.append(y[end_idx])
    return X, Y
import re
def _IsLocation(zone_or_location):
    """Return a truthy Match when "zone_or_location" looks like a location
    (lowercase letters optionally followed by one digit), else None."""
    location_re = re.compile(r'[a-z]+[0-9]?$')
    return location_re.match(zone_or_location)
def region_append_without_whitespace(book, rclass, start, end, *extra):
    """
    Shrink the region (start, end) until there is no whitespace either end of
    the region. Then if it is non-zero, append the region to (rclass)
    Return true iff a region is added.

    NOTE(review): the return value is not uniformly boolean -- early exits
    return None, and an out-of-range start returns a tuple; callers appear
    to rely only on truthiness of the result. Confirm before tightening.
    """
    # Nothing to do without a start position.
    if start is None:
        return
    if start >= len(book['content']):
        return (-1, -1) + extra # Outside the range, return something that will get ignored
    # Advance start past leading whitespace.
    while book['content'][start].isspace():
        start += 1
        if start >= len(book['content']):
            # Fallen off the end of the book, this isn't a useful region
            return
    # Retreat end past trailing whitespace (end is exclusive, hence end - 1).
    while book['content'][end - 1].isspace():
        end -= 1
        if end < 0:
            # Fallen off the start of the book, this isn't a useful region
            return
    if end > start:
        # Lazily create the region list for this class, then record the span.
        if rclass not in book:
            book[rclass] = []
        book[rclass].append((start, end) + extra)
        return True
    return False
import math
def distance(p1, p2):
    """Compute the Euclidean distance between two 2-D points."""
    (x1, y1), (x2, y2) = p1, p2
    dx = x2 - x1
    dy = y2 - y1
    return math.sqrt(dx**2 + dy**2)
def get_sample_content(filename):
    """Return the contents of ``tests/xml/<filename>`` as a string.

    Fix: the format template had no ``{filename}`` placeholder, so the
    argument was silently ignored and the literal path was opened.
    """
    with open("tests/xml/{filename}".format(filename=filename),
              encoding="utf-8") as file:
        return file.read()
def validate(lat, lon):
    """Return True when (lat, lon) parses as floats inside the Alaska
    bounding box (lat 51.229..71.3526, lon -179.1506..-129.9795)."""
    try:
        lat_value = float(lat)
        lon_value = float(lon)
    except ValueError:
        return False
    in_lat_range = 51.229 <= lat_value <= 71.3526
    in_lon_range = -179.1506 <= lon_value <= -129.9795
    return in_lat_range and in_lon_range
import re
def _get_async_response_id(text):
    """Extract an async response message id.

    Arguments:
        text {str} -- text that may contain an async response id
            (e.g. "PENDING gn4RLF8M")
    Raises:
        ValueError -- when no id is present
    Returns:
        str -- async response id
    """
    stripped = text.strip()
    match = re.search(r"PENDING (\w+)", stripped)
    if match is None:
        raise ValueError("{} doesn't contain async response id".format(stripped))
    return match.group(1)
def upcase(string):
    """Return a copy of `string` with all alphabetic characters uppercased.

    :param string: string to uppercase.
    """
    uppercased = string.upper()
    return uppercased
import typing
def alignment_to_fasta(alignment: typing.Dict[str, str]) -> str:
    """Convert an alignment dict to FASTA format.

    Parameters
    ----------
    alignment
        dict of key: aligned sequence

    Returns
    -------
    fasta-formatted string (">key\\nsequence" records joined by newlines)
    """
    records = (f">{key}\n{sequence}" for key, sequence in alignment.items())
    return "\n".join(records)
def fahr_to_kelv(temperature):
    """Convert a temperature from Fahrenheit to Kelvin."""
    ratio = 5. / 9.
    kelvin = ratio * (temperature - 32.) + 273.15
    return kelvin
def readFile(name):
    """Read a text file in its entirety.

    Args:
        name (str): the name of the file
    Returns:
        str: the content of the file.
    """
    with open(name, "rt") as f:
        return f.read()
def get_table_meta(source_name, config):
    """Find and return the table meta matching ``source_name``.

    Sources containing "_A_" are looked up in config['Annex'], others in
    config['Tables']. A key matches when ``source_name`` ends with the key
    with hyphens replaced by underscores. Returns None when nothing matches.
    """
    table_defs = config['Annex'] if "_A_" in source_name else config['Tables']
    for chapter in table_defs:
        for key, meta in table_defs[chapter].items():
            if source_name.endswith(key.replace("-", "_")):
                return meta
def trusted_division(a, b):
    """Return the quotient of a and b as a float.

    Used in tests as a 'trusted' reference implementation of division.
    """
    numerator = a * 1.0
    return numerator / b
def _gc(seq: str) -> float:
    """Return the fraction of characters in ``seq`` that are 'G' or 'C'."""
    gc_count = seq.count("G") + seq.count("C")
    return float(gc_count) / float(len(seq))
def compare(days, var_A=0, var_B=0, rw_pool_A=0, rw_pool_B=0, rw_pool_AB=0, fees_AB=0):
    """Compare buy&hold, separate staking, and LP farming for two assets.

    Accounts for impermanent loss, fees earned, and farming/staking rewards.

    Args:
        days (int): days for strategy
        var_A (float, optional): percentual variation of token A (10 = 10%)
        var_B (float, optional): percentual variation of token B (10 = 10%)
        rw_pool_A (float, optional): % rewards per day, single-asset pool A
        rw_pool_B (float, optional): % rewards per day, single-asset pool B
        rw_pool_AB (float, optional): % rewards per day, two-asset farm (LP AB)
        fees_AB (float, optional): % liquidity-provider fees earned per day

    Returns:
        dict: percentual returns for each strategy ('buy_hold', 'stake',
        'farm') plus the better of stake/farm under 'Best'.
    """
    buy_hold = (0.5 * var_A + 0.5 * var_B) / 100
    price_ratio = (var_A / 100 + 1) / (var_B / 100 + 1)
    impermanent_loss = 2 * (price_ratio ** 0.5 / (1 + price_ratio)) - 1
    stake = buy_hold + 0.5 * days * (rw_pool_A / 100 + rw_pool_B / 100)
    farm = buy_hold * (1 + impermanent_loss) + days * (rw_pool_AB / 100 + fees_AB / 100)
    best = 'Farm' if farm > stake else 'Stake'
    return {'buy_hold': f'{buy_hold:.2%}', 'stake': f'{stake:.2%}', 'farm': f'{farm:.2%}', 'Best': best}
def secs2ms(seconds: int) -> int:
    """Convert seconds to milliseconds."""
    ms_per_second = 1000
    return seconds * ms_per_second
def search2path(search_string):
    """Turn a search string into a filesystem-friendly path entry for figures.

    ':' becomes '_', '=' is dropped, and ', ' separators become '_'.
    """
    replacements = ((':', '_'), ('=', ''), (', ', '_'))
    for old, new in replacements:
        search_string = search_string.replace(old, new)
    return search_string
def ParseDurationToSeconds(duration):
    """Parse a duration of the form HH:MM:SS into a number of seconds.

    Args:
        duration: a string such as '12:43:12' (12 hours, 43 minutes,
            12 seconds).
    Returns:
        An integer number of seconds.
    """
    hours, minutes, seconds = (int(part) for part in duration.split(':'))
    return 3600 * hours + 60 * minutes + seconds
from typing import Iterable
def replace_linear_combinations(list_of_3x3_matrices, force_constant_prefactor):
    """Resolve linear-combination entries in a list of 3x3 matrices.

    Each matrix element is either a number, or an iterable of
    ``(value, factor)`` pairs encoding a linear combination -- e.g.
    ``[[0.1, 0.3], [0.8, 0.7]]`` means ``0.1*0.3 + 0.8*0.7 = 0.59``.

    :return: a new list of 3x3 lists of numbers; every resolved value is
        multiplied by ``force_constant_prefactor``.
    """
    def _resolve(cell):
        # A nested iterable encodes sum(value * factor); scalars pass through.
        if isinstance(cell, Iterable):
            return sum(v * f for v, f in cell) * force_constant_prefactor
        return cell * force_constant_prefactor

    return [[[_resolve(cell) for cell in row] for row in matrix]
            for matrix in list_of_3x3_matrices]
def percent_to_float(s: str) -> float:
    """Parse a percentage string (e.g. '50%' or ' 25 ') into a float."""
    text = s.strip().removesuffix('%')
    return float(text) / 100.0
def label_incl(row):
    """Categorize a BMI row as Include, Implausible, or Only Wt or Ht.

    Rows flagged include_both are always "Include"; otherwise a row with
    an implausible weight or height is "Implausible", and anything else
    means only one of weight/height was available.
    """
    if row["include_both"] == True:
        return "Include"
    if (row["weight_cat"] == "Implausible") | (row["height_cat"] == "Implausible"):
        return "Implausible"
    return "Only Wt or Ht"
def rgba(color):
    """Return a 4-element list [red, green, blue, alpha] from a CSS color.

    Accepts 'rgb(r,g,b)' (alpha defaults to 1) or 'rgba(r,g,b,a)'.

    Bug fixes vs. the original: the alpha of an rgba() string is parsed as
    a float (int() raised on values like '0.5'), and the default alpha is
    appended only for rgb() input, so the result always has exactly four
    elements (the original produced five for rgba input).
    """
    if color.startswith('rgba'):
        parts = color[5:-1].split(',')
        values = [int(v) for v in parts[:3]]
        values.append(float(parts[3]))
    else:
        values = [int(v) for v in color[4:-1].split(',')]
        values.append(1)
    return values
def npt_prod_finished(job):
    """Return True when the NPT production output files both exist for ``job``."""
    required = ("trajectory-npt.gsd", "log-npt.txt")
    return all(job.isfile(name) for name in required)
import random
def roll_dice(probability):
    """Return 1 with the given probability, else 0 (Bernoulli sample)."""
    sample = random.uniform(0, 1)
    return 1 if probability >= sample else 0
def load_file(filename: str) -> list:
    """Read a file of seat locations.

    :param filename: location of seat input file
    :return: list of seat specification strings, one per line with the
        newline removed

    Fix: uses splitlines() instead of unconditionally chopping the last
    character of each line, so a final line without a trailing newline is
    no longer truncated.
    """
    with open(filename, 'r') as f:
        return f.read().splitlines()
def filter_numeric_column(df, col_name, num1, num2):
    """Filter a dataframe by the value of ``col_name`` in [num1, num2).

    :param df: Dataframe
    :param col_name: Column name
    :param num1: inclusive lower bound
    :param num2: exclusive upper bound
    :return: Filtered dataframe
    """
    in_range = (df[col_name] >= num1) & (df[col_name] < num2)
    return df[in_range]
def _check_all_keys_hit_entry(entry, n_atoms, base_key):
    """Return the keys of this partition that the routing entry fails to cover.

    :param entry: routing entry discovered (exposes ``mask`` and
        ``routing_entry_key``)
    :param n_atoms: the number of atoms this partition covers
    :param base_key: the base key of the partition
    :return: list of keys that the entry does not cover but should
    """
    return [base_key + atom_id
            for atom_id in range(n_atoms)
            if (entry.mask & (base_key + atom_id)) != entry.routing_entry_key]
def get_char_of_ascii_code(ascii_code: int) -> str:
    """Return the character for the given (decimal) ASCII code point."""
    character = chr(ascii_code)
    return character
def add_prefix(dict_like, prefix):
    """Prepend ``prefix`` to every key of a dict (or dict-like) container.

    Parameters
    ----------
    dict_like : dict (or similar)
        a dictionary or a container convertible via ``dict(...)``
    prefix : str
        the prefix string to be prepended to each key

    Returns
    -------
    prefixed_dict : dict
        A dict in which each key begins with the given prefix.
    """
    if not isinstance(dict_like, dict):
        try:
            dict_like = dict(dict_like)
        except Exception as e:
            raise ValueError("{0}\nCan't convert container to dict: "
                             "{1}".format(e, dict_like))
    prefixed = {}
    for key, value in dict_like.items():
        prefixed[prefix + key] = value
    return prefixed
def make_multidir_mdrun_string_for_hrem(multidir_directories,
                                        gromacs_path='gmx_mpi',
                                        plumed_file='empty_plumed.dat',
                                        deffnm='HREM',
                                        tpr_file='HREM.tpr',
                                        replex=100):
    """Build the ``mdrun`` command line for an HREM run with -multidir.

    Parameters
    -----------
    multidir_directories : list(str) or list(path)
        ordered list of replica directories; the first is the reference
        state, the last the most scaled replica
    gromacs_path : str, default=gmx_mpi
    plumed_file : str, default=empty_plumed.dat
    deffnm : str, default=HREM
    tpr_file : str, default=HREM.tpr
    replex : int, default=100
        after how many steps a swap shall be attempted

    Returns
    ---------
    str
        e.g. ``gmx_mpi mdrun -v -plumed empty_plumed.dat -replex 100 -hrex
        -dlb no -s HREM.tpr -deffnm HREM -multidir BATTERY/scaled0 ...``
    """
    dirs_joined = ' '.join(str(d) for d in multidir_directories)
    return (f'{gromacs_path} mdrun -v -plumed {plumed_file}'
            f' -replex {replex} -hrex -dlb no -s {tpr_file} -deffnm {deffnm}'
            f' -multidir {dirs_joined}')
def _complete(key_pb):
    """Determines whether a key protocol buffer is complete.
    A new key may be left incomplete so that the id can be allocated by the
    database. A key is considered incomplete if the last element of the path
    has neither a ``name`` or an ``id``.
    Args:
        key_pb (entity_pb2.Key): The key to check.
    Returns:
        boolean: :data:`True` if the key is complete (its last path element
        has an ``id`` or ``name``), otherwise :data:`False`.
        (The original docstring inverted this, claiming True for an
        *incomplete* key; the code returns True for a complete one.)
    """
    if key_pb.path:
        # Only the final path element determines completeness.
        element = key_pb.path[-1]
        if element.id or element.name:
            return True
    return False
def build_stop_word(stopword_filename):
    """Load a stop-word list from a newline-delimited UTF-8 file.

    :param stopword_filename: path of the stop-word file
    :return: list of stop words (one per line), or None when the file
        cannot be read (a warning is printed, preserving the original
        best-effort behaviour)
    """
    try:
        with open(stopword_filename, 'r', encoding='utf-8') as r:
            content = r.read()
    except (OSError, UnicodeDecodeError):
        # Narrowed from a bare ``except`` so programming errors propagate;
        # message preserved: "error while reading stop words".
        print('读取停用词异常')
        return None
    # The original built a redundant ``[word for word in ...]`` copy of the
    # split result; split() already returns a fresh list.
    return content.split('\n')
import json
def parse_input_file(input_filename):
    """Read ``input_filename`` as JSON and return its search parameters.

    Returns a tuple of (keywords, result_type lower-cased, proxies list --
    empty when not present in the JSON).
    """
    with open(input_filename, encoding="utf-8") as jsonfile:
        config = json.load(jsonfile)
    return config['keywords'], config['type'].lower(), config.get('proxies', [])
import random
def make_first_key(path, book_dict):
    """Pick a uniformly random key from ``book_dict``.

    ``path`` is unused but kept for API compatibility with callers.
    """
    keys = list(book_dict)
    return random.choice(keys)
import socket
def setup_socket() -> socket.socket:
    """Create the client socket used to connect to the processing server.

    The socket is set non-blocking because it is used in an async context.
    """
    client_socket = socket.socket()
    client_socket.setblocking(False)
    return client_socket
from typing import Dict
def handeye_metrics(population, environment) -> Dict:
    """
    Analyzes all possible transition in maze environment and checks if there
    is a reliable classifier for it.
    Note: knowledge with/without block only works if env has note_in_hand
    set to True
    Parameters
    ----------
    population
        list of classifiers
    environment
        handeye environment
    Returns
    -------
    Dict
        knowledge - percentage of transitions we are able to anticipate
            correctly (max 100)
        with_block - percentage of all transitions involving block -
            gripping, realising or moving block - we are able to anticipate
            correctly (max 100)
        no_block - percentage of all transitions not involving block -
            moving the gripper without block - we are able to anticipate
            correctly (max 100)
    """
    transitions = environment.env.get_all_possible_transitions()
    # Take into consideration only reliable classifiers
    reliable_classifiers = [c for c in population if c.is_reliable()]
    # Count how many transitions are anticipated correctly
    nr_correct = 0
    nr_with_block = 0
    nr_correct_with_block = 0
    # For all possible destinations from each path cell
    for start, action, end in transitions:
        p0 = start
        p1 = end
        # NOTE(review): a trailing '2' in a perception string presumably marks
        # a block-involving state -- confirm against the handeye encoding.
        if p0[-1] == '2' or p1[-1] == '2':
            nr_with_block += 1
        # A transition counts as known when any reliable classifier
        # anticipates it successfully.
        if any([True for cl in reliable_classifiers
                if cl.predicts_successfully(p0, action, p1)]):
            nr_correct += 1
            if p0[-1] == '2' or p1[-1] == '2':
                nr_correct_with_block += 1
    # Percentages; note these divide by zero if there are no transitions,
    # no block transitions, or only block transitions.
    return {
        'knowledge': nr_correct / len(transitions) * 100.0,
        'with_block': nr_correct_with_block / nr_with_block * 100.0,
        'no_block':
            (nr_correct - nr_correct_with_block) /
            (len(transitions) - nr_with_block) * 100.0
    }
def level_of_indent(state, indent):
    """Return the indentation level of ``indent``.

    indent: str
    precondition: ``state.indent_str`` is not None
    returns: the whole number of non-overlapping ``state.indent_str``
        units that fit in ``indent``
    """
    unit_width = len(state.indent_str)
    return len(indent) // unit_width
def validate_clientvpnendpoint_selfserviceportal(value):
    """
    Validate SelfServicePortal for ClientVpnEndpoint.
    Property: ClientVpnEndpoint.SelfServicePortal
    """
    valid_values = ("disabled", "enabled")
    if value in valid_values:
        return value
    raise ValueError(
        "ClientVpnEndpoint.SelfServicePortal must be one of: {}".format(
            ", ".join(valid_values)
        )
    )
import math
import logging
def __closest_power_of_two(number):
    """Return the power of two closest to ``number``.

    Example:
        >>> __closest_power_of_two(67)
        64
        >>> __closest_power_of_two(64)
        64

    Returns:
        Integer: closest power of two, or ``None`` (with the error logged)
        when ``number`` is not a positive number.
    """
    try:
        return int(math.pow(2, int(math.log(number, 2) + 0.5)))
    except (ValueError, TypeError) as err:
        # math.log raises ValueError for number <= 0 and TypeError for
        # non-numeric input. The original also caught IOError, which
        # math functions never raise; that dead handler is dropped and
        # the implicit None return is now explicit.
        logging.error(err)
        return None
def _set_domain_corner(ypoints, xpoints, grid_spacing):
    """
    Set domain corner to create a grid centred around (0, 0).

    Returns:
        Tuple[float, float]:
            (y, x) values of the bottom left corner of the domain
    """
    half_span_y = ((ypoints - 1) * grid_spacing) / 2
    half_span_x = ((xpoints - 1) * grid_spacing) / 2
    return 0 - half_span_y, 0 - half_span_x
import requests
import json
def get_sections(course):
    """Fetch the section list for a course from the umd.io API.

    Parameters:
        course (string): the course_id to query
    Returns:
        The parsed JSON response as a Python data structure
    """
    url = "https://api.umd.io/v1/courses/" + course + "/sections"
    response = requests.get(url)
    return json.loads(response.text)
def getFileNameWithNewExtension(input_file, format):
    """
    Return ``input_file`` with a new extension ``format`` applied by:
    (i) replacing the initial extension if there is one, OR
    (ii) just appending the new extension.

    The extension is replaced only when a ``.`` occurs within the last
    four characters of the name (i.e. an extension of at most three
    characters); otherwise the new extension is simply appended.

    Args:
        input_file: File name, with or without an extension.
        format: New extension, without the leading dot.

    Returns:
        str: The file name carrying the new extension.
    """
    base_name = input_file
    tail = base_name[-4:]
    found = tail.find(".")
    if found > -1:
        # Map the dot's index within the tail back to an index within the
        # whole name.  (The previous version computed len(base_name) + found,
        # which always sliced past the end and never stripped anything.)
        idx = len(base_name) - len(tail) + found
        base_name = base_name[:idx]
    return base_name + "." + format
def assert_(test, obj, msg=None):
    """
    Raise an error if the test expression is not true. The test can be a function that takes the context object.
    """
    # Evaluate callables against the context object; anything else is
    # treated as a plain truth value.
    passed = test(obj) if callable(test) else test
    if not passed:
        raise AssertionError(msg)
    return obj
from typing import Callable
from typing import Any
def get_display_name(function: Callable[..., Any]):
    """
    Get the display name for a step function based on the function name.
    Underscores will be replaced by spaces and each word will be capitalized.

    :param function: The callable for the step function
    :return: The presentation name as a string
    """
    words = function.__name__.split("_")
    return " ".join(words).title()
def str2int(x):
    """
    Convert string to integer
    """
    # Normalise through the string form first, matching int(str(x)).
    text_form = str(x)
    return int(text_form)
import inspect
def get_annotations(func):
    """
    Return dict for parameter name and type.

    Parameters
    ----------
    func : Callable
        A function to get parameter name and type.
        The type must be specified as a type hint.

    Returns
    -------
    Dict
        Parameter name and type tuple.  Parameters without a hint map to
        ``inspect._empty``.

    Examples
    --------
    >>> def test_func(a: int, b: str) -> int:
    ...     return a+1
    >>> get_annotations(test_func)
    {'a': <class 'int'>, 'b': <class 'str'>}
    """
    params = inspect.signature(func).parameters
    return {name: param.annotation for name, param in params.items()}
def is_seq(li):
    """return True if input is either a list or a tuple.
    """
    return isinstance(li, list) or isinstance(li, tuple)
def acceptIf(ev, cond):
    """Accept an event if a condition is True, else ignore it.

    :param ev: the event to accept or ignore
    :type ev: QEvent
    :param cond: the condition determining whether the event should be accepted or ignored
    :returns: whether the event was accepted or not
    """
    # Dispatch to the matching QEvent method based on the condition.
    (ev.accept if cond else ev.ignore)()
    return ev.isAccepted()
from typing import Union
def sround(
    x: Union[int, float, str], ndigits: int = 3, default: float = 0.0
) -> str:
    """Round the number and return as a string.

    Args:
        x (int|float|str): Number to format.
        ndigits (int): Number of digits to enforce.
        default (float): Default value if rounding fails.

    Returns:
        str: Rounded string representation, always with ``ndigits``
        decimal places.
    """
    try:
        x = round(float(x), ndigits=ndigits)
    except (ValueError, TypeError):
        # ValueError: unparsable string; TypeError: e.g. None.
        # (Previously only ValueError was caught, so sround(None) raised.)
        x = default
    return "{{:.{}f}}".format(ndigits).format(x)
import itertools
def peek(iterable):
    """
    Get the first value of an iterable without consuming it.

    Generalized: accepts any iterable (list, tuple, generator, iterator,
    ...) by normalising the argument through ``iter`` first — previously
    only objects that were already iterators worked.

    Args:
        iterable: An iterable

    Returns:
        The first value and an iterator to iterate all the values included
        the first one.  ``(None, [])`` when the iterable is empty.
    """
    iterator = iter(iterable)  # no-op for objects that are already iterators
    try:
        first = next(iterator)
    except StopIteration:
        return None, []
    return first, itertools.chain([first], iterator)
def get_nested_dict_from_list(in_list) -> dict:
    """Convert list ['a','b','c'] to a nested dict {'a':{'b':{'c':{}}}}

    Args:
        in_list ([list]): list to convert

    Returns:
        [dict]: list converted to nested dict
    """
    nested = {}
    # Wrap from the innermost key outwards.
    for key in in_list[::-1]:
        nested = {key: nested}
    return nested
def get_base_intervention(intervention_conf):
    """
    Maps `conf` to the configuration filename in `configs/simulation/intervention/` folder.

    Args:
        intervention_conf (dict): an experimental configuration.

    Returns:
        (str): filename in `configs/simulation/intervention/` folder.
    """
    # Newer runs carry the name directly; trust it when present.
    if "INTERVENTION_NAME" in intervention_conf:
        return intervention_conf['INTERVENTION_NAME']

    risk_model = intervention_conf['RISK_MODEL']

    # Old runs without a risk model: infer from the behavior parameters.
    if risk_model == "":
        if intervention_conf['N_BEHAVIOR_LEVELS'] > 2:
            return "post-lockdown-no-tracing"
        if intervention_conf['INTERPOLATE_CONTACTS_USING_LOCKDOWN_CONTACTS']:
            return "lockdown"
        return "no_intervention"

    # Digital tracing encodes its tracing order into the name.
    if risk_model == "digital":
        base = "bdt{}".format(intervention_conf['TRACING_ORDER'])
    else:
        base = risk_model

    if intervention_conf['MAKE_HOUSEHOLD_BEHAVE_SAME_AS_MAX_RISK_RESIDENT']:
        return base
    return "{}_wo_hhld".format(base)
def is_third_party_panoid(panoid):
    """
    Returns whether or not a panoid refers to a third-party panorama.
    """
    # NOTE(review): anything longer than 22 characters is treated as
    # third-party -- presumably first-party panoids are exactly 22 chars;
    # confirm against the upstream panoid format.
    official_length_limit = 22
    return len(panoid) > official_length_limit
def bmi(height, weight):
    """Returns the BMI of a client with the given values.
    Formula used is: BMI = (weight * 703) / height ^2 """
    raw = weight * 703 / height ** 2
    return round(raw, 1)
from typing import List
def list_minus(l: List, minus: List) -> List:
    """Returns l without what is in minus

    >>> list_minus([1, 2, 3], [2])
    [1, 3]
    """
    # Membership test against `minus` keeps support for unhashable items.
    return list(filter(lambda item: item not in minus, l))
import ast
def matches_namespace(node, namespace):
    """Determines if the ``ast.Call`` node corresponds to a matching namespace.

    Args:
        node (ast.Call): a node that represents a function call. For more,
            see https://docs.python.org/3/library/ast.html#abstract-grammar.
        namespace (str): the namespace.

    Returns:
        bool: if the node's namespaces matches the given namespace.
    """
    # Walk the attribute chain inner-to-outer, consuming the namespace
    # components from right to left.
    remaining = namespace.split(".")
    expected = remaining.pop()
    value = node.func.value
    while isinstance(value, ast.Attribute) and remaining:
        if value.attr != expected:
            return False
        expected = remaining.pop()
        value = value.value
    # The chain must terminate in a bare name matching the first component.
    return isinstance(value, ast.Name) and value.id == expected
def _get_id_from_value(permissions, value):
    """Get id from value for appRoles or oauth2PermissionScopes."""
    # https://docs.microsoft.com/en-us/graph/api/resources/serviceprincipal?view=graph-rest-1.0#properties
    # Lazily scan for the first permission whose 'value' matches; raises
    # StopIteration when no permission matches, as before.
    matching_ids = (perm['id'] for perm in permissions if perm['value'] == value)
    return next(matching_ids)
import csv
def get_one_stock_price(ticker, date, data_dir):
    """Gets the price of a stock at a certain date"""
    price = 0  # ($) -- default when the date is not present in the file
    with open(data_dir + ticker + '.csv', 'r') as csv_file:
        for row in csv.reader(csv_file):
            if row[0] == date:
                # Drop the first two characters of the price column before
                # parsing (presumably a "$ " prefix -- TODO confirm against
                # the data files).  The last matching row wins, as before.
                price = float(row[1][2:])  # ($)
    return price
def charge_deleted(charge):
    """
    We don't want to display a green check box to mean deleted.
    We use text instead to make sure there is no misinterpretation.
    """
    if charge.deleted:
        return 'Yes'
    return 'No'
import textwrap
import itertools
def rewrap(text, width=None):
    """
    Rewrap text for output to the console.

    Removes common indentation and rewraps paragraphs according to the console
    width.

    Line feeds between paragraphs preserved.

    Formatting of paragraphs that starts with additional indentation
    preserved.
    """
    width = 80 if width is None else width

    # Strip indentation shared by all lines.
    dedented = textwrap.dedent(text)

    def is_flush_left(line):
        # Lines from splitlines(True) are never empty ("\n" at minimum).
        return not line[0].isspace()

    chunks = []
    for flush_left, lines in itertools.groupby(dedented.splitlines(True),
                                               key=is_flush_left):
        paragraph = ''.join(lines)
        # Only re-flow paragraphs that are not additionally indented.
        if flush_left:
            paragraph = textwrap.fill(paragraph, width)
        chunks.append(paragraph)
    return ''.join(chunks)
def retrieve_id_list(list_of_ids):
    """
    Parses a text file with individual IDs on newlines and returns them as a list
    """
    with open(list_of_ids) as id_file:
        raw_lines = id_file.readlines()
    # Strip only the trailing newline so other whitespace is preserved.
    return [line.strip('\n') for line in raw_lines]
def get_ubids(chip_specs):
    """ Return all ubids from supplied chip-specs """
    ubids = []
    for spec in chip_specs:
        ubids.append(spec['ubid'])
    return ubids
import yaml
def read_yaml(path_to_yaml: str) -> dict:
    """
    Reads a yaml file and returns a dictionary
    """
    with open(path_to_yaml, 'r') as yaml_file:
        contents = yaml.safe_load(yaml_file)
    return contents
def build_transcript(inputs, result_lines):
    """Build a session transcript from a series of inputs and results."""
    entries = []
    for inp, res in zip(inputs, result_lines):
        # Each input is indented six spaces; results follow on their own lines.
        entry = " " * 6 + inp
        if res:
            entry += "\n" + "\n".join(res)
        entries.append(entry)
    return "\n".join(entries)
def get_range(padding, *args):
    """Get a [min, max] for all of the data passed as *args."""
    # A scalar padding applies symmetrically to both ends.
    if not isinstance(padding, tuple):
        padding = (padding, padding)
    flattened = [item for lst in args for item in lst]
    low, high = min(flattened), max(flattened)
    return [low - padding[0], high + padding[1]]
import time
async def bench_to_arrow(client):
    """Test how long it takes to create a view on the remote table and
    retrieve an arrow."""
    remote_table = client.open_table("data_source_one")
    view = await remote_table.view()
    # Time only the arrow retrieval, not the view creation.
    start = time.time()
    arrow = await view.to_arrow()
    elapsed = time.time() - start
    assert len(arrow) > 0
    return [elapsed]
def sexp_indent(s, tab=" "):
    """Indent an S-expression string.

    Args:
        s (string): S-expression string.
        tab (string, optional): Indentation string. Defaults to " ".

    Returns:
        string: Indented S-expression.
    """
    out_s = ""
    indent = ""
    nl = ""  # First '(' will not be preceded by a newline.
    in_quote = False
    backslash = False
    for c in s:
        if c == "(" and not in_quote:
            out_s += nl + indent
            nl = "\n"  # Every '(' from now on gets preceded by a newline.
            indent += tab
        elif c == ")" and not in_quote:
            indent = indent[len(tab) :]
        elif c == '"' and not backslash:
            in_quote = not in_quote
        # A backslash escapes the next character, but an escaped backslash
        # ("\\") does not escape the character following it.  (Previously
        # the flag stayed set across consecutive backslashes, so a closing
        # quote right after an escaped backslash was wrongly ignored.)
        backslash = c == "\\" and not backslash
        out_s += c
    return out_s
def listify(x):
    """
    Returns [] if x is None, a single-item list consisting of x if x is a str or bytes, otherwise returns x.

    listify(None) -> []
    listify("string") -> ["string"]
    listify(b"bytes") -> [b"bytes"]
    listify(["foo", "bar"]) -> ["foo", "bar"]

    :param x: What to listify.
    :return:
    """
    if isinstance(x, (str, bytes)):
        return [x]
    return [] if x is None else x
def rgbToStrhex(r, g, b):
    """r (int), g (int), b (int): rgb values to be converted
    Returned value (str): hex value of the color (ex: #7f866a"""
    return '#{:02x}{:02x}{:02x}'.format(r, g, b)
import re
def get_urls(inputfiles):
    """
    This function takes as input the list of files containing the hostnames
    and normalizes the format of the hostnames in order to be able to perform
    valid HTTP/HTTPS requests.

    Args:
        inputfiles -- list of inputfiles

    Returns:
        urls -- list of normalized URLs which can be queries
    """
    scheme_rgx = re.compile(r'^https?://')
    # Collect unique hostnames across all input files.
    hostnames = set()
    for ifile in inputfiles:
        hostnames.update(ifile.read().splitlines())
    urls = []
    for hostname in hostnames:
        if not hostname:
            continue  # drop blank lines
        if scheme_rgx.match(hostname):
            urls.append(hostname)
        else:
            # Default to plain HTTP when no scheme is present.
            urls.append('http://' + hostname)
    return urls
from typing import List
from typing import Tuple
def _to_physicist_index_order(facs: List[Tuple[int, str]]) -> Tuple[List[Tuple[int, str]], int]:
    """Reorder the factors `facs` to be two raising operators followed by two lowering operators and
    return the new factors and the phase incurred by the reordering. Note that `facs` are not in
    chemists' order, but rather sorted by index with least index first.

    Args:
        facs: a list of factors where each element is `(i, c)` where `i` is an integer index and
            `c` is either `-` or `+`.

    Returns:
        facs_out: A copy of the reordered factors or the input list (not a copy) if the factors are
            already in the desired order.
        phase: Either `1` or `-1`.

    Raises:
        ValueError: if `facs` does not represent a two-body interaction.
    """
    pattern = [fac[1] for fac in facs]
    if pattern == ["+", "+", "-", "-"]:
        # Already in physicists' order.
        return facs, 1
    if pattern == ["+", "-", "+", "-"]:
        # One transposition (swap the middle two factors) flips the sign.
        return [facs[0], facs[2], facs[1], facs[3]], -1
    if pattern == ["+", "-", "-", "+"]:
        # Two transpositions leave the sign unchanged.
        return [facs[0], facs[3], facs[1], facs[2]], 1
    raise ValueError("unexpected sequence of operators", facs)
def FindRecentBuilds(ab_client, branch, target,
                     build_type='submitted',
                     build_attempt_status=None,
                     build_successful=None):
    """Queries for the latest build_ids from androidbuild.

    Args:
        ab_client: The androidbuild API client.
        branch: The name of the git branch.
        target: The name of the build target.
        build_type: (Optional) The type of the build, defaults to 'submitted'.
        build_attempt_status: (Optional) Status of attempt, use 'complete' to look
            for completed builds only.
        build_successful: (Optional) Whether to only return successful builds.

    Returns:
        A list of numeric build_ids, sorted from most recent to oldest (in reverse
        numerical order.)
    """
    kwargs = {
        'branch': branch,
        'target': target,
    }
    # Only forward the optional filters that were actually provided.
    optional_filters = {
        'buildType': build_type,
        'buildAttemptStatus': build_attempt_status,
        'successful': build_successful,
    }
    kwargs.update((key, val) for key, val in optional_filters.items()
                  if val is not None)
    builds = ab_client.build().list(**kwargs).execute().get('builds')
    # Build ids arrive as numeric strings; sort numerically, newest first.
    return sorted((int(build['buildId']) for build in builds), reverse=True)
def y_from_m_b_x(m, b, x):
    """
    get y from y=mx+b

    :param m: slope (m)
    :param b: b
    :param x: x
    :return: y from y=mx+b
    """
    slope_term = m * x
    return slope_term + b
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.