content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import glob
import os
def getRecursiveFolderData(rootfolder):
    """Recursively collect the text content of every file under a root folder.

    Parameters
    ----------
    rootfolder : str
        Root folder in which all files are present.

    Returns
    ----------
    dict
        Mapping of file path (including filename) -> file content; files
        that cannot be decoded as text are skipped.
    int
        Total number of (only) files read within the root folder.
    """
    folderData = dict()
    # os.path.join keeps the pattern correct whether or not rootfolder ends
    # with a path separator (plain concatenation produced 'dir**/**', which
    # globs sibling directories instead of the root folder's contents).
    pattern = os.path.join(rootfolder, '**')
    for filename in glob.iglob(pattern, recursive=True):
        if os.path.isfile(filename):
            with open(filename, 'r') as file:
                try:
                    folderData[filename] = file.read()
                except UnicodeDecodeError:
                    # Binary / non-text files are silently skipped.
                    pass
    return folderData, len(folderData)
|
49e31041e0cc163da05e3d2910dde7fe8a2b4dae
| 19,240
|
def edges_flux_to_node_flux(G, attribute_name='flux'):
    """Aggregate incoming-edge flux into a per-node total.

    For every node of the networkx graph *G*, sum the values stored under
    *attribute_name* on its incoming edges; edges carrying a falsy value
    (e.g. the attribute is missing) are ignored.
    """
    totals = {}
    for node in G.nodes:
        incoming = G.in_edges(node, data=attribute_name)
        totals[node] = sum(flux for _, _, flux in incoming if flux)
    return totals
|
8e70e44b38e2f8e2b48b070bb03234d2df75e810
| 19,244
|
def compose_redis_key(vim_name, vdu_uuid):
    """Compose the redis key for a VDU.

    Args:
        vim_name (str): The VIM name
        vdu_uuid (str): The VDU uuid (NFVI based)

    Returns:
        str: the redis key, ``<lower-cased vim name>:<vdu uuid>``
    """
    prefix = vim_name.lower()
    return "{}:{}".format(prefix, vdu_uuid)
|
41d62d8d73979ae176349d9339edbf99c991cb07
| 19,245
|
def strip_math(s):
    """Remove LaTeX formatting from mathtext.

    Only handles fully math and fully non-math strings.
    """
    # Drop the surrounding dollar signs of an all-math string.
    if len(s) >= 2 and s.startswith("$") and s.endswith("$"):
        s = s[1:-1]
    replacements = (
        (r"\times", "x"),  # Specifically for Formatter support.
        (r"\mathdefault", ""),
        (r"\rm", ""),
        (r"\cal", ""),
        (r"\tt", ""),
        (r"\it", ""),
        ("\\", ""),
        ("{", ""),
        ("}", ""),
    )
    for tex, plain in replacements:
        s = s.replace(tex, plain)
    return s
|
8a71b7fe6c146834db797ab27c338c6aa17e4546
| 19,246
|
from os.path import getmtime
def mtime(filename):
    """Return when the file was last modified.

    Return value: seconds since 1970-01-01 00:00:00 UTC as a floating point
    number, or 0 if the file does not exist (or cannot be stat'ed).
    """
    try:
        return getmtime(filename)
    except OSError:
        # Missing file, permission error, broken mount, ... -> treat as 0.
        # (Narrowed from a bare except, which also swallowed e.g.
        # KeyboardInterrupt.)
        return 0
|
beb0abba3d08139431b4ca516088586b0b1f6311
| 19,248
|
def get_file_extension(gdalformat):
    """Return the file extension for a given GDAL file format
    (NOTE, currently only KEA, GTIFF, HFA, PCIDSK and ENVI are supported).

    :param gdalformat: format name, case insensitive.
    :return: string extension including the leading dot.
    :raises Exception: if the format is not one of the supported ones.
    """
    # Single lookup table instead of an if/elif ladder: lower() runs once
    # and the unused ".NA" placeholder is gone.
    extensions = {
        "kea": ".kea",
        "gtiff": ".tif",
        "hfa": ".img",
        "envi": ".env",
        "pcidsk": ".pix",
    }
    try:
        return extensions[gdalformat.lower()]
    except KeyError:
        # Same exception type/message as before so callers keep working.
        raise Exception("The extension for the gdalformat specified is unknown.")
|
222da48a8994307c675d519d0e81e02febbc63f6
| 19,249
|
def chain(op,x):
    """
    chain(op,x)
    Return the constraints ensuring that consecutive elements of x pairwise
    satisfy the binary operator op.
    Note: In order for this to work the operator must be from the
    operator library, e.g. operator.lt, operator.ne, e.g:
       chain(operator.lt,x)
    Note: Many of the binary operator.* already have a definition, e.g.
    (from cpmpy_hakank.py):
       increasing, increasing_strict, decreasing, descreasing_strict
    and
       AllDifferent, AllEqual
    """
    # Pair each element with its successor instead of indexing.
    return [op(prev, cur) for prev, cur in zip(x, x[1:])]
|
9bcb728708902918da6b9a30b32a0a2eab31bf86
| 19,250
|
def create_survey_filename(url: str, ext: str = "csv") -> str:
    """Return a filename for a survey, derived from the last URL segment."""
    survey_id = url.rsplit('/', 1)[-1]
    return f"export_survey_{survey_id}.{ext}"
|
25551a7a73cddee37214a36030a58e18519cef1c
| 19,252
|
import yaml
def _yaml_to_dict(yaml_filename):
    """Reads and stores a yaml file as a dictionary

    Args:
        yaml_filename (str):
            The filename of the yaml file to read.

    Returns:
        input_dict (dict):
            The result of reading the yaml file and translating
            its structure into a dictionary.

    Raises:
        ValueError: if the file cannot be opened or parsed; the original
            error is chained as __cause__.
    """
    try:
        # Open the yaml file and import the contents into a
        # dictionary with the same structure
        with open(yaml_filename) as fp:
            input_dict = yaml.load(fp, Loader=yaml.FullLoader)
    except Exception as err:
        # Keep the broad catch (callers expect ValueError) but chain the
        # real error instead of silently discarding it as the bare
        # `except:` used to do.
        raise ValueError("Could not open file %s" % (yaml_filename)) from err
    return input_dict
|
f5bfb960fcf817522a9cb49cb2877c44e9fb4d9f
| 19,253
|
import os
def is_absolute_path(path: str) -> bool:
    """
    Check whether *path* is absolute.

    :param path: path
    :return: True if it is an absolute path, False for a relative one
    """
    absolute = os.path.isabs(path)
    return absolute
|
a40b82df7dd8fe92e3cf49422368cab0abb140b6
| 19,254
|
def load_addresses():
    """Read 'addresses.txt' and return one stripped address per line."""
    with open('addresses.txt') as handle:
        return [line.strip() for line in handle]
|
cb74f6bfa52c7983666dc08cb2e21b094362840e
| 19,255
|
def gapBetweenRanges(rangeA,rangeB):
    """\
    Return the gap between two ranges of values, or zero if there is no gap.

    The sign of the returned value indicates which range is below the other.
    For example:

    * The gap between (0,10) and (15,25) is -5
    * The gap between (0,10) and (9,20) is 0
    * The gap between (20,30) and (10,18) is 2

    :param rangeA: a tuple (lo,hi) representing a range of values.
    :param rangeB: a tuple (lo,hi) representing a range of values.
    :returns: zero if the two ranges overlap; otherwise the gap separating
        them. Negative if rangeA is below rangeB, positive if above.
    """
    aLo, aHi = rangeA
    bLo, bHi = rangeB
    if aHi < bLo:
        # rangeA sits entirely below rangeB -> negative gap
        return aHi - bLo
    if aLo > bHi:
        # rangeA sits entirely above rangeB -> positive gap
        return aLo - bHi
    return 0
|
81db7f95a3cc5fcac5944f7b579ecf06c96b86c8
| 19,256
|
from typing import List
from typing import Union
from typing import Dict
def flatten_choices(choices: List[Union[Dict, List]]) -> List[Dict]:
    """Flatten package choices data from a Distribution file.

    Choices can be nested one level (lists of dicts inside the list); such
    sub-lists are flattened into the result. Entries that are neither dict
    nor list are dropped.

    :param choices (list or dict): choices to flatten out
    """
    flattened = []
    for entry in choices:
        if isinstance(entry, dict):
            flattened.append(entry)
        elif isinstance(entry, list):
            flattened.extend(entry)
    return flattened
|
834e45fb9471eccfc2f3174834217fcf786f7b31
| 19,257
|
import functools
def RequireAuth(handler):
    """Decorator for webapp2 request handler methods.

    Only use on webapp2.RequestHandler methods (e.g. get, post, put),
    and only after using a 'Check____Auth' decorator.

    Expects the handler's self.request.authenticated to be not False-ish.
    If it doesn't exist or evaluates to False, 403s. Otherwise, passes
    control to the wrapped handler.
    """
    @functools.wraps(handler)
    def wrapper(self, *args, **kwargs):
        """Reject unauthenticated requests, then delegate to the handler."""
        authenticated = getattr(self.request, 'authenticated', None)
        if authenticated:
            handler(self, *args, **kwargs)
        else:
            self.abort(403)
    return wrapper
|
edb8f223318d0b22511d6307ed6c35bc83a4ae4a
| 19,259
|
import typing
import json
import operator
import warnings
import functools
def pipe_json(documents, *, dump: bool,
              sort_keys: bool = True,
              compact: bool = False,
              indent: typing.Optional[int] = None,
              ensure_ascii: bool = False):
    """Bidirectional codec between a generator and a consumer.

    When ``dump`` is True, lazily serializes each document in *documents*
    to a JSON string (datetime objects are serialized via .isoformat());
    when False, lazily parses each JSON string back into an object.
    Returns a ``map`` over *documents*.
    """
    codec = json.dumps if dump else json.loads
    if dump:
        dump_kwargs = {'sort_keys': sort_keys,
                       'indent': indent,
                       'ensure_ascii': ensure_ascii,
                       # json-serialize datetime.datetime
                       'default': operator.methodcaller('isoformat')}
        if compact:
            if indent:  # pragma: no cover
                # compact output wins over an explicit indent; warn caller.
                warnings.warn(f'indent={indent!r} overridden'
                              f' by compact={compact}')
            dump_kwargs.update(indent=None,
                               separators=(',', ':'))
        codec = functools.partial(codec, **dump_kwargs)
    def itercodec(docs):
        # Lazily apply the configured codec to each document.
        return map(codec, docs)
    # Sanity-check that the codec round-trips the null document.
    if dump:
        assert next(itercodec([None])) == 'null'
    else:
        assert next(itercodec(['null'])) is None
    return itercodec(documents)
|
12b34adc0d66106d61ccc2008f558f407edcd30a
| 19,260
|
def ManifestXml(*args):
    """Joins arbitrary XML fragments and wraps them in a <manifest> element."""
    body = '\n'.join(args)
    return '<?xml version="1.0" encoding="UTF-8"?><manifest>%s</manifest>' % body
|
db1e86b02b58236ed45607114253e40df47b0069
| 19,261
|
import requests
import logging
def check_spot_termination():
    """Check if instance is marked for spot termination.

    Queries the EC2 instance metadata endpoint and returns the decoded
    termination time on HTTP 200, otherwise None.
    """
    response = requests.get("http://169.254.169.254/latest/meta-data/spot/termination-time")
    logging.info("check_spot_termination response status code: %d" % response.status_code)
    if response.status_code != 200:
        return None
    return response.content.decode()
|
2c9cc2d74d9416cb7f71351ee54fc249b62d2802
| 19,262
|
import math
def update_ms_error(neurons_l, error, teacher_i, outputs_l_j):
    """Accumulate the squared output-layer errors onto *error* and return it."""
    # Index of the output layer and its neuron count.
    L = len(neurons_l) - 1
    n_out = neurons_l[L]
    # Neuron numbering starts at 1, so iterate 1..n_out inclusive.
    for j in range(1, n_out + 1):
        error += math.pow(teacher_i[j] - outputs_l_j[L][j], 2)
    return error
|
a8d60df21b8fe3f82540fb534cc8e637018df467
| 19,263
|
def __test_data_alloc_max():
    """Return fixed (start, end, size) interval test data."""
    intervals = [
        (0, 159, 2048),
        (0, 13, 7904),
        (4, 35, 16),
        (12, 17, 32768),
        (16, 21, 32768),
    ]
    return list(intervals)
|
76abf0a14b80d59e06340493281e749d428a7275
| 19,264
|
def output_json(ips):
    """Convert rows of IP data (first row = headers) into a list of dicts
    with string values, ready to be emitted as JSON."""
    headers, *rows = ips
    return [dict(zip(headers, (str(value) for value in row))) for row in rows]
|
4e80151573f969ce31da5ef266630cdd079a8289
| 19,265
|
def middle(seq):
    """Return middle item of a sequence, or the first of two middle items."""
    mid_index = (len(seq) - 1) // 2
    return seq[mid_index]
|
736b6aed9a8e03f0581f2d7892c5f370f0886285
| 19,268
|
def make_duration(val):
    """
    Converts a string in `hh:mm:ss` representation
    to the equivalent number of seconds.

    Any unparseable input (None, wrong number of fields, non-numeric
    fields) yields 0, consistent with the other invalid cases.

    Args:
        val(str): input string in format `hh:mm:ss`
    """
    # Check for None before any string operation.
    if val is None:
        return 0
    parts = val.split(':')
    if len(parts) != 3:
        return 0
    try:
        hours, minutes, seconds = (int(p) for p in parts)
    except ValueError:
        # Non-numeric field: treat like the other malformed inputs
        # instead of raising.
        return 0
    return hours * 3600 + minutes * 60 + seconds
|
78087038716f85a9e3a292148f87f118e3a13eee
| 19,269
|
import hashlib
def get_md5(input_file, chunk_size=1024 * 16):
    """Get the md5 of a file without reading the entire file into memory."""
    digest = hashlib.md5()
    chunk = input_file.read(chunk_size)
    while chunk:
        digest.update(chunk)
        chunk = input_file.read(chunk_size)
    return digest.hexdigest()
|
e878fe004006ce25ea16d37f11bb3205696d90c5
| 19,270
|
def merge_ar_ssfr(ar_df, ssfr_mosdef_merge_no_dups):
    """Left-merge ar_df with the ssfr_mosdef_merge_no_dups dataframe.

    Parameters:
        ar_df (pd.DataFrame): dataframe keyed by ('field', 'v4id')
        ssfr_mosdef_merge_no_dups (pd.DataFrame): ssfr info keyed by
            ('FIELD_STR', 'V4ID')

    Returns:
        ar_ssfr_merge (pd.DataFrame): Pandas dataframe of the ssfr info,
            mosdef_df info, and duplicates removed
    """
    left_keys = ['field', 'v4id']
    right_keys = ['FIELD_STR', 'V4ID']
    return ar_df.merge(ssfr_mosdef_merge_no_dups, how='left',
                       left_on=left_keys, right_on=right_keys)
|
052efd614062a19bc39b61e014b1e1112bf36e37
| 19,271
|
def chebyshev_distance(position, other):
    """Return Chebyshev distance between points.

    AKA chessboard distance: ordinal and diagonal movements have the same cost.
    """
    dx = abs(position.x - other.x)
    dy = abs(position.y - other.y)
    return dx if dx > dy else dy
|
c74d30caca24969a3fdc03e166e989bd835ee4a0
| 19,272
|
def rivers_with_station(stations) -> set:
    """Return the set of rivers that have at least one monitoring station."""
    return {station.river for station in stations}
|
a9bf6b628e16fa39625268643df085a5ac58a771
| 19,274
|
def get_hero_image_url(hero_name, image_size="lg"):
    """
    Build the Steam CDN URL for a hero image, given hero name and size.

    An "npc_dota_hero_" prefix on *hero_name* is stripped; *image_size*
    must be one of 'eg', 'sb', 'lg', 'full', 'vert'.
    """
    prefix = "npc_dota_hero_"
    if hero_name.startswith(prefix):
        hero_name = hero_name[len(prefix):]
    if image_size not in ('eg', 'sb', 'lg', 'full', 'vert'):
        raise ValueError("Not a valid hero image size")
    return "http://media.steampowered.com/apps/dota2/images/heroes/{}_{}.png".format(
        hero_name, image_size)
|
5ecedd354fe8d7fadf7a5fe59f71861bc82d6a30
| 19,276
|
def to_lower_case(given: str) -> str:
    """Return a lower-cased copy of 'given'.

    >>> to_lower_case("0D")
    '0d'
    """
    lowered = given.lower()
    return lowered
|
23e8298f7f4e33b827a76a7c17d1e5468f6d5fd1
| 19,278
|
from openpyxl.styles import Alignment
def prepare_excel(workbook, filters=True):
    """
    Formats the excel a bit in order to be displayed nicely

    workbook: openpyxl workbook
    filters: If True enable excel filtering headers

    returns: formated workbook
    """
    # openpyxl is an extra requirement
    for worksheet in workbook:
        # estimate column width from the longest stringified cell value
        for col in worksheet.columns:
            max_length = 0
            column = col[0].column_letter
            for cell in col:
                if len(str(cell.value)) > max_length:
                    max_length = len(str(cell.value))
            adjusted_width = (max_length + 3) * 1.2
            # cap the width at 50 so one huge cell can't blow up the layout
            worksheet.column_dimensions[column].width = min([adjusted_width, 50])
        # enable excel filters
        # NOTE(review): 'column' is the letter of the LAST column visited
        # above, so the filter range spans A1..<last column>1.
        if filters is True:
            worksheet.auto_filter.ref = f"A1:{column}1"
        # enable word wrap
        for row in worksheet.iter_rows():
            for cell in row:
                cell.alignment = Alignment(wrap_text=True)
                if isinstance(cell.value, str):
                    cell.value = cell.value.strip()
                    # digit-only strings become real ints so Excel
                    # treats them as numbers
                    if cell.value.isdigit():
                        cell.value = int(cell.value)
    return workbook
|
e9f4b20747a5d8d3fca804117c1e4e3950dc1d45
| 19,279
|
from datetime import datetime
def _n64_to_datetime(n64):
    """
    Convert a Numpy 64 bit timestamp (nanosecond units) to a naive UTC
    datetime object.
    """
    nanoseconds = n64.tolist()
    return datetime.utcfromtimestamp(nanoseconds / 1e9)
|
1d46e67edb29da3c0c340bfcc346e80c0fd36541
| 19,280
|
def hess_BFGS_update(H, dq, dg):
    """Return the BFGS update term for Hessian *H*, given step *dq* and
    gradient change *dg*."""
    # Rank-one term built from the gradient change (outer product).
    term_g = dg[None, :] * dg[:, None] / dq.dot(dg)
    # Curvature correction projected through the current Hessian.
    term_h = H.dot(dq[None, :] * dq[:, None]).dot(H) / dq.dot(H).dot(dq)
    return term_g - term_h
|
460f9896c0178d17ce5f9757eb826a0fad1a295f
| 19,282
|
def make_integer_odd(n):
    """Convert integer into odd integer (identity for odd input)

    Notebook: C8/C8S1_HPS.ipynb

    Args:
        n (int): Integer

    Returns:
        n (int): Odd integer
    """
    return n if n % 2 else n + 1
|
005de06583352860a7abcc6dc1123c5f7ae8db7a
| 19,283
|
import os
import sys
def check_profile_file(filename_path):
    """
    verifies if a given profile file is valid and indicates if it is a null profile or not

    A valid file holds 8760 hourly lines, each with one float ("X") or two
    tab-separated floats ("X\\tY"), with a consistent column count; any
    violation prints a message and exits the process with status 1.

    :param filename_path: path to the profile file to check
    :return: returns False if the profile is null (every value falsy/zero)
    """
    # check file existence
    if not os.path.isfile(filename_path):
        print('Illegal value : option can be 0, 1 or an existent filename.\
        %s is not an existent file' % filename_path)
        sys.exit(1)
    # Peek at the first line to decide whether the file has one or two columns.
    two_profiles = False
    with open(filename_path, 'r') as profile_file:
        two_profiles = (len(profile_file.readline().strip().split("\t")) == 2)
    # Re-open and parse every line into one (or two) float series.
    with open(filename_path, 'r') as profile_file:
        first_profile = []
        indirect_profile = []
        for idx, line in enumerate(profile_file):
            try:
                line_vals = line.strip().split("\t")
                if (len(line_vals) == 1) and not two_profiles:
                    first_profile.append(float(line_vals[0]))
                elif (len(line_vals) == 2) and two_profiles:
                    first_profile.append(float(line_vals[0]))
                    indirect_profile.append(float(line_vals[1]))
                else:
                    # Column count changed relative to the first line.
                    print('Line %d in file %s is not valid.'
                          % (idx + 1, filename_path))
                    sys.exit(1)
            except ValueError:
                # Non-numeric field on this line.
                print('Line %d in file %s is not valid: allowed formats "X" or "X\tY".'
                      % (idx + 1, filename_path))
                sys.exit(1)
            # Reject a negative value as soon as it is parsed.
            if (first_profile[-1] < 0) or (two_profiles and indirect_profile[-1] < 0):
                print('Line %d in file %s indicates a negative value'
                      % (idx + 1, filename_path))
                sys.exit(1)
    # A profile must cover every hour of a (non-leap) year.
    if len(first_profile) != 8760:
        print('file %s does not have 8760 lines'
              % filename_path)
        sys.exit(1)
    # False only when every parsed value is zero -> "null profile".
    return any(first_profile) or any(indirect_profile)
|
e3ae7aa63b1797f07b7ea75561a4d20ae10b279b
| 19,285
|
def _get(redis, key):
    """ Get the current hits per rolling time window.

    The counter is kept as a redis sorted set, so the set's cardinality
    (ZCARD) is the number of recorded hits.

    :param redis: Redis client
    :param key: Redis key name we use to keep counter
    :return: int, how many hits we have within the current rolling time window
    """
    return redis.zcard(key)
|
288d17e0ef4c0d667d984c7f462a2c07d6c66147
| 19,286
|
def normalize(score, base=2048):
    """Scale *score* against *base* (2048 = baseline performance) and
    return it as an integer."""
    scaled = base * score
    return int(scaled)
|
fdeaaef80030b8eb60a55791df6a14eb2d91cae4
| 19,287
|
def github_uri(plug_name):
    """
    Return the standard public https URI for a plugin hosted on GitHub.
    """
    base = 'https://github.com/'
    return base + plug_name
|
8d9941eeb6820bc32e4faa188ad5b7ca2156b0e9
| 19,288
|
def points(a, b, answer_given):
    """Score a multiplication answer: 1 point when correct, otherwise 0."""
    if answer_given == a * b:
        print('Correct!')
        return 1
    print('Sorry! Correct answer was: {:d}'.format(a * b))
    return 0
|
2e0ac980b6cc140dd4cd812bd59f7e25cd12d865
| 19,289
|
def readInventory(filename):
    """Read a Serpent bumat file into an inventory dictionary.

    Parameter
    ---------
    filename : str
        path to the bumatfile to be read

    Returns
    -------
    inventory : dict
        dictionary to store the inventory. keys are ZAID identifiers (str),
        values are atom densities (str) in b^{-1}cm^{-1}
    """
    # Context manager guarantees the file is closed even if reading fails
    # (the old open/readlines/close left it open on error).
    with open(filename) as mat:
        matfile = mat.readlines()
    inventory = {}
    # First 6 lines are header; data lines look like
    # "<ZAID>.<lib id> <atom density>", e.g. "10010.09c 1.0".
    for line in matfile[6:]:
        fields = line.strip().split()
        # Strip the 4-character library suffix (e.g. ".09c") from the ZAID.
        inventory[fields[0][:-4]] = fields[1]
    return inventory
|
a0756d334566fa16341bf67d021fb014899d9a83
| 19,290
|
def get_pokemon_type(pokemon_types):
    """Ask the user for a pokemon type until they enter a valid one.

    The entered value is implicitly lower-cased; invalid entries print a
    warning and re-prompt.

    Args:
        pokemon_types: a list of pokemon types sorted in alphabetic order

    Returns:
        the pokemon type chosen by the user
    """
    # This is the prompt you'll display to the user to ask for their choice.
    # Don't modify it!
    prompt = ('enter a type from one of the following: \n{0}\n'.format(
        ', '.join(pokemon_types)))
    # This is the prompt you'll display to the user if they enter something
    # invalid. Don't modify it!
    warning = 'Unrecognized type'
    while True:
        choice = input(prompt).lower()
        if choice in pokemon_types:
            return choice
        print(warning)
|
571dbe5d04c749d83bf2bbcd7e6ee3b3f6bf1a62
| 19,293
|
def clip(x):
    """Limit the number into the closed range [0, 1].

    if x < 0, x = 0
       x > 1, x = 1
    otherwise x = x
    """
    if x < 0:
        return 0
    if x > 1:
        return 1
    return x
|
f6f4fbde059ee4b71587f1b095012f9083a057b0
| 19,294
|
def bbox_crop(bboxes, top, left, height, width):
    """Clip bounding boxes to a crop window and shift them into its frame.

    Arguments:
        bboxes {ndarray} -- boxes with x coords at even and y coords at odd
            positions of the last axis
        top {int} -- top offset of the crop
        left {int} -- left offset of the crop
        height {int} -- croped height
        width {int} -- croped width
    """
    bottom, right = top + height, left + width
    clipped = bboxes.copy()
    # Clamp to the window, then translate so the window origin becomes (0, 0).
    clipped[..., 0::2] = bboxes[..., 0::2].clip(left, right) - left
    clipped[..., 1::2] = bboxes[..., 1::2].clip(top, bottom) - top
    return clipped
|
bbf8bb17226a99800ab0ae30aa34a6419898b5e1
| 19,295
|
def periodic_commit(start, length, split):
    """Return the hourly commit within the interval, positioned by split()."""
    offset = split() * length
    return start + offset
|
911e0ba00d1ae1e8b41f4c21678447b8d569adfc
| 19,296
|
def handle_from_clause(module, from_):
    """\
    Resolve a comma-separated ``from_`` clause against ``module``.

    Returns the module itself when ``from_`` is None, a single object when
    exactly one name is requested, otherwise a tuple of the requested
    objects. Raises ImportError for names the module does not define.
    """
    if from_ is None:
        return module
    resolved = []
    for raw_name in from_.split(','):
        attr = raw_name.strip()
        try:
            resolved.append(getattr(module, attr))
        except AttributeError:
            raise ImportError('Cannot import name %s'%attr)
    return resolved[0] if len(resolved) == 1 else tuple(resolved)
|
6d60b46d9248612e53a9862ddcdac35582d0a7d6
| 19,298
|
def getAccounts():
    """Return a list of Twilio accounts that have been configured in the
    Gateway.

    Returns:
        list[str]: A list of configured Twilio accounts.
    """
    accounts = ["twilio_account1", "twilio_account2"]
    return accounts
|
2c02a7b20173ec481b17cb4ae60ac01d53109abb
| 19,299
|
def _uvs(mesh):
    """Collect, per UV layer, the unique UV coordinates and their indices.

    :param mesh: mesh whose ``uv_layers`` are scanned
    :rtype: [[], ...], [{}, ...]
    """
    uv_layers = []
    uv_indices = []
    for layer in mesh.uv_layers:
        coords = []
        index_of = {}
        next_index = 0
        for uv_data in layer.data:
            uv_tuple = (uv_data.uv[0], uv_data.uv[1])
            uv_key = str(uv_tuple)
            # Only record the first occurrence of each coordinate.
            if uv_key not in index_of:
                index_of[uv_key] = next_index
                coords.append(uv_tuple)
                next_index += 1
        uv_layers.append(coords)
        uv_indices.append(index_of)
    return uv_layers, uv_indices
|
d0951aeda8118df5f278348c934b32ca4a2bcc1c
| 19,301
|
def lockf(fd, operation, length=0, start=0, whence=0):
    """This is essentially a wrapper around the :func:`fcntl` locking calls. *fd* is
    the file descriptor of the file to lock or unlock, and *operation* is one of the
    following values:"""
    # NOTE(review): stub — performs no locking and always returns None; the
    # docstring describes the intended fcntl-style API only.
    return None
|
67047339ee10d1fc2acca2e77dfe4c4ad0399966
| 19,302
|
import tempfile
import os
def _tempfile(filename):
    """
    Create a NamedTemporaryFile instance to be passed to atomic_writer

    The temp file lives next to *filename* (so a later rename stays on one
    filesystem) and is kept on close (delete=False).
    """
    # suffix must be a str like dir/prefix: the previous
    # os.fsencode('.tmp') produced bytes, and NamedTemporaryFile raises a
    # TypeError when the path components mix str and bytes.
    return tempfile.NamedTemporaryFile(mode='w',
                                       dir=os.path.dirname(filename),
                                       prefix=os.path.basename(filename),
                                       suffix='.tmp',
                                       delete=False)
|
e571b091088057c3e70d23831964b168f022a655
| 19,303
|
def get_functions_windows_only():
    """
    Returns list of PMDK functions that are specific to Windows OS
    """
    windows_only = ['pmem2_source_from_handle', 'pmem2_source_get_handle']
    return windows_only
|
21199da91f704002ad3c7aad5abca91ad91a2f73
| 19,304
|
def soma_elementos(lista):
    """ Receive integers and return the integer sum of the list's elements.

    >>> soma_elementos([1, 2, 4])
    7

    :param lista: list of integers
    :return: sum of the elements
    """
    return sum(lista)
|
ee824c3c7adb407f7a57c55737bbcf86ab1c2b95
| 19,305
|
import math
import sys
def get_formation(formation, ncol, nrow, nmotifs):
    """ check formation or set formation to one of the existing options

    Returns a (formation, nrow, ncol) triple where formation is a list of
    (row, col) grid positions, one per motif.
    """
    # if ncol and/or nrow is missing automatically set fitting parameters
    if formation != "alltoone":
        if ncol is None and nrow is None:
            # default to a roughly square grid
            half_nmotifs = math.ceil(math.sqrt(nmotifs))
            ncol, nrow = half_nmotifs, half_nmotifs
        else:
            if ncol is None:
                ncol = math.ceil(nmotifs/nrow)
            if nrow is None:
                nrow = math.ceil(nmotifs/ncol)
    if isinstance(formation, str):
        if formation == "row":
            # fill plot left to right
            formation = list()
            rows = list(range(nrow))
            for row in rows:
                for col in range(ncol):
                    formation.append((row,col))
        elif formation == "col":
            # fill plot top to bottom
            formation = list()
            rows = list(range(nrow))
            for col in range(ncol):
                for row in rows:
                    formation.append((row,col))
        elif formation == "alltoone":
            # fill first column except for one motif
            # ignores parameter ncol and nrow
            formation = list()
            rows = list(range(nmotifs-1))
            for row in rows:
                formation.append((row,0))
            # remaining motif sits halfway down a second column
            formation.append((math.ceil(len(rows)/2)-1, 1))
            ncol = 2
            nrow = len(rows)
        else:
            sys.exit("ERROR: Unknown formation setting.")
    else:
        # Check if the user-supplied formation fits on the grid
        formation_max_row = max([i[0] for i in formation])
        formation_max_col = max([i[1] for i in formation])
        if nrow < formation_max_row or ncol < formation_max_col:
            sys.exit("ERROR: Grid is to small for specified formation")
    return formation, nrow, ncol
|
194623ed2529f5279c41bfa2126f3dcce3c1f42b
| 19,306
|
def parse_result(res):
    """
    Returns total energy/throughput parsed from a "key : value" report.
    """
    total = 0.0
    for line in res.split("\n"):
        if line == "":
            continue
        # split into "key : value" pieces and strip padding
        fields = [part.strip() for part in line.split(":")]
        if len(fields) > 1:
            key, value = fields
            if key == "Performance per MAC energy":
                # value looks like "<number> <unit>"; accumulate 1/<number>
                total += 1 / float(value.split(" ")[0])
    return total
|
462a584800199d8f3b86e7ecce2159c81838e044
| 19,307
|
import logging
import json
def merge_results(sample_portfolio, grid_results):
    """Merge the per-task results returned by the grid into one total.

    Args:
        str: sample_portfolio - initial list of tasks
        str: grid_results - response from the get_results function, e.g.:
            {
                "finished": ["...-part005_4", "...-part005_5", ...],
                "finished_OUTPUT": ['{"results": [0.03]}', ...],
                "metadata": { "tasks_in_response": 10 }
            }

    Returns:
        number: total value of the evaluated portfolio.
    """
    expected_tasks = grid_results["metadata"]["tasks_in_response"]
    assert (len(sample_portfolio["portfolio"]) == expected_tasks)
    logging.info(grid_results)
    portfolio_value = 0.0
    for raw_output in grid_results["finished_OUTPUT"]:
        parsed = json.loads(raw_output)
        logging.info(parsed)
        portfolio_value += sum(parsed["results"])
    return portfolio_value
|
bc4b6576b2815730c26c8e6e4a61d7676cf0db47
| 19,308
|
def skip_levels(tree, number_of_levels):
    """
    Descend number_of_levels deep along left-most children and return the
    resulting subtree; a list instead selects a specific child per level.
    """
    # allow selecting specific branches instead of the default first child
    if type(number_of_levels) is list:
        for branch_index in number_of_levels:
            tree = tree['children'][branch_index]
        return tree
    remaining = number_of_levels
    while remaining > 0:
        tree = tree['children'][0]
        remaining -= 1
    return tree
|
46aa0e22d636af1e0e7775b41e688a655c4aada3
| 19,309
|
import re
def _FormatProjectIdentifier(client, project_id):
    """Formats a project identifier.

    If the user specifies a project with "projects/${PROJECT_ID}", isolate
    the project id, store it on the client, and return it. Identifiers not
    in that form are returned unchanged and the client is left untouched.

    Returns:
      The project id.
    """
    match = re.search(r'projects/([^/]+)', project_id)
    if not match:
        return project_id
    extracted = match.group(1)
    client.project_id = extracted
    return extracted
|
90db4240873306699d5914073c64c270ba35a26e
| 19,310
|
def credentials_to_dict(credentials):
    """Serialize a credentials object's OAuth fields into a plain dict."""
    field_names = ('token', 'refresh_token', 'token_uri',
                   'client_id', 'client_secret', 'scopes')
    return {name: getattr(credentials, name) for name in field_names}
|
ec350caab7be513901b68d108f345bc3e06a6801
| 19,311
|
def exists(env):
    """Returns true if tool exists."""
    # NOTE: SCons requires the use of this name, which fails gpylint.
    detected = env.Detect('distcc')
    return detected
|
339fd7c09dcaee8bc53beaa87fb83481a5db836e
| 19,312
|
import pathlib
def get_source_id_from_file_path(file_path: pathlib.Path) -> str:
    """Extrapolate the source id from the file path.

    ICE file names follow "<TYPE>_<SOURCE_ID>_<DATE>..." (e.g.
    COREREF_207_20201023.txt.bz2), so the source id is the second
    underscore-separated component of the name's stem.

    Parameters
    ----------
    file_path: str
        The path to the file for which the source id has to be extrapolated.

    Returns
    -------
    str
        The source id.
    """
    stem = file_path.name.partition(".")[0]
    return stem.split('_')[1]
|
03588ef925e3de688451bedd922b3ab29a8042e5
| 19,313
|
def get_prime_array(high):
    """Gets all primes lower than high from the pre-generated primes to
    improve efficiency.

    Parameters
    ==========
    high : Integer
        The number below which primes will be returned

    Returns
    =======
    int array : The primes less than high
    """
    primes = []
    # One (lower bound, file) pair per 100k band; a band's file is only
    # needed once `high` exceeds the band's lower bound. This replaces the
    # five copy-pasted read stanzas of the original.
    bands = [
        (0, "../pre_generated_primes/primes-to-100k.txt"),
        (100000, "../pre_generated_primes/primes-to-200k.txt"),
        (200000, "../pre_generated_primes/primes-to-300k.txt"),
        (300000, "../pre_generated_primes/primes-to-400k.txt"),
        (400000, "../pre_generated_primes/primes-to-500k.txt"),
    ]
    for lower_bound, path in bands:
        if high <= lower_bound:
            break
        with open(path) as f:
            for line in f:
                primes.extend(int(i) for i in line.split())
    # Trim the tail that overshoots high (the files stop at band borders,
    # so only the final band can overshoot). Matches the original's
    # "pop while strictly greater" behaviour.
    while primes and primes[-1] > high:
        primes.pop()
    return primes
|
fadbad975adddb870dd22c8f35573e4e65f6af11
| 19,315
|
def _prep_vars(variables: list):
    """
    Convert a list of variable names into a comma separated string.

    :param variables: list of vars
    :return: comma separated string (empty string for an empty list)
    """
    # str.join handles the separators and, unlike the previous manual
    # `variables[-1]` indexing, does not raise IndexError on an empty list.
    return ",".join(variables)
|
e5a8585f3c7ae8edd67e3893ab0bd17b035d17e3
| 19,316
|
import logging
def get_logger(log_file=None):# {{{
    """Set logger and return it.

    If the log_file is not None, log will be written into log_file.
    Else, log will be shown in the screen.

    Args:
        log_file (str): If log_file is not None, log will be written
            into the log_file.

    Return:
        ~Logger

        * **logger**: An Logger object with customed config.
    """
    # Basic config (process-wide; only the first call actually configures
    # the root logger).
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO,
    )
    logger = logging.getLogger(__name__)
    # Add filehandler
    # NOTE(review): every call with a log_file appends another FileHandler
    # to the same module logger, duplicating lines on repeated calls —
    # confirm this is only invoked once per process.
    if log_file is not None:
        file_handler = logging.FileHandler(log_file, mode='w')
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(
            logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        )
        logger.addHandler(file_handler)
    return logger
|
25cbf7d9cd9150ee5b86929c9ab56d748ae0fdc3
| 19,318
|
import re
def identifyProgram(program):
    """Return a standard (generic) program name as a title-cased string."""
    try:
        name = program.name
    except AttributeError:
        # No name was set for this program; try the termination message,
        # then give up with "unknown".
        try:
            name = program.termination_name
        except AttributeError:
            name = "unknown"
    # Normalize known program families to their generic names.
    if re.match(r"^Refmac", name):
        name = "refmac5"
    if re.match(r"^FFT(BIG)?", name):
        name = "fft"
    # Return name as a title
    return str(name).title()
|
eda0ef3f9d8126919156bd3c0684e5b385dad520
| 19,319
|
def is_unique_msg(msg, previous_msg_ids, previous_run_time):
    """
    Determine whether a raw Message is new: its id must not have been
    fetched before and its publish time must be after the previous run.

    :param msg: raw Message object
    :param previous_msg_ids: set of previously fetched message ids
    :param previous_run_time: previous run time string
    :return: True if message is unique
    """
    inner = msg.get("message", {})
    if not inner:
        return False
    unseen = inner.get("messageId") not in previous_msg_ids
    published_after = inner.get("publishTime", "") > previous_run_time
    return unseen and published_after
|
844e814e5473dc9da63238d12a8e70d7d469b355
| 19,320
|
import re
def extract_code_from_function(function):
    """Return the error code handled by a ``fix_``-named function, or None."""
    name = function.__name__
    if not name.startswith('fix_'):
        return None
    code = name[len('fix_'):]
    if not code:
        return None
    # The code must be one letter followed by digits, e.g. "e101".
    try:
        int(code[1:])
    except ValueError:
        return None
    return code
|
522f1e19f8304d60106625af2c426e9f9b78643a
| 19,321
|
def _pad(s, l):
    """
    Pads *s* on the right with spaces to length *l* (no-op if already longer).
    """
    return s + " " * max(l - len(s), 0)
|
bfb824cf84e677b23012957e0f5ddec7a16f5e3d
| 19,322
|
def parse_dir(opts):
    """Parse directory and return"""
    # NOTE(review): stub — ignores *opts* and always returns 0.
    return 0
|
65114231cb1e6eaa26583aa2961012bb7c16c3a4
| 19,325
|
def eqn14_rule(model,storage_r,g,strg_tech,t,tm,s):
    """ General retrieval rate constraint.

    Bounds the retrieval rate at every index tuple by the technology's
    MAX_RETRIEVAL_RATE parameter.
    """
    return model.retrieval_rate[storage_r,g,strg_tech,t,tm,s] <= model.MAX_RETRIEVAL_RATE[strg_tech]
|
e471c34b4a038df6547a52dc351ebe54c70c516f
| 19,326
|
def add_zeros(card):
    """ This adds leading zeros to the card number display. """
    # Three zeros for numbers under 100, two otherwise; keep at most the
    # last five digits of the card number.
    prefix = '000' if card < 100 else '00'
    return prefix + str(card)[-5:]
|
0c2cacef82fcc24e1ba7cade70e8f506be7ccebc
| 19,327
|
def get_name():
    """Get Name

    Returns the ``__name__`` of the module this function is defined in.
    """
    return __name__
|
c5d0ad0e5fe9f31bcbc9a818bd9336bfee938d22
| 19,328
|
def train_and_eval(model, train_input_fn, eval_input_fn,
                   steps_per_epoch, epochs, eval_steps, callbacks):
    """Train *model* with its keras-style fit(), validating each epoch.

    The input functions are each invoked once to materialize the datasets;
    the History object returned by fit() is passed through.
    """
    return model.fit(
        x=train_input_fn(),
        validation_data=eval_input_fn(),
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        validation_steps=eval_steps,
        callbacks=callbacks)
|
528c5f257b8b8f95d0202a3e964bef498c862fcf
| 19,329
|
from typing import List
def build_location_line(split_ids: List[str], keys: List[str], delimiter: str) -> str:
    """
    Builds the locations and lines based on the configuration.

    :param split_ids: The split IDs
    :param keys: The locations or lines keys position to join from IDs
    :param delimiter: separator placed between the selected IDs
    :return: The joined locations or lines (out-of-range keys are skipped)
    """
    selected = [split_ids[int(key)] for key in keys
                if int(key) <= len(split_ids) - 1]
    return delimiter.join(selected)
|
fbcace1da1e0bd737531f8dcfbda5644958d5d6e
| 19,330
|
import re
def handle_iter_to_list(stmt: str):
    """
    convert map, reduce, filter to list, for a compatibility with
    Python2.*'s operation, e.g. filter(func, iter)[0]
    e.g.
        map(func, filter(lambda x: str(x) in '0123456789', [1,2,3,4,5,99]))
    to
        list(map(func, list(filter(lambda x: str(x) in '0123456789', [1,2,3,4,5,99]))))
    """
    # Locate the first map/reduce/filter that is delimited by non-letters
    # (so identifiers like "remap" are not rewritten).
    search = re.search(r"[^a-zA-Z]*(map|reduce|filter)[^a-zA-Z]", stmt)
    if search:
        start = search.start(1)
        end = search.end(1)
        func = search.group(1)
        # find the position of closed brackets
        temp = stmt[end:]
        stack = 0           # bracket nesting depth
        str_stack = []      # quote characters we are currently inside of
        closed_position = 0
        for index, c in enumerate(temp):
            # Brackets only count when not inside a quoted string.
            if (c == '(' or c == '{' or c == '[') and not str_stack:
                stack += 1
            if (c == ')' or c == '}' or c == ']') and not str_stack:
                stack -= 1
            if c == "'" or c == '"':
                # A matching quote closes the string, otherwise one opens.
                if str_stack and str_stack[-1] == c:
                    str_stack.pop()
                else:
                    str_stack.append(c)
            if not stack:
                # Depth back at zero: the call's argument list just closed.
                closed_position = index + 1 + end
                break
        # Wrap this call in list(...) and recurse both into the call's
        # arguments and into the remainder of the statement.
        return f'{stmt[:start]}list({func}{handle_iter_to_list(stmt[end:closed_position])}){handle_iter_to_list(stmt[closed_position:])}'
    else:
        return stmt
|
0d54b37fdee17fb078168a35ed8475330dd9910d
| 19,331
|
def create_user(options, sess, token):
    """
    Create an Administrator user using the CVE.

    Args:
        options: object with ``username``, ``password``, ``email`` and
            ``url`` attributes.
        sess: requests-style session used to POST the registration form.
        token: name of the CSRF token field; submitted with value '1'.

    Returns:
        The response of the registration POST (redirects not followed).
    """
    credentials = {
        'name': options.username,
        'username': options.username,
        'password1': options.password,
        'password2': options.password,
        'email1': options.email,
        'email2': options.email,
    }
    payload = {}
    # User object
    for field, value in credentials.items():
        payload['user[%s]' % field] = value
    payload['user[groups][]'] = '7'  # Yay, Administrator!
    # Sometimes these will be overridden
    payload['user[activation]'] = '0'
    payload['user[block]'] = '0'
    # Form data mirrors the user object
    for field, value in credentials.items():
        payload['form[%s]' % field] = value
    payload['form[option]'] = 'com_users'
    payload['form[task]'] = 'user.register'
    payload[token] = '1'
    return sess.post(options.url + "/index.php/component/users/?task=user.register", data=payload, allow_redirects=False)
|
5176e8c3ba12e4224045a66b6eb6b79e9ade5602
| 19,332
|
import inspect
import textwrap
def get_function_body(func):
    """Extract the body of a function.

    This can be used instead of an embedded string to define python code that
    needs to be used as a string. It means that the code in question can be
    edited, parsed, autocompleted and linted as normal python code.

    Args:
        func: function object whose source must be retrievable by
            ``inspect`` (i.e. defined in a real file). Pass undecorated
            functions: only the first source line (the ``def``) is skipped.

    Returns:
        str: the dedented source of the function body.
    """
    lines, _ = inspect.getsourcelines(func)
    # getsourcelines() yields lines that already end with '\n'; the previous
    # implementation joined them with an extra '\n', doubling every line
    # break in the extracted body.
    return textwrap.dedent(''.join(lines[1:]))
|
224ea7e2a5c16db250db8212ffe679cc4086fe96
| 19,333
|
def get_view_model(cls):
    """
    Get the model to use in the filter_class by inspecting
    the queryset or by using a declared auto_filters_model
    """
    if cls.queryset is not None:
        return cls.queryset.model
    msg = 'When using get_queryset you must set a auto_filters_model field in the viewset'
    assert hasattr(cls, 'auto_filters_model'), msg
    return cls.auto_filters_model
|
7f0d3141eba794778679d0c30bfdcddbc145f63d
| 19,334
|
from bs4 import BeautifulSoup
def get_word_meanings(response):
    """
    Parse an HTML document and collect the text of every meaning found
    inside the first ``.word_num`` container's ``.fnt_k05`` elements.
    Returns an empty list when no such container exists.
    """
    dom = BeautifulSoup(response.content, "lxml")
    container = dom.select_one(".word_num") or None
    if not container:
        return []
    return [meaning.text for meaning in container.select(".fnt_k05")]
|
ef14498eff72dfc83bf4bd2e146d7e5ed0f41bf0
| 19,335
|
def strategy_largest_first(G, colors):
    """
    Largest first (lf) ordering. Ordering the nodes by largest degree
    first.

    Args:
        G: graph whose nodes are ordered.
        colors: unused; kept for strategy-interface compatibility.

    Returns:
        list: nodes sorted by descending degree.
    """
    # G.nodes() returns a view (not a list) on networkx >= 2.0, so the
    # previous in-place ``nodes.sort(...)`` raised AttributeError there.
    # sorted() with reverse=True is stable, so equal-degree nodes keep the
    # same relative order as the old ascending sort on the negated degree.
    return sorted(G.nodes(), key=lambda node: G.degree(node), reverse=True)
|
0ef116896b8e43712887888dcc79af197f7a9d17
| 19,336
|
def most_common(lst):
    """
    Return the most frequent non-empty value in ``lst``.

    Empty strings are ignored. Ties are resolved in favour of the value
    that first reached the winning count. Returns "" when ``lst`` is empty
    or contains only empty strings.
    """
    counts = {}
    for value in lst:
        if value == "":
            continue
        counts[value] = counts.get(value, 0) + 1
    winner = ""
    winner_count = 0
    for value, count in counts.items():
        if count > winner_count:
            winner, winner_count = value, count
    return winner
|
d06393ab5b06332f58675944c7a2bfb8ed942644
| 19,338
|
def two_change(graph, tour, x1, x2, y1, y2):
    """Apply a 2-opt edge exchange to the tour if it is an improvement.

    Args:
        graph: distance matrix, indexed as ``graph[i-1, j-1]`` for the
            1-based city labels stored in ``tour``.
        tour (list): the current tour as 1-based city labels.
        x1, x2, y1, y2: indices into ``tour`` selecting the endpoints of
            the two edges being exchanged.

    Returns:
        ``(new_tour, change)`` with ``change < 0`` when the exchange
        shortens the tour, otherwise ``False``.
        NOTE(review): the mixed tuple/``False`` return type forces callers
        to type-check the result.
    """
    # Cost delta: distance of the candidate edges (tour[x1],tour[y1]) and
    # (tour[x2],tour[y2]) minus the current edges (tour[x1],tour[x2]) and
    # (tour[y1],tour[y2]); negative means the reconnection is shorter.
    change = (graph[tour[x1]-1, tour[y1]-1] + graph[tour[x2]-1, tour[y2]-1] -
              graph[tour[x1]-1, tour[x2]-1] - graph[tour[y1]-1, tour[y2]-1])
    if change < 0:
        # Keep the prefix up to x1, then splice in the reversed segment
        # (tour[y1:x1:-1] walks it backwards), then the tail from y2 on.
        temp_tour = tour[0:x1+1] + tour[y1:x1:-1]
        if y2 > 0:
            temp_tour += tour[y2:]
        return temp_tour, change
    else:
        return False
|
1baafd1ff65a758a51a469a66f90f06b704eb341
| 19,339
|
import math
def prime_pi_upper_bound(n):
    """
    Return an upper bound of the number of primes below n.

    Uses the bound pi(n) <= n / (ln n - 1 - (154/125)/ln n).
    """
    log_n = math.log(n)
    denominator = log_n - 1 - (154 / 125) / log_n
    return int(n / denominator)
|
1a7330f50bc14e4ba20f6efedad48d55a4ccbd01
| 19,340
|
def filter_bits():
    """Return the (name pattern, values) pairs used as i[0] / j in the tests."""
    chokepoint_settings = ('chokepoint_%s', [1, 2, 3])
    exit_settings = ('exit_dims_%s', [2, 4, 6, 8])
    return [chokepoint_settings, exit_settings]
|
0969259507821b4d9e017bb20b61e384001a53cd
| 19,341
|
def remove_obs_from_match(md, rm):
    """ Return an updated matched data frame.
    :param md: pd data frame of candidate matches
    :param rm: pd data frame of matches to drop
    :return: data frame equal to ``md`` minus the rows matching ``rm``
    """
    match_cols = ['target_year', 'target_start_yr', 'target_end_yr', 'archive_experiment',
                  'archive_variable', 'archive_model', 'archive_ensemble',
                  'archive_start_yr', 'archive_end_yr']
    out_cols = ['target_variable', 'target_experiment', 'target_ensemble',
                'target_model', 'target_start_yr', 'target_end_yr', 'target_year',
                'target_fx', 'target_dx', 'archive_experiment', 'archive_variable',
                'archive_model', 'archive_ensemble', 'archive_start_yr',
                'archive_end_yr', 'archive_year', 'archive_fx', 'archive_dx', 'dist_dx',
                'dist_fx', 'dist_l2']
    # Flag every row of rm, left-join the flags onto md, and keep only the
    # md rows that did not pick up the flag.
    flagged = rm[match_cols].copy()
    flagged["remove"] = True
    merged = md.merge(flagged, how="left")
    unmatched = merged["remove"].isnull()
    return merged.loc[unmatched][out_cols]
|
3e22e07f7ae241d1d88dc67389e37d65952abf15
| 19,342
|
def find_animals_groups(df):
    """given a dataframe, return animals and groups dataframes based on population of
    each row (> 1 == group)"""
    population_columns = ["Population _Male", "Population _Female", "Population _Unknown"]
    total_population = df[population_columns].sum(axis=1)
    animals = df[total_population == 1]
    groups = df[total_population > 1]
    return animals, groups
|
02a8376dcdb7abf2e3df39672650bfac8adc7c64
| 19,343
|
import argparse
def parse_args():
    """Defines all arguments.
    Returns
    -------
    args object that contains all the params
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Create an image list or \
            make a record database by reading from an image list')
    # NOTE: an earlier revision exposed this as a --train flag instead of a
    # positional argument.
    parser.add_argument('train', help='generate train/val list file & resize train/val image to 112 size which saved in ../phase1/ dir.')
    list_options = parser.add_argument_group('Options for creating image lists')
    list_options.add_argument('--no-enmfake', action='store_true', default=False,
                              help='remove enm fake train image dataset')
    list_options.add_argument('--aug', action='store_true', default=False,
                              help='augment train positive image dataset')
    return parser.parse_args()
|
98b452b5488bca0f21554ddd01374bea75074fe2
| 19,344
|
def retrieve_service(name, path):
    """Retrieve the secret for the service ``name`` stored under ``path``."""
    return (path / name).read_text()
|
7d570688381959fbc021d6c7a22a08d62f38fcd7
| 19,345
|
def word_capital(text):
    """
    Capitalizes the first character of each word, it converts a string into
    titlecase by making words start with an uppercase character and keep the
    remaining characters. Runs of spaces are collapsed to single spaces;
    falsy input (None, "") is returned unchanged.
    """
    if not text:
        return text
    words = [word for word in text.split(' ') if word]
    return ' '.join(word[0].upper() + word[1:] for word in words)
|
fbb20324204f62344af5b76f74fad98810f6fee0
| 19,346
|
import torch
def _allpairs_cosine_similarity(x):
""" for a matrix of size [n, d] we will compute all pairs cosine similarity and get [n,n]"""
pairwise_numerator = x @ x.t()
denominator_elems = torch.sqrt(torch.diag(pairwise_numerator))
denominator = denominator_elems[None] * denominator_elems[:, None]
cosine_sim = pairwise_numerator / denominator
return cosine_sim
|
9065fcf6d14fdc926267e08d278c66598fc13479
| 19,347
|
def sort_modes(modes):
    """Sorts a list of ``Modes`` based on the logic described below.
    Mode ordering is decided primarily by mode Hilbert space size:
    modes with more levels go on the right.
    Among modes with the same number of levels,
    the ordering is decided alphanumerically from right to left.
    For example, assuming all cavities have the same number of levels
    and all qubits have the same, smaller, number of levels:
    [qubit1, qubit0, cavity2, cavity1, cavity0]
    Args:
        modes (list[Mode]): List of modes to sort.
    Returns:
        ``list[Mode]``: Sorted list of Modes.
    """
    # Primary key: level count ascending (more levels to the right) — the
    # negated count combined with reverse=True yields ascending order.
    # Secondary key: name descending among equal level counts.
    # The previous key (m.name, m.levels) made the *name* the primary sort
    # key, contradicting the documented contract; it only matched the
    # docstring's example because 'qubit...' happens to sort after
    # 'cavity...' alphabetically.
    return sorted(modes, key=lambda m: (-m.levels, m.name), reverse=True)
|
d999fc54da7d6ed13078d1e20b66ca8150802781
| 19,348
|
def yields_from_leung_nomoto_2018_table10(feh):
    """
    Supernova data source: Leung & Nomoto, 2018, ApJ, Volume 861, Issue 2, Id 143, Table 10/11
    The seven datasets are provided for Z/Zsun values of 0, 0.1, 0.5, 1, 2, 3 and 5.
    Using Zsun = 0.0169 the corresponding FeH values are -1, -0.301, 0.0, 0.301, 0.4771 and 0.69897.
    We use seven intervals delimited by midpoints of those values.

    :param feh: [Fe/H] metallicity selecting the dataset.
    :return: list of 11 yield values for the matching metallicity interval
        (same element ordering in every branch — presumably the Table 10/11
        species order; confirm against the paper).
    """
    # Interval edges are midpoints of adjacent tabulated FeH values,
    # e.g. -0.65 is the midpoint of -1 and -0.301; the branches together
    # cover the whole real line.
    if feh <= -1.65:
        return [0.0, 5.48e-4, 1.3e-11, 2.15e-9, 3.46e-2, 1.63e-4, 2.50e-3, 1.72e-1, 1.14e-1, 2.55e-2, 7.57e-1]
    elif -1.65 < feh <= -0.65:
        return [0.0, 5.44e-4, 1.54e-12, 4.34e-10, 3.81e-2, 1.63e-4, 1.84e-3, 1.79e-1, 1.12e-1, 2.24e-2, 7.60e-1]
    elif -0.65 < feh <= -0.15:
        return [0.0, 5.88e-4, 3.24e-12, 2.94e-10, 4.85e-2, 6.58e-4, 1.69e-3, 2.30e-1, 1.14e-1, 1.84e-2, 7.20e-1]
    elif -0.15 < feh <= 0.15:
        return [0.0, 5.82e-4, 6.45e-12, 3.69e-10, 4.90e-2, 6.56e-4, 1.22e-3, 2.8e-1, 1.9e-1, 1.59e-2, 6.81e-1]
    elif 0.15 < feh <= 0.39:
        return [0.0, 5.71e-4, 1.62e-11, 5.52e-10, 4.94e-2, 6.46e-4, 8.41e-4, 2.13e-1, 9.81e-2, 1.26e-2, 6.44e-1]
    elif 0.39 < feh <= 0.59:
        return [0.0, 5.47e-4, 5.54e-11, 9.20e-10, 6.23e-2, 6.82e-4, 7.57e-4, 2.21e-1, 9.27e-2, 1.11e-2, 5.87e-1]
    elif 0.59 <= feh:
        return [0.0, 5.36e-4, 8.29e-11, 7.60e-10, 7.54e-2, 2.81e-4, 8.39e-4, 2.25e-1, 8.00e-2, 8.93e-3, 4.99e-1]
|
4a03971e14c80d013259afefdbece6e2c67ccdf8
| 19,349
|
def post_dict():
    """
    Dictionary with post data
    """
    return {"post": "This is my first post"}
|
07dcbd3f84e8d99a9923e82592d3f3e255dc8739
| 19,350
|
from typing import Any
def getPaneFor(panelName):
    """
    getPaneFor( panelName ) -> Dock
    Returns the first pane that contains the named panel or None if it can't be found.
    Note that the panelName must be exact as described in the layout.xml file or the panel ID.
    For example, 'Properties.1' or 'Viewer.1 or 'co.uk.thefoundry.WebBrowser'
    @return: The pane or None.
    """
    # NOTE(review): stub implementation — always returns the typing.Any
    # sentinel instead of a real pane; presumably a documentation/tooling
    # placeholder.
    placeholder = Any
    return placeholder
|
35777bc99c00eae4db0b87caf53ec21551806eb4
| 19,351
|
def describe(path):
    """
    Returns a human-readable representation of path.
    """
    segments = [str(segment) for segment in path]
    return "/" + "/".join(segments)
|
7b6ba2f17379bfca88d79eb15db35a3bff183720
| 19,354
|
import time
import os
def main(cli_args=None):
    """Command line argument parsing and main script execution.

    NOTE(review): despite the docstring inherited from certbot, this is a
    hard-coded stub: it ignores ``cli_args``, sleeps 10 seconds, and writes
    fixed certificate/key fixtures for 'test.sakura.ne.jp' under
    /etc/letsencrypt/live/ (requires root-writable /etc).

    :returns: result of requested command
    :raises errors.Error: OS errors triggered by wrong permissions
    :raises errors.Error: error if plugin command is not supported
    """
    # Presumably simulates the latency of a real issuance — TODO confirm.
    time.sleep(10)
    fqdn = 'test.sakura.ne.jp'
    os.makedirs('/etc/letsencrypt/live/' + fqdn, exist_ok = True)
    # Leaf certificate fixture (real Gehirn/Sectigo-issued cert content).
    with open('/etc/letsencrypt/live/' + fqdn + '/cert.pem', 'w', newline="\n") as cert_file:
        cert_file.write('-----BEGIN CERTIFICATE-----\n'\
            'MIIJqzCCCJOgAwIBAgIQVIhihZkFDxismyB1t2pm+jANBgkqhkiG9w0BAQsFADCB\n'\
            'gjELMAkGA1UEBhMCSlAxDjAMBgNVBAgTBVRva3lvMRMwEQYDVQQHEwpDaGl5b2Rh\n'\
            'LWt1MRQwEgYDVQQKEwtHZWhpcm4gSW5jLjE4MDYGA1UEAxMvR2VoaXJuIE1hbmFn\n'\
            'ZWQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBSU0EgRFYwHhcNMTgwNjI4MDAw\n'\
            'MDAwWhcNMjAwNjI3MjM1OTU5WjA8MSEwHwYDVQQLExhEb21haW4gQ29udHJvbCBW\n'\
            'YWxpZGF0ZWQxFzAVBgNVBAMMDiouc2FrdXJhLm5lLmpwMIIBIjANBgkqhkiG9w0B\n'\
            'AQEFAAOCAQ8AMIIBCgKCAQEA5Q7v6Ba4yS7NGTG535yVZBR82oO6XZXzi/PJV4hh\n'\
            'qivpVtMGWvEHwS9qQaK+jq70bwjvoSgb6Or6MOax8lgS1h9kVG6/kc+OBkZ0nUr+\n'\
            '3RBaHlci6ixU9wFtuWZaFe7/gUv23vjb70LDbhwOqktISJyWqchfnmeQcbcpuaXh\n'\
            '5P5sUAiyprYMbqjiM6+RhRKS5BA4GxGwYWdFMKTyx8wrvYPbjLWUtsdYnyw+rutn\n'\
            'BoKgM5OSuh7vnRJ9BZUzb2csRIajHdvxmfsRpNt0PuxvUEQtke/a/fwsipF/XHls\n'\
            'FhTtMzk+qsbLiEZYaYmeXNoQeU9XlDwBHxalpwtTcyYTsQIDAQABo4IGYDCCBlww\n'\
            'HwYDVR0jBBgwFoAUEuZqJYZx7cyOaQxZGcAHvByorUswHQYDVR0OBBYEFGqEaygH\n'\
            'q4zhSo5jT6fgrtrit50CMA4GA1UdDwEB/wQEAwIFoDAMBgNVHRMBAf8EAjAAMB0G\n'\
            'A1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjBLBgNVHSAERDBCMDYGCysGAQQB\n'\
            'sjEBAgI8MCcwJQYIKwYBBQUHAgEWGWh0dHBzOi8vY3BzLnVzZXJ0cnVzdC5jb20w\n'\
            'CAYGZ4EMAQIBMFYGA1UdHwRPME0wS6BJoEeGRWh0dHA6Ly9jcmwudXNlcnRydXN0\n'\
            'LmNvbS9HZWhpcm5NYW5hZ2VkQ2VydGlmaWNhdGlvbkF1dGhvcml0eVJTQURWLmNy\n'\
            'bDCBiAYIKwYBBQUHAQEEfDB6MFEGCCsGAQUFBzAChkVodHRwOi8vY3J0LnVzZXJ0\n'\
            'cnVzdC5jb20vR2VoaXJuTWFuYWdlZENlcnRpZmljYXRpb25BdXRob3JpdHlSU0FE\n'\
            'Vi5jcnQwJQYIKwYBBQUHMAGGGWh0dHA6Ly9vY3NwLnVzZXJ0cnVzdC5jb20wggMq\n'\
            'BgNVHREEggMhMIIDHYIOKi5zYWt1cmEubmUuanCCCiouMTgwci5jb22CCCouMi1k\n'\
            'LmpwggoqLmFjaG9vLmpwgg0qLmFtYXJldHRvLmpwggkqLmJvbmEuanCCCSouY2hl\n'\
            'dy5qcIIJKi5jcmFwLmpwgg0qLmRheW5pZ2h0LmpwggoqLmRla284LmpwggsqLmRv\n'\
            'amluLmNvbYIIKi5lZWsuanCCCSouZmxvcC5qcIIJKi5mcm9tLnR2gg0qLmZ1YnVr\n'\
            'aS5pbmZvgg0qLmdva3Vqb3UuYml6ggoqLmdyYXRzLmpwggkqLmdycnIuanCCDSou\n'\
            'aGFsZm1vb24uanCCDSouaXZvcnkubmUuanCCCSouamVlei5qcIIJKi5qcG4ub3Jn\n'\
            'ggsqLmtpcmFyYS5zdIILKi5rb2thZ2UuY2OCECoubWFpbC1ib3gubmUuanCCCyou\n'\
            'bWF0cml4LmpwggsqLm1pbW96YS5qcIINKi5taW50cy5uZS5qcIIPKi5tb2t1cmVu\n'\
            'Lm5lLmpwggkqLm5hem8uY2OCDioubmV0Z2FtZXJzLmpwggkqLm5vb2IuanCCCyou\n'\
            'bnlhbnRhLmpwggkqLm8wbzAuanCCDCoub3BhbC5uZS5qcIIJKi5yYXNoLmpwggoq\n'\
            'LnJhem9yLmpwgggqLnJkeS5qcIIIKi5yZ3IuanCCCSoucm9qby5qcIIKKi5yb3Nz\n'\
            'YS5jY4IKKi5ydWxlei5qcIIJKi5ydXNrLnRvgg0qLnNhaWt5b3UuYml6ggsqLnNh\n'\
            'a3VyYS50doIPKi5zYWt1cmF0YW4uY29tgg8qLnNha3VyYXdlYi5jb22CCyouc2Fs\n'\
            'b29uLmpwggkqLnNpbGsudG+CCCouc2tyLmpwggoqLnNwYXduLmpwgg0qLnNxdWFy\n'\
            'ZXMubmV0gg4qLnN1bW9tby5uZS5qcIIJKi50YW5rLmpwggoqLnRoeW1lLmpwgg0q\n'\
            'LnRvcGF6Lm5lLmpwggoqLnVoLW9oLmpwggkqLnVuZG8uanCCDSoud2Vic296YWku\n'\
            'anCCCSoud2hvYS5qcIIIKi54MC5jb22CByoueDAudG+CCCoueGlpLmpwMIIBfQYK\n'\
            'KwYBBAHWeQIEAgSCAW0EggFpAWcAdgDuS723dc5guuFCaR+r4Z5mow9+X7By2IMA\n'\
            'xHuJeqj9ywAAAWREex2WAAAEAwBHMEUCIHEbhtYPC2HmaSI+VKvoho0aKZb7dv9e\n'\
            'uQAFx620tfL1AiEAk3+PvH35stHXo9jhEjLyotlUV/5Bfzsv3uBZQQjyPtQAdgBe\n'\
            'p3P531bA57U2SH3QSeAyepGaDIShEhKEGHWWgXFFWAAAAWREex3aAAAEAwBHMEUC\n'\
            'IEsUHzvzvEXOJRlghqlcIJR9BKkKYNiAxrGKB6ADqLGfAiEA8nzKg99hFTzImtpG\n'\
            '5jJ1pJIrPu7lm3+099GIWTAepu8AdQBVgdTCFpA2AUrqC5tXPFPwwOQ4eHAlCBcv\n'\
            'o6odBxPTDAAAAWREex26AAAEAwBGMEQCIAa/nHlcXPjyQhcnnkddqzH9+KMdSGgx\n'\
            'd2KF7r/Kwi+nAiBFTggil1fhKxm0hYWptOQJjzF1MX1p3ncv/A1idBqg3TANBgkq\n'\
            'hkiG9w0BAQsFAAOCAQEALKhm1hC3mkllUw3C4lv1igtYRLTmEx1XoCYAGrJK5Aap\n'\
            'QMwTakzmRl+U41XoQcamk0pAGFaewwplae2kqsMe/tRSuQmrPM08TyVfJGS1nB4m\n'\
            'Zow3fYajj4zwu84TQhbQxIweGT7BEMlfowxNS92vtoAcSOP8JGjQc9KTOIlSBh3e\n'\
            'CZLgwz8n+p3q9GaLrOsoqM2KzIXjPNHfxnEVqnadLl1FCgcIxO/dTQTXZ7EfBDXR\n'\
            'mlQjTkNT1AQfMMga98nW7EMnUk8QuKpEWQKVAFkqhkIJtbYsph5NkCN18lY4gGJB\n'\
            '28tMCgf8kEjX2H/aS8p/lLftS3UKeapBSFQR/TRSyQ==\n'\
            '-----END CERTIFICATE-----')
    # Fullchain fixture — NOTE(review): truncated/dummy PEM, not a valid chain.
    with open('/etc/letsencrypt/live/' + fqdn + '/fullchain.pem', 'w', newline="\n") as fullchain_file:
        fullchain_file.write('-----BEGIN CERTIFICATE-----\n'\
            'MIIGKDCCBRCgAwIBAgISA5FY/8zZgI3QbaUi3wSpgtMJMA0GCSqGSIb3DQEBCwUA\n'\
            'MEoxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MSMwIQYDVQQD\n'\
            'ExpMZXQncyBFbmNyeXB0IEF1dGhvcml0eSBYMzAeFw0xODEwMDUxMDM4MjRaFw0x\n'\
            'OTAxMDMxMDM4MjRaMCoxKDAmBgNVBAMTH3BvcnRhbC5taWMtdGVzdC5wcm9jdWJl\n'\
            'LWRlbW8uanAwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrNahcIILd\n'\
            'gyEMcwveA7CYC+ZnmDW2jbE2AwXO6b3G4/lMX8YDeq5FXLdC5Qrcs6jBSu02uUKQ\n'\
            'o5uHaEaCXynR2Xo1L1kvKYFivKM+GWs3KY/pT/1lCaP6xCVtED/Tepy1K0lgfj5e\n'\
            '-----END CERTIFICATE-----')
    # Key fixtures — NOTE(review): same dummy base64 body reused; these are
    # not real keys.
    with open('/etc/letsencrypt/live/' + fqdn + '/key.pem', 'w', newline="\n") as key_file:
        key_file.write('-----BEGIN KEY-----\n'\
            'MIIGKDCCBRCgAwIBAgISA5FY/8zZgI3QbaUi3wSpgtMJMA0GCSqGSIb3DQEBCwUA\n'\
            'MEoxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MSMwIQYDVQQD\n'\
            'ExpMZXQncyBFbmNyeXB0IEF1dGhvcml0eSBYMzAeFw0xODEwMDUxMDM4MjRaFw0x\n'\
            'OTAxMDMxMDM4MjRaMCoxKDAmBgNVBAMTH3BvcnRhbC5taWMtdGVzdC5wcm9jdWJl\n'\
            'LWRlbW8uanAwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrNahcIILd\n'\
            'gyEMcwveA7CYC+ZnmDW2jbE2AwXO6b3G4/lMX8YDeq5FXLdC5Qrcs6jBSu02uUKQ\n'\
            'o5uHaEaCXynR2Xo1L1kvKYFivKM+GWs3KY/pT/1lCaP6xCVtED/Tepy1K0lgfj5e\n'\
            '-----END KEY-----')
    with open('/etc/letsencrypt/live/' + fqdn + '/privkey.pem', 'w', newline="\n") as privkey_file:
        privkey_file.write('-----BEGIN PRIVATE KEY-----\n'\
            'MIIGKDCCBRCgAwIBAgISA5FY/8zZgI3QbaUi3wSpgtMJMA0GCSqGSIb3DQEBCwUA\n'\
            'MEoxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MSMwIQYDVQQD\n'\
            'ExpMZXQncyBFbmNyeXB0IEF1dGhvcml0eSBYMzAeFw0xODEwMDUxMDM4MjRaFw0x\n'\
            'OTAxMDMxMDM4MjRaMCoxKDAmBgNVBAMTH3BvcnRhbC5taWMtdGVzdC5wcm9jdWJl\n'\
            'LWRlbW8uanAwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrNahcIILd\n'\
            'gyEMcwveA7CYC+ZnmDW2jbE2AwXO6b3G4/lMX8YDeq5FXLdC5Qrcs6jBSu02uUKQ\n'\
            'o5uHaEaCXynR2Xo1L1kvKYFivKM+GWs3KY/pT/1lCaP6xCVtED/Tepy1K0lgfj5e\n'\
            '-----END PRIVATE KEY-----')
    return None
    # return internal_main.main(cli_args)
|
c07bd0e7618f9672fed835e05f47d31ddcf4146d
| 19,355
|
def count_occurences(string, substring):
    """Count occurrences of ``substring`` in ``string``, including
    overlapping ones (each match restarts the scan one character later)."""
    occurrences = 0
    search_from = 0
    length = len(string)
    # Bounding the scan by len(string) also caps the empty-substring case
    # at one hit per character position.
    while search_from < length:
        hit = string.find(substring, search_from)
        if hit == -1:
            break
        occurrences += 1
        search_from = hit + 1
    return occurrences
|
0e3991166880e488cbc8311e937c22d4ce6330af
| 19,356
|
import re
def clean_phone_number(phone_number):
    """
    Strip every character that is not an ASCII digit (dashes, spaces,
    parentheses, ...) and prefix the remaining digits with '+'.
    :param phone_number: raw phone number string
    :return: cleaned number, e.g. '(12) 34-56' -> '+123456'
    """
    digits_only = re.sub(r"[^0-9]", "", phone_number)
    return '+' + digits_only
|
2b7b1d1b8e236e1c79e86d54d0b70a268431efe3
| 19,357
|
def _get_transform_y(element):
"""
Extracts the translateY for an element from its transform or assumes it to be 0
:return: an int representing the translateY value from the
transform or 0 if there is no transform present
"""
return element.get('transform').get('translateY') or 0
|
5c1c00843148ae384604d71c46aceaa2904a6497
| 19,358
|
def readme():
    """
    Return a properly formatted readme text, if possible, that can be used
    as the long description for setuptools.setup.
    """
    with open("readme.md") as readme_handle:
        return readme_handle.read()
|
e1c197c3ac3d6111a5bd1a8f708d3d5358d8f18c
| 19,360
|
import logging
def format_covid_notification(api_request: dict) -> dict:
    """
    Format information retrieved from an API request into a covid
    notification dict with 'title' and 'content' keys.

    Keyword arguments:
    api_request -- the information retrieved from an API request; its
    'data' list holds today's record first and yesterday's second.
    """
    records = api_request["data"]
    today = records[0]
    yesterday = records[1]
    # Threshold level is derived from today's new-case count.
    cases_today = int(today['newCasesByPublishDate'])
    if cases_today > 15000:
        threshold_level = 'Red - Not safe'
    elif cases_today > 10000:
        threshold_level = 'Yellow - Be careful'
    else:
        threshold_level = 'Green - safe'
    notification = {
        'title': "Covid-19 report - England " + today['date'],
        'content': (
            "New cases today: " + str(today["newCasesByPublishDate"]) +
            " New cases yesterday: " + str(yesterday["newCasesByPublishDate"]) +
            ", Total cases: " + str(today["cumCasesByPublishDate"]) +
            ", Yesterday's deaths: " + str(yesterday["newDeathsByDeathDate"]) +
            ", Total deaths: " + str(yesterday["cumDeathsByDeathDate"]) +
            ' Threshold level is ' + threshold_level
        ),
    }
    logging.info('notification ' + notification['title'] + ' has been created')
    return notification
|
a901c57cc48db5acafac2a19f2339e86feb8a4f2
| 19,361
|
def _p_r_log_helper(x, a, b, alpha, beta, n, p):
"""
Step function used with the pollard rho discrete log algorithm
"""
if x % 3 == 1:
f = ((beta * x) % p, a, (b + 1) % n)
elif x % 3 == 0:
f = ((x * x) % p, (2 * a) % n, (2 * b) % n)
else:
f = ((alpha * x) % p, (a + 1) % n, b)
return f
|
092f7111d3b7fb8ce8a4ba27775bdf89cb99308f
| 19,362
|
def clause_b_4_1_8_T_w(
        L_f,
        w_t,
        Q,
        T_0,
        *_,
        **__,
):
    """Equation B.14. Flame temperature at the window opening.

    NOTE(review): parameter meanings follow EN 1991-1-2 Annex B notation —
    presumably L_f is the flame length, w_t the window width, Q the heat
    release rate and T_0 the ambient temperature [K]; confirm against the
    standard. Extra positional/keyword args are accepted and ignored so the
    clause can be called with a shared parameter dict.

    :return: dict with the computed temperature ``T_w`` [K] and ``_latex``,
        a list of LaTeX strings showing the symbolic and substituted forms.
    :raises AssertionError: when the validity condition L_f*w_t/Q < 1 fails.
    """
    # Validity condition of eq. B.14; re-raised with the substituted values
    # for a readable diagnostic.
    try:
        assert L_f * w_t / Q < 1
    except AssertionError:
        raise AssertionError(f'Condition not satisfied L_f*w_t/Q<1 in Clause B.4.1(8) ({L_f:.2f}*{w_t:.2f}/{Q:.2f}={L_f*w_t/Q:.2f}!<{1})')
    T_w = 520 / (1 - 0.4725 * (L_f * w_t / Q)) + T_0
    # Symbolic equation, substituted form, and results in K and degrees C.
    _latex = [
        'T_w=\\frac{520}{1-0.4725\\frac{L_f w_t}{Q}}+T_0',
        f'T_w=\\frac{{520}}{{1-0.4725\\frac{{{L_f:.2f}\\cdot {w_t:.2f}}}{{{Q:.2f}}}}}+{T_0:.2f}',
        f'T_w={T_w:.2f}\\ \\left[K\\right]',
        f'T_w={T_w - 273.15:.2f}\\ \\left[^\\circ C\\right]'
    ]
    return dict(T_w=T_w, _latex=_latex)
|
cdeccaec22c4167a41d18f205756a170ed7925cf
| 19,364
|
def max_profit_in_01knapsack(weight, value, capacity, n) -> int:
    """Solve the 0/1 knapsack problem by bottom-up dynamic programming.

    Args:
        weight: item weights (length >= n).
        value: item values (length >= n).
        capacity: maximum total weight allowed.
        n: number of items considered.

    Returns:
        int: the maximum achievable total value within the capacity.
    """
    rows = n + 1
    cols = capacity + 1
    # dp[i][c] = best value using the first i items with capacity c;
    # row/column 0 stay 0 (no items / no capacity).
    dp = [[0] * cols for _ in range(rows)]
    for item in range(1, rows):
        item_weight = weight[item - 1]
        item_value = value[item - 1]
        for cap in range(1, cols):
            best_without = dp[item - 1][cap]
            if item_weight <= cap:
                best_with = item_value + dp[item - 1][cap - item_weight]
                dp[item][cap] = max(best_with, best_without)
            else:
                # Item does not fit at this capacity.
                dp[item][cap] = best_without
    return dp[n][capacity]
|
d6082abf68cb04f6649f6e1868b3fbac8406d2bc
| 19,365
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.