content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def get_public_key() -> bytes:
    """
    Retrieve the raw public key derived from the configured webhook key.

    :return: Raw (unencoded) public key bytes
    """
    # Rebuild the Ed25519 private key from the configured secret bytes,
    # then export its public half in raw form.
    private_key = Ed25519PrivateKey.from_private_bytes(config.WEBHOOK_KEY)
    public_key = private_key.public_key()
    return public_key.public_bytes(Encoding.Raw, PublicFormat.Raw)
def create_empty_array(n_k, n_vals_i, n_feats):
    """Create null measure in the array form.

    Parameters
    ----------
    n_k: int
        the number of perturbations
    n_vals_i: int
        the number of indices of the output measure.
    n_feats: int
        the number of features.

    Returns
    -------
    measure: np.ndarray
        the null measure to be filled by the computation of the spatial
        descriptor model.
    """
    # Note the axis order: (indices, features, perturbations).
    shape = (n_vals_i, n_feats, n_k)
    return np.zeros(shape)
def transform_bbox(x):
    """
    Transform a bounding box into a polygon geometry (WKT string).

    :param x: bounding box as an indexable of four coordinates
              (x[0], x[1], x[2], x[3]) -- presumably (xmin, ymin, xmax, ymax)
              based on the corner points built below; TODO confirm with callers
    :return: WKT string of the polygon, or None if construction fails
    """
    try:
        # Build the ring from the four corners and close it.
        ring = ogr.Geometry(ogr.wkbLinearRing)
        ring.AddPoint(x[0], x[1])
        ring.AddPoint(x[2], x[1])
        ring.AddPoint(x[2], x[3])
        ring.AddPoint(x[0], x[3])
        ring.CloseRings()
        # Create polygon
        poly = ogr.Geometry(ogr.wkbPolygon)
        poly.AddGeometry(ring)
        # AddPoint creates 3D points (z=0); flatten back to 2D before export.
        poly.FlattenTo2D()
        bbox = poly.ExportToWkt()
    except Exception:
        # BUG FIX: narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.  Malformed input still yields None.
        bbox = None
    return bbox
from ressources.interactions import getIntKey
def addUser(username: str, autoGenerateKeys: bool = True, keys: list = None):
    """
    Create a user with the corresponding username in the users list and return
    the corresponding user id, which can be used as index.

    username: name of the user
    autoGenerateKeys: generate elGamal keys for the user
    keys: base64 of tuple (public key, private key); required when
          autoGenerateKeys is False
    """
    # BUG FIX: the default for `keys` was a mutable list literal ([]), a
    # classic shared-default pitfall.  None is used as the sentinel instead;
    # backward compatible because `keys` is only read when autoGenerateKeys
    # is False, in which case callers must supply it anyway.
    if keys is None:
        keys = []
    if autoGenerateKeys:
        # generate keys with the configured signing algorithm
        algModule = __import__("core.asymmetric." + c.BC_SIGNING_ALG, fromlist=[""])
        publicKey, privateKey = algModule.key_gen(c.BC_KEY_SIZE)
    else:
        # decode base64 tuple of keys; elGamal keys carry 3 components,
        # other algorithms 2 (clearer than the original `[2, 3][cond]` trick)
        n_components = 3 if c.BC_SIGNING_ALG == "elGamal" else 2
        publicKey = getIntKey(keys[0], n_components)
        privateKey = getIntKey(keys[1], n_components)
    userID = len(c.BC_USERS)
    c.BC_USERS.append([userID, username, publicKey, privateKey])
    return userID
from re import DEBUG
from sys import stdout
def setup_logging(debug=False):
    """
    Set up the logging for hypernode_vagrant_runner

    :param bool debug: Log DEBUG level to console (INFO is default)
    :return obj logger: The logger object
    """
    # BUG FIX: the surrounding file imports DEBUG from the `re` module,
    # which is a regex compilation flag, not a logging level -- using it
    # with setLevel set a nonsensical threshold.  Import the logging
    # constants locally from the logging module itself.
    import logging
    from sys import stdout

    logger = logging.getLogger('hypernode_vagrant_runner')
    logger.setLevel(logging.DEBUG if debug else logging.INFO)
    console_handler = logging.StreamHandler(stdout)
    logger.addHandler(console_handler)
    return logger
import json
def remove_conf(module):
    """
    Remove specified module from db.

    Module is identified by its name in lowercase.

    :param module: module name (matched case-insensitively)
    :return: JSON document describing the deleted configuration
    :raises ConfError: with status 404 when nothing was deleted
    """
    # Delete the configuration row matching the lowercased module name.
    res = db.delete("configuration", 'name', str(module).lower())
    if res is None:  # `is None`, not `== None`
        raise ConfError("Module '%s' wasn't deleted" % module, status_code=404)
    # BUG FIX: the original returned json.dumps(orig_config), but orig_config
    # was never defined, so every successful delete raised NameError.  The
    # comment "Get the original document" suggests the delete result was
    # meant to be returned.
    # NOTE(review): confirm db.delete returns the removed document.
    return json.dumps(res)
import re
def video(package):
    """Serve a video file, honoring HTTP Range requests for partial content.

    ``package`` is expected to carry:
      - 'params': mapping holding the video id under ParamType.VideoID
      - 'request': the request object (read for its HTTP_RANGE header)

    Returns a 206 StreamingHttpResponse for ranged requests, otherwise a
    200 FileResponse sending the whole file as an attachment.
    """
    params = package.get('params')
    video_id = params.get(ParamType.VideoID)
    request = package.get('request')
    # Raw "Range" header, e.g. "bytes=0-1023" (empty string when absent).
    range_header = request.META.get('HTTP_RANGE', '').strip()
    range_re = re.compile(r'bytes\s*=\s*(\d+)\s*-\s*(\d*)', re.I)
    range_match = range_re.match(range_header)
    videoinfo = VideoHelper.get_video(video_id)
    filepath = videoinfo['filepath']
    filesize = videoinfo['size']
    if range_match:
        first_byte, last_byte = range_match.groups()
        first_byte = int(first_byte) if first_byte else 0
        # NOTE(review): the client's requested end byte is discarded; the
        # server always serves at most 8 MiB per ranged request, clamped to
        # the end of the file -- presumably deliberate chunking, confirm.
        last_byte = first_byte + 1024 * 1024 * 8
        if last_byte >= filesize:
            last_byte = filesize - 1
        length = last_byte - first_byte + 1
        # 206 Partial Content streaming only the requested window.
        response = StreamingHttpResponse(
            file_iterator(filepath, offset=first_byte, length=length), status=206)
        response['Content-Length'] = str(length)
        response['Content-Range'] = 'bytes %s-%s/%s' % (first_byte, last_byte, filesize)
    else:
        # No (parsable) Range header: send the full file.
        response = FileResponse(open(filepath, 'rb'))
        response['Content-Length'] = str(filesize)
    response['Content-Type'] = 'video/mp4'
    response['Content-Disposition'] = 'attachment;filename="' + videoinfo['filename'] + '"'
    response['Accept-Ranges'] = 'bytes'
    return response
import sqlite3
def task_items(max_entries=None):
    """Information about the items in the task queue.

    Returns a generator of QueueItems.

    Keyword arguments:
    max_entries - (int) (Default: None) Maximum number of items to
                  return. Default is to return all entries.
    """
    con = sqlite3.connect(str(DB_PATH))
    try:
        # `with con` wraps the query in a transaction; it does NOT close
        # the connection on exit, hence the explicit close in `finally`.
        with con:
            c = con.execute(
                f"""
                SELECT task_id, queue_name, position, published, args, kwargs
                FROM {QUEUE_TABLENAME}
                ORDER BY position DESC
                """
            )
            # Fetch rows eagerly while the connection is still open; the
            # returned `map` is lazy, but it iterates the already
            # materialized list of rows.
            if max_entries:
                rows = c.fetchmany(size=max_entries)
            else:
                rows = c.fetchall()
        return map(QueueItem.from_tuple, rows)
    finally:
        # BUG FIX: the original only closed the connection on the success
        # path; any exception in execute/fetch leaked the connection.
        con.close()
from typing import Tuple
from typing import Optional
def _remove_anchors_in_pattern(pattern: str) -> Tuple[Optional[str], Optional[str]]:
    """
    Strip the anchors (``^``, ``$``) from ``pattern``, since schemas are
    always anchored.

    This is necessary since otherwise the schema validation fails.
    See: https://stackoverflow.com/questions/4367914/regular-expression-in-xml-schema-definition-fails

    :return: (pattern without anchors, None) on success,
             or (None, error message) when the pattern cannot be parsed.
    """
    parsed, error = parse_retree.parse(values=[pattern])
    if error is not None:
        regex_line, pointer_line = parse_retree.render_pointer(error.cursor)
        return None, f"{error.message}\n{regex_line}\n{pointer_line}"

    assert parsed is not None
    # Mutates the parsed tree in place, dropping the anchor nodes.
    _AnchorRemover().visit(parsed)

    rendered = parse_retree.render(regex=parsed)
    for chunk in rendered:
        assert isinstance(chunk, str), (
            "Only strings expected when rendering a pattern "
            "supplied originally as a string"
        )
    return "".join(rendered), None
def bfs(connections, start, goal=None):
    """
    Breadth-first search.

    Requires a connections dict with tuples of neighbors per node,
    or a connections function returning neighbors per node.

    Returns:
        if goal is falsy: dict mapping each reached node to its parent on a
                          shortest path back to start (start maps to None)
        elif goal found:  list of nodes on a shortest path start -> goal
        else:             False
    """
    # BUG FIX: the original only compared newly discovered *neighbors*
    # against the goal, so searching for the start node itself returned
    # False instead of the trivial path.
    if goal and start == goal:
        return [start]

    def _reconstruct(parents, origin, target):
        # Walk parent links back from the target, then reverse.
        node = target
        path = [node]
        while node != origin:
            node = parents[node]
            path.append(node)
        path.reverse()
        return path

    # Normalize the two accepted connection forms into one callable.
    get_neighbors = connections if callable(connections) else connections.get

    seen = set()               # nodes already discovered
    frontier = deque([start])  # nodes awaiting expansion
    parents = {start: None}

    while frontier:
        current = frontier.popleft()
        neighbors = get_neighbors(current)
        if neighbors:
            for n in neighbors:
                if n not in seen:
                    seen.add(n)
                    frontier.append(n)
                    parents[n] = current
                    if goal and n == goal:
                        return _reconstruct(parents, start, goal)
        seen.add(current)

    return False if goal else parents
def reverse_complement(dna):
    """
    Reverse-complement a DNA sequence.

    :param dna: string, DNA sequence (uppercase A/C/G/T only -- any other
                character raises KeyError)
    :type dna: str
    :return: reverse-complement of the DNA sequence
    """
    pairs = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    return ''.join(pairs[base] for base in reversed(dna))
import pyarrow
def df_to_bytes(df: pd.DataFrame) -> bytes:
    """Serialize a dataframe to bytes.

    Prefers the pyarrow parquet writer when the library is importable;
    otherwise falls back to csv.
    """
    serializer = df_to_bytes_csv if pyarrow is None else df_to_bytes_parquet
    return serializer(df)
import tqdm
def select_training_voxels(input_masks, threshold=0.1, datatype=np.float32, t1=0):
    """
    Select voxels for training based on an intensity threshold.

    Inputs:
    - input_masks: list containing all subject image paths for a single modality
    - threshold: minimum threshold to apply (after normalizing images with
      0 mean and 1 std)
    - datatype: numpy dtype used for the normalization arithmetic
    - t1: unused here; kept for interface compatibility

    Output:
    - rois: list where each element contains the subject binary mask for
      selected voxels [len(x), len(y), len(z)]
    """
    # Load each image and normalize to zero mean / unit std computed over
    # its non-zero voxels only.
    images = [load_nii(image_name).get_data() for image_name in input_masks]
    images_norm = [
        (im.astype(dtype=datatype) - im[np.nonzero(im)].mean()) / im[np.nonzero(im)].std()
        for im in images
    ]
    # BUG FIX: the module does `import tqdm`, so the progress-bar callable
    # is tqdm.tqdm -- calling the module object itself raises TypeError.
    # (If the original file instead used `from tqdm import tqdm`, revert.)
    rois = [image > threshold
            for image in tqdm.tqdm(images_norm, desc="extract sampling masks")]
    return rois
import itertools
def find_intersections(formula_lists,group_labels,exclusive = True):
    """
    Docstring for function pyKrev.find_intersections
    ====================
    This function compares n lists of molecular formula and outputs a dictionary containing the intersections between each list.

    Use
    ----
    find_intersections([list_1,..,list_n],['group_1',...,'group_n'])

    Returns a dictionary in which each key corresponds to a combination of group labels
    and the corresponding value is a set containing the intersections between the groups in that combination.

    Parameters
    ----------
    formula_lists: a list containing n lists of molecular formula. Each item in the sub list should be a formula string.
    group_labels: a list containing n strings of corresponding group labels.
    exclusive: True or False, depending on whether you want the intersections to contain only unique values.
    """
    if len(formula_lists) != len(group_labels):
        # NOTE(review): InputError and intersect are presumably provided by
        # the enclosing package -- confirm they are in scope here.
        raise InputError('formula_lists and group_labels must be of equal length')
    # All non-empty combinations of group labels, processed longest-first so
    # that (with exclusive=True) each formula is claimed by the largest
    # intersection it belongs to before smaller combinations are considered.
    combinations = [seq for i in range(0,len(group_labels)+1) for seq in itertools.combinations(group_labels,i) if len(seq) > 0]
    combinations = sorted(combinations,key = lambda c : len(c),reverse = True) # sort combinations by length
    if exclusive == True:
        assigned_formula = set() #create a set that will hold all the formula already assigned to a group
    # One column per group; shorter formula lists are padded with None by
    # the DataFrame transpose.
    amb = pd.DataFrame(data = formula_lists).T
    amb.columns = group_labels
    intersections = dict()
    for combo in combinations:
        queries = []
        for c in combo:
            formula = list(filter(None,amb[c])) #Remove None entries introduced by dataframe
            queries.append(set(formula))
        if len(queries) == 1: #if there is only one query find the unique elements in it
            q_set = frozenset(queries[0]) #qset is a frozen set, so it will not be mutated by changes to queries[0]
            for f_list in formula_lists: #cycle all formula in formula_lists
                set_f = frozenset(f_list) #convert f_list to sets, must be frozen so type matches q_set
                if set_f == q_set: # ignore the set that corresponds to the query
                    pass
                else:
                    queries[0] = queries[0] - set_f #delete any repeated elements in fset
            intersections[combo] = queries[0]
        elif len(queries) > 1:
            if exclusive == True:
                q_intersect = intersect(queries)
                intersections[combo] = q_intersect - assigned_formula #remove any elements from q_intersect that have already been assigned
                assigned_formula.update(q_intersect) #update the assigned_set with q_intersect
            else:
                intersections[combo] = intersect(queries)
    return intersections
import starlink.Ast as Ast
import starlink.Atl as Atl
def wcs_align(hdu_in, header, outname=None, clobber=False):
    """
    Align one FITS image to a specified header.

    :param hdu_in: the HDU to reproject (must have header and data);
                   modified in place
    :param header: the target header to project to
    :param outname: the filename to write to (nothing is written when None)
    :param clobber: overwrite the file 'outname' if it exists
    :return: the reprojected fits.PrimaryHDU (the mutated ``hdu_in``)

    Credits: Written by David Berry and adapted to functional form by Adam Ginsburg (adam.g.ginsburg@gmail.com)
    """
    # Create objects that will transfer FITS header cards between an AST
    # FitsChan and the fits header describing the primary HDU of the
    # supplied FITS file.
    adapter_in = Atl.PyFITSAdapter(hdu_in)
    hdu_ref = pyfits.PrimaryHDU(header=header)
    adapter_ref = Atl.PyFITSAdapter(hdu_ref)
    # Create a FitsChan for each and use the above adapters to copy all the
    # header cards into it.
    fitschan_in = Ast.FitsChan(adapter_in, adapter_in)
    fitschan_ref = Ast.FitsChan(adapter_ref, adapter_ref)
    # Get the flavour of FITS-WCS used by the header cards currently in the
    # input FITS file. This is so that we can use the same flavour when we
    # write out the modified WCS.
    encoding = fitschan_in.Encoding
    # Read WCS information from the two FitsChans. Additionally, this removes
    # all WCS information from each FitsChan. The returned wcsinfo object
    # is an AST FrameSet, in which the current Frame describes WCS coordinates
    # and the base Frame describes pixel coodineates. The FrameSet includes a
    # Mapping that specifies the transformation between the two Frames.
    wcsinfo_in = fitschan_in.read()
    wcsinfo_ref = fitschan_ref.read()
    # Check that the input FITS header contained WCS in a form that can be
    # understood by AST.
    if wcsinfo_in is None:
        raise ValueError("Failed to read WCS information from {0}".format(hdu_in))
    # This is restricted to 2D arrays, so check the input FITS file has 2 pixel
    # axes (given by Nin) and 2 WCS axes (given by Nout).
    elif wcsinfo_in.Nin != 2 or wcsinfo_in.Nout != 2:
        raise ValueError("{0} is not 2-dimensional".format(hdu_in))
    # Check the reference FITS file in the same way.
    elif wcsinfo_ref is None:
        raise ValueError("Failed to read WCS information from {0}".format(hdu_ref))
    elif wcsinfo_ref.Nin != 2 or wcsinfo_ref.Nout != 2:
        raise ValueError("{0} is not 2-dimensional".format(hdu_ref))
    # Proceed if the WCS information was read OK.
    # Attempt to get a mapping from pixel coords in the input FITS file to
    # pixel coords in the reference fits file, with alignment occuring by
    # preference in the current WCS frame. Since the pixel coordinate frame
    # will be the base frame in each Frameset, we first invert the FrameSets.
    # This is because the Convert method aligns current Frames, not base
    # frames.
    wcsinfo_in.invert()
    wcsinfo_ref.invert()
    alignment_fs = wcsinfo_in.convert(wcsinfo_ref)
    # Invert them again to put them back to their original state (i.e.
    # base frame = pixel coords, and current Frame = WCS coords).
    wcsinfo_in.invert()
    wcsinfo_ref.invert()
    # Check alignment was possible.
    if alignment_fs is None:
        raise Exception("Cannot find a common coordinate system shared by {0} and {1}".format(hdu_in,hdu_ref))
    else:
        # Get the lower and upper bounds of the input image in pixel indices.
        # All FITS arrays by definition have lower pixel bounds of [1,1] (unlike
        # NDFs). Note, unlike fits AST uses FITS ordering for storing pixel axis
        # values in an array (i.e. NAXIS1 first, NAXIS2 second, etc).
        lbnd_in = [1, 1]
        ubnd_in = [fitschan_in["NAXIS1"], fitschan_in["NAXIS2"]]
        # Find the pixel bounds of the input image within the pixel coordinate
        # system of the reference fits file.
        (lb1, ub1, xl, xu) = alignment_fs.mapbox(lbnd_in, ubnd_in, 1)
        (lb2, ub2, xl, xu) = alignment_fs.mapbox(lbnd_in, ubnd_in, 2)
        # Calculate the bounds of the output image.
        lbnd_out = [int(lb1), int(lb2)]
        ubnd_out = [int(ub1), int(ub2)]
        # Unlike NDFs, FITS images cannot have an arbitrary pixel origin so
        # we need to ensure that the bottom left corner of the input image
        # gets mapped to pixel [1,1] in the output. To do this we, extract the
        # mapping from the alignment FrameSet and add on a ShiftMap (a mapping
        # that just applies a shift to each axis).
        shift = [1 - lbnd_out[0],
                 1 - lbnd_out[1]]
        alignment_mapping = alignment_fs.getmapping()
        shiftmap = Ast.ShiftMap(shift)
        total_map = Ast.CmpMap(alignment_mapping, shiftmap)
        # Modify the pixel bounds of the output image to take account of this
        # shift of origin.
        lbnd_out[0] += shift[0]
        lbnd_out[1] += shift[1]
        ubnd_out[0] += shift[0]
        ubnd_out[1] += shift[1]
        # Get the value used to represent missing pixel values
        if "BLANK" in fitschan_in:
            badval = fitschan_in["BLANK"]
            flags = Ast.USEBAD
        else:
            badval = 0
            flags = 0
        # Resample the data array using the above mapping.
        # NOTE(review): total_map was named pixmap in the original AST
        # example -- confirm this is the intended mapping.
        (npix, out, out_var) = total_map.resample(lbnd_in, ubnd_in,
                                                  hdu_in.data, None,
                                                  Ast.LINEAR, None, flags,
                                                  0.05, 1000, badval, lbnd_out,
                                                  ubnd_out, lbnd_out, ubnd_out)
        # Store the aligned data in the primary HDU, and update the NAXISi
        # keywords to hold the number of pixels along each edge of the
        # rotated image.
        hdu_in.data = out
        fitschan_in["NAXIS1"] = ubnd_out[0] - lbnd_out[0] + 1
        fitschan_in["NAXIS2"] = ubnd_out[1] - lbnd_out[1] + 1
        # The WCS to store in the output is the same as the reference WCS
        # except for the extra shift of origin. So use the above shiftmap to
        # remap the pixel coordinate frame in the reference WCS FrameSet. We
        # can then use this FrameSet as the output FrameSet.
        wcsinfo_ref.remapframe(Ast.BASE, shiftmap)
        # Attempt to write the modified WCS information to the primary HDU
        # (i.e. convert the FrameSet to a set of FITS header cards stored in
        # the FITS file). Indicate that we want to use original flavour of
        # FITS-WCS.
        fitschan_in.Encoding = encoding
        fitschan_in.clear('Card')
        if fitschan_in.write(wcsinfo_ref) == 0:
            raise Exception("Failed to convert the aligned WCS to Fits-WCS")
        # If successful, force the FitsChan to copy its contents into the
        # fits header, then write the changed data and header to the output
        # FITS file.
        else:
            fitschan_in.writefits()
    if outname is not None:
        hdu_in.writeto(outname, clobber=clobber)
    return hdu_in
import os
import tempfile
import csv
import zipfile
import shutil
def save_pumping_test(pump_test, path="", name=None):
    """Save a pumping test to file.

    This writes the variable to a csv file and bundles it, together with
    the variable and observation files, into a single zip archive.

    Parameters
    ----------
    path : :class:`str`, optional
        Path where the variable should be saved. Default: ``""``
    name : :class:`str`, optional
        Name of the file. If ``None``, the name will be generated by
        ``"Test_"+name``. Default: ``None``

    Notes
    -----
    The file will get the suffix ``".tst"``.
    """
    path = os.path.normpath(path)
    # create the path if not existing
    if not os.path.exists(path):
        os.makedirs(path)
    # create a standard name if None is given
    if name is None:
        name = "Test_" + pump_test.name
    # ensure the name ends with '.tst'
    if name[-4:] != ".tst":
        name += ".tst"
    name = _formname(name)
    # create temporal directory for the included files
    patht = tempfile.mkdtemp(dir=path)
    # write the csv-file
    with open(os.path.join(patht, "info.csv"), "w") as csvf:
        writer = csv.writer(
            csvf, quoting=csv.QUOTE_NONNUMERIC, lineterminator="\n"
        )
        writer.writerow(["wtp-version", __version__])
        writer.writerow(["Testtype", "PumpingTest"])
        writer.writerow(["name", pump_test.name])
        writer.writerow(["description", pump_test.description])
        writer.writerow(["timeframe", pump_test.timeframe])
        writer.writerow(["pumpingwell", pump_test.pumpingwell])
        # define names for the variable-files (file extension added autom.)
        pumprname = name[:-4] + "_PprVar"
        aquidname = name[:-4] + "_AqdVar"
        aquirname = name[:-4] + "_AqrVar"
        # save variable-files and record each file's base name in the csv
        pumpr_path = pump_test.pumpingrate.save(patht, pumprname)
        pumpr_base = os.path.basename(pumpr_path)
        writer.writerow(["pumpingrate", pumpr_base])
        aquid_path = pump_test.aquiferdepth.save(patht, aquidname)
        aquid_base = os.path.basename(aquid_path)
        writer.writerow(["aquiferdepth", aquid_base])
        aquir_path = pump_test.aquiferradius.save(patht, aquirname)
        aquir_base = os.path.basename(aquir_path)
        writer.writerow(["aquiferradius", aquir_base])
        okeys = tuple(pump_test.observations.keys())
        writer.writerow(["Observations", len(okeys)])
        obsname = {}
        for k in okeys:
            obsname[k] = name[:-4] + "_" + k + "_Obs.obs"
            writer.writerow([k, obsname[k]])
            pump_test.observations[k].save(patht, obsname[k])
    # compress everything to one zip-file
    file_path = os.path.join(path, name)
    with zipfile.ZipFile(file_path, "w") as zfile:
        zfile.write(os.path.join(patht, "info.csv"), "info.csv")
        zfile.write(pumpr_path, pumpr_base)
        zfile.write(aquir_path, aquir_base)
        zfile.write(aquid_path, aquid_base)
        for k in okeys:
            zfile.write(os.path.join(patht, obsname[k]), obsname[k])
    # delete the temporary directory
    shutil.rmtree(patht, ignore_errors=True)
    return file_path
from typing import Dict
def merge_flag_dictionaries(a: Dict[str, str], b: Dict[str, str]) -> Dict[str, str]:
    """
    Merge two flag dictionaries; values of keys present in both are joined
    with a single space.  Keys present in only one input pass through
    unchanged.  Neither input is modified.

    >>> a = {'CFLAGS': '-1'}
    >>> b = {'CFLAGS': ' -2'}
    >>> merge_flag_dictionaries(a, b)
    {'CFLAGS': '-1 -2'}
    """
    merged: Dict[str, str] = dict(a)
    for key, value in b.items():
        if key in merged:
            # BUG FIX: the original joined raw values with ' '.join, so
            # surrounding whitespace (as in ' -2') produced a double space --
            # contradicting this function's own doctest.  Strip each part
            # before joining.  This also replaces the O(len(a)*len(b))
            # nested loop with a single pass over b.
            merged[key] = " ".join(part.strip() for part in (merged[key], value))
        else:
            merged[key] = value
    return merged
def _delete_magic(line):
    """Return ``line`` unchanged, unless it starts with the # [magic]
    prefix -- then return an empty string.
    """
    if line.startswith(_PREFIX):
        return ''
    return line
import odbc
import pyodbc
import psycopg2
import pgdb
def init_db_conn(connect_string, username, passwd, show_connection_info, show_version_info=True):
    """initializes db connections, can work with PyGres or psycopg2

    Supported connect_string forms:
      - JDBC url (when USE_JYTHON)
      - ODBC DSN containing '/'
      - 'Driver={PostgreSQL};Server=...;Port=...;Database=...;Uid=...;Pwd=...;'
      - 'host:[port]:database:user:password'

    Returns the module-global connection object, or None on failure.
    """
    global _CONN
    try:
        dbinfo = connect_string
        if show_connection_info:
            print(dbinfo)
        if USE_JYTHON:
            _CONN = zxJDBC.connect(connect_string, username, passwd, 'org.postgresql.Driver')
        elif '/' in connect_string:
            _CONN = odbc.odbc(connect_string)
            if show_connection_info:
                print(_CONN)
        elif connect_string.startswith('Driver='):
            # Driver={PostgreSQL};Server=IP address;Port=5432;Database=myDataBase;Uid=myUsername;Pwd=myPassword;
            # Driver={PostgreSQL};Server=isof-test64;Port=5435;Database=isof_stable;Uid=postgres;Pwd=postgres;
            _CONN = pyodbc.connect(connect_string)
            if show_connection_info:
                print(_CONN)
        else:
            # 'host:[port]:database:user:password'
            arr = connect_string.split(':')
            if len(arr) > 4:
                host = '%s:%s' % (arr[0], arr[1])
                port = int(arr[1])
                dbname = arr[2]
                user = arr[3]
                passwd = arr[4]
            elif len(arr) == 4:
                host = arr[0]
                port = -1
                dbname = arr[1]
                user = arr[2]
                passwd = arr[3]
            else:
                # NOTE(review): `exceptions.ImportError` is Python-2 style;
                # kept for compatibility with the surrounding project.
                raise exceptions.ImportError('Incorrect connect_string!\n\n%s' % (USAGE))
            if port > 0:
                host = host.split(':')[0]
                sport = 'port=%d' % (port)
            else:
                sport = ''
            dsn = "host=%s %s dbname=%s user=%s password=%s" % (host, sport, dbname, user, passwd)
            if show_connection_info:
                print(dsn)
            dbinfo = 'db: %s:%s' % (host, dbname)
            use_pgdb = 0
            # BUG FIX: the original contained `try:` blocks with empty
            # bodies (a syntax error) -- the probing imports that belong
            # inside them were evidently hoisted out.  Restore them, and
            # catch ImportError specifically instead of a bare except.
            try:
                import psycopg2
            except ImportError:
                try:
                    import pgdb
                    use_pgdb = 1
                except ImportError:
                    raise exceptions.ImportError('No PostgreSQL library, install psycopg2 or PyGres!')
            if not _CONN:
                if show_connection_info:
                    print(dbinfo)
                if use_pgdb:
                    _CONN = pgdb.connect(database=dbname, host=host, user=user, password=passwd)
                    if show_connection_info:
                        print(_CONN)
                else:
                    _CONN = psycopg2.connect(dsn)
                    if show_connection_info:
                        print(_CONN)
        if show_version_info:
            add_ver_info(connect_string, username)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C is not swallowed;
        # connection failures are reported and None is returned.
        ex = sys.exc_info()
        s = 'Exception: %s: %s\n%s' % (ex[0], ex[1], dbinfo)
        print(s)
        return None
    return _CONN
import re
def untokenize(words):
    """
    Source: https://github.com/commonsense/metanl/blob/master/metanl/token_utils.py

    Undo a tokenizing operation, restoring punctuation and spaces to the
    places that people expect them to be.

    Ideally, `untokenize(tokenize(text))` should be identical to `text`,
    except for line breaks.
    """
    text = ' '.join(words)
    # Quote marks and ellipses.
    text = text.replace("`` ", '"').replace(" ''", '"').replace('. . .', '...')
    # Parentheses.
    text = text.replace(" ( ", " (").replace(" ) ", ") ")
    # Punctuation binds to the preceding token (mid-string, then at the end).
    text = re.sub(r' ([.,:;?!%]+)([ \'"`])', r"\1\2", text)
    text = re.sub(r' ([.,:;?!%]+)$', r"\1", text)
    # Apostrophes, contractions, and "can not".
    text = text.replace(" '", "'").replace(" n't", "n't").replace("can not", "cannot")
    text = text.replace(" ` ", " '")
    text = text.replace("$ ", "$")  # added
    return text.strip()
def create_new_course(request_ctx, account_id, course_name=None, course_course_code=None, course_start_at=None, course_end_at=None, course_license=None, course_is_public=None, course_is_public_to_auth_users=None, course_public_syllabus=None, course_public_description=None, course_allow_student_wiki_edits=None, course_allow_wiki_comments=None, course_allow_student_forum_attachments=None, course_open_enrollment=None, course_self_enrollment=None, course_restrict_enrollments_to_course_dates=None, course_term_id=None, course_sis_course_id=None, course_integration_id=None, course_hide_final_grades=None, course_apply_assignment_group_weights=None, offer=None, enroll_me=None, course_syllabus_body=None, **request_kwargs):
    """
    Create a new course

    :param request_ctx: The request context
    :type request_ctx: :class:RequestContext
    :param account_id: (required) The unique ID of the account to create to course under.
    :type account_id: integer
    :param course_name: (optional) The name of the course. If omitted, the course will be named "Unnamed Course."
    :type course_name: string or None
    :param course_course_code: (optional) The course code for the course.
    :type course_course_code: string or None
    :param course_start_at: (optional) Course start date in ISO8601 format, e.g. 2011-01-01T01:00Z
    :type course_start_at: datetime or None
    :param course_end_at: (optional) Course end date in ISO8601 format. e.g. 2011-01-01T01:00Z
    :type course_end_at: datetime or None
    :param course_license: (optional) The name of the licensing. Should be one of the following abbreviations (a descriptive name is included in parenthesis for reference): - 'private' (Private Copyrighted) - 'cc_by_nc_nd' (CC Attribution Non-Commercial No Derivatives) - 'cc_by_nc_sa' (CC Attribution Non-Commercial Share Alike) - 'cc_by_nc' (CC Attribution Non-Commercial) - 'cc_by_nd' (CC Attribution No Derivatives) - 'cc_by_sa' (CC Attribution Share Alike) - 'cc_by' (CC Attribution) - 'public_domain' (Public Domain).
    :type course_license: string or None
    :param course_is_public: (optional) Set to true if course if public.
    :type course_is_public: boolean or None
    :param course_is_public_to_auth_users: (optional) Set to true if course is public to authorized users.
    :type course_is_public_to_auth_users: boolean or None
    :param course_public_syllabus: (optional) Set to true to make the course syllabus public.
    :type course_public_syllabus: boolean or None
    :param course_public_description: (optional) A publicly visible description of the course.
    :type course_public_description: string or None
    :param course_allow_student_wiki_edits: (optional) If true, students will be able to modify the course wiki.
    :type course_allow_student_wiki_edits: boolean or None
    :param course_allow_wiki_comments: (optional) If true, course members will be able to comment on wiki pages.
    :type course_allow_wiki_comments: boolean or None
    :param course_allow_student_forum_attachments: (optional) If true, students can attach files to forum posts.
    :type course_allow_student_forum_attachments: boolean or None
    :param course_open_enrollment: (optional) Set to true if the course is open enrollment.
    :type course_open_enrollment: boolean or None
    :param course_self_enrollment: (optional) Set to true if the course is self enrollment.
    :type course_self_enrollment: boolean or None
    :param course_restrict_enrollments_to_course_dates: (optional) Set to true to restrict user enrollments to the start and end dates of the course.
    :type course_restrict_enrollments_to_course_dates: boolean or None
    :param course_term_id: (optional) The unique ID of the term to create to course in.
    :type course_term_id: integer or None
    :param course_sis_course_id: (optional) The unique SIS identifier.
    :type course_sis_course_id: string or None
    :param course_integration_id: (optional) The unique Integration identifier.
    :type course_integration_id: string or None
    :param course_hide_final_grades: (optional) If this option is set to true, the totals in student grades summary will be hidden.
    :type course_hide_final_grades: boolean or None
    :param course_apply_assignment_group_weights: (optional) Set to true to weight final grade based on assignment groups percentages.
    :type course_apply_assignment_group_weights: boolean or None
    :param offer: (optional) If this option is set to true, the course will be available to students immediately.
    :type offer: boolean or None
    :param enroll_me: (optional) Set to true to enroll the current user as the teacher.
    :type enroll_me: boolean or None
    :param course_syllabus_body: (optional) The syllabus body for the course
    :type course_syllabus_body: string or None
    :return: Create a new course
    :rtype: requests.Response (with Course data)
    """
    path = '/v1/accounts/{account_id}/courses'
    # Map each keyword argument onto the Canvas "course[...]" form fields;
    # None values are passed through (presumably dropped by the client
    # layer before the request is sent -- confirm in `client.post`).
    payload = {
        'course[name]' : course_name,
        'course[course_code]' : course_course_code,
        'course[start_at]' : course_start_at,
        'course[end_at]' : course_end_at,
        'course[license]' : course_license,
        'course[is_public]' : course_is_public,
        'course[is_public_to_auth_users]' : course_is_public_to_auth_users,
        'course[public_syllabus]' : course_public_syllabus,
        'course[public_description]' : course_public_description,
        'course[allow_student_wiki_edits]' : course_allow_student_wiki_edits,
        'course[allow_wiki_comments]' : course_allow_wiki_comments,
        'course[allow_student_forum_attachments]' : course_allow_student_forum_attachments,
        'course[open_enrollment]' : course_open_enrollment,
        'course[self_enrollment]' : course_self_enrollment,
        'course[restrict_enrollments_to_course_dates]' : course_restrict_enrollments_to_course_dates,
        'course[term_id]' : course_term_id,
        'course[sis_course_id]' : course_sis_course_id,
        'course[integration_id]' : course_integration_id,
        'course[hide_final_grades]' : course_hide_final_grades,
        'course[apply_assignment_group_weights]' : course_apply_assignment_group_weights,
        'offer' : offer,
        'enroll_me' : enroll_me,
        'course[syllabus_body]' : course_syllabus_body,
    }
    url = request_ctx.base_api_url + path.format(account_id=account_id)
    response = client.post(request_ctx, url, payload=payload, **request_kwargs)
    return response
from copy import deepcopy
def addReference(inData, reference):
    """
    Return a deep copy of ``inData`` with ``reference`` appended as a DOI
    'References' related identifier; return None (and leave the input
    untouched) when the reference is already recorded.
    """
    data = deepcopy(inData)
    known_refs = [
        entry['relatedIdentifier']
        for entry in data['relatedIdentifiers']
        if entry['relationType'] == 'References'
    ]
    if reference not in known_refs:
        print(reference, 'is NOT in existing references, adding it.')
    else:
        print(reference, 'is in existing references, do Noting.')
        return None # temporary.
    data['relatedIdentifiers'].append({
        "relatedIdentifier": reference,
        "relatedIdentifierType": 'DOI',
        "relationType": 'References',
    })
    return data
def len_path_in_limit(p, n=128):
    """Return True when the path length is strictly below the limit ``n``."""
    return n > len(p)
import torch
def threshold(tensor, density):
    """
    Compute a magnitude-based threshold for the given tensor.

    :param tensor: PyTorch tensor
    :type tensor: `torch.Tensor`
    :param density: Desired ratio of nonzeros to total elements
    :type density: `float`
    :return: Magnitude threshold (the smallest magnitude kept)
    :rtype: `float`
    :raises RuntimeError: if ``density`` keeps no elements at all
    """
    # Flattened magnitudes of every element.
    magnitudes = tensor.abs().view(-1)
    numel = int(density * magnitudes.numel())
    if numel == 0:
        raise RuntimeError('Provided density value causes model to be zero.')
    # FIX: the original applied .abs() a second time inside topk, which was
    # redundant since `magnitudes` is already non-negative.
    topk, _ = torch.topk(magnitudes, numel, sorted=True)
    # sorted=True puts the largest first; the last entry is the cutoff.
    return topk.data[-1]
def _nt_quote_args(args):
"""Quote command-line arguments for DOS/Windows conventions.
Just wraps every argument which contains blanks in double quotes, and
returns a new argument list.
"""
# XXX this doesn't seem very robust to me -- but if the Windows guys
# say it'll work, I guess I'll have to accept it. (What if an arg
# contains quotes? What other magic characters, other than spaces,
# have to be escaped? Is there an escaping mechanism other than
# quoting?)
for i, arg in enumerate(args):
if ' ' in arg:
args[i] = '"%s"' % arg
return args | a4281afcbc572f02e719f97f92ec30bdf4ddb138 | 3,631,824 |
def algorithm_free_one_only_over_isls(
        output_dynamic_state_dir,
        time_since_epoch_ns,
        satellites,
        ground_stations,
        sat_net_graph_only_satellites_with_isls,
        ground_station_satellites_in_range,
        num_isls_per_sat,
        sat_neighbor_to_if,
        list_gsl_interfaces_info,
        prev_output,
        enable_verbose_logs
):
    """
    FREE-ONE ONLY OVER INTER-SATELLITE LINKS ALGORITHM
    "one"
    This algorithm assumes that every satellite and ground station has exactly 1 GSL interface.
    "free"
    This 1 interface is bound to a maximum outgoing bandwidth, but can send to any other
    GSL interface (well, satellite -> ground-station, and ground-station -> satellite) in
    range. ("free") There is no reciprocation of the bandwidth asserted.
    "only_over_isls"
    It calculates a forwarding state, which is essentially a single shortest path.
    It only considers paths which go over the inter-satellite network, and does not make use of ground
    stations relay. This means that every path looks like:
    (src gs) - (sat) - (sat) - ... - (sat) - (dst gs)

    Side effect: writes the GSL interface bandwidth state file for this
    epoch into output_dynamic_state_dir (full state only at t=0).
    Returns a dict with the single key "fstate", holding the forwarding
    state computed by calculate_fstate_shortest_path_without_gs_relaying.
    """
    if enable_verbose_logs:
        print("\nALGORITHM: FREE ONE ONLY OVER ISLS")
    # Check the graph
    # Pre-condition: the satellite-only graph has exactly one node per
    # satellite and no edges towards ground-station node ids (which are
    # numbered after the satellites).
    if sat_net_graph_only_satellites_with_isls.number_of_nodes() != len(satellites):
        raise ValueError("Number of nodes in the graph does not match the number of satellites")
    for sid in range(len(satellites)):
        for n in sat_net_graph_only_satellites_with_isls.neighbors(sid):
            if n >= len(satellites):
                raise ValueError("Graph cannot contain satellite-to-ground-station links")
    #################################
    # BANDWIDTH STATE
    #
    # There is only one GSL interface for each node (pre-condition), which as-such will get the entire bandwidth
    output_filename = output_dynamic_state_dir + "/gsl_if_bandwidth_" + str(time_since_epoch_ns) + ".txt"
    if enable_verbose_logs:
        print(" > Writing interface bandwidth state to: " + output_filename)
    with open(output_filename, "w+") as f_out:
        # Only the initial epoch writes the full state; later epochs leave
        # the file empty (delta semantics: bandwidth never changes here).
        if time_since_epoch_ns == 0:
            for node_id in range(len(satellites)):
                f_out.write("%d,%d,%f\n"
                            % (node_id, num_isls_per_sat[node_id],
                               list_gsl_interfaces_info[node_id]["aggregate_max_bandwidth"]))
            for node_id in range(len(satellites), len(satellites) + len(ground_stations)):
                f_out.write("%d,%d,%f\n"
                            % (node_id, 0, list_gsl_interfaces_info[node_id]["aggregate_max_bandwidth"]))
    #################################
    # FORWARDING STATE
    #
    # Previous forwarding state (to only write delta)
    prev_fstate = None
    if prev_output is not None:
        prev_fstate = prev_output["fstate"]
    # GID to satellite GSL interface index
    gid_to_sat_gsl_if_idx = [0] * len(ground_stations) # (Only one GSL interface per satellite, so the first)
    # Forwarding state using shortest paths
    fstate = calculate_fstate_shortest_path_without_gs_relaying(
        output_dynamic_state_dir,
        time_since_epoch_ns,
        len(satellites),
        len(ground_stations),
        sat_net_graph_only_satellites_with_isls,
        num_isls_per_sat,
        gid_to_sat_gsl_if_idx,
        ground_station_satellites_in_range,
        sat_neighbor_to_if,
        prev_fstate,
        enable_verbose_logs
    )
    if enable_verbose_logs:
        print("")
    return {
        "fstate": fstate
    } | ca540acb71218579c63f9d19b8f3597fb376488f | 3,631,825
def combine_fo_m(m, moved_f):
    """
    derate
    1 -> available
    0 -> not available
    rules for combing after moving fo
    r -> min(m,fo)
    """
    # Pair the two availability series column-wise, then take the
    # row-wise minimum (combined availability).
    paired = pd.DataFrame({"m": m, "newf": moved_f})
    return paired.apply(min, axis=1)
import random
import copy
def modify_drone(solution, simulation):
    """Reassigns one randomly chosen operation of the solution to a
    different (randomly drawn) drone.

    When the environment only has a single drone there is nothing to
    change and a shallow copy of the solution is returned as-is.

    Parameters:
        solution(List[Transportation]): The list of the transportations of the solution
        simulation(Simulation): The simulation
    Returns:
        List[Transportation]: The modified solution
    """
    mutated = solution.copy()
    if simulation.environment.drones_count == 1:
        return mutated
    target_idx = random.randrange(0, len(mutated))
    # Keep drawing until the drone differs from the currently assigned one.
    replacement_drone = simulation.random_drone()
    while mutated[target_idx].drone == replacement_drone:
        replacement_drone = simulation.random_drone()
    # Deep-copy the transportation so the drone change is visible only in
    # the mutated solution, not in the original one.
    reassigned = copy.deepcopy(mutated[target_idx])
    reassigned.drone = replacement_drone
    mutated[target_idx] = reassigned
    return mutated
def keygen():
    """
    Generates random RSA keys.

    Returns a tuple (e, d, c) where c = a*b is the modulus, e the public
    exponent (coprime to phi) and d its modular inverse.
    """
    a = gen_prime()
    b = gen_prime()
    # RSA requires two *distinct* primes.  The original did
    # `if a == b: keygen()` which discarded the recursive result, so
    # equal primes could still slip through; regenerate b instead.
    while a == b:
        b = gen_prime()
    c = a * b
    # Euler totient phi(c) for distinct primes a, b.
    m = (a - 1) * (b - 1)
    e = coPrime(m)
    d = mod_inverse(e, m)
    return (e, d, c)
def _succ(p, l):
    """
    retrieve the successor of p in list l, wrapping around to the first
    element after the last one
    """
    following = l.index(p) + 1
    # Modulo handles the wrap-around at the end of the list.
    return l[following % len(l)]
def generate_diff_mos(laygen, objectname_pfix, placement_grid, routing_grid_m1m2, devname_mos_boundary, devname_mos_body,
                      devname_mos_dmy, m=1, m_dmy=0, origin=np.array([0,0])):
    """generate an analog differential mos structure with dummmies

    :param laygen: layout generator object (provides relplace/route/via)
    :param objectname_pfix: instance-name prefix for all placed devices
    :param placement_grid: placement grid name
    :param routing_grid_m1m2: metal1/metal2 routing grid name
    :param devname_mos_boundary: boundary cell template name
    :param devname_mos_body: transistor body cell template name
    :param devname_mos_dmy: dummy transistor cell template name
    :param m: number of fingers per side
    :param m_dmy: number of dummy fingers per side (0 disables dummies)
    :param origin: placement origin on the grid
    :return: [imbl0, imdmyl0, iml0, imr0, imdmyr0, imbr0] placed instances
        (the dummy entries are None when m_dmy == 0)

    NOTE(review): `origin=np.array([0,0])` is a mutable default evaluated
    once at definition time; it appears to be read-only here, but confirm
    laygen.relplace never mutates its xy argument.
    """
    pg = placement_grid
    rg12 = routing_grid_m1m2
    pfix = objectname_pfix
    # placement
    # Order left-to-right: boundary, [left dummies], left body, mirrored
    # right body, [right dummies], mirrored boundary ('MY' = mirror about Y).
    imbl0 = laygen.relplace(name="I" + pfix + 'BL0', templatename=devname_mos_boundary, gridname=pg, xy=origin)
    refi=imbl0
    if not m_dmy==0:
        imdmyl0 = laygen.relplace(name="I" + pfix + 'DMYL0', templatename=devname_mos_dmy, gridname=pg, refobj=refi, shape=[m_dmy, 1])
        refi=imdmyl0
    else:
        imdmyl0=None
    iml0 = laygen.relplace(name="I" + pfix + '0', templatename=devname_mos_body, gridname=pg, refobj=refi, shape=[m, 1])
    imr0 = laygen.relplace(name="I" + pfix + '1', templatename=devname_mos_body, gridname=pg, refobj=iml0, shape=[m, 1], transform='MY')
    refi=imr0
    if not m_dmy==0:
        imdmyr0 = laygen.relplace(name="I" + pfix + 'DMYR0', templatename=devname_mos_dmy, gridname=pg, refobj=refi, shape=[m_dmy, 1], transform='MY')
        refi=imdmyr0
    else:
        imdmyr0=None
    imbr0 = laygen.relplace(name="I" + pfix + 'BR0', templatename=devname_mos_boundary, gridname=pg, refobj=refi, transform='MY')
    # First column of each instance array: the individual transistor elements.
    mdl=iml0.elements[:, 0]
    mdr=imr0.elements[:, 0]
    #route
    #gate
    rgl0=laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=mdl[0].pins['G0'], refobj1=mdl[-1].pins['G0'])
    rgr0=laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=mdr[0].pins['G0'], refobj1=mdr[-1].pins['G0'])
    for _mdl, _mdr in zip(mdl, mdr):
        laygen.via(name=None, xy=[0, 0], refobj=_mdl.pins['G0'], gridname=rg12)
        laygen.via(name=None, xy=[0, 0], refobj=_mdr.pins['G0'], gridname=rg12)
    #drain
    rdl0=laygen.route(name=None, xy0=[0, 1], xy1=[0, 1], gridname0=rg12, refobj0=mdl[0].pins['D0'], refobj1=mdl[-1].pins['D0'])
    rdr0=laygen.route(name=None, xy0=[0, 1], xy1=[0, 1], gridname0=rg12, refobj0=mdr[-1].pins['D0'], refobj1=mdr[0].pins['D0'])
    for _mdl, _mdr in zip(mdl, mdr):
        laygen.via(name=None, xy=[0, 1], refobj=_mdl.pins['D0'], gridname=rg12)
        laygen.via(name=None, xy=[0, 1], refobj=_mdr.pins['D0'], gridname=rg12)
    #source
    # One shared source rail spanning from the left row to the right row.
    rs0=laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=mdl[0].pins['S0'], refobj1=mdr[0].pins['S0'])
    for _mdl, _mdr in zip(mdl, mdr):
        laygen.via(name=None, xy=[0, 0], refobj=_mdl.pins['S0'], gridname=rg12)
        laygen.via(name=None, xy=[0, 0], refobj=_mdr.pins['S0'], gridname=rg12)
    laygen.via(name=None, xy=[0, 0], refobj=mdl[-1].pins['S1'], gridname=rg12)
    laygen.via(name=None, xy=[0, 0], refobj=mdr[-1].pins['S1'], gridname=rg12)
    #dmy
    # Tie off dummy drains/sources only when there are at least 2 dummies.
    if m_dmy>=2:
        mdmyl=imdmyl0.elements[:, 0]
        mdmyr=imdmyr0.elements[:, 0]
        laygen.route(name=None, xy0=[0, 1], xy1=[0, 1], gridname0=rg12, refobj0=mdmyl[0].pins['D0'], refobj1=mdmyl[-1].pins['D0'])
        laygen.route(name=None, xy0=[0, 1], xy1=[0, 1], gridname0=rg12, refobj0=mdmyr[0].pins['D0'], refobj1=mdmyr[-1].pins['D0'])
        laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=mdmyl[0].pins['S0'], refobj1=mdmyl[-1].pins['S0'])
        laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=mdmyr[0].pins['S0'], refobj1=mdmyr[-1].pins['S0'])
        for _mdmyl, _mdmyr in zip(mdmyl, mdmyr):
            laygen.via(name=None, xy=[0, 1], refobj=_mdmyl.pins['D0'], gridname=rg12)
            laygen.via(name=None, xy=[0, 1], refobj=_mdmyr.pins['D0'], gridname=rg12)
            laygen.via(name=None, xy=[0, 0], refobj=_mdmyl.pins['S0'], gridname=rg12)
            laygen.via(name=None, xy=[0, 0], refobj=_mdmyr.pins['S0'], gridname=rg12)
    return [imbl0, imdmyl0, iml0, imr0, imdmyr0, imbr0] | d851371ea4c513a4a77661ffb177ef3d41d39189 | 3,631,830
def fetch_rrlyrae_templates(**kwargs):
    """Access the RR Lyrae template data (table 1 of Sesar 2010)
    These return approximately 23 ugriz RR Lyrae templates, with normalized
    phase and amplitude.
    Parameters
    ----------
    Returns
    -------
    templates: :class:`RRLyraeTemplates` object
        collection of RRLyrae templates.
    Other Parameters
    ----------------
    data_home : str (optional)
        Specify the local cache directory for the dataset. If not used, it
        will default to the ``astroML`` default location.
    url : str (optional)
        Specify the URL of the datasets. Defaults to webpage associated with
        Sesar 2010.
    force_download : bool (optional)
        If true, then force re-downloading data even if it is already cached
        locally. Default is False.
    """
    # NOTE(review): kwargs is forwarded as a plain dict (not **kwargs) —
    # confirm RRLyraeTemplates' second positional parameter expects a dict
    # of options rather than expanded keyword arguments.
    return RRLyraeTemplates('RRLyr_ugriz_templates.tar.gz', kwargs) | 90b965be26a18481fa60bf1b49a956d90fc559ba | 3,631,831
def BVHTreeAndVerticesInWorldFromObj(obj):
    """
    Input: Object of Blender type Object
    Output: BVH Tree necessary for ray tracing and vertsInWorld = verts in global coordinate system.
    """
    world = obj.matrix_world
    # Transform every mesh vertex into world (global) coordinates.
    verts_in_world = [world @ vertex.co for vertex in obj.data.vertices]
    face_indices = [poly.vertices for poly in obj.data.polygons]
    return BVHTree.FromPolygons(verts_in_world, face_indices), verts_in_world
def _readline(ser):
    """Read a line from device on 'ser'.
    ser      open serial port
    Returns all characters up to, but not including, a newline character.
    An empty read (port timeout or EOF) also terminates the line, so the
    caller is returned whatever was received instead of busy-looping.
    """
    line = bytearray()  # collect data in a byte array
    while True:
        c = ser.read(1)
        if not c:
            # Empty read: with a timeout configured, the original spun in
            # a tight loop forever here; stop and return the partial line.
            break
        if c == b'\n':
            break
        line += c
    return str(line, encoding='utf-8')
def axline(x=None,y=None,a=None,b=None,label=None,lab_loc=0,ax=None,plot_kw={},**kwargs):
    """Generalised axis lines.
    This function aims to generalise the usage of axis lines calls (axvline/axhline) together and
    to allow lines to be specified by a slope/intercept according to the function y=a*x + b.
    Parameters
    ----------
    x : int or list, optional
        x position(s) in data coordinates for a vertical line(s).
    y : int or list, optional
        y position(s) in data coordinates for a horizontal line(s).
    a : int or list, optional
        Slope(s) of diagonal axis line(s), defaults to 1 if not specified when b is given.
    b : int or list, optional
        Intercept points(s) of diagonal axis line(s), defaults to 0 if not specified when a is given.
    label : str, optional
        Sets label(s) for line(s) and plots legend.
    lab_loc : int, optional
        Defines the position of the legend. Defaults as lab_loc=0.
    ax : pyplot.Axes, optional
        Use the given axes to make the plot, defaults to the current axes.
    plot_kw : dict, optional
        Passes the given dictionary as a kwarg to the plotting function. Valid kwargs are Line2D properties.
    **kwargs: Line2D properties, optional
        kwargs are used to specify matplotlib specific properties such as linecolor, linewidth,
        antialiasing, etc. A list of available `Line2D` properties can be found here:
        https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D
    Returns
    -------
    lines
        A list of Line2D objects representing the plotted data.
    """
    from matplotlib.pyplot import plot, legend, gca
    from .base_func import axes_handler,plot_finalizer,dict_splicer,is_numeric
    from warnings import warn
    # Handle deprecated variables
    # NOTE(review): the warning text contains a 'verions' typo; it is a
    # runtime string and is deliberately left untouched here.
    deprecated = {'plabel':'label'}
    for dep in deprecated:
        if dep in kwargs:
            warn(f"'{dep}' will be deprecated in future verions, using '{deprecated[dep]}' instead")
            if (dep=='plabel'): label = kwargs.pop(dep)
    # Switch to the requested axes (axes_handler returns the previous one
    # so it can be restored at the end).
    if ax is not None:
        old_axes=axes_handler(ax)
    else:
        ax=gca()
        old_axes=ax
    if not (any([is_numeric(var) for var in [x,y,a,b]])): # If nothing has been specified
        raise TypeError("axline() missing one of optional arguments: 'x', 'y', 'a' or 'b'")
    # Normalise every scalar argument into a one-element list.
    for i, val in enumerate([x,y,a,b]):
        if (val is not None):
            try: # Test whether the parameter is iterable
                temp=(k for k in val)
            except TypeError: # If not, convert to a list
                if (i == 0): x=[x]
                elif (i == 1): y=[y]
                elif (i == 2): a=[a]
                elif (i == 3): b=[b]
    if (x is not None and y is not None): # Check whether both x and y were specified
        raise ValueError("'x' and 'y' cannot be both specified")
    if (x is not None): # Check conditions if x specified
        if (any([a,b])): # Should not specify a or b, if x given.
            raise ValueError("'{0}' cannot be specified if x specified".format('a' if a else 'b'))
        L=len(x)
    if (y is not None): # Check conditions if y specified
        if (any([a,b])): # Should not specify a or b, if y given.
            raise ValueError("'{0}' cannot be specified if y specified".format('a' if a else 'b'))
        L=len(y)
    # Broadcast a and b against each other: a length-1 list is repeated to
    # match the other's length; unequal multi-element lengths are an error.
    if (a is not None):
        if (b is None): # If no intercept specified
            b=[0]*len(a) # set b to 0 for all a
        else:
            if (len(b) == 1):
                b=[b[0]]*len(a)
            elif (len(b) != len(a)):
                if (len(a) == 1):
                    a=[a[0]]*len(b)
                else:
                    raise ValueError(f"Length of 'a' ({len(a)}) and length of 'b' ({len(b)}) must be equal or otherwise 1")
        L=len(a)
    elif (b is not None):
        if (a is None): # If no slope specified
            a=[1]*len(b) # set a to 1 for all b
        L=len(b)
    if type(label) is not list:
        label=[label for i in range(L)]
    elif (len(label) != L):
        raise ValueError("Length of label list ({0}) must match the number of lines given ({1}).".format(len(label),L))
    # Combine the `explicit` plot_kw dictionary with the `implicit` **kwargs dictionary
    #plot_par={**plot_kw, **kwargs} # For Python > 3.5
    plot_par=plot_kw.copy()
    plot_par.update(kwargs)
    # Create 'L' number of plot kwarg dictionaries to parse into each plot call
    plot_par=dict_splicer(plot_par,L,[1]*L)
    lines=[] # Initialising list which contains each line
    if (x is not None):
        for ii, xx in enumerate(x):
            l=ax.axvline(x=xx,**plot_par[ii],label=label[ii])
            lines.append(l)
    if (y is not None):
        for ii, yy in enumerate(y):
            l=ax.axhline(y=yy,**plot_par[ii],label=label[ii])
            lines.append(l)
    if (a is not None):
        for ii, pars in enumerate(zip(a,b)):
            aa=pars[0]; bb=pars[1]
            # Draw the diagonal across the current axis limits, then
            # restore the limits so autoscaling does not move them.
            xLims=ax.get_xlim()
            yLims=ax.get_ylim()
            # NOTE(review): plot() returns a *list* of Line2D, so this
            # appends a list, whereas the axvline/axhline branches append
            # bare Line2D objects — confirm whether callers rely on that.
            lines.append(plot([xLims[0],xLims[1]],[aa*xLims[0]+bb,aa*xLims[1]+bb],label=label[ii],**plot_par[ii]))
            ax.set_xlim(xLims)
            ax.set_ylim(yLims)
    if any(label):
        legend(loc=lab_loc)
    if ax is not None:
        old_axes=axes_handler(old_axes)
    return lines[0] if len(lines) == 1 else lines | cb9b5c1bb1b6bdf28c2eec7f9f6e9791533915fc | 3,631,834
def getPermCityState(permRecord):
    """Returns a string with the 'location' of the perm.
    It is generated from the starting city and starting state.
    This important conversion is used in many places and thus
    warrants its own commonized utility method.
    Input: a CSVRecord/permanent object.
    Output: a string"""
    tag = "getPermCityState:"
    sop(9, tag, "Entry.")
    city = permRecord.get("Start City")
    state = permRecord.get("Start State")
    sop(9, tag, "startCity=%s startState=%s" % (city, state))
    # Strip surrounding CSV double-quotes before combining.
    location = "%s, %s" % (city.strip('"'), state.strip('"'))
    sop(9, tag, "Exit. Returning permCityState=%s" % (location))
    return location
import click
def direct_group(parent):
    """Direct ldap access CLI group

    Registers a `direct` click sub-group on *parent* with `get`, `list`
    and `delete` commands, and returns the group.
    """
    @parent.group()
    def direct():
        """Direct access to LDAP data"""
        pass
    @direct.command()
    @click.option('-c', '--cls', help='Object class', required=True)
    @click.option('-a', '--attrs', help='Addition attributes',
                  type=cli.LIST)
    @click.argument('rec_dn')
    @cli.admin.ON_EXCEPTIONS
    def get(rec_dn, cls, attrs):
        """List all defined DNs"""
        if not attrs:
            attrs = []
        try:
            # TODO: it is probably possible to derive class from DN.
            klass = getattr(admin, cls)
            # Extend the requested attributes with every attribute from
            # the admin class schema.
            attrs.extend([elem[0] for elem in klass.schema()])
        except AttributeError:
            cli.bad_exit('Invalid admin type: %s', cls)
            return
        entry = context.GLOBAL.ldap.conn.get(
            rec_dn, '(objectClass=*)', list(set(attrs)))
        formatter = cli.make_formatter(None)
        cli.out(formatter(entry))
    @direct.command(name='list')
    @click.option('--root', help='Search root.')
    @cli.admin.ON_EXCEPTIONS
    def _list(root):
        """List all defined DNs"""
        dns = context.GLOBAL.ldap.conn.list(root)
        for rec_dn in dns:
            cli.out(rec_dn)
    @direct.command()
    @cli.admin.ON_EXCEPTIONS
    @click.argument('rec_dn', required=True)
    def delete(rec_dn):
        """Delete LDAP object by DN"""
        context.GLOBAL.ldap.conn.delete(rec_dn)
    # The decorators already registered the commands on the click group;
    # drop the now-unneeded local names.
    del get
    del delete
    return direct | d44e252ab86bc14bf30f6bb3472e6bfe48ff2004 | 3,631,836
import os
def listPDBCluster(pdb, ch, sqid=95):
    """Returns the PDB sequence cluster that contains chain *ch* in structure
    *pdb* for sequence identity level *sqid*. PDB sequence cluster will be
    returned in as a list of tuples, e.g. ``[('1XXX', 'A'), ]``. Note that
    PDB clusters individual chains, so the same PDB identifier may appear
    twice in the same cluster if the corresponding chain is present in the
    structure twice.
    Before this function is used, :func:`fetchPDBClusters` needs to be called.
    This function will load the PDB sequence clusters for *sqid* automatically
    using :func:`loadPDBClusters`."""
    assert isinstance(pdb, str) and len(pdb) == 4, \
        'pdb must be 4 char long string'
    assert isinstance(ch, str) and len(ch) == 1, \
        'ch must be a one char long string'
    try:
        sqid = int(sqid)
    except (TypeError, ValueError):
        # int() raises ValueError for non-numeric strings (e.g. '95%'),
        # which the original TypeError-only handler let escape; normalise
        # both failure modes to the documented TypeError.
        raise TypeError('sqid must be an integer')
    if not (30 <= sqid <= 100):
        raise ValueError('sqid must be between 30 and 100')
    # Snap to the nearest supported sequence-identity level.
    sqid = PDB_CLUSTERS_SQIDS[abs(PDB_CLUSTERS_SQIDS-sqid).argmin()]
    PDB_CLUSTERS_PATH = os.path.join(getPackagePath(), 'pdbclusters')
    clusters = PDB_CLUSTERS[sqid]
    if clusters is None:
        loadPDBClusters(sqid)
        clusters = PDB_CLUSTERS[sqid]
    pdb_ch = pdb.upper() + '_' + ch.upper()
    index = clusters.index(pdb_ch)
    # Each cluster occupies one newline-terminated line of the blob; the
    # first line bounds how far back the containing line can start.
    maxlen = clusters.index('\n')
    end = clusters.find('\n', index)
    start = clusters.rfind('\n', index-maxlen, end)+1
    cluster = clusters[start:end]
    return [tuple(item.split('_')) for item in cluster.split()]
def elem_to_Z(sym: str) -> int:
    """
    Converts element symbol to atomic number.
    Parameters
    ----------
    sym : str
        Element string.
    Returns
    -------
    int
        Atomic number.
    Examples
    --------
    >>> rd.utils.elem_to_Z('H')
    1
    >>> rd.utils.elem_to_Z('Br')
    35
    """
    # Direct lookup in the module-level symbol table; presumably raises
    # KeyError for unrecognised symbols — confirm callers handle that.
    return SYM_DICT[sym] | 8539658768e25dece01583031e161927c766adc8 | 3,631,838
def fn_I_axion_p(omega,xi_11,zeta_11,h_11,c_11,P_nuc,l,v,a,b,beta_11,k2,L_squid, R_squid, L_i, k_i, C_1, L_1, L_2, k_f, N_series,N_parallel):
    """Total axion-induced current through primary circuit, as a function of:
    -- angular frequency omega
    -- piezoaxionic tensor component xi_11
    -- electroaxionic tensor component zeta_11
    -- piezoelectric tensor component h_11
    -- elastic stiffness tensor component c_11
    -- the spin polarization fraction P_nuc
    -- crystal thickness l = l_1
    -- longitudinal sound speed v
    -- transverse aspect ratios a = l_2 / l_1 and b = l_3 / l_1
    -- impermittivity tensor component beta_11
    -- EM coupling factor k^2
    -- dynamical inductance L_squid and resistance R_squid
    -- input inductor L_i
    -- SQUID coupling factor k_i
    -- readout capacitor C_1
    -- readout inductor L_1
    -- transformer inductor L_2
    -- transformer coupling factor k_f
    -- number of crystals in series (N_series) and parallel (N_parallel).
    """
    # Source voltage induced by the axion signal across the crystal stack.
    V_axion = fn_V_axion(omega,xi_11,zeta_11,l,v,h_11,c_11,N_series,P_nuc)
    # Total impedance of the readout circuit seen by that source.
    Z_total = fn_Z_total(omega,l,v,a,b,beta_11,k2,L_squid, R_squid, L_i, k_i, C_1, L_1, L_2, k_f,N_series,N_parallel)
    # Current follows as I = V / Z.
    I_axion_p = V_axion / Z_total
    return I_axion_p | 8eb0cd4c5e221e425551604677151865bca7f70a | 3,631,839
def _C(startmat, endmat):
    """Right Cauchy-Green deformation tensor for the deformation taking
    *startmat* to *endmat*.

    :startmat: ndarray
    :endmat: ndarray
    :returns: ndarray
    """
    # C = F^T F, with F the deformation gradient between the two states.
    F = _F(startmat, endmat)
    return np.dot(F.T, F)
import re
def valid_uuid(uuid):
    """
    Check if the given string is a valid (version 4) uuid
    """
    # Raw string literal: the original plain string only worked because
    # Python leaves the unknown escape '\Z' intact, which has been a
    # DeprecationWarning since 3.6 (and will become an error).
    # Pattern: version-4 UUID, dashes optional, variant nibble [89ab];
    # \Z (unlike $) refuses a trailing newline.
    regex = re.compile(
        r'^[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}\Z',
        re.I)
    match = regex.match(uuid)
    return bool(match)
def get_products_stats_data(domains, datespan, interval,
                            datefield='created_at'):
    """
    Number of products by created time

    Buckets SQLProduct rows for the given domains by `interval`
    (a date_trunc unit such as day/week/month) within `datespan`, and
    returns the result via format_return_data together with the count of
    products created before the span start.

    NOTE(review): `interval` and `datefield` are interpolated directly
    into raw SQL — confirm both always come from trusted, whitelisted
    values and never from user input.
    """
    # groups products by the interval and returns the time interval and count
    ret = (SQLProduct.objects
           .filter(domain__in=domains,
                   created_at__gte=datespan.startdate,
                   created_at__lte=datespan.enddate)
           .extra({
               interval:
               "EXTRACT(EPOCH FROM date_trunc('%s', %s))*1000" %
               (interval, datefield)})
           .values(interval).annotate(Count('id')))
    # Epoch milliseconds for the bucket, plus the per-bucket row count.
    ret = [{"time": int(r[interval]), "count": r['id__count']} for r in ret]
    # Baseline: everything created strictly before the window.
    initial = (SQLProduct.objects
               .filter(domain__in=domains,
                       created_at__lt=datespan.startdate)
               .count())
    return format_return_data(ret, initial, datespan) | 5be8a75bb9157fc1ac000a9c91caa2fd6584133a | 3,631,842
def task_schemas_json_orchestrator():
    """Schemas - generate hat-orchestrator JSON schema repository data"""
    # Task definition: first list is presumably the source schema file(s),
    # second the generated repository target — see _get_task_json.
    return _get_task_json(
        [schemas_json_dir / 'orchestrator.yaml'],
        [src_py_dir / 'hat/orchestrator/json_schema_repo.json']) | 4c988e9efa0a077e64da685b27c3c1281cf9ddf0 | 3,631,843
def stopStreaming():
    """
    Stop streaming.
    Will return an `error` if streaming is not active.
    """
    # Builds the parameterless "StopStreaming" request payload via the
    # module-level __createJSON helper.
    return __createJSON("StopStreaming", {}) | abc164be9756a186d12bc90ebf56eedc9c04aff3 | 3,631,844
from bs4 import BeautifulSoup
from typing import Callable
from typing import Union
from typing import Dict
from typing import List
def process_doc(
    doc: BeautifulSoup, proc: Callable = None, log: bool = True
) -> Union[None, Dict[str, Union[str, List]]]:
    """
    Process soup to extract text in sections recursively
    ex return:
    {
        "title": "Chapter 1",
        "text": "",
        "sections": [
            {
                "title": "Section 1",
                "text": "",
                "sections": [
                    {
                        "title": "Subsection 1",
                        "text" : "this is text\nthis is the second paragraph",
                        "sections": [],
                    }
                ]
            }
        ]
    }
    :param doc: bs4 document
    :param proc: processor function to apply to text before storage (summarizer or other)
    :param log: log completion of call
    :return: list of sections and all subsections represented as dictionaries
        (None when the section is the skipped dummy section)
    """
    paragraphs = []
    output = {"title": None, "text": "", "sections": []}
    # populate title
    # Only direct children are searched (recursive=False) so nested
    # sections are handled by the recursive calls below, not here.
    header_el = doc.find("header", recursive=False)
    if header_el:
        title_el = header_el.find("h1", recursive=False)
        output["title"] = get_text(title_el)
    # check if we are in dummy section
    if output["title"] == "Essential Learning Concepts":
        return None
    # populate paragraphs
    p_els = doc.findAll("p", recursive=False)
    for p_el in p_els:
        # extract text from p
        p_text = get_text(p_el)
        paragraphs.append(p_text)
    output["text"] = "\n".join(paragraphs)
    # Apply the optional post-processor (e.g. a summarizer) to non-empty text.
    if proc and callable(proc) and output["text"]:
        output["text"] = proc(output["text"])
    # populate subsections
    section_els = doc.findAll("section", recursive=False)
    for sec in section_els:
        sub_sec = process_doc(sec, proc=proc)
        if sub_sec:
            output["sections"].append(sub_sec)
    # convenience so we can call on body of document instead of first section
    # if root has no title or paragraphs and has only one section, return that section
    if not output["title"] and not output["text"] and len(output["sections"]) == 1:
        return output["sections"][0]
    else:
        if log:
            print(output["title"])
        return output | fb97a6eb1a63bb9ef4cbff6d0abfceca886fbb73 | 3,631,845
def gain_corr_double_ExpDecayFunc(t, tau_A, tau_B, amp_A, amp_B, gc):
    """
    Specific form of an exponential decay used for flux corrections.
    Includes a "gain correction" parameter that is ignored when correcting
    the distortions.
    """
    # Two independent exponential terms scaled by their amplitudes,
    # then a global gain factor on (1 + sum of decays).
    decay_a = amp_A * np.exp(-t / tau_A)
    decay_b = amp_B * np.exp(-t / tau_B)
    return gc * (1 + decay_a + decay_b)
def cross_entropy_sequence_loss(logits, targets, sequence_length):
    """Calculates the per-example cross-entropy loss for a sequence of logits and
    masks out all losses passed the sequence length.
    Args:
      logits: Logits of shape `[T, B, vocab_size]`
      targets: Target classes of shape `[T, B]`
      sequence_length: An int32 tensor of shape `[B]` corresponding
        to the length of each input
    Returns:
      A tensor of shape [T, B] that contains the loss per example, per time step.
    """
    with tf.name_scope("cross_entropy_sequence_loss"):
        # `cross_entropy` is a module-level helper; the commented-out call
        # below is the equivalent raw TF op it replaces.
        losses = cross_entropy(logits, targets)
        ##losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
        ##    logits=logits, labels=targets)
        # Mask out the losses we don't care about
        # sequence_mask yields [B, T]; transpose back to [T, B] before
        # zeroing time steps past each sequence's length.
        loss_mask = tf.sequence_mask(
            tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
        losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])
    return losses | 78ee272b2e6fe7b7f02579fdd0e9f90aab936e76 | 3,631,847
def get_common_interior_polygons(polygon, list_of_polygons):
    """Return the indices of the polygons in *list_of_polygons* that fully
    contain *polygon*.

    matplotlib ``Polygon`` patches are converted to shapely polygons
    (normalised with ``buffer(0)``) before the containment test; entries
    where either geometry is invalid are skipped.

    Parameters
    ----------
    polygon: matplotlib.Polygon or shapely polygon

    Returns
    -------
    list_of_common_polygons: list
        A filtered list of ids
    """
    if isinstance(polygon, Polygon):
        polygon = shapelyPolygon(polygon.get_xy()).buffer(0)
    containing_ids = []
    for idx, candidate in enumerate(list_of_polygons):
        if isinstance(candidate, Polygon):
            candidate = shapelyPolygon(candidate.get_xy()).buffer(0)
        if polygon.is_valid and candidate.is_valid and polygon.within(candidate):
            containing_ids.append(idx)
    return containing_ids
def handle_image_size(input_image: np.ndarray, dimension: tuple):
    """
    Crop or pad *input_image* to the target size.

    :param input_image: HxWxC image array
    :param dimension: target (height, width) tuple
    :return: the image randomly cropped down to `dimension` when smaller,
        padded up to it when larger, or unchanged when equal
    """
    assert input_image.ndim == 3, (
        "Image should have 3 dimension '[HxWxC]'" "got %s",
        (input_image.shape,),
    )
    assert len(dimension) == 2, (
        "'dimension' should have 'Hxw' " "got %s",
        (dimension,),
    )
    h, w, _ = input_image.shape
    # NOTE(review): tuple comparison is lexicographic — e.g.
    # dimension=(50, 200) < (h, w)=(100, 100) is True, taking the crop
    # branch even though the requested width exceeds the image width.
    # Confirm mixed crop/pad dimensions cannot occur for expected inputs.
    if dimension < (h, w):
        random_height, random_width = get_random_crop_x_and_y(dimension, (h, w))
        input_image = crop_image(input_image, dimension, (random_height, random_width))
    elif dimension > (h, w):
        limit = get_pad_limit(dimension, (h, w))
        input_image = pad_image(input_image, limit)
    return input_image | d4327548130c86e7ffdfa34ad3ed3e72bb510eb4 | 3,631,849
def netconvecs_to_listoflists(t_vec, id_vec, minmax=None):
    """
    Convert data from NetCon.record(tvec, idvec) vectors into a list of
    lists, each row holding the spike times of one source id.

    :param t_vec: Timestamp vector.
    :param id_vec: Associated ids of each timestamp (same length as t_vec).
    :param minmax: If specified as a (min, max) tuple, every id in that
        inclusive range that produced no spikes is inserted as an empty
        list.

    Example::

        # nclist is a list of NetCons
        t_vec = nrn.Vector()
        id_vec = nrn.Vector()
        for i in range(len(nclist)):
            nclist[i].record(t_vec, id_vec, i)
        simulate()

    :return: The data as a list of lists with each row being the spike
        times.
    """
    spikes_by_id = netconvecs_to_dict(t_vec, id_vec)
    if minmax:
        low, high = minmax
        for idx in range(low, high + 1):
            # Insert empty rows for ids that never fired.
            spikes_by_id.setdefault(idx, [])
    return dictspikes_to_listoflists(spikes_by_id)
import random
def log_roulette_selection_method(fx_input, optimization_type_input, n_individuals_input, seed_input):
    """Roulette selection method with a twist. Here the evaluation value is
    processed with a log function. It reduces the difference between
    individuals, which increases population diversity.

    :param fx_input: fitness values (should be > 1 so their logs are positive)
    :param optimization_type_input: 'max' to favour large fitness, anything
        else to favour small fitness
    :param n_individuals_input: number of parents to select
    :param seed_input: RNG seed for reproducible spins
    :return: list of selected parent indices into fx_input
    """
    random.seed(seed_input)
    log_fitness = np.log(fx_input)
    total = log_fitness.sum()
    if optimization_type_input == 'max':
        # Probability proportional to log-fitness.
        probabilities = log_fitness / total
    else:
        # Invert for minimisation, then renormalise.
        inverted = total / log_fitness
        probabilities = inverted / inverted.sum()
    cumulative = np.cumsum(probabilities)
    # Spin the wheel n times; each spin picks the index whose cumulative
    # probability is nearest the uniform draw.  (The original also
    # computed an unused mean/max and hand-rolled this loop with a
    # while-counter.)
    parents_idxs = []
    for _ in range(n_individuals_input):
        spin = random.uniform(0, 1)
        parents_idxs.append(int(np.abs(cumulative - spin).argmin()))
    return parents_idxs
import torch
def evaluate(attention_model,x_test,y_test):
    """
    cv results
    Args:
        attention_model : {object} model
        x_test          : {nplist} x_test
        y_test          : {nplist} y_test
    Returns:
        cv-accuracy (fraction of correct predictions over the test set)
    """
    # Run the whole test set as one batch.
    attention_model.batch_size = x_test.shape[0]
    attention_model.hidden_state = attention_model.init_hidden()
    # NOTE(review): torch.autograd.Variable is a legacy API; tensors work
    # directly on modern PyTorch.
    x_test_var = Variable(torch.from_numpy(x_test).type(torch.LongTensor))
    y_test_pred,_ = attention_model(x_test_var)
    if bool(attention_model.type):
        # Truthy model.type: multi-class — take the argmax class per sample.
        y_preds = torch.max(y_test_pred,1)[1]
        y_test_var = Variable(torch.from_numpy(y_test).type(torch.LongTensor))
    else:
        # Otherwise: binary — round scores to 0/1 (assumes scores in
        # [0, 1] — confirm the model's output activation).
        y_preds = torch.round(y_test_pred.type(torch.DoubleTensor).squeeze(1))
        y_test_var = Variable(torch.from_numpy(y_test).type(torch.DoubleTensor))
    return torch.eq(y_preds,y_test_var).data.sum()/x_test_var.size(0) | 3216f6092c61f35bb74140ac51ef635f53691e19 | 3,631,852
def reorder_cols_df(df, cols):
    """Reorder the columns of a DataFrame so it starts with the provided
    columns (silently skipping any that are absent), followed by the
    remaining columns in their original order."""
    all_cols = df.columns.tolist()
    leading = [name for name in cols if name in all_cols]
    trailing = df.columns.tolist()
    for name in leading:
        trailing.remove(name)
    return df[leading + trailing]
from typing import Union
from pathlib import Path
from typing import Optional
def uri_resolve(base: Union[str, Path], path: Optional[str]) -> str:
    """
    Backport of datacube.utils.uris.uri_resolve()

    An absolute filesystem path becomes a file:// URI on its own;
    otherwise *path* is joined onto *base* (a Path base is first turned
    into an absolute file:// URI).
    """
    if path:
        candidate = Path(path)
        if candidate.is_absolute():
            return candidate.as_uri()
    if isinstance(base, Path):
        base = base.absolute().as_uri()
    return urljoin(base, path)
def preprocess_report(rep, rep2):
    """ Processes lists containing report grades """
    # Wrap each grade list in a leading batch dimension and cast to float32.
    first = np.asarray([rep], dtype="float32")
    second = np.asarray([rep2], dtype="float32")
    return first, second
from typing import Any
async def async_check_srv_record(hass: HomeAssistant, host: str) -> dict[str, Any]:
    """Check if the given host is a valid Minecraft SRV record.

    Returns a dict with CONF_HOST/CONF_PORT taken from the first SRV
    answer, or None when no SRV record exists for the host.
    """
    # Check if 'host' is a valid SRV record.
    return_value = None
    srv_records = None
    try:
        # Query "<SRV_RECORD_PREFIX>.<host>" for an SRV record.
        srv_records = await aiodns.DNSResolver().query(
            host=f"{SRV_RECORD_PREFIX}.{host}", qtype="SRV"
        )
    except (aiodns.error.DNSError):
        # 'host' is not a SRV record.
        pass
    else:
        # 'host' is a valid SRV record, extract the data.
        # Only the first answer is used.
        return_value = {
            CONF_HOST: srv_records[0].host,
            CONF_PORT: srv_records[0].port,
        }
    return return_value | 40aef8e2446669040975a5d15c4be2c28e08b6e1 | 3,631,856
def add_923_heat_rate(df):
    """
    Small function to calculate the heat rate of records with fuel
    consumption and net generation.

    Parameters
    ----------
    df : dataframe
        Must contain the columns net_generation_mwh and
        fuel_consumed_for_electricity_mmbtu

    Returns
    -------
    dataframe
        The same dataframe (modified in place) with the new column
        heat_rate_mmbtu_mwh
    """
    fuel = df["fuel_consumed_for_electricity_mmbtu"]
    generation = df["net_generation_mwh"]
    # Heat rate = fuel input per MWh of net generation.
    df["heat_rate_mmbtu_mwh"] = fuel / generation
    return df
def get_local_bounding_box_min_max():
    """Gets an Axis-Aligned Bounding Box for the canonical die model, in local coordinate space."""
    # One row per axis after the transpose: column 0 = min, column 1 = max.
    mins = [-0.49946, -0.48874, -0.52908]
    maxs = [0.50094, 0.51166, 0.47132]
    return np.array([mins, maxs]).T
def get_inception_features(inputs, inception_graph, layer_name="pool_3:0"):
    """Compose the preprocess_for_inception function with TFGAN run_inception.

    Preprocesses `inputs` and feeds them through the provided Inception
    graph, returning the activations of `layer_name`.
    """
    preprocessed = preprocess_for_inception(inputs)
    return tfgan_eval.run_inception(
        preprocessed,
        graph_def=inception_graph,
        output_tensor=layer_name) | ea6d8772291bb3e6b156f4905e2f22bb1539870c | 3,631,859
def v4_multimax(iterable):
    """Return a list of all maximum values.

    Bonus 1 - on short solution.

    The input is materialized first because it must be traversed twice
    (once for ``max`` and once to collect the maxima); the original
    consumed one-shot iterators in the ``max`` call and then always
    returned [] for them.
    """
    items = list(iterable)
    if not items:
        # Matches the original's ValueError-on-empty fallback.
        return []
    max_item = max(items)
    return [item for item in items if item == max_item]
def sig_beg_to_adj_ground_ht(ds):
    """
    Height in meters from GLAS signal beginning to whichever of the two lowest peaks has greater amplitude.
    """
    # Delegate to the generic height helper with the metric pair for this measure.
    height = get_heights_from_distance(
        ds,
        top_metric='sig_begin_dist',
        bottom_metric='adj_ground_peak_dist',
    )
    return height
from typing import List
def tree_to_formula(tree: DecisionTreeClassifier, concept_names: List[str], target_class: int) -> str:
    """
    Translate a decision tree into a set of decision rules.

    Walks every root-to-leaf path of the fitted tree; each leaf whose
    majority class equals ``target_class`` contributes one disjunct made of
    the conjunction of split conditions along its path.

    BUGFIX: the original accumulated state in module-level ``global k`` /
    ``global explanation`` variables, polluting the module namespace and
    making concurrent or repeated calls fragile; state is now local.

    :param tree: sklearn decision tree (fitted)
    :param concept_names: concept names, indexed by the tree's feature ids
    :param target_class: target class
    :return: decision rule, e.g. ``"(f1 < 0.5 & f2 > 1.0) | (f1 > 0.5)"``
    """
    tree_ = tree.tree_
    feature_name = [
        concept_names[i] if i != _tree.TREE_UNDEFINED else "undefined!"
        for i in tree_.feature
    ]
    # Conjunctions collected from leaves that predict the target class.
    clauses: List[str] = []

    def recurse(node: int, path: str) -> None:
        if tree_.feature[node] != _tree.TREE_UNDEFINED:
            # Internal node: extend the path with both split outcomes.
            name = feature_name[node]
            threshold = tree_.threshold[node]
            left_cond = f'{name} < {threshold}'
            right_cond = f'{name} > {threshold}'
            recurse(tree_.children_left[node],
                    (path + ' & ' + left_cond) if path else left_cond)
            recurse(tree_.children_right[node],
                    (path + ' & ' + right_cond) if path else right_cond)
        else:
            # Leaf: keep the path when the leaf's majority class is the target.
            if tree_.value[node].squeeze().argmax() == target_class:
                clauses.append(f'({path})')

    recurse(0, '')
    # Joining replicates the original "… | … | "[:-3] trailing-strip.
    return ' | '.join(clauses)
import os
def update_user_in_cache(user):
    """Create a per-user cache file under USER_CACHE_DIR.

    Handles a single ``user`` dict: ensures the cache directory exists and
    creates ``<USER_CACHE_DIR>/<user_id>.json``.

    :param user: dict with at least a ``"user_id"`` key.
    :return: human-readable status message ("User cache is created.",
        the error text, or "User caching is disabled.").
    """
    logger.info("Creating user in cache files.")
    if ENABLE_USER_CACHING:
        try:
            if not os.path.exists(USER_CACHE_DIR):
                os.makedirs(USER_CACHE_DIR)
            # NOTE(review): only an empty string is written, so the file holds
            # no user details despite the comment below - confirm intent.
            # Create file for each user into PVC having details about user
            with open(USER_CACHE_DIR + "/" + user["user_id"] + ".json", 'w',
                      encoding='utf8') as file:
                file.write("")
            logger.info("Created cache of {} user".format(user["user_id"]))
            message = "User cache is created."
        except Exception as e:
            # Best-effort: report the error as the status message instead of raising.
            message = str(e)
    else:
        message = "User caching is disabled."
    return message
from datetime import datetime
def datefix(datestr):
    """Transform a string into a python datetime object.

    Handles mm/dd/yy or mm/dd/yyyy, with dashes accepted in place of
    slashes.  Strings too short to be a date yield the current UTC time.
    """
    normalized = datestr.replace('-', '/')
    if len(normalized) <= 4:
        # Too short to be a date - fall back to "now" in UTC.
        return datetime.utcnow()
    try:
        # Two-digit year first; four-digit year as the fallback.
        return datetime.strptime(normalized, "%m/%d/%y")
    except ValueError:
        return datetime.strptime(normalized, "%m/%d/%Y")
import math
def vector_angle(v1: Vector3D, v2: Vector3D) -> float:
    """
    Calculate the angle between two given vectors.

    Keyword arguments:
    v1 -- First vector
    v2 -- Second vector
    """
    unit_a = normalize_vector(v1)
    unit_b = normalize_vector(v2)
    # Classic cos(theta) = (a.b)/(|a||b|); the norms of the normalised
    # vectors should be 1, but divide anyway, mirroring the formula.
    cos_angle = dot_product(unit_a, unit_b) / (vector_norm(unit_a) * vector_norm(unit_b))
    return math.acos(cos_angle)
from typing import Counter
def removed_mirrored_association(left_assoc, right_assoc):
    """
    Remove the mirrored association (associations like (a, b) and (b, a)) that occurs in the intra-night associations.
    The column id used to detect mirrored association are candid.
    We keep the associations with the smallest jd in the left_assoc dataframe.
    Parameters
    ----------
    left_assoc : dataframe
        left members of the intra-night associations
    right_assoc : dataframe
        right members of the intra-night associations
    Returns
    -------
    drop_left : dataframe
        left members of the intra-night associations without mirrored associations
    drop_right : dataframe
        right members of the intra-night associations without mirrored associations
    Examples
    --------
    >>> test_1 = pd.DataFrame({
    ... "a" : [1, 2, 3, 4],
    ... "candid" : [10, 11, 12, 13],
    ... "jd" : [1, 2, 3, 4]
    ... })
    >>> test_2 = pd.DataFrame({
    ... "a" : [30, 31, 32, 33],
    ... "candid" : [11, 10, 15, 16],
    ... "jd" : [2, 1, 5, 6]
    ... })
    The mirror association is the candid 10 with the 11
    (10 is associated with 11 and 11 is associated with 10 if we look between test_1 and test_2)
    >>> tt_1, tt_2 = removed_mirrored_association(test_1, test_2)
    >>> tt_1_expected = pd.DataFrame({
    ... "a" : [1,3,4],
    ... "candid" : [10,12,13],
    ... "jd" : [1,3,4]
    ... })
    >>> tt_2_expected = pd.DataFrame({
    ... "a" : [30,32,33],
    ... "candid" : [11,15,16],
    ... "jd" : [2,5,6]
    ... })
    >>> assert_frame_equal(tt_1.reset_index(drop=True), tt_1_expected)
    >>> assert_frame_equal(tt_2.reset_index(drop=True), tt_2_expected)
    >>> df1 = pd.DataFrame({
    ... "candid" : [1522165813015015004, 1522207623015015004],
    ... "objectId": ['ZTF21aanxwfq', 'ZTF21aanyhht'],
    ... "jd" : [2459276.66581, 2459276.707627]
    ... })
    >>> df2 = pd.DataFrame({
    ... "candid" : [1522207623015015004, 1522165813015015004],
    ... "objectId": ['ZTF21aanyhht', 'ZTF21aanxwfq'],
    ... "jd" : [2459276.707627, 2459276.66581]
    ... })
    >>> dd1, dd2 = removed_mirrored_association(df1, df2)
    >>> dd1_expected = pd.DataFrame({
    ... "candid" : [1522165813015015004],
    ... "objectId" : ['ZTF21aanxwfq'],
    ... "jd" : [2459276.66581]
    ... })
    >>> dd2_expected = pd.DataFrame({
    ... "candid" : [1522207623015015004],
    ... "objectId" : ['ZTF21aanyhht'],
    ... "jd" : [2459276.707627]
    ... })
    >>> assert_frame_equal(dd1, dd1_expected)
    >>> assert_frame_equal(dd2, dd2_expected)
    """
    left_assoc = left_assoc.reset_index(drop=True)
    right_assoc = right_assoc.reset_index(drop=True)
    # concatenates the associations side by side under a two-level column
    # index ("left"/"right"); sorting by the left jd means the earliest
    # occurrence of each mirrored pair is kept by duplicated() below.
    all_assoc = pd.concat(
        [left_assoc, right_assoc], axis=1, keys=["left", "right"]
    ).sort_values([("left", "jd")])
    # function used to detect the mirrored rows: an order-insensitive
    # fingerprint, so (a, b) and (b, a) hash to the same frozenset.
    # taken from : https://stackoverflow.com/questions/58512147/how-to-removing-mirror-copy-rows-in-a-pandas-dataframe
    def key(x):
        """
        Examples
        --------
        >>> t_list = [10, 11]
        >>> key(t_list)
        frozenset({(11, 1), (10, 1)})
        """
        return frozenset(Counter(x).items())
    # duplicated() marks every repeat of a fingerprint; since rows were
    # sorted by left jd, the first (smallest-jd) occurrence survives.
    mask = (
        all_assoc[[("left", "candid"), ("right", "candid")]]
        .apply(key, axis=1)
        .duplicated()
    )
    # remove the mirrored duplicates by applying the mask to the dataframe
    drop_mirrored = all_assoc[~mask]
    # split the two-level frame back into separate left/right dataframes
    left_a, right_a = restore_left_right(drop_mirrored, len(left_assoc.columns.values))
    return left_a, right_a
import tqdm
import time
def sample_trajectory(smc_N, alpha, beta, radius, n_samples, seq_dist,
                      jt_traj=None, debug=False, reset_cache=True):
    """ A particle Gibbs implementation for approximating distributions over
    junction trees.
    Args:
      smc_N (int): Number of particles in SMC in each Gibbs iteration
      n_samples (int): Number of Gibbs iterations (samples)
      alpha (float): sparsity parameter for the Christmas tree algorithm
      beta (float): sparsity parameter for the Christmas tree algorithm
      radius (float): defines the radius within which ned nodes are selected
      seq_dist (SequentialJTDistributions): the distribution to be sampled from
      jt_traj: unused in this implementation - TODO confirm it can be removed
      debug: unused in this implementation - TODO confirm it can be removed
      reset_cache (bool): clear seq_dist's cache at the start of each iteration
    Returns:
      Trajectory: Markov chain of the underlying graphs of the junction trees sampled by pgibbs.
    """
    graph_traj = mcmctraj.Trajectory()
    graph_traj.set_sampling_method({"method": "pgibbs",
                                    "params": {"N": smc_N,
                                               "alpha": alpha,
                                               "beta": beta,
                                               "radius": radius}})
    graph_traj.set_sequential_distribution(seq_dist)
    neig_set_cache = {}
    (trees, log_w) = (None, None)
    prev_tree = None
    for i in tqdm(range(n_samples), desc="Particle Gibbs samples"):
        if reset_cache is True:
            seq_dist.cache = {}
        start_time = time.time()
        if i == 0:
            # First iteration: unconditional SMC approximation.
            #start_graph = nx.Graph()
            #start_graph.add_nodes_from(range(seqdist.p))
            #start_tree = dlib.junction_tree(start_graph)
            (trees, log_w) = approximate(smc_N, alpha, beta, radius, seq_dist, neig_set_cache=neig_set_cache)
        else:
            # Sample backwards trajectories conditioned on the previous tree.
            perm_traj = sp.backward_perm_traj_sample(seq_dist.p, radius)
            T_traj = trilearn.graph.junction_tree_collapser.backward_jt_traj_sample(perm_traj,
                                                                                   prev_tree)
            (trees, log_w, Is) = approximate_cond(smc_N,
                                                  alpha,
                                                  beta,
                                                  radius,
                                                  seq_dist,
                                                  T_traj,
                                                  perm_traj, neig_set_cache=neig_set_cache)
        # Sample T from T_1..p
        # Rescale log-weights by their max before exponentiating (log-sum-exp
        # trick) so the normalization below is numerically stable.
        log_w_array = np.array(log_w.T)[seq_dist.p - 1]
        log_w_rescaled = log_w_array - max(log_w_array)
        w_rescaled = np.exp(log_w_rescaled)
        norm_w = w_rescaled / sum(w_rescaled)
        I = np.random.choice(smc_N, size=1, p=norm_w)[0]
        T = trees[I]
        prev_tree = T
        # Record the underlying graph of the sampled junction tree plus the
        # wall-clock time spent on this iteration.
        graph = jtlib.graph(T)
        end_time = time.time()
        graph_traj.add_sample(graph, end_time - start_time)
    return graph_traj
import re
def valgrind_supports_exit_early():
    """Checks if we support early exit from valgrind"""
    # Early exit needs valgrind >= 3.14; parse the minor version number.
    version_output = helpers.run_subprocess(['valgrind', '--version'])
    parsed = re.match(r'valgrind-(\d)\.(\d+).*', version_output)
    if parsed is None:
        return False
    return int(parsed.group(2)) >= 14
import csv
import sys
def read_rows(input_file, expected_fields):
    """Read the input_file as a CSV; validate that the expected_fields are
    present. Sys.exit if not
    :return: pair of list of dicts (rows in the CSV), and a list of headers
    found in the input_file"""
    reader = csv.DictReader(input_file)
    all_rows = list(reader)
    # Abort on the first expected field missing from the header.
    missing = [field for field in expected_fields if field not in reader.fieldnames]
    if missing:
        logger.error('Field "%s" not present in CSV', missing[0])
        sys.exit(1)
    return all_rows, reader.fieldnames
from typing import List
import hashlib
def document_etag(value: dict, ignore_fields: List[str] = None) -> str:
    """Computes and returns a valid ETag for the input value.

    BUGFIX: ``ignore_fields`` was accepted but never used; it is now honored
    by excluding those top-level keys from the hashed representation.

    :param value: dict to fingerprint.
    :param ignore_fields: optional list of top-level keys to exclude from
        the hash, so volatile fields do not change the ETag.
    :return: hex SHA-1 digest of the canonical (sorted-keys) serialization.
    """
    if ignore_fields:
        # Filter on a shallow copy; the caller's dict is left untouched.
        excluded = set(ignore_fields)
        value = {k: v for k, v in value.items() if k not in excluded}
    h = hashlib.sha1()
    h.update(dumps(value, sort_keys=True).encode("utf-8"))
    return h.hexdigest()
def answer(request):
    """
    Save the answer.
    GET parameters:
        html:
            turn on the HTML version of the API
    BODY
        json in following format:
        {
            "answer": #answer,                          -- for one answer
            "answers": [#answer, #answer, #answer ...]  -- for multiple answers
        }
        answer = {
            "answer_class": str,        -- class of answer to save (e.g., flashcard_answer)
            "response_time": int,       -- response time in milliseconds
            "meta": "str"               -- optional information
            "time_gap": int             -- waiting time in frontend in seconds
            ...                         -- other fields depending on aswer type
                                          (see from_json method of Django model class)
        }
    """
    if request.method == 'GET':
        # GET shows the self-documenting HTML form for this endpoint.
        return render(request, 'models_answer.html', {}, help_text=answer.__doc__)
    elif request.method == 'POST':
        practice_filter = get_filter(request)
        practice_context = PracticeContext.objects.from_content(practice_filter)
        saved_answers = _save_answers(request, practice_context, True)
        return render_json(request, saved_answers, status=200, template='models_answer.html')
    else:
        # BUGFIX: the message used a "%s" placeholder with str.format, which
        # left a literal "%s" in the response; use a {} placeholder instead.
        return HttpResponseBadRequest("method {} is not allowed".format(request.method))
def valid_client_model(initialize_db):
    """
    A fixture for creating a valid client model.

    Args:
        initialize_db (None): initializes the database and drops tables when
            test function finishes.
    """
    # Persist and return a minimal valid client record.
    client = Client(username='Leroy Jenkins', avatar_url='')
    return client.save()
def merge_features(df: pd.DataFrame) -> pd.DataFrame:
    """
    Merges features that estimate the same thing.

    Station temperatures are converted from Celsius to Kelvin, missing values
    are back-filled, redundant estimates are averaged into single columns and
    the source columns are dropped.

    BUGFIX: works on a copy so the caller's dataframe is no longer mutated
    in place (the kelvin conversions previously edited the input), and the
    deprecated ``fillna(method='backfill')`` is replaced by ``bfill()``.

    :param df: dataframe with the raw NDVI / station / reanalysis columns.
    :return: new dataframe with the merged estimate columns.
    """
    df = df.copy()
    # kelvin conversions
    df['station_max_temp_c'] += 273.15
    df['station_min_temp_c'] += 273.15
    df['station_avg_temp_c'] += 273.15
    df = (df
          .bfill()
          .assign(  # create average estimates for data
              ndvi_n = lambda x:
                  (x['ndvi_ne'] + x['ndvi_nw']) / 2,
              ndvi_s = lambda x:
                  (x['ndvi_se'] + x['ndvi_sw']) / 2,
              max_temp_k = lambda x:
                  (x['station_max_temp_c'] + x['reanalysis_max_air_temp_k']) /2,
              min_temp_k = lambda x:
                  (x['station_min_temp_c'] + x['reanalysis_min_air_temp_k']) /2,
              avg_temp_k = lambda x:
                  (x['station_avg_temp_c'] + x['reanalysis_avg_temp_k'] + x['reanalysis_air_temp_k']) / 3,
              precip_mm = lambda x:
                  (x['station_precip_mm'] + x['reanalysis_sat_precip_amt_mm']) / 2,
              diur_temp_rng_k = lambda x:
                  (x['station_diur_temp_rng_c'] + x['reanalysis_tdtr_k']) / 2
          )
          .drop(  # features for which we created average estimates
              [
                  'ndvi_ne', 'ndvi_nw', 'ndvi_se', 'ndvi_sw',
                  'station_max_temp_c', 'reanalysis_max_air_temp_k',
                  'station_min_temp_c', 'reanalysis_min_air_temp_k',
                  'station_avg_temp_c', 'reanalysis_avg_temp_k', 'reanalysis_air_temp_k',
                  'station_precip_mm', 'reanalysis_sat_precip_amt_mm',
                  'station_diur_temp_rng_c', 'reanalysis_tdtr_k'
              ],
              axis = 1
          )
         )
    return df
def threatActorSTIX(adversaries):
    """
    Parse the adversaries key to convert it to STIX.

    :param adversaries: iterable of adversary names.
    :return: list with one ``ThreatActor`` per adversary name.
    """
    # The original used a conditional expression for its side effect with an
    # unreachable else branch (len < 1 can never hold inside the loop);
    # a plain comprehension expresses the actual behavior.
    return [ThreatActor(name="%s" % (adv)) for adv in adversaries]
import scipy
def merge_channels(data, sampling_rate, filter_data: bool = True):
    """Merge channels based on a running maximum.

    Args:
        data (ndarray): [samples, channels]
        sampling_rate (num): in Hz
        filter_data (bool): pick the loudest channel per 101-sample window.

    Returns:
        ndarray: merged across
    """
    data = np.array(data)  # ensure data is an np.array (and not dask) - otherwise np.interp will fail
    mask = ~np.isfinite(data)  # remove all nan/inf data
    # Fill non-finite samples by linear interpolation over the finite ones.
    data[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), data[~mask])
    # band-pass filter out noise on each channel
    b, a = scipy.signal.butter(6, (25, 1500), btype='bandpass', fs=sampling_rate)
    data = scipy.signal.filtfilt(b, a, data, axis=0, method='pad')
    # find loudest channel in 101-sample windows
    # NOTE(review): when filter_data is False the function falls off the end
    # and implicitly returns None, contradicting the docstring - confirm intent.
    if filter_data:
        sng_max = maximum_filter1d(np.abs(data), size=101, axis=0)
        loudest_channel = np.argmax(sng_max, axis=-1)
        # get linear index and merge channels
        idx = np.ravel_multi_index((np.arange(sng_max.shape[0]), loudest_channel), data.shape)
        data_merged_max = data.ravel()[idx]
        data_merged_max = data_merged_max[:, np.newaxis]  # shape needs to be [nb_samples, 1]
        return data_merged_max
def maximalEigenvector(A):
    """Return the eigenvector of A's largest-magnitude eigenvalue, L1-normalised.

    BUGFIX: ``np.linalg.eig`` does not order its eigenvalues, so taking the
    first column of the eigenvector matrix (as the original did) is not
    guaranteed to yield the dominant eigenvector; select it explicitly.
    """
    eigenvalues, eigenvectors = np.linalg.eig(A)
    # Column paired with the largest-magnitude eigenvalue.
    dominant = np.argmax(np.abs(eigenvalues))
    vec = np.real(eigenvectors[:, dominant])
    return np.abs(vec / np.linalg.norm(vec, 1))
from operator import sub
def remove_hyperlinks(text):
    """Remove hyperlinks from text.

    :param text: string or token list (as produced by ``word_tokenize``).
    :return: text with hyperlinks stripped, in the same form as the input,
        or ``None`` for empty input.
    """
    # BUGFIX: the module imports ``sub`` from ``operator`` (arithmetic
    # subtraction), which cannot perform regex substitution and would raise
    # a TypeError here; bind the regex ``sub`` locally instead.
    from re import sub
    # If text is empty, return None.
    if not text:
        return None
    # If is tokenized, merge tokens into a single string first.
    if is_tokenized(text):
        was_tokenized = True
        normalized_text = merge_tokens(text)
    else:
        was_tokenized = False
        normalized_text = text
    # Replace hyperlinks with space.
    normalized_text = sub(get_hyperlink_pattern(), r' ', normalized_text)
    # Then remove multiple adjacent spaces and strip.
    normalized_text = sub(' +', ' ', normalized_text)
    normalized_text = normalized_text.strip()
    # If text was tokenized, re-tokenize text.
    if was_tokenized:
        normalized_text = word_tokenize(normalized_text)
    # Return normalized text.
    return normalized_text
import os
import copy
def read_struct_file(struct_file, return_type=GeoStruct):
    """read an existing PEST-type structure file into a GeoStruct instance

    Args:
        struct_file (`str`): existing pest-type structure file
        return_type (`object`): the instance type to return. Default is GeoStruct

    Returns:
        [`pyemu.GeoStruct`]: list of `GeoStruct` instances. If
        only one `GeoStruct` is in the file, then a `GeoStruct` is returned

    Example::

        gs = pyemu.utils.geostats.reads_struct_file("struct.dat")

    """
    # Map PEST variogram type codes to variogram classes.
    VARTYPE = {1: SphVario, 2: ExpVario, 3: GauVario, 4: None}
    assert os.path.exists(struct_file)
    structures = []
    variograms = []
    with open(struct_file, "r") as f:
        while True:
            line = f.readline()
            if line == "":
                break
            line = line.strip().lower()
            if line.startswith("structure"):
                name = line.strip().split()[1]
                nugget, transform, variogram_info = _read_structure_attributes(f)
                s = return_type(nugget=nugget, transform=transform, name=name)
                s.variogram_info = variogram_info
                # not sure what is going on, but if I don't copy s here,
                # all the structures end up sharing all the variograms later
                structures.append(copy.deepcopy(s))
            elif line.startswith("variogram"):
                # NOTE(review): variogram_info is the dict from the most
                # recently parsed structure; a variogram block appearing
                # before any structure would raise NameError - confirm the
                # file format guarantees structure-first ordering.
                name = line.strip().split()[1].lower()
                vartype, bearing, a, anisotropy = _read_variogram(f)
                if name in variogram_info:
                    v = VARTYPE[vartype](
                        variogram_info[name],
                        a,
                        anisotropy=anisotropy,
                        bearing=bearing,
                        name=name,
                    )
                    variograms.append(v)
    # Attach each parsed variogram to the structure(s) that reference it.
    for i, st in enumerate(structures):
        for vname in st.variogram_info:
            vfound = None
            for v in variograms:
                if v.name == vname:
                    vfound = v
                    break
            if vfound is None:
                # NOTE(review): `s` here is the last structure parsed, not
                # necessarily `st` - the error message may name the wrong
                # structure.
                raise Exception(
                    "variogram {0} not found for structure {1}".format(vname, s.name)
                )
            st.variograms.append(vfound)
    if len(structures) == 1:
        return structures[0]
    return structures
def _check_nx(path):
    """NX - This mitigation technique attempts to mark
    as the binary as non-executable memory. E.g. An attacker
    can't as easily fill a buffer with shellcode and jump
    to the start address. It is common for this to be disabled
    for things like JIT interpreters.
    """
    headers = _elf_prog_headers(path)
    if headers is None:
        # Could not read program headers - result is a guess.
        return (None, Result.CONF_GUESS)
    # NX is enabled when the GNU_STACK segment lacks the execute flag (RWE).
    for header_line in headers.split(b'\n'):
        if b'GNU_STACK' not in header_line:
            continue
        if b'RWE' not in header_line:
            return (True, Result.CONF_SURE)
    return (False, Result.CONF_SURE)
def verify_auth(username, password):
    """Verify the HTTP Basic Auth credentials against the app config.

    Uses ``hmac.compare_digest`` so the comparison runs in constant time,
    closing the timing side channel a plain ``==`` would leave open.
    """
    import hmac
    config = app.config
    user_ok = hmac.compare_digest(str(username).encode("utf-8"),
                                  str(config['USERNAME']).encode("utf-8"))
    pass_ok = hmac.compare_digest(str(password).encode("utf-8"),
                                  str(config['PASSWORD']).encode("utf-8"))
    # `&` (not `and`) so both digests are always compared, avoiding a
    # timing signal on whether the username matched.
    return bool(user_ok & pass_ok)
def mat_list_to_rf_array(mats_list: list) -> (np.ndarray, dict):
    """Make an RF array from a list of mats"""
    # Stack one RF matrix per mat file; parameters come from the first file.
    loaded = [open_rf(mat_path) for mat_path in mats_list]
    rf_array = np.array(loaded)
    parameters = open_parameters(mats_list[0])
    return rf_array, parameters
def show_options(last_row):
    """
    Show the options. The user can choose what to do.

    last_row: the last row in the worksheet (list).
    Returns True to create a new entry, False to quit; (L)ist reprints the
    last rows and asks again.
    """
    while True:
        choose = input('What to do? (Q)uit/(L)ist/(N)ew [N]: ')
        # BUGFIX: the original compared strings with `is`, which tests object
        # identity and only worked by accident of CPython string interning
        # (and warns on CPython >= 3.8); use `==` for value comparison.
        if choose == '' or choose.lower()[0] == 'n':
            return True
        elif choose.lower()[0] == 'q':
            return False
        elif choose.lower()[0] == 'l':
            last_row = print_last_rows()
def is_pilot_snipe(sortie):
    """
    A pilot snipe is when a plane goes down because the pilot gets killed, and not because the aircraft is crtically
    damaged. Currently, in the logs, a pilot snipe looks rather similar to a normal death. Even in a pilot snipe,
    the logs think the aircraft gets shotdown before the pilot dies - i.e. it emits "plane shotdown" before "pilot dead"
    Instead the logs sees to relay the information that it was a pilot snipe by "damage to the pilot". I.e. a pilot
    snipe has "damage to pilot X by plane Y" events, whereas a death due to a not pilot snipe has "damgage to pilot X
    without a plane" events.
    So, to check for pilot snipe we check:
    1. The pilot must have died to a player/AI object.
    2. That the death didn't happen much later than the shotdown, otherwise it could've been someone strafing a plane
    which was already dead.
    3. That the shotdown didn't happen much later than the last damage to pilot event, otherwise it could be as above.
    4. That there was sufficent damage to the pilot from enemy planes to cause a death to the pilot.
    If all 4 conditions are satisified, then it's a pilot snipe.
    """
    # NOTE(review): death_event and shotdown_event use byte-identical filters
    # (both type='killed'); the shotdown query presumably should filter a
    # 'shotdown'-type event - confirm against the LogEntry type values.
    death_event = (LogEntry.objects
                   .filter(Q(cact_sortie_id=sortie.id),
                           Q(type='killed'), act_object_id__isnull=False))
    shotdown_event = (LogEntry.objects
                      .filter(Q(cact_sortie_id=sortie.id),
                              Q(type='killed'), act_object_id__isnull=False))
    # Wound events sorted latest-first, so wound_events[0] is the most recent.
    wound_events = (LogEntry.objects
                    .filter(Q(cact_sortie_id=sortie.id),
                            Q(type='wounded'), act_object_id__isnull=False)
                    .order_by('-tik'))
    if not death_event.exists() or not shotdown_event.exists() or not wound_events.exists():
        # Condition 1 in function description.
        return False
    death_event = death_event[0]
    shotdown_event = shotdown_event[0]
    if death_event.tik - shotdown_event.tik > 20:
        # Condition 2 in function description
        # Threshold is 20 tiks = 0.4 seconds.
        return False
    if wound_events[0].tik - shotdown_event.tik > 20:
        # Condition 3 in function description.
        # Threshold is 20 tiks = 0.4 seconds.
        return False
    # Condition 4: total up the pilot damage; the per-event payload is either
    # a plain number or a dict holding the percentage under 'pct'.
    wound_damage = 0
    for wound_event in wound_events:
        if type(wound_event.extra_data['damage']) is dict:
            wound_damage += wound_event.extra_data['damage']['pct']
        else:
            wound_damage += wound_event.extra_data['damage']
    return wound_damage > 0.95
import os
def merge_meta(meta: dict) -> dict:
    """
    merge data for api: get meta config.

    Recursively flattens a meta config and its referenced modules into a
    single dict keyed by config name / module name.

    Note: mutates the input ``meta`` in place (pops CONFIG_MODULE and
    CONFIG_NAME) - callers must not reuse the original dict afterwards.
    """
    modules = meta.pop(CONFIG_MODULE)
    # Remaining keys of `meta` become the entry for this config's name.
    _meta = {meta.pop(CONFIG_NAME): meta}
    for meta_name, detail in modules.items():
        if SOURCE_META in detail.keys():
            # meta config: the dotted SOURCE_META path maps to a .json file
            # relative to the current working directory; merge it recursively.
            file_path = os.getcwd()
            for item in detail[SOURCE_META].split('.'):
                file_path = os.path.join(file_path, item)
            file_path = f'{file_path}.json'
            slave_meta = json_load(file_path)
            module_meta = merge_meta(slave_meta)
            _meta.update(**module_meta)
        if SOURCE_MODEL in detail.keys():
            # model config: derive field meta from the model class's fields.
            module_model = {meta_name: {CONFIG_TYPE: "", CONFIG_META: {}, CONFIG_API: {}}}
            # module_model = {"name": meta_name, "type": "", "meta": {}, "api": {}}
            cls = get_class_from_path(detail[SOURCE_MODEL])
            # NOTE(review): cls._meta.fields suggests a Django-style model -
            # confirm which ORM this targets.
            for item in cls._meta.fields:
                field_type = MODEL_FIELD_MAPPING[item.__class__]
                field_config = {
                    FIELD_TITLE: item.verbose_name,
                    FIELD_TYPE: {
                        REVERSE_META_FIELD_MAPPING[field_type]: {}
                    }
                }
                module_model[meta_name][SOURCE_META].update(**{item.name: field_config})
            _meta.update(**module_model)
    return _meta
import os
def load_messages(language):
    """Load translation messages for given language from all `setup_wizard_requires`
    javascript files

    Side effects: clears the frappe cache, sets and commits the default
    language, and pushes the collected translations to the client via
    ``send_translations``.

    :param language: human-readable language name; converted to a language
        code via ``get_language_code``.
    :return: the language code now active (``frappe.local.lang``).
    """
    frappe.clear_cache()
    set_default_language(get_language_code(language))
    frappe.db.commit()
    # Start from the setup-wizard page translations, then layer in each
    # hooked JS file's strings and finally the boot strings.
    m = get_dict("page", "setup-wizard")
    for path in frappe.get_hooks("setup_wizard_requires"):
        # common folder `assets` served from `sites/`
        js_file_path = os.path.abspath(frappe.get_site_path("..", *path.strip("/").split("/")))
        m.update(get_dict("jsfile", js_file_path))
    m.update(get_dict("boot"))
    send_translations(m)
    return frappe.local.lang
def change_lang(request):
    """
    Change current documentation language.
    """
    # Default to English when no language code is supplied.
    lang_code = request.GET.get('lang_code', 'en')
    response = redirect('/')
    # Persist the preference on the redirect response before returning it.
    portal_helper.set_preferred_language(request, response, lang_code)
    return response
import re
def cleanHtml(sentence):
    """
    remove all Html canvas from the sentence
    :param sentence {str} sentence
    :return:
        {str}: sentence without html canvas
    """
    # Replace every tag-like span (<...>, non-greedy) with a single space;
    # non-string input is stringified first.
    tag_pattern = re.compile('<.*?>')
    return re.sub(tag_pattern, ' ', str(sentence))
def simple_decoder_fn_train(encoder_state, name=None):
    """ Simple decoder function for a sequence-to-sequence model used in the
    `dynamic_rnn_decoder`.

    Intended for the training mode of `dynamic_rnn_decoder`:

    ```
    dynamic_fn_train = simple_decoder_fn_train(encoder_state)
    outputs_train, state_train = dynamic_rnn_decoder(
        decoder_fn=dynamic_fn_train, ...)
    ```

    Further usage can be found in the `kernel_tests/seq2seq_test.py`.

    Args:
        encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.
        name: (default: `None`) NameScope for the decoder function;
            defaults to "simple_decoder_fn_train"

    Returns:
        A decoder function with the required interface of `dynamic_rnn_decoder`
        intended for training.
    """
    with ops.name_scope(name, "simple_decoder_fn_train", [encoder_state]):
        pass

    def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
        """ Decoder function used in the `dynamic_rnn_decoder` for training.

        Args:
            time: positive integer constant reflecting the current timestep.
            cell_state: state of RNNCell, or `None` on the first call.
            cell_input: input provided by `dynamic_rnn_decoder`.
            cell_output: output of RNNCell.
            context_state: context state provided by `dynamic_rnn_decoder`.

        Returns:
            A tuple (done, next state, next input, emit output, next context
            state).  `done` is always `None`, telling `dynamic_rnn_decoder`
            to rely on `sequence_lengths`; input, output and context state
            are passed through unchanged.
        """
        with ops.name_scope(name, "simple_decoder_fn_train",
                            [time, cell_state, cell_input, cell_output,
                             context_state]):
            # First call (cell_state is None): substitute the encoder state;
            # afterwards pass the RNN state through unchanged.
            next_state = encoder_state if cell_state is None else cell_state
            return (None, next_state, cell_input, cell_output, context_state)
    return decoder_fn
def explain_point_local(data_row, neighbors, oversampled_data, model_features, categorical_features, numeric_features, budget=999, show_pos_neg = False):
"""
Provides explanations on each point in the selected subset for local explanations.
Parameters:
-----------------
data_row: integer, the index of each individual data-point in the subset
neighbors: array, list of nearest neighbors of the point
oversampled_data: nD array, approximated neighborhood of the point
model_features: list, name of all features of the model
categorical_features: list, name of categorical features
numeric_features: name of all numeric features
budget: integer, number of features to be displayed as output
show_pos_neg: boolean, show both positive and negative influences of features
Returns:
-----------------
corr_feat_dist: nD array, correlations between the features contribution and distances
feature_dict: dict, dictionary of feature influences
feature_distance_contribution: nD array, contribution of each features in the relative distances between points
dvs_matrix: nD array, distance variance score matrix
sorted_indexes: nD array, sorted indexes of data points according to proximity
"""
distances = []
#weights = []
cat_features = []
if len(categorical_features) != 0:
for i in range (0, oversampled_data.shape[1]):
cat_features.append(True) if i in categorical_features else cat_features.append(False)
# for index in range (0,len(oversampled_data.T)):
# weights.append(1/float(len(oversampled_data.T)))
for index in range(0, len(oversampled_data)):
point_vectors = np.array([oversampled_data[0], oversampled_data[index]])
if len(categorical_features) == 0:
#distances.append(pdist(point_vectors,wminkowski,2, weights)[0])
distances.append(pdist(point_vectors)[0])
else:
#distances.append(Distances.gower_distances(point_vectors, w=weights, categorical_features=cat_features)[0][1])
distances.append(gower_distances(point_vectors, categorical_features=cat_features)[0][1])
sorted_indexes = np.argsort(distances)
sorted_distances = np.sort(distances)
## Order neighbors based on sorted positions
sorted_neighbors = np.empty((0,data_row.shape[0]), float)
for index in sorted_indexes:
sorted_neighbors = np.append(sorted_neighbors , np.array([oversampled_data[index,:]]), axis=0)
## calculate feature difference matrix
feature_difference = np.zeros((sorted_neighbors.shape[0]-1, sorted_neighbors.shape[1]))
for col_index in range(0,sorted_neighbors.shape[1]):
if col_index in numeric_features:
for row_index in range(0,sorted_neighbors.shape[0]-1):
feature_difference[row_index][col_index] = (abs(sorted_neighbors[row_index+1][col_index] - sorted_neighbors[row_index][col_index])
/ (np.max(sorted_neighbors[:,col_index])-np.min(sorted_neighbors[:,col_index])))
else:
for row_index in range(0,sorted_neighbors.shape[0]-1):
feature_difference[row_index][col_index] = 0 if sorted_neighbors[row_index+1][col_index] == sorted_neighbors[row_index][col_index] else 1
## calculate the feature contribution matrix and distance variance score matrix
feature_distance_contribution = np.zeros((sorted_neighbors.shape[0]-1, sorted_neighbors.shape[1]))
dvs_matrix = np.zeros((sorted_neighbors.shape[0]-1, sorted_neighbors.shape[1]))
for col_index in range(0,sorted_neighbors.shape[1]):
for row_index in range(0,sorted_neighbors.shape[0]-1):
feature_distance_contribution[row_index][col_index] = feature_difference[row_index][col_index] / sorted_distances[row_index]
dvs_matrix[row_index][col_index] = feature_distance_contribution[row_index][col_index] * np.var(sorted_neighbors[:,col_index])
## calculate feature covariance with distances -> Experiment a little bit with this
feature_distance_contribution = np.nan_to_num(feature_distance_contribution)
corr_feat_dist = np.zeros(feature_difference.shape[1])
for col_index in range(0,feature_difference.shape[1]):
corr_feat_dist[col_index] = np.corrcoef(feature_difference[:,col_index],sorted_distances[:-1])[0][1]
if budget != 999 and show_pos_neg == False:
sub_features = [np.argsort(abs(corr_feat_dist), axis=0)[::-1][x] for x in range(0,budget)]
feature_dict= dict(zip([model_features[x] for x in sub_features], [corr_feat_dist[x] for x in sub_features]))
elif budget != 999 and show_pos_neg == True:
sub_features = [np.argsort(corr_feat_dist, axis=0)[::-1][x] for x in range(0,len(corr_feat_dist))]
feature_dict= dict(zip([model_features[x] for x in sub_features[0:int(budget/2)]], [corr_feat_dist[x] for x in sub_features[0:int(budget/2)]]))
feature_dict.update(zip([model_features[x] for x in sub_features[(len(sub_features)-int(budget/2)): len(sub_features)]], [corr_feat_dist[x] for x in sub_features[(len(sub_features)-int(budget/2)): len(sub_features)]]))
else:
feature_dict= dict(zip(model_features, corr_feat_dist))
return corr_feat_dist, feature_dict, feature_distance_contribution, dvs_matrix, sorted_indexes | dd4ef3aafbe7ac97d4bead509e7f746554b4f015 | 3,631,889 |
def pots_scan(n_src, ele_lims, true_csd_xlims,
              total_ele, ele_pos, R_init=0.23):
    """
    Investigates kCSD reconstructions for unitary potential on different
    electrodes

    Parameters
    ----------
    n_src: int
        Number of basis sources.
    ele_lims: list
        Boundaries for electrodes placement.
    true_csd_xlims: list
        Boundaries for ground truth space.
    total_ele: int
        Number of electrodes.
    ele_pos: numpy array
        Electrodes positions.

    Returns
    -------
    obj_all: class object
    eigenvalues: numpy array
        Eigenvalues of k_pot matrix.
    eigenvectors: numpy array
        Eigen vectors of k_pot matrix.
    """
    obj_all = []
    est_csd = []
    n_ele = len(ele_pos)
    for ele_idx in range(n_ele):
        # Unit potential on a single electrode, zero everywhere else.
        pots = np.zeros((n_ele, 1))
        pots[ele_idx, 0] = 1
        obj = KCSD1D(ele_pos, pots, src_type='gauss', sigma=0.3, h=0.25,
                     gdx=0.01, n_src_init=n_src, ext_x=0, xmin=0, xmax=1,
                     R_init=R_init)
        est_csd.append(obj.values('CSD'))
        obj_all.append(obj)
    return obj_all, est_csd
from typing import Any
def add_nav_entry(mkdocs_settings, nav_entry: NavEntry) -> Any:
    """
    Add an additional entry to the Nav in mkdocs.yml

    Args:
        mkdocs_settings (): The mkdocs settings to update.
        nav_entry (NavEntry): The NavEntry to add

    Returns:
        The updated mkdocs_settings
    """
    settings = deepcopy(mkdocs_settings)
    # Append the entry, drop duplicates while preserving order, then
    # convert back to the mkdocs nav representation.
    nav_entries = mkdocs_to_navlist(settings["nav"]) + [nav_entry]
    deduplicated = list(unique_everseen(nav_entries))
    settings["nav"] = navlist_to_mkdocs(deduplicated)
    return settings
import re
def is_guid(value):
    """
    Return ``value`` if it contains only characters from [A-Za-z0-9_-],
    otherwise ``None``.

    Notes:
    - The pattern also matches the empty string, so ``""`` is returned
      as-is (which is falsy).
    - ``$`` matches before a trailing newline too, so ``"abc\\n"`` passes;
      use ``\\Z`` or ``re.fullmatch`` if that is unwanted (NOTE review).
    """
    if re.match("^[A-Za-z0-9_-]*$", value):
        return value
    return None
def load_scikit_learn_model(model_uri):
    """
    Load a scikit-learn model from a local file.

    :param model_uri: The location, in URI format, of the aiflow model,
        for example:

        - ``/Users/aiflow/path/to/local/model``
        - ``relative/path/to/local/model``
        - ``s3://my_bucket/path/to/model``
        - ``models:/<model_name>/<model_version>``
        - ``models:/<model_name>/<stage>``

    :return: A scikit-learn model.
    """
    # Delegate the actual deserialization to the shared local-file loader.
    model = _load_model_from_local_file(path=model_uri)
    return model
def epimorphism_in_laurent(tri, angle, cycles, ZH):
    """
    The argument cycles specifies a group epimorphism from the
    manifold to the filled manifold. This function returns the image
    of the generators of the group ring under the induced epimorphism.

    Parameters
    ----------
    tri:
        Triangulation of the manifold (presumably a regina triangulation;
        only ``countTetrahedra`` is called here — confirm against callers).
    angle:
        Angle structure data forwarded to ``faces_in_smith``.
    cycles:
        Cycles encoding the filling, i.e. the epimorphism to the
        filled manifold.
    ZH:
        Group ring (Laurent polynomial ring); called with a dict
        ``{exponent_tuple: coefficient}`` to build monomials.

    Returns
    -------
    list
        One ``ZH`` monomial per generator: the image of each unfilled
        generator in the filled manifold's group ring.
    """
    n = tri.countTetrahedra()
    S,U,V = faces_in_smith(tri, angle, []) # basis before filling, so no cycles
    r = rank_of_quotient(S)[0]
    S2, U2, V2 = faces_in_smith(tri, angle, cycles) # basis after filling
    r2 = rank_of_quotient(S2)[0]
    # A: change of basis restricted to the r free generators before filling.
    A = U.inverse().delete_columns(range(n+1-r))
    # B: projection onto the r2 free generators after filling.
    B = U2.delete_rows(range(n+1-r2))
    # Each column of B*A is the exponent vector of a generator's image.
    image_on_gens = (B*A).columns()
    image_on_gens = [tuple(col) for col in image_on_gens]
    # Wrap each exponent tuple as a monomial (coefficient 1) in ZH.
    image_in_laurent = [ZH( { image_on_gens[i]:1 } ) for i in range(r)]
    return image_in_laurent
def merge_config(a, b):
    """Recursively merge mapping *b* into *a*, in place, and return *a*.

    Values from *b* take precedence. When both sides hold a dict for the
    same key, the nested dicts are merged recursively instead of being
    replaced wholesale.
    """
    for key in b:
        incoming = b[key]
        existing = a.get(key)
        # isinstance(None, dict) is False, so a missing key falls through
        # to plain assignment.
        if isinstance(incoming, dict) and isinstance(existing, dict):
            merge_config(existing, incoming)
        else:
            a[key] = incoming
    return a
def tsi_moving_average(df, periods=7):
    """Append a simple moving average (MA) of the TSI column.

    Args:
        df (pandas.DataFrame): Quotes with a ``TSI`` column.
        periods (int, optional): Window length for the MA; rows with fewer
            than ``periods`` observations yield NaN. Defaults to 7.

    Returns:
        pandas.DataFrame: A new frame equal to ``df`` plus a ``TSI_MA``
        column rounded to 4 decimals.
    """
    rolling_mean = df["TSI"].rolling(periods, min_periods=periods).mean()
    # Round first, then attach under the TSI_MA name via join.
    return df.join(rolling_mean.round(4).rename("TSI_MA"))
def fit_index(dataset, list_variables):
    """ Build index <-> category mappings for categorical variables.

    For each requested column, two dictionaries are produced:

    - ``index_to_categorical``: integer index -> category value
    - ``categorical_to_index``: category value -> integer index

    Parameters
    ----------
    dataset: pandas.core.frame.DataFrame
        DataFrame with (partly) categorical variables
    list_variables: list(str)
        List of variable names to index

    Returns
    -------
    index: dict
        For each categorical column, the two mappings above.

    Raises
    ------
    RuntimeError
        If a requested column is absent from ``dataset``.
    """
    index = dict()
    for icol in list_variables:
        if icol not in dataset.columns:
            raise RuntimeError(f'{icol} not found in dataframe')
        # unique() preserves order of first appearance, so indices are
        # assigned in encounter order.
        categories = dataset.loc[:, icol].unique()
        idx2cat = dict(enumerate(categories))
        cat2idx = {cat: pos for pos, cat in idx2cat.items()}
        index[icol] = {
            'index_to_categorical': idx2cat,
            'categorical_to_index': cat2idx
        }
    return index
def theoretical_motion(input, g):
    """
    Compute the theoretical (drag-free) projectile motion.

    Args:
        input: ndarray with shape (num_samples, 3) holding, per sample,
            the time t and initial velocities v0_x, v0_z (in that order).
        g: gravity acceleration

    Returns:
        Tuple (x, z) of ndarrays, each shaped (num_samples, 1):
        horizontal and vertical positions at the given times.
    """
    # Slice with ranges (not scalars) so each column keeps its trailing
    # axis, matching np.split's output shape.
    t = input[..., 0:1]
    v0_x = input[..., 1:2]
    v0_z = input[..., 2:3]
    x = v0_x * t
    z = v0_z * t - 0.5 * g * t * t
    return x, z
from typing import List
import requests
def list_analyses() -> List[str]:
    """Return the names of all supported analyses from the info endpoint."""
    # NOTE(review): the JSON body is returned as-is; presumably a list of
    # strings per the annotation — confirm against the API schema.
    reply = requests.get(_url("/info/analyses"))
    return reply.json()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.