| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def texture_symmetry_predict_patches(classifier, data=None, data_backup_file='FeaturesForPreds'):
"""Predict if symetric pairs of patches taken in a dermoscopic image are similar or not using features extracted
with the `texture_symmetry_features()` function and stored in the "FeatureForPreds.csv" file.
# Arguments :
classifier: The trained random forest classifier (with patchesDataSet).
data: As returned by the texture_symmetry_features function (optional).
data_backup_filename: Only if data is None, file to load data from.
# Outputs :
preds: The predictions (0 if non similar, 1 if similar).
nonSimilarNum: Int. The number of non similar matches.
similarNum: Int. The number of similar matches.
"""
if data is None:
data = pd.read_csv(f"{package_path()}/data/patchesDataSet/{data_backup_file}.csv", index_col=False)
features = list(data)
del features[0]
else:
features = list(data)
toPredict = data[features]
preds = classifier.predict(toPredict)
nonSimilarNum = list(preds).count(0)
similarNum = list(preds).count(1)
return preds, nonSimilarNum, similarNum
| 15,300
|
def stack_bands(source_path, mgrs_coordinate, save_path):
"""
    Stacks the SWIR, NIR and Green bands of a scene into a single raster.
    :param source_path: Path to source images
    :param mgrs_coordinate: MGRS coordinate of the scene, for example '31/U/FQ'
    :param save_path: Directory where the stacked raster will be saved
"""
# selecting the SWIR, NIR and Green band names from the remote sensing dataset
dict_of_bands = {"SWIR-band": "B12", "NIR-band": "B8A", "Green-band": "B03"}
list_of_bands = list(dict_of_bands.values())
path_to_scene = os.path.join(source_path, mgrs_coordinate)
dict_source_paths, bands_string, meta_source = create_source_paths(
list_of_bands, path_to_scene
)
dict_target_paths = create_target_paths(
dict_source_paths, save_path, mgrs_coordinate, bands_string, list_of_bands
)
write_bands(dict_source_paths, meta_source, dict_target_paths, list_of_bands)
| 15,301
|
def instantiateSong(fileName):
"""Create an AudioSegment with the data from the given file"""
ext = detectFormat(fileName)
if(ext == "mp3"):
return pd.AudioSegment.from_mp3(fileName)
elif(ext == "wav"):
return pd.AudioSegment.from_wav(fileName)
elif(ext == "ogg"):
return pd.AudioSegment.from_ogg(fileName)
elif(ext == "flv"):
return pd.AudioSegment.from_flv(fileName)
elif(ext == "m4a"):
return pd.AudioSegment.from_file(fileName, "mp4")
else:
return pd.AudioSegment.from_file(fileName, ext)
| 15,302
|
def get_href(link: bs4.element.Tag) -> str:
"""If a link has an href attribute, return it
:param link: The link to be checked
:returns: An href
"""
if (link.has_attr("href")):
return (link["href"])
| 15,303
|
def get_contrib_requirements(filepath: str) -> Dict:
"""
Parse the python file from filepath to identify a "library_metadata" dictionary in any defined classes, and return a requirements_info object that includes a list of pip-installable requirements for each class that defines them.
Note, currently we are handling all dependencies at the module level. To support future expandability and detail, this method also returns per-class requirements in addition to the concatenated list.
Args:
filepath: the path to the file to parse and analyze
Returns:
A dictionary:
{
"requirements": [ all_requirements_found_in_any_library_metadata_in_file ],
class_name: [ requirements ]
}
"""
with open(filepath) as file:
tree = ast.parse(file.read())
requirements_info = {"requirements": []}
for child in ast.iter_child_nodes(tree):
if not isinstance(child, ast.ClassDef):
continue
current_class = child.name
for node in ast.walk(child):
if isinstance(node, ast.Assign):
try:
target_ids = [target.id for target in node.targets]
except (ValueError, AttributeError):
# some assignment types assign to non-node objects (e.g. Tuple)
target_ids = []
if "library_metadata" in target_ids:
library_metadata = ast.literal_eval(node.value)
requirements = library_metadata.get("requirements", [])
requirements_info[current_class] = requirements
requirements_info["requirements"] += requirements
return requirements_info
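
# Hedged usage sketch (not part of the original module): writes a tiny module containing a
# class with a "library_metadata" dict to a temporary file and parses it with
# get_contrib_requirements. The sample source and class name are illustrative only.
import tempfile

_SAMPLE_SOURCE = '''
class MyExpectation:
    library_metadata = {"requirements": ["pandas>=1.0", "sqlalchemy"]}
'''

def _demo_get_contrib_requirements():
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
        tmp.write(_SAMPLE_SOURCE)
        path = tmp.name
    info = get_contrib_requirements(path)
    # expected: {"requirements": ["pandas>=1.0", "sqlalchemy"],
    #            "MyExpectation": ["pandas>=1.0", "sqlalchemy"]}
    return info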
| 15,304
|
def main(files: List[pathlib.Path], name: str) -> None:
"""Create samplesheet for list of files"""
data = []
for file in files:
dir = file.parent
id_ = dir.stem
otu_table = dir / "otu_table.tsv"
obs_metadata = dir / "obs_metadata.tsv"
sample_metadata = dir / "sample_metadata.tsv"
children_map = dir / "children_map.json"
data.append(
{
"id": id_,
"otu_table": otu_table,
"obs_metadata": obs_metadata,
"sample_metadata": sample_metadata,
"children_map": children_map,
}
)
df = pd.DataFrame(data, columns=HEADER)
cwd = pathlib.Path(f"../../pipeline/{name}")
df.to_csv(cwd / "samplesheet.csv", index=False, sep=",")
| 15,305
|
def test_database(app: Flask):
"""Initializes database for testing purposes."""
# Create the database and the database table
db_session = connect_db(app)
# db_session.create_all()
yield db_session, {"Project": Project, "Item": Item, "Option": Option}
# db_session.drop_all()
| 15,306
|
def _append_to_kokoro_bazel_invocations(invocation_id: str) -> None:
"""Kokoro can display "Bazel" result link on kokoro jobs if told so."""
# to get "bazel" link for kokoro build, we need to upload
# the "bazel_invocation_ids" file with bazel invocation ID as artifact.
kokoro_artifacts_dir = os.getenv('KOKORO_ARTIFACTS_DIR')
if kokoro_artifacts_dir:
# append the bazel invocation UUID to the bazel_invocation_ids file.
with open(os.path.join(kokoro_artifacts_dir, 'bazel_invocation_ids'),
'a') as f:
f.write(invocation_id + '\n')
print(
'Added invocation ID %s to kokoro "bazel_invocation_ids" artifact' %
invocation_id,
file=sys.stderr)
else:
print(
'Skipped adding invocation ID %s to kokoro "bazel_invocation_ids" artifact'
% invocation_id,
file=sys.stderr)
| 15,307
|
def watch(endpoint, key):
"""watch sends watch request to etcd.
Examples:
curl -L http://localhost:2379/v3alpha/watch \
        -X POST -d '{"create_request": {"key":"Zm9v"} }'
"""
# Python 2
# key_str = base64.b64encode(key)
    # Python 3 base64 requires utf-8 encoded bytes
# Python 3 JSON encoder requires string
key_str = base64.b64encode(bytes(key, "utf-8")).decode()
req = {'create_request': {"key": key_str}}
while True:
try:
rresp = requests.post(endpoint + '/v3alpha/watch',
data=json.dumps(req), stream=True)
for line in rresp.iter_lines():
# filter out keep-alive new lines
if line:
decoded_line = line.decode('utf-8')
resp = json.loads(decoded_line)
if 'result' not in resp:
log.warning('{0} does not have result'.format(resp))
return ''
if 'created' in resp['result']:
if resp['result']['created']:
log.warning('watching {0}'.format(key))
continue
if 'events' not in resp['result']:
log.warning('{0} returned no events: {1}'.format(key,
resp))
return None
if len(resp['result']['events']) != 1:
log.warning('{0} returned >1 event: {1}'.format(key,
resp))
return None
if 'kv' in resp['result']['events'][0]:
if 'value' in resp['result']['events'][0]['kv']:
val = resp['result']['events'][0]['kv']['value']
return base64.b64decode(val)
else:
                            log.warning('no value in %s', resp)
return None
else:
                        log.warning('no kv in %s', resp)
return None
except requests.exceptions.ConnectionError as err:
log.warning('Connection error: {0}'.format(err))
time.sleep(5)
except:
            log.warning('Unexpected error: %s', sys.exc_info()[0])
raise
| 15,308
|
def __zedwalther(kin):
"""
Calculate the z-parameter for the Walther equation (ASTM D341).
Parameters
----------
kin: scalar
The kinematic viscosity of the lubricant.
Returns
-------
zed: scalar
The z-parameter.
"""
zed = kin + 0.7 + 10 ** (-1.47 - 1.84 * kin - 0.51 * kin ** 2)
return zed
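
# Hedged usage sketch: evaluate the Walther z-parameter for an illustrative kinematic
# viscosity of 32 cSt. ASTM D341 models viscosity-temperature behaviour through
# log10(log10(z)), shown here only as a quick sanity check.
import math

_kin_example = 32.0                      # kinematic viscosity in cSt (illustrative)
_zed_example = __zedwalther(_kin_example)
_loglog_z = math.log10(math.log10(_zed_example))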
| 15,309
|
def o1_cosmologies_list():
"""
Return the list of $\\sigma_8$ values used in training Q1
    :return: A numpy array of 22 $\\sigma_8$ values
"""
return np.array([0.969, 0.654, 1.06, 0.703,
1.1615, 0.759, 0.885, 0.6295,
0.605, 0.7205, 1.1685, 1.179,
0.857, 1.123, 0.843, 0.5245,
0.99, 0.7485, 0.528, 1.1265,
0.8535, 0.9165])
| 15,310
|
def chrelerr(fbest, stop):
"""
checks whether the required tolerance for a test function with known
global minimum has already been achieved
Input:
fbest function value to be checked
stop(0) relative error with which a global minimum with not too
small absolute value should be reached
stop(1) global minimum function value of a test function
stop(2) if abs(fglob) is very small, we stop if the function
value is less than stop(2)
Output:
flag = 0 the required tolerance has been achieved
= 1 otherwise
"""
fglob = stop[1]
if fbest - fglob <= max(stop[0] * abs(fglob), stop[2]):
return 0
return 1
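
# Hedged usage sketch: stop = [relative tolerance, known global minimum, absolute
# tolerance]. The numbers below are illustrative only.
_stop_example = [1e-4, 1.0, 1e-8]
assert chrelerr(1.00005, _stop_example) == 0   # within 1e-4 * |fglob| of the minimum
assert chrelerr(1.5, _stop_example) == 1       # not yet close enough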
| 15,311
|
def prop_test(df):
"""
Inspired from R package caret confusionMatrix.R
"""
from scipy.stats import binom
x = np.diag(df).sum()
n = df.sum().sum()
p = (df.sum(axis=0) / df.sum().sum()).max()
d = {
"statistic": x, # number of successes
"parameter": n, # number of trials
"null.value": p, # probability of success
"p.value": binom.sf(x - 1, n, p), # see https://en.wikipedia.org/wiki/Binomial_test
}
    return d
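
# Hedged usage sketch for prop_test: a 2x2 confusion matrix as a pandas DataFrame
# (rows = predicted, columns = actual is assumed here; the counts are illustrative).
import pandas as pd

_cm = pd.DataFrame([[50, 10],
                    [5, 35]])
_res = prop_test(_cm)
# _res["statistic"] -> 85 correct predictions, _res["parameter"] -> 100 trials,
# _res["null.value"] -> 0.55 (largest column proportion), _res["p.value"] -> binomial tail probability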
| 15,312
|
def time_and_log_query( fn ):
"""
Decorator to time operation of method
From High Performance Python, p.27
"""
@wraps( fn )
def measure_time( *args, **kwargs ):
t1 = time.time()
result = fn( *args, **kwargs )
t2 = time.time()
elapsed = t2 - t1
log_query( elapsed )
log_query_timestamp()
# print(("@timefn:%s took %s seconds" % (fn.__name__, elapsed)))
return result
return measure_time
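
# Hedged usage sketch: decorating a callable is enough; the elapsed time is passed to the
# surrounding module's log_query()/log_query_timestamp() helpers (assumed to exist) only
# when the decorated function is actually called.
@time_and_log_query
def _demo_query(n):
    return sum(range(n))

# _demo_query(10000) would return 49995000 and log the elapsed time as a side effect.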
| 15,313
|
def parse_anchor_body(anchor_body):
"""
Given the body of an anchor, parse it to determine what topic ID it's
anchored to and what text the anchor uses in the source help file.
This always returns a 2-tuple, though based on the anchor body in the file
it may end up thinking that the topic ID and the text are identical.
"""
c_pos = anchor_body.find(':')
if c_pos >= 0:
id_val = anchor_body[:c_pos]
anchor_body = anchor_body[c_pos+1:]
id_val = id_val or anchor_body
else:
id_val = anchor_body
return (id_val.casefold().rstrip(), anchor_body.strip())
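
# Hedged usage sketch covering both anchor forms: an explicit "topic:text" body and a
# bare body where the topic ID falls back to the display text.
assert parse_anchor_body("build_systems:Build Systems") == ("build_systems", "Build Systems")
assert parse_anchor_body("Settings") == ("settings", "Settings")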
| 15,314
|
def func_parameters(func_name):
"""
Generates function parameters for a particular function.
Parameters
----------
func_name : string
Name of function.
Returns
--------
d : integer
Size of dimension.
g : gradient of objective function.
`g(x, *func_args) -> 1-D array with shape (d, )`
where `x` is a 1-D array with shape(d, ) and func_args is a
tuple of arguments needed to compute the gradient.
func_args : tuple
Arguments passed to f and g.
bound_1 : integer
Lower bound used to generate starting points.
bound_2 : integer
Upper bound used to generate starting points.
"""
if func_name == 'styb':
d = 5
g = mt_obj.styblinski_tang_gradient
func_args = ()
bound_1 = -5
bound_2 = 5
elif func_name == 'qing':
d = 5
g = mt_obj.qing_gradient
func_args = (d,)
bound_1 = -3
bound_2 = 3
elif func_name == 'zak':
d = 10
g = mt_obj.zakharov_grad
func_args = (d,)
bound_1 = -5
bound_2 = 10
elif func_name == 'hart':
d = 6
g = mt_obj.hartmann6_grad
a, c, p = mt_obj.hartmann6_func_params()
func_args = d, a, c, p
bound_1 = 0
bound_2 = 1
return d, g, func_args, bound_1, bound_2
| 15,315
|
def range_overlap(range1, range2):
"""
    determine whether range1 is within range2 (or is exactly the same)
:param range range1: a range
:param range range2: another range
:rtype: bool
:return: True, range1 is subset of range2, False, not the case
"""
result = all([
range1.start >= range2.start,
range1.stop <= range2.stop
])
return result
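
# Hedged quick check: range(2, 5) lies entirely inside range(0, 10), while range(0, 12)
# extends past its stop and is therefore not contained.
assert range_overlap(range(2, 5), range(0, 10)) is True
assert range_overlap(range(0, 12), range(0, 10)) is False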
| 15,316
|
def _expand_query_list(session, queries, recursive=False, verbose=False):
"""This function expands ls queries by resolving relative paths,
expanding wildcards and expanding recursive queries. If the user provides no
queries, the method defaults to a single nonrecursive query for the current working directory."""
results = []
# If no queries are supplied by the user, default to a query for the
# current working directory
if len(queries) == 0:
queries = [get_cwd()]
# Wildcard expansion is performed first, so it can be combined with other types
# of expansion, such as recursive expansion of subcollections later. Each collection
# or data object is expanded only once.
preprocessed_queries = []
already_expanded = {}
for query in queries:
# Currently only wildcards without a collection path are supported
# e.g. "*.dat", but not "../*.dat" or "*/data.dat".
if "/" not in query and ("?" in query or "*" in query):
for d in get_dataobjects_in_collection(session, get_cwd()):
if fnmatch(d["name"],
query) and d["full_name"] not in already_expanded:
preprocessed_queries.append(d["full_name"])
already_expanded[d["full_name"]] = 1
for c in get_direct_subcollections(session, get_cwd()):
parent, coll = os.path.split(c["name"])
                if fnmatch(coll, query) and c["name"] not in already_expanded:
                    preprocessed_queries.append(c["name"])
                    already_expanded[c["name"]] = 1
else:
preprocessed_queries.append(query)
for query in preprocessed_queries:
absquery = convert_to_absolute_path(query)
if collection_exists(session, absquery):
results.append({"original_query": query, "expanded_query": absquery,
"expanded_query_type": "collection"})
if verbose:
print_debug("Argument \"{}\" is a collection.".format(query))
if recursive:
for subcollection in get_subcollections(session, absquery):
if verbose:
print_debug("Recursively adding subcollection " +
subcollection + " to queries.")
results.append({"original_query": query,
"expanded_query": subcollection,
"expanded_query_type": "collection"})
elif dataobject_exists(session, absquery):
results.append({"original_query": query, "expanded_query": absquery,
"expanded_query_type": "dataobject"})
if verbose:
print_debug("Argument \"{}\" is a data object.".format(query))
else:
print_error(
"Query \"{}\" could not be resolved. Ignoring ... ".format(query))
return results
| 15,317
|
def qsnorm(p):
"""
rational approximation for x where q(x)=d, q being the cumulative
normal distribution function. taken from Abramowitz & Stegun p. 933
|error(x)| < 4.5*10**-4
"""
d = p
if d < 0. or d > 1.:
        print('d not in (0,1) ')
sys.exit()
x = 0.
if (d - 0.5) > 0:
d = 1. - d
if (d - 0.5) < 0:
t2 = -2. * np.log(d)
t = np.sqrt(t2)
x = t - old_div((2.515517 + .802853 * t + .010328 * t2),
(1. + 1.432788 * t + .189269 * t2 + .001308 * t * t2))
if p < 0.5:
x = -x
return x
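
# Hedged quick check of the Abramowitz & Stegun approximation: the 97.5th percentile of
# the standard normal distribution is about 1.959964, and the approximation error is
# documented above as below 4.5e-4.
assert abs(qsnorm(0.975) - 1.959964) < 1e-3
assert abs(qsnorm(0.025) + 1.959964) < 1e-3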
| 15,318
|
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
    user = User.query.filter(User.name == username, User.password_hash == encrypt_password(password)).first()
    if user is None:
        return False
    else:
        return True
| 15,319
|
def redirect_success():
"""Save complete jsPsych dataset to disk."""
if request.is_json:
## Retrieve jsPsych data.
JSON = request.get_json()
        ## Save jsPsych data to disk.
write_data(session, JSON, method='pass')
## Flag experiment as complete.
session['complete'] = 'success'
write_metadata(session, ['complete','code_success'], 'a')
## DEV NOTE:
## This function returns the HTTP response status code: 200
## Code 200 signifies the POST request has succeeded.
## The corresponding jsPsych function handles the redirect.
## For a full list of status codes, see:
## https://developer.mozilla.org/en-US/docs/Web/HTTP/Status
return ('', 200)
| 15,320
|
def fill_region(compound, n_compounds, region, overlap=0.2,
seed=12345, edge=0.2, temp_file=None):
"""Fill a region of a box with a compound using packmol.
Parameters
----------
compound : mb.Compound or list of mb.Compound
Compound or list of compounds to be put in region.
n_compounds : int or list of int
Number of compounds to be put in region.
region : mb.Box or list of mb.Box
Region to be filled by compounds.
overlap : float, units nm, default=0.2
Minimum separation between atoms of different molecules.
seed : int, default=12345
Random seed to be passed to PACKMOL.
edge : float, units nm, default=0.2
Buffer at the edge of the region to not place molecules. This is
necessary in some systems because PACKMOL does not account for
periodic boundary conditions in its optimization.
temp_file : str, default=None
File name to write PACKMOL's raw output to.
Returns
-------
    filled : mb.Compound
        If using multiple regions and compounds, the nth value in each list is used, in order.
        For example, the third compound will be put in the third region using the third value in n_compounds.
"""
_check_packmol(PACKMOL)
if not isinstance(compound, (list, set)):
compound = [compound]
if not isinstance(n_compounds, (list, set)):
n_compounds = [n_compounds]
if compound is not None and n_compounds is not None:
if len(compound) != len(n_compounds):
msg = ("`compound` and `n_compounds` must be of equal length.")
raise ValueError(msg)
# See if region is a single region or list
if isinstance(region, Box): # Cannot iterate over boxes
region = [region]
elif not any(isinstance(reg, (list, set, Box)) for reg in region):
region = [region]
region = [_validate_box(reg) for reg in region]
# In angstroms for packmol.
overlap *= 10
# Build the input file and call packmol.
filled_pdb = tempfile.mkstemp(suffix='.pdb')[1]
input_text = PACKMOL_HEADER.format(overlap, filled_pdb, seed)
for comp, m_compounds, reg in zip(compound, n_compounds, region):
m_compounds = int(m_compounds)
compound_pdb = tempfile.mkstemp(suffix='.pdb')[1]
comp.save(compound_pdb, overwrite=True)
reg_mins = reg.mins * 10
reg_maxs = reg.maxs * 10
reg_maxs -= edge * 10 # Apply edge buffer
input_text += PACKMOL_BOX.format(compound_pdb, m_compounds,
reg_mins[0], reg_mins[1], reg_mins[2],
reg_maxs[0], reg_maxs[1], reg_maxs[2])
_run_packmol(input_text, filled_pdb, temp_file)
# Create the topology and update the coordinates.
filled = Compound()
for comp, m_compounds in zip(compound, n_compounds):
for _ in range(m_compounds):
filled.add(clone(comp))
filled.update_coordinates(filled_pdb)
return filled
| 15,321
|
def morris_traversal(root):
"""
    Morris (in-order) traversal is a tree traversal algorithm that does not employ
the use of recursion or a stack. In this traversal, links are created as
successors and nodes are printed using these links.
Finally, the changes are reverted back to restore the original tree.
root = Node(4)
temp = root
temp.left = Node(2)
temp.right = Node(8)
temp = temp.left
temp.left = Node(1)
temp.right = Node(5)
"""
inorder_traversal = []
# set current to root of binary tree
current = root
while current is not None:
if current.left is None:
inorder_traversal.append(current.data)
current = current.right
else:
# find the previous (prev) of curr
previous = current.left
while previous.right is not None and previous.right != current:
previous = previous.right
# make curr as right child of its prev
if previous.right is None:
previous.right = current
current = current.left
            # fix the right child of prev
else:
previous.right = None
inorder_traversal.append(current.data)
current = current.right
return inorder_traversal
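
# Hedged usage sketch using a minimal Node class (assumed here; the surrounding project
# may define its own) and the example tree from the docstring above.
class _Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

_root = _Node(4)
_root.left = _Node(2)
_root.right = _Node(8)
_root.left.left = _Node(1)
_root.left.right = _Node(5)
assert morris_traversal(_root) == [1, 2, 5, 4, 8]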
| 15,322
|
def validateILine(lineno, line, prevline, filename):
""" Checks all lines that start with 'i' and raises an exepction if
the line is malformed.
i lines are made up of five fields after the 'i':
From http://genome.ucsc.edu/FAQ/FAQformat#format5 :
src -- The name of the source sequence for the alignment.
Should be the same as the 's' line immediately above this line.
leftStatus -- A character that specifies the relationship between the
sequence in this block and the sequence that appears in
the previous block.
leftCount -- Usually the number of bases in the aligning species between
the start of this alignment and the end of the previous one.
rightStatus -- A character that specifies the relationship between the
sequence in this block and the sequence that appears in
the subsequent block.
rightCount -- Usually the number of bases in the aligning species between
the end of this alignment and the start of the next one.
The status characters can be one of the following values:
C -- the sequence before or after is contiguous with this block.
I -- there are bases between the bases in this block and the one before or after it.
N -- this is the first sequence from this src chrom or scaffold.
n -- this is the first sequence from this src chrom or scaffold but it is
bridged by another alignment from a different chrom or scaffold.
M -- there is missing data before or after this block (Ns in the sequence).
T -- the sequence in this block has been used before in a previous block (likely a tandem duplication)
"""
d = line.split()
p = prevline.split()
if len(d) != 6:
raise ILineFormatError('maf %s contains an "i" line that has too many fields on line number %d: '
'%s' % (filename, lineno, line))
if p[0] != 's':
raise ILineFormatError('maf %s contains an "i" line that does not follow an "s" line on line number %d: '
'%s' % (filename, lineno, line))
for i in [3, 5]:
try:
n = int(d[i])
except ValueError:
raise ILineFormatError('maf %s contains an "i" line that has non integer Count "%s" on line number %d: '
'%s' % (filename, d[i], lineno, line))
if int(d[i]) < 0:
raise ILineFormatError('maf %s contains an "i" line that has negative Count "%s" on line number %d: '
'%s' % (filename, d[i], lineno, line))
for i in [2, 4]:
if d[i] not in ['C', 'I', 'N', 'n', 'M', 'T']:
raise ILineFormatError('maf %s contains an "i" line with an invalid Status "%s" on line number %d: '
'%s' % (filename, d[i], lineno, line))
if d[i] == 'I' and int(d[i + 1]) < 1:
raise ILineFormatError('maf %s contains an "i" line with an invalid Count "%s" on line number %d: '
'%s' % (filename, d[i + 1], lineno, line))
if p[1] != d[1]:
raise ILineFormatError('maf %s contains an "i" line with a different src value "%s" than on the previous '
'"s" line "%s" on line number %d: '
'%s' % (filename, d[1], p[1], lineno, line))
| 15,323
|
def normalize_matrix_rows(A):
"""
Normalize the rows of an array.
:param A: An array.
:return: Array with rows normalized.
"""
return A / np.linalg.norm(A, axis=1)[:, None]
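
# Hedged quick check: after normalization every row should have unit Euclidean norm.
import numpy as np

_A = np.array([[3.0, 4.0], [1.0, 1.0]])
assert np.allclose(np.linalg.norm(normalize_matrix_rows(_A), axis=1), 1.0)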
| 15,324
|
def UADDW(cpu_context: ProcessorContext, instruction: Instruction):
"""Unsigned add wide (vector form)"""
logger.debug("%s instruction not currently implemented.", instruction.mnem)
| 15,325
|
def clear_cache() -> int:
"""
    Clear the local cache of formats, menus and other resources
    read from the server.
    :return: return code
"""
return IC_clearresourse()
| 15,326
|
def plot_inner_iterations(err):
"""
Auxiliary function for plotting inner iterations error.
"""
plt.yscale('log') #logarithmic scale for y axis
plt.plot(np.arange(err.size),err,'.-')
    plt.ylabel("Log relative error: $f_o(x^k)$ and $p^*$", size=12)
plt.xlabel("Inner iterations",size=12)
plt.grid()
plt.show()
| 15,327
|
def get_files_by_ymd(dir_path, time_start, time_end, ext=None, pattern_ymd=None):
"""
    :param dir_path: directory to search
    :param time_start: start time (e.g. 'YYYYMMDD')
    :param time_end: end time (e.g. 'YYYYMMDD')
    :param ext: file extension, e.g. '.hdf5'
    :param pattern_ymd: regex pattern used to extract the timestamp, e.g. r".*(\d{8})_(\d{4})_"
    :return: list
"""
files_found = []
if pattern_ymd is not None:
pattern = pattern_ymd
else:
pattern = r".*(\d{8})"
for root, dirs, files in os.walk(dir_path):
for file_name in files:
if ext is not None:
if '.' not in ext:
ext = '.' + ext
if os.path.splitext(file_name)[1].lower() != ext.lower():
continue
re_result = re.match(pattern, file_name)
if re_result is not None:
time_file = ''.join(re_result.groups())
else:
continue
if int(time_start) <= int(time_file) <= int(time_end):
files_found.append(os.path.join(root, file_name))
files_found.sort()
return files_found
| 15,328
|
def transform(shiftX=0.0, shiftY=0.0, rotate=0.0, skew=0.0, scale=1.0):
"""
Returns an NSAffineTransform object for transforming layers.
Apply an NSAffineTransform t object like this:
Layer.transform_checkForSelection_doComponents_(t,False,True)
Access its transformation matrix like this:
tMatrix = t.transformStruct() # returns the 6-float tuple
Apply the matrix tuple like this:
Layer.applyTransform(tMatrix)
Component.applyTransform(tMatrix)
Path.applyTransform(tMatrix)
Chain multiple NSAffineTransform objects t1, t2 like this:
t1.appendTransform_(t2)
"""
myTransform = NSAffineTransform.transform()
if rotate:
myTransform.rotateByDegrees_(rotate)
if scale != 1.0:
myTransform.scaleBy_(scale)
if not (shiftX == 0.0 and shiftY == 0.0):
myTransform.translateXBy_yBy_(shiftX,shiftY)
if skew:
skewStruct = NSAffineTransformStruct()
skewStruct.m11 = 1.0
skewStruct.m22 = 1.0
skewStruct.m21 = tan(radians(skew))
skewTransform = NSAffineTransform.transform()
skewTransform.setTransformStruct_(skewStruct)
myTransform.appendTransform_(skewTransform)
return myTransform
| 15,329
|
def test_create_order(function_arn, table_name, order_request):
"""
Test the CreateOrder function
"""
order_request = copy.deepcopy(order_request)
table = boto3.resource("dynamodb").Table(table_name) #pylint: disable=no-member
lambda_ = boto3.client("lambda")
# Trigger the function
response = lambda_.invoke(
FunctionName=function_arn,
InvocationType="RequestResponse",
Payload=json.dumps(order_request).encode()
)
response = json.load(response["Payload"])
print(response)
# Check the output of the Function
assert response["success"] == True
assert "order" in response
assert len(response.get("errors", [])) == 0
del order_request["order"]["products"]
compare_dict(order_request["order"], response["order"])
assert response["order"]["userId"] == order_request["userId"]
# Check the table
ddb_response = table.get_item(Key={"orderId": response["order"]["orderId"]})
assert "Item" in ddb_response
mandatory_fields = [
"orderId", "userId", "createdDate", "modifiedDate", "status",
"products", "address", "deliveryPrice", "total"
]
for field in mandatory_fields:
assert field in ddb_response["Item"]
assert ddb_response["Item"]["status"] == "NEW"
compare_dict(order_request["order"], ddb_response["Item"])
# Cleanup the table
table.delete_item(Key={"orderId": response["order"]["orderId"]})
| 15,330
|
def get_entity(text, tokens):
"""获取ner结果
"""
# 如果text长度小于规定的max_len长度,则只保留text长度的tokens
text_len = len(text)
tokens = tokens[:text_len]
entities = []
entity = ""
for idx, char, token in zip(range(text_len), text, tokens):
if token.startswith("O") or token.startswith(app.model_configs["tag_padding"]):
token_prefix = token
token_suffix = None
else:
token_prefix, token_suffix = token.split("-")
if token_prefix == "S":
entities.append([token_suffix, char])
entity = ""
elif token_prefix == "B":
if entity != "":
entities.append([tokens[idx-1].split("-")[-1], entity])
entity = ""
else:
entity += char
elif token_prefix == "I":
if entity != "":
entity += char
else:
entity = ""
else:
if entity != "":
entities.append([tokens[idx-1].split("-")[-1], entity])
entity = ""
else:
continue
return entities
| 15,331
|
def new_func(message):
"""
new func
:param message:
:return:
"""
def get_message(message):
"""
get message
:param message:
:return:
"""
print('Got a message:{}'.format(message))
return get_message(message)
| 15,332
|
def run_cmd(cmd: Union[list, str], capture_output: bool = False) -> None:
"""Run a shell command.
:param cmd: The command to run.
:param capture_output: Whether to capture stdout and stderr of the command.
"""
proc = sp.run(
cmd, shell=isinstance(cmd, str), check=True, capture_output=capture_output
)
logging.debug(proc.args)
| 15,333
|
async def test_new_users_available(hass):
"""Test setting up when new users available on Plex server."""
MONITORED_USERS = {"Owner": {"enabled": True}}
OPTIONS_WITH_USERS = copy.deepcopy(DEFAULT_OPTIONS)
OPTIONS_WITH_USERS[MP_DOMAIN][CONF_MONITORED_USERS] = MONITORED_USERS
entry = MockConfigEntry(
domain=DOMAIN,
data=DEFAULT_DATA,
options=OPTIONS_WITH_USERS,
unique_id=DEFAULT_DATA["server_id"],
)
mock_plex_server = MockPlexServer(config_entry=entry)
with patch("plexapi.server.PlexServer", return_value=mock_plex_server), patch(
"homeassistant.components.plex.PlexWebsocket.listen"
):
entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
server_id = mock_plex_server.machineIdentifier
async_dispatcher_send(hass, PLEX_UPDATE_PLATFORMS_SIGNAL.format(server_id))
await hass.async_block_till_done()
monitored_users = hass.data[DOMAIN][SERVERS][server_id].option_monitored_users
ignored_users = [x for x in monitored_users if not monitored_users[x]["enabled"]]
assert len(monitored_users) == 1
assert len(ignored_users) == 0
sensor = hass.states.get("sensor.plex_plex_server_1")
assert sensor.state == str(len(mock_plex_server.accounts))
| 15,334
|
def process_midi(raw_mid, max_seq, random_seq, condition_token=False, interval = False, octave = False, fusion=False, absolute=False, logscale=False, label = 0):
"""
----------
Author: Damon Gwinn
----------
Takes in pre-processed raw midi and returns the input and target. Can use a random sequence or
go from the start based on random_seq.
----------
"""
if interval and octave:
x = torch.full((max_seq, ), TOKEN_PAD_OCTAVE_INTERVAL, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq, ), TOKEN_PAD_OCTAVE_INTERVAL, dtype=TORCH_LABEL_TYPE, device=cpu_device())
elif interval and not octave:
x = torch.full((max_seq, ), TOKEN_PAD_INTERVAL, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq, ), TOKEN_PAD_INTERVAL, dtype=TORCH_LABEL_TYPE, device=cpu_device())
elif octave and fusion and absolute:
x = torch.full((max_seq,), TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq,), TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE, dtype=TORCH_LABEL_TYPE, device=cpu_device())
elif octave and fusion:
x = torch.full((max_seq,), TOKEN_PAD_OCTAVE_FUSION, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq,), TOKEN_PAD_OCTAVE_FUSION, dtype=TORCH_LABEL_TYPE, device=cpu_device())
elif not interval and octave:
x = torch.full((max_seq, ), TOKEN_PAD_OCTAVE, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq, ), TOKEN_PAD_OCTAVE, dtype=TORCH_LABEL_TYPE, device=cpu_device())
elif logscale:
x = torch.full((max_seq, ), TOKEN_PAD_RELATIVE, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq, ), TOKEN_PAD_RELATIVE, dtype=TORCH_LABEL_TYPE, device=cpu_device())
else:
x = torch.full((max_seq, ), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq, ), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=cpu_device())
raw_len = len(raw_mid)
full_seq = max_seq + 1 # Performing seq2seq
if(raw_len == 0):
return x, tgt
if(raw_len < full_seq):
if interval and logscale and absolute:
start_pitch = -1
last_pitch = -1
data_temp = numpy.array([])
for token in raw_mid:
token_cpu = token.cpu().detach().numpy()
if token_cpu in range(128, 128+255):
if start_pitch == -1:
start_pitch = token_cpu - 127
last_pitch = token_cpu -127
token_cpu = 127
                        data_temp = numpy.append(start_pitch, data_temp)  # prepend the absolute-pitch token
else:
token_cpu = (token_cpu-last_pitch)+127
last_pitch = last_pitch + token_cpu - 127
data_temp = numpy.append(data_temp, token_cpu)
else:
data_temp = numpy.append(data_temp, token_cpu)
raw_mid = torch.tensor(data_temp[:], dtype=TORCH_LABEL_TYPE, device=cpu_device())
x[:raw_len] = raw_mid
tgt[:raw_len-1] = raw_mid[1:]
if interval and octave:
tgt[raw_len] = TOKEN_END_OCTAVE_INTERVAL
elif interval and not octave:
tgt[raw_len] = TOKEN_END_INTERVAL
elif octave and fusion and absolute:
tgt[raw_len] = TOKEN_END_OCTAVE_FUSION_ABSOLUTE
elif octave and fusion:
tgt[raw_len] = TOKEN_END_OCTAVE_FUSION
elif not interval and octave:
tgt[raw_len] = TOKEN_END_OCTAVE
elif logscale:
tgt[raw_len] = TOKEN_END_RELATIVE
else:
tgt[raw_len] = TOKEN_END
else:
# Randomly selecting a range
if(random_seq):
end_range = raw_len - full_seq
start = random.randint(SEQUENCE_START, end_range)
# Always taking from the start to as far as we can
else:
start = SEQUENCE_START
end = start + full_seq
data = raw_mid[start:end]
        # convert absolute pitches into interval tokens
if interval and logscale and absolute:
start_pitch = -1
last_pitch = -1
data_temp = numpy.array([])
for token in data:
token_cpu = token.cpu().detach().numpy()
if token_cpu in range(128, 128+255):
if start_pitch == -1:
start_pitch = token_cpu - 127
last_pitch = token_cpu -127
token_cpu = 127
                        data_temp = numpy.append(start_pitch, data_temp)  # prepend the absolute-pitch token
else:
token_cpu = (token_cpu-last_pitch)+127
last_pitch = last_pitch + token_cpu - 127
data_temp = numpy.append(data_temp, token_cpu)
else:
data_temp = numpy.append(data_temp, token_cpu)
data = torch.tensor(data_temp, dtype=TORCH_LABEL_TYPE, device=cpu_device())
        # if condition_token is True, prepend a condition token according to the label
if condition_token:
if label == 0:
data = torch.tensor(CONDITION_CLASSIC) + raw_mid[start:end]
elif label == 1:
data = torch.tensor(CONDITION_POP) + raw_mid[start:end]
x = data[:max_seq]
tgt = data[1:full_seq]
# print("x:",x)
# print("tgt:",tgt)
return x, tgt
| 15,335
|
async def test_secure_inner_join(
alice: DatabaseOwner, bob: DatabaseOwner, henri: Helper
) -> None:
"""
Tests entire protocol
:param alice: first database owner
:param bob: second database owner
:param henri: helper party
"""
await asyncio.gather(
*[alice.run_protocol(), bob.run_protocol(), henri.run_protocol()]
)
correct_outcome = np.array(
[
[
2,
12.5,
5,
10,
],
[
-1,
31.232,
40,
8,
],
[
3,
23.11,
30,
1,
],
]
)
actual_outcome = alice.shares + bob.shares
np.testing.assert_array_equal(
actual_outcome[np.argsort(actual_outcome[:, 0]), :],
correct_outcome[np.argsort(correct_outcome[:, 0]), :],
)
| 15,336
|
def _rack_models():
"""
Models list (for racks)
"""
models = list(Rack.objects. \
values_list('rack_model', flat=True).distinct())
models.sort()
return models
| 15,337
|
def confidence_ellipse(cov, means, ax, n_std=3.0, facecolor='none', **kwargs):
"""
Create a plot of the covariance confidence ellipse of *x* and *y*.
Parameters
----------
cov : array-like, shape (2, 2)
Covariance matrix
means: array-like, shape (2, )
Means array
ax : matplotlib.axes.Axes
The axes object to draw the ellipse into.
n_std : float
        The number of standard deviations to determine the ellipse's radii.
Returns
-------
matplotlib.patches.Ellipse
Other parameters
----------------
kwargs : `~matplotlib.patches.Patch` properties
"""
pearson = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
    # Using a special case to obtain the eigenvalues of this
    # two-dimensional dataset.
ell_radius_x = np.sqrt(1 + pearson)
ell_radius_y = np.sqrt(1 - pearson)
ellipse = Ellipse((0, 0),
width=ell_radius_x * 2,
height=ell_radius_y * 2,
facecolor=facecolor,
**kwargs)
    # Calculating the standard deviation of x from
    # the square root of the variance and multiplying
    # by the given number of standard deviations.
scale_x = np.sqrt(cov[0, 0]) * n_std
mean_x = means[0]
    # calculating the standard deviation of y ...
scale_y = np.sqrt(cov[1, 1]) * n_std
mean_y = means[1]
transf = transforms.Affine2D() \
.rotate_deg(45) \
.scale(scale_x, scale_y) \
.translate(mean_x, mean_y)
ellipse.set_transform(transf + ax.transData)
return ax.add_patch(ellipse)
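
# Hedged usage sketch: draw a 2-sigma ellipse for an illustrative covariance matrix and
# mean on a fresh matplotlib Axes; the edgecolor keyword is passed through to the patch.
import numpy as np
import matplotlib.pyplot as plt

_cov = np.array([[2.0, 0.8],
                 [0.8, 1.0]])
_means = np.array([1.0, -0.5])
_fig, _ax = plt.subplots()
confidence_ellipse(_cov, _means, _ax, n_std=2.0, edgecolor="tab:blue")
_ax.set_xlim(-5, 7)
_ax.set_ylim(-4, 3)
# plt.show()  # uncomment to display interactively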
| 15,338
|
def catalogResolveURI(URI):
"""Do a complete resolution lookup of an URI """
ret = libxml2mod.xmlCatalogResolveURI(URI)
return ret
| 15,339
|
def high_low_difference(dataframe: pd.DataFrame, scale: float = 1.0, constant: float = 0.0) -> pd.DataFrame:
"""
Returns an allocation based on the difference in high and low values. This has been added as an
example with multiple series and parameters.
parameters:
scale: determines amplitude factor.
constant: scalar value added to the allocation size.
"""
dataframe[PandasEnum.ALLOCATION.value] = (dataframe["high"] - dataframe["low"]) * scale + constant
return dataframe
| 15,340
|
def e_x(x, terms=10):
"""Approximates e^x using a given number of terms of
the Maclaurin series
"""
n = np.arange(terms)
return np.sum((x ** n) / fac(n))
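
# Hedged quick check: with enough terms the truncated Maclaurin series agrees closely
# with numpy's exp for moderate x (fac is assumed to be an array-aware factorial such
# as scipy.special.factorial).
import numpy as np

assert abs(e_x(1.0, terms=15) - np.e) < 1e-8
assert abs(e_x(2.0, terms=20) - np.exp(2.0)) < 1e-8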
| 15,341
|
async def parser(html: str) -> list:
"""解析页面
Args:
html (str): 返回页面的源码
Returns:
list: 最先的3个搜图结果(不满3个则返回所有,没有结果则返回str)
"""
if "No hits found" in html:
return "没有找到符合的本子!"
soup = BeautifulSoup(html, "lxml").find_all("table", class_="itg gltc")[0].contents
all_list = []
for index, item in enumerate(soup):
if index == 0:
continue
elif index > 3:
break
imdata = {
"type": item.find("div", class_=re.compile(r"cn ct\d")).string,
"title": item.find("div", class_="glink").string,
"link": item.find("td", class_="gl3c glname").contents[0].attrs["href"],
"page_count": item.find("td", class_="gl4c glhide").contents[1].string,
"im_seq": "",
}
imdata["im_seq"] = await dl_image(imdata["link"])
all_list.append(imdata)
return all_list
| 15,342
|
def set_namespace_root(namespace):
"""
Stores the GO ID for the root of the selected namespace.
Parameters
----------
namespace : str
A string containing the desired namespace. E.g. biological_process, cellular_component
or molecular_function.
Returns
-------
list
The list of GO ID's of the root terms of the selected namespace.
"""
if namespace == 'biological_process':
namespace_list = ['GO:0008150']
elif namespace == 'cellular_component':
namespace_list = ['GO:0005575']
elif namespace == 'molecular_function':
namespace_list = ['GO:0003674']
else:
namespace_list = ['GO:0008150', 'GO:0005575', 'GO:0003674']
return namespace_list
| 15,343
|
def get_commit(oid):
"""
get commit by oid
"""
parents = []
commit = data.get_object(oid, 'commit').decode()
lines = iter(commit.splitlines())
for line in itertools.takewhile(operator.truth, lines):
key, value = line.split(' ', 1)
if key == 'tree':
tree = value
elif key == 'parent':
            parents.append(value)
else:
assert False, f'Unknown field {key}'
message = '\n'.join(lines)
return Commit(tree=tree, parents=parents, message=message)
| 15,344
|
def _gaussian_dilated_conv2d_oneLearned(x, kernel_size, num_o, dilation_factor, name, top_scope, biased=False):
"""
Dilated conv2d with antecedent gaussian filter and without BN or relu.
"""
num_x = x.shape[3].value
filter_size = dilation_factor - 1
sigma = _get_sigma(top_scope)
# create kernel grid
ax = np.arange(-filter_size // 2 + 1., filter_size // 2 + 1.)
xx, yy = np.meshgrid(ax, ax)
kernel = np.exp(-(xx**2 + yy**2))
mask = np.zeros([filter_size,filter_size, 1, 1, 1], dtype=np.float32)
mask[:, :, 0, 0, 0] = kernel
w_gauss_value = tf.Variable(tf.constant(0.0,
shape=[filter_size,filter_size, 1,1,1]), name='w_gauss_value',trainable=False)
# create gaussian filter
w_gauss_value = tf.add(w_gauss_value, tf.constant(mask, dtype=tf.float32))
w_gauss_value = tf.div(w_gauss_value, tf.exp(2.0 * sigma**2))
w_gauss_value = tf.div(w_gauss_value, tf.reduce_sum(w_gauss_value))
# perform separable convolution
o_gauss = tf.expand_dims(x, -1)
o_gauss = tf.nn.conv3d(o_gauss, w_gauss_value, strides=[1,1,1,1,1], padding='SAME')
o_gauss = tf.squeeze(o_gauss, -1)
with tf.variable_scope(name) as scope:
# perform dilated convolution
w = tf.get_variable('weights', shape=[kernel_size, kernel_size, num_x, num_o])
o = tf.nn.atrous_conv2d(o_gauss, w, dilation_factor, padding='SAME')
if biased:
b = tf.get_variable('biases', shape=[num_o])
o = tf.nn.bias_add(o, b)
return o
| 15,345
|
def get_cos_similarity(hy_vec, ref_vec):
"""
measure similarity from two vec
"""
return (1 - spatial.distance.cosine(hy_vec, ref_vec))
| 15,346
|
def addDepthDimension (ds):
"""
Create depth coordinate
Parameters
----------
ds : xarray DataSet
OOI Profiler mooring data for one profiler
Returns
-------
ds : xarray DataSet
dataset with iDEPTH coordinate set as a dimension
"""
if ( 'prof_depth' not in ds ):
raise TypeError('Couldn\'t find prof_depth data variable')
if ( 'actual_range' not in ds.prof_depth.attrs ):
raise TypeError('Couldn\'t find prof_depth range attribute')
iDEPTH = arange(max(abs(ds.prof_depth.attrs['actual_range'])) + 1)
return ds.expand_dims({"iDEPTH":iDEPTH})
| 15,347
|
def divide_dataset_by_dataarray(ds, dr, varlist=None):
"""
Divides variables in an xarray Dataset object by a single DataArray
object. Will also make sure that the Dataset variable attributes
are preserved.
This method can be useful for certain types of model diagnostics
that have to be divided by a counter array. For example, local
noontime J-value variables in a Dataset can be divided by the
fraction of time it was local noon in each grid box, etc.
Args:
-----
ds: xarray Dataset
The Dataset object containing variables to be divided.
dr: xarray DataArray
The DataArray object that will be used to divide the
variables of ds.
Keyword Args (optional):
------------------------
varlist: list of str
If passed, then only those variables of ds that are listed
in varlist will be divided by dr. Otherwise, all variables
of ds will be divided by dr.
Returns:
--------
ds_new : xarray Dataset
A new xarray Dataset object with its variables divided by dr.
"""
# -----------------------------
# Check arguments
# -----------------------------
if not isinstance(ds, xr.Dataset):
raise TypeError("The ds argument must be of type xarray.Dataset!")
if not isinstance(dr, xr.DataArray):
raise TypeError("The dr argument must be of type xarray.DataArray!")
if varlist is None:
varlist = ds.data_vars.keys()
# -----------------------------
# Do the division
# -----------------------------
# Keep all Dataset attributes
with xr.set_options(keep_attrs=True):
# Loop over variables
for v in varlist:
# Divide each variable of ds by dr
ds[v] = ds[v] / dr
return ds
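
# Hedged usage sketch with a tiny synthetic Dataset and a matching counter DataArray;
# the variable names are illustrative only. Restricting varlist leaves the other
# variable untouched.
import numpy as np
import xarray as xr

_ds = xr.Dataset(
    {
        "JvalNoon_O3": ("lat", np.array([2.0, 4.0, 6.0])),
        "JvalNoon_NO2": ("lat", np.array([1.0, 3.0, 5.0])),
    },
    coords={"lat": [0, 1, 2]},
)
_counter = xr.DataArray(np.array([1.0, 2.0, 2.0]), dims="lat", coords={"lat": [0, 1, 2]})
_out = divide_dataset_by_dataarray(_ds, _counter, varlist=["JvalNoon_O3"])
# _out["JvalNoon_O3"].values -> array([2., 2., 3.]); "JvalNoon_NO2" is unchanged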
| 15,348
|
async def connect(bot: DonLee_Robot_V2, update):
"""
A Funtion To Handle Incoming /add Command TO COnnect A Chat With Group
"""
chat_id = update.chat.id
user_id = update.from_user.id if update.from_user else None
target_chat = update.text.split(None, 1)
global VERIFY
if VERIFY.get(str(chat_id)) == None: # Make Admin's ID List
admin_list = []
async for x in bot.iter_chat_members(chat_id=chat_id, filter="administrators"):
admin_id = x.user.id
admin_list.append(admin_id)
admin_list.append(None)
VERIFY[str(chat_id)] = admin_list
if not user_id in VERIFY.get(str(chat_id)):
return
try:
if target_chat[1].startswith("@"):
if len(target_chat[1]) < 5:
await update.reply_text("𝖨𝗇𝗏𝖺𝗅𝗂𝖽 𝖴𝗌𝖾𝗋𝗇𝖺𝗆𝖾...!!!")
return
target = target_chat[1]
elif not target_chat[1].startswith("@"):
if len(target_chat[1]) < 14:
await update.reply_text("𝖨𝗇𝗏𝖺𝗅𝗂𝖽 𝖢𝗁𝖺𝗍 𝖨𝖽...\n𝖢𝗁𝖺𝗍 𝖨𝖣 𝖲𝗁𝗈𝗎𝗅𝖽 𝖡𝖾 𝖲𝗈𝗆𝖾𝗍𝗁𝗂𝗇𝗀 𝖫𝗂𝗄𝖾 𝖳𝗁𝗂𝗌: <code>-100xxxxxxxxxx</code>")
return
target = int(target_chat[1])
except Exception:
await update.reply_text("𝖨𝗇𝗏𝖺𝗅𝗂𝖽 𝖨𝗇𝗉𝗎𝗍...\n𝖸𝗈𝗎 𝖲𝗁𝗈𝗎𝗅𝖽 𝖲𝗉𝖾𝖼𝗂𝖿𝗒 𝖵𝖺𝗅𝗂𝖽 <code>chat_id(-100xxxxxxxxxx)</code> or <code>@username</code>")
return
# Exports invite link from target channel for user to join
try:
join_link = await bot.export_chat_invite_link(target)
join_link = join_link.replace('+', 'joinchat/')
except Exception as e:
logger.exception(e, exc_info=True)
await update.reply_text(f"Make Sure Im Admin At <code>{target}</code> And Have Permission For <i>Inviting Users via Link</i> And Try Again.....!!!\n\n<i><b>Error Logged:</b></i> <code>{e}</code>", parse_mode='html')
return
userbot_info = await bot.USER.get_me()
# Joins to targeted chat using above exported invite link
    # If already joined, the code just passes on to the next step
try:
await bot.USER.join_chat(join_link)
except UserAlreadyParticipant:
pass
except Exception as e:
logger.exception(e, exc_info=True)
await update.reply_text(f"{userbot_info.mention} Couldnt Join The Channel <code>{target}</code> Make Sure Userbot Is Not Banned There Or Add It Manually And Try Again....!!\n\n<i><b>Error Logged:</b></i> <code>{e}</code>", parse_mode='html')
return
try:
c_chat = await bot.get_chat(target)
channel_id = c_chat.id
channel_name = c_chat.title
except Exception as e:
await update.reply_text("𝖤𝗇𝖼𝗈𝗎𝗇𝗍𝖾𝗋𝖾𝖽 𝖲𝗈𝗆𝖾 𝖨𝗌𝗌𝗎𝖾..𝖯𝗅𝖾𝖺𝗌𝖾 𝖢𝗁𝖾𝖼𝗄 𝖫𝗈𝗀𝗌..!!")
raise e
in_db = await db.in_db(chat_id, channel_id)
if in_db:
await update.reply_text("𝖢𝗁𝖺𝗇𝗇𝖾𝗅 𝖠𝗅𝖽𝗋𝖾𝖺𝖽𝗒 𝖨𝗇 𝖣𝖻...!!!")
return
wait_msg = await update.reply_text("𝖯𝗅𝖾𝖺𝗌𝖾 𝖶𝖺𝗂𝗍 𝖳𝗂𝗅𝗅 𝖨 𝖠𝖽𝖽 𝖠𝗅𝗅 𝖸𝗈𝗎𝗋 𝖥𝗂𝗅𝖾𝗌 𝖥𝗋𝗈𝗆 𝖢𝗁𝖺𝗇𝗇𝖾𝗅 𝖳𝗈 𝖣𝖻\n\n𝖳𝗁𝗂𝗌 𝖬𝖺𝗒 𝖳𝖺𝗄𝖾 10 𝗈𝗋 15 𝖬𝗂𝗇𝗌 𝖣𝖾𝗉𝖾𝗇𝖽𝗂𝗇𝗀 𝖮𝗇 𝖸𝗈𝗎𝗋 𝖭𝗈. 𝖮𝖿 𝖥𝗂𝗅𝖾𝗌 𝖨𝗇 𝖢𝗁𝖺𝗇𝗇𝖾𝗅.....\n\n𝖴𝗇𝗍𝗂𝗅 𝖳𝗁𝖾𝗇 𝖯𝗅𝖾𝖺𝗌𝖾 𝖣𝗈𝗇𝗍 𝖲𝖾𝗇𝗍 𝖠𝗇𝗒 𝖮𝗍𝗁𝖾𝗋 𝖢𝗈𝗆𝗆𝖺𝗇𝖽 𝖮𝗋 𝖳𝗁𝗂𝗌 𝖮𝗉𝖾𝗋𝖺𝗍𝗂𝗈𝗇 𝖬𝖺𝗒 𝖡𝖾 𝖨𝗇𝗍𝗋𝗎𝗉𝗍𝖾𝖽....")
try:
type_list = ["video", "audio", "document"]
data = []
skipCT = 0
for typ in type_list:
async for msgs in bot.USER.search_messages(channel_id,filter=typ): #Thanks To @PrgOfficial For Suggesting
# Using 'if elif' instead of 'or' to determine 'file_type'
# Better Way? Make A PR
try:
if msgs.video:
try:
file_id = await bot.get_messages(channel_id, message_ids=msgs.message_id)
except FloodWait as e:
                            await asyncio.sleep(e.x)
file_id = await bot.get_messages(channel_id, message_ids=msgs.message_id)
except Exception as e:
print(e)
continue
file_id = file_id.video.file_id
file_name = msgs.video.file_name[0:-4]
file_caption = msgs.caption if msgs.caption else ""
file_size = msgs.video.file_size
file_type = "video"
elif msgs.audio:
try:
file_id = await bot.get_messages(channel_id, message_ids=msgs.message_id)
except FloodWait as e:
                            await asyncio.sleep(e.x)
file_id = await bot.get_messages(channel_id, message_ids=msgs.message_id)
except Exception as e:
print(e)
continue
file_id = file_id.audio.file_id
file_name = msgs.audio.file_name[0:-4]
file_caption = msgs.caption if msgs.caption else ""
file_size = msgs.audio.file_size
file_type = "audio"
elif msgs.document:
try:
file_id = await bot.get_messages(channel_id, message_ids=msgs.message_id)
except FloodWait as e:
                            await asyncio.sleep(e.x)
file_id = await bot.get_messages(channel_id, message_ids=msgs.message_id)
except Exception as e:
print(str(e))
continue
file_id = file_id.document.file_id
file_name = msgs.document.file_name[0:-4]
file_caption = msgs.caption if msgs.caption else ""
file_size = msgs.document.file_size
file_type = "document"
for i in ["_", "|", "-", "."]: # Work Around
try:
file_name = file_name.replace(i, " ")
except Exception:
pass
file_link = msgs.link
group_id = chat_id
unique_id = ''.join(
random.choice(
string.ascii_lowercase +
string.ascii_uppercase +
string.digits
) for _ in range(15)
)
dicted = dict(
file_id=file_id, # Done
unique_id=unique_id,
file_name=file_name,
file_caption=file_caption,
file_size=file_size,
file_type=file_type,
file_link=file_link,
chat_id=channel_id,
group_id=group_id,
)
data.append(dicted)
except Exception as e:
if 'NoneType' in str(e): # For Some Unknown Reason Some File Names are NoneType
skipCT +=1
continue
print(e)
print(f"{skipCT} Files Been Skipped Due To File Name Been None..... #BlameTG")
except Exception as e:
await wait_msg.edit_text("Couldnt Fetch Files From Channel... Please look Into Logs For More Details")
raise e
await db.add_filters(data)
await db.add_chat(chat_id, channel_id, channel_name)
await recacher(chat_id, True, True, bot, update)
await wait_msg.edit_text(f"Channel Was Sucessfully Added With <code>{len(data)}</code> Files..")
| 15,349
|
def count_words(my_str):
"""
    Count the number of words in a sentence using str.split().
INPUT - This is testing program
OUTPUT - 4
"""
my_str_list = my_str.split(" ")
return len(my_str_list)
| 15,350
|
def suggest_create():
"""Create a suggestion for a resource."""
descriptors = Descriptor.query.all()
for descriptor in descriptors:
if descriptor.is_option_descriptor and \
descriptor.name != 'supercategories':
choices = [(str(i), v) for i, v in enumerate(descriptor.values)]
if descriptor.name == 'city':
setattr(
ResourceSuggestionForm,
descriptor.name,
SelectField(choices=choices))
else:
setattr(
ResourceSuggestionForm,
descriptor.name,
SelectMultipleField(choices=choices))
for descriptor in descriptors:
if not descriptor.is_option_descriptor and \
descriptor.name != 'report count':
setattr(ResourceSuggestionForm, descriptor.name, TextAreaField())
# Add form fields asking for the suggester's name, email, and phone number.
# Dynamically added here so that form's fields are displayed in the
# correct order.
# setattr(ResourceSuggestionForm, 'contact_information',
# FormField(ContactInformationForm))
form = ResourceSuggestionForm()
if form.validate_on_submit():
resource_suggestion = ResourceSuggestion(
name=form.name.data,
# contact_name=form.contact_information.contact_name.data,
# contact_email=form.contact_information.contact_email.data,
# contact_phone_number=form.contact_information.contact_phone_number.
# data,
# additional_information=form.contact_information.
# additional_information.data,
submission_time=datetime.now(pytz.timezone('US/Eastern')))
if form.address.data:
resource_suggestion.address = form.address.data
save_associations(resource_suggestion, form, descriptors, False)
db.session.add(resource_suggestion)
try:
db.session.commit()
# app = create_app(os.getenv('FLASK_CONFIG') or 'default')
# contact_email = app.config['ADMIN_EMAIL']
# get_queue().enqueue(
# send_email,
# recipient=contact_email,
# subject='New Suggestion',
# template='suggestion/email/suggestion',
# # name=form.contact_name.data,
# # email=form.contact_email.data,
# # phone=form.contact_phone_number.data,
# # message=form.suggestion_text.data,
# resource_name=form.name.data,
# resource_address=form.address.data,
# )
flash('Thanks for the suggestion!', 'success')
return redirect(url_for('main.index'))
except IntegrityError:
db.session.rollback()
flash('Database error occurred. Please try again.', 'error')
return render_template('suggestion/suggest.html', form=form, name=None)
| 15,351
|
async def test_manual_update(hass: HomeAssistant) -> None:
"""Annual collection."""
config_entry: MockConfigEntry = MockConfigEntry(
domain=const.DOMAIN,
data={
"name": "test",
"frequency": "blank",
"manual_update": True,
},
title="blank",
version=4.5,
)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.blank")
assert sensor.state == "2"
assert sensor.attributes["days"] is None
assert sensor.attributes["next_date"] is None
await hass.services.async_call(
const.DOMAIN,
"add_date",
service_data={"entity_id": "sensor.blank", "date": date(2020, 4, 1)},
blocking=True,
)
await hass.services.async_call(
const.DOMAIN,
"add_date",
service_data={"entity_id": "sensor.blank", "date": date(2020, 4, 2)},
blocking=True,
)
await hass.services.async_call(
const.DOMAIN,
"update_state",
service_data={"entity_id": "sensor.blank"},
blocking=True,
)
entity: GarbageCollection = hass.data["garbage_collection"]["sensor"][
"sensor.blank"
]
assert entity.state == 0
assert entity.extra_state_attributes["days"] == 0
assert isinstance(entity.extra_state_attributes["next_date"], datetime)
assert entity.extra_state_attributes["next_date"].date() == date(2020, 4, 1)
await hass.services.async_call(
const.DOMAIN,
"remove_date",
service_data={"entity_id": "sensor.blank", "date": date(2020, 4, 1)},
blocking=True,
)
await hass.services.async_call(
const.DOMAIN,
"update_state",
service_data={"entity_id": "sensor.blank"},
blocking=True,
)
assert entity.state == 1
assert entity.extra_state_attributes["days"] == 1
assert isinstance(entity.extra_state_attributes["next_date"], datetime)
assert entity.extra_state_attributes["next_date"].date() == date(2020, 4, 2)
| 15,352
|
def remove_dataset_tags():
"""Command for removing tags from a dataset."""
command = Command().command(_remove_dataset_tags).lock_dataset()
return command.require_migration().with_commit(commit_only=DATASET_METADATA_PATHS)
| 15,353
|
def delete_device(connection: Connection, id: str, error_msg: Optional[str] = None):
"""Delete a device.
Args:
connection: MicroStrategy REST API connection object
id: ID of the device
error_msg (string, optional): Custom Error Message for Error Handling
Returns:
Complete HTTP response object. Expected status is 204.
"""
url = f"{connection.base_url}/api/v2/devices/{id}"
return connection.delete(url=url)
| 15,354
|
def run_job(job):
"""Handler that blocks until job is finished."""
job.poll(interval=2)
if job.status != "finished":
raise ValueError("Calculation job finished with status '{}'".format(job.status))
| 15,355
|
def build_model():
"""
    Build an ML pipeline with a RandomForest classifier and GridSearchCV
    :return: GridSearchCV object
"""
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(RandomForestClassifier()))
])
parameters = {'clf__estimator__n_estimators': [50, 60],
'clf__estimator__min_samples_split': [2, 3, 4],
'clf__estimator__criterion': ['entropy', 'gini']
}
cv = GridSearchCV(pipeline, param_grid=parameters)
return cv
| 15,356
|
def init_bot():
"""Inits the bot."""
# We create the Reddit instance.
reddit = praw.Reddit(client_id=config.APP_ID, client_secret=config.APP_SECRET,
user_agent=config.USER_AGENT, username=config.REDDIT_USERNAME,
password=config.REDDIT_PASSWORD)
    # Load the pre-existing sidebar text.
sidebar_text = open("sidebar.txt", "r", encoding="utf-8").read()
# Start the Markdown table with 3 columns.
table_text = """\n\n| | | |\n| --- | --- | --- |\n"""
# We iterate over INVESTING_DICT and call the same function.
for k, v in INVESTING_DICT.items():
temp_data = get_investing_data(k, v)
# Add the data to the Markdown table.
table_text += "| {} | {} | {} |\n".format(
temp_data[0], temp_data[1], temp_data[2])
time.sleep(1)
# We add the rest of financial instruments.
for item in get_cetes():
table_text += "| {} | | {} |\n".format(item[0], item[1])
# Prepare the footer with the current date and time.
now = datetime.now()
footer = "\nÚltima actualización: {:%d-%m-%Y a las %H:%M:%S}".format(now)
# Update the sidebar on old Reddit.
reddit.subreddit(
config.SUBREDDIT).wiki["config/sidebar"].edit(sidebar_text + table_text + footer)
# Update a sidebar text widget on new Reddit.
for widget in reddit.subreddit(config.SUBREDDIT).widgets.sidebar:
if widget.shortName == "Indicadores Financieros":
widget.mod.update(text=table_text + footer)
break
| 15,357
|
def check_table_id_fn(
key_interaction):
"""Adds interaction, table and question id if missing."""
key, interaction = key_interaction
if not _has_valid_shape(interaction.table):
beam.metrics.Metrics.counter(_NS, "Tables empty or of ragged shape").inc()
return
if interaction.id and interaction.table.table_id and all(
bool(q.id) for q in interaction.questions):
yield key_interaction
else:
new_interaction = interaction_pb2.Interaction()
new_interaction.CopyFrom(interaction)
for question in new_interaction.questions:
if not question.id:
question.id = key
beam.metrics.Metrics.counter(_NS, "Question Ids added").inc()
if not new_interaction.table.table_id:
new_interaction.table.table_id = key
beam.metrics.Metrics.counter(_NS, "Table Ids added").inc()
if not new_interaction.id:
new_interaction.id = key
beam.metrics.Metrics.counter(_NS, "Interaction Ids added").inc()
yield key, new_interaction
| 15,358
|
def test_renders_xml(app, context):
"""An xml extension results in no doctype and a application/xml mimetype"""
with app.test_request_context():
rendered = render_response("test.xml", context)
assert rendered.mimetype == "application/xml"
assert rendered.data == b"<name>Rudolf</name>"
rendered = render("test.xml", **context)
assert rendered.mimetype == "application/xml"
assert rendered.data == b"<name>Rudolf</name>"
| 15,359
|
def basic_auth(func):
"""Decorator for basic auth"""
def wrapper(request, *args, **kwargs):
try:
if is_authenticated(request):
return func(request, *args, **kwargs)
else:
return HttpResponseForbidden()
        except Exception as ex:
return HttpResponse(json.dumps({'success': False, 'error': ex.message}), mimetype='text/json')
return wrapper
| 15,360
|
async def test_transmute(request, user: str, env: str=None, group: [str]=None):
"""
API Description: Transmute Get. This will show in the swagger page (localhost:8000/api/v1/).
"""
return {
"user": user,
"env": env,
"group": group,
}
| 15,361
|
def simdispim(incat=None, config=None, lambda_psf=None, dispim_name=None,
model_spectra=None, model_images=None, nx=None, ny=None,
exptime=None, bck_flux=0.0, extraction=True, extrfwhm=3.0,
orient=True, slitless_geom=True, adj_sens=True, silent=True):
"""
Main function for the task SIMDISPIM
This module is the high level wrapper function for the
task SIMDISPIM. All necessary actions are done, feedback
is given to the user
@param incat: name of model object table
@type incat: string
@param config: aXe configuration file name
@type config: string
@param lambda_psf: wavelength the object shapes were determined at
@type lambda_psf: float
@param dispim_name: name of dispersed image
@type dispim_name: string
@param model_spectra: name of model spectra file
@type model_spectra: string
@param model_images: name of model images
    @type model_images: string
@param nx: number of pixels in x
@type nx: int
@param ny: number of pixels in y
@type ny: int
@param exptime: exposure time
    @type exptime: float
@param bck_flux: flux in background
@type bck_flux: float
@param extraction: flag for default extraction
@type extraction: boolean
@param extrfwhm: multiplier for extraction width
@type extrfwhm: float
@param orient: flag for tilted extraction
@type orient: boolean
@param slitless_geom: flag for slitless optimized extraction
@type slitless_geom: boolean
@param adj_sens: flag for adjusted flux conversion
@type adj_sens: boolean
@param silent: flag for silent run
    @type silent: boolean
"""
from . import imagemaker
from . import modspeclist
from . import axecommands
from . import realworld
from . import configfile
from .inputchecks import InputChecker
# give brief feedback
print('\nSIMDISPIM: Starting ...')
# just set the environments
axe_setup(axesim=True)
    if incat is None or config is None:
print(__doc__)
return 1
# check the input parameters
in_check = InputChecker(taskname='simdispim')
# for the 'simdisp'-task
in_check.check_simdispim_input(incat, config, lambda_psf,
model_spectra, model_images, nx,
ny, exptime, bck_flux, extraction,
extrfwhm, orient, slitless_geom,
adj_sens=adj_sens)
    if dispim_name is None:
# derive the output name
pos = incat.rfind('.')
if pos < 0:
dirima_name = incat + '_direct.fits'
grisima_name = incat + '_slitless.fits'
else:
dirima_name = incat[:pos] + '_direct.fits'
grisima_name = incat[:pos] + '_slitless.fits'
else:
dirima_name = dispim_name.replace('.fits','_direct.fits')
grisima_name = dispim_name
# make a full path to the
# direct image as dummy and as final output
dummy_dirima_path = getIMAGE(get_random_filename('t', '.fits'))
dummy_grisima_path = getIMAGE(get_random_filename('t', '.fits'))
final_dirima_path = getOUTSIM(dirima_name)
final_grisima_path = getOUTSIM(grisima_name)
try:
# to convert the background value
# to a float
bck_flux = float(bck_flux)
except ValueError:
# now it must be a file;
# check for its existence
if not os.path.isfile(getCONF(bck_flux)):
err_msg = 'Missing background image: ' + getCONF(bck_flux)
raise aXeSIMError(err_msg)
# store the path to the
# background image
bck_flux = getCONF(bck_flux)
# load the aXe configuration file
conf = configfile.ConfigFile(getCONF(config))
# make the simulation configuration
# file pointing the correct extensions
config_simul = conf.axesim_prep()
# delete the object
# explicitly
del conf
# load the simulation configuration file
conf_simul = configfile.ConfigFile(getCONF(config_simul))
# make sure a reasonable default
# for lambda_psf is given if needed
    if lambda_psf is None:
lambda_psf = conf_simul.confirm_lambda_psf()
print('SIMDISPIM: Input Model Object List: %s' % getIMAGE(incat))
print('SIMDISPIM: Input aXe configuration file: %s' % getCONF(config))
    if model_spectra is not None:
        print('SIMDISPIM: Input Model Spectra: %s' % getIMAGE(model_spectra))
    if model_images is not None:
        print('SIMDISPIM: Input Model Images: %s' % getIMAGE(model_images))
    print('SIMDISPIM: Fixed wavelength for PSF: %s' % str(lambda_psf))
print('SIMDISPIM: Background flux/image: %s' % str(bck_flux))
    if exptime is not None:
print('SIMDISPIM: Input exposure time: %s' % str(exptime))
    if nx is None and ny is None:
print('SIMDISPIM: Input image dimensions: %s' % 'AUTO')
else:
print('SIMDISPIM: Input image dimensions: (%s,%s)' % (str(nx),str(ny)))
print('SIMDISPIM: Output dispersed image: %s' % final_grisima_path)
if extraction:
print('SIMDISPIM: Extraction width scaling: %.2f' % extrfwhm)
print('SIMDISPIM: Extraction tilted: %s' % str(orient))
print('SIMDISPIM: Extraction slitless optimized: %s' % str(slitless_geom))
print('SIMDISPIM: Size-adjusted flux conversion: %s' % str(adj_sens))
print('SIMDISPIM: Output extracted spectra: %s' % final_grisima_path.replace('.fits', '_2.SPC.fits'))
print('SIMDISPIM: Output stamp images: %s' % final_grisima_path.replace('.fits', '_2.STP.fits'))
print('')
# create the dummy image maker
i_maker = imagemaker.DummyImages(getCONF(config_simul), dummy_grisima_path,
dummy_dirima_path, nx, ny)
    # make the dummy images
i_maker.makeImages()
# load the model object table
inobjects = modspeclist.ModelObjectTable(getIMAGE(incat))
# fill the model object table
inobjects.fill_columns(i_maker.WCSimage, i_maker.WCSext)
# load the object to make the grism simulations
grismator = axecommands.DispImator(i_maker, config_simul, getIMAGE(incat),
lambda_psf, model_spectra, model_images)
grismator.run(silent=silent)
grismator.mopup()
# get the name of the result image, which is the contamination image
result_image = getOUTPUT(os.path.basename(dummy_grisima_path).replace('.fits','_2.CONT.fits'))
# convert the 'contamination' image into
# a full output image with three extensions
# and noise (if desired)
rworld = realworld.RealWorld(result_image, extname='SCI', exptime=exptime,
bck_flux=bck_flux, rdnoise=conf_simul['RDNOISE'],
instrument=conf_simul['INSTRUMENT'])
rworld.make_real()
# move the resulting image to the correct
# name and place
shutil.move(result_image, final_grisima_path)
# check whether an extraction
# is desired
if extraction:
        # create an extractor
extractor = axecommands.DummyExtractor(i_maker, final_grisima_path,
config_simul, getIMAGE(incat), bck_flux,
extrfwhm, orient, slitless_geom, adj_sens,
lambda_mark=lambda_psf)
# make the extraction
extractor.prepare_extraction()
extractor.run(silent=silent)
extractor.mopup()
# delete the dummy images
i_maker.deleteImages()
# give brief feedback
print('SIMDISPIM: Done ...\n')
return 0
| 15,362
|
def ref_from_rfgc(sample):
"""
rename columns from RFGC catalog
"""
ref = dict(
ra = sample['RAJ2000'],
dec = sample['DEJ2000'],
a = sample['aO'],
b = sample['bO'],
PA = sample['PA']
)
return ref
| 15,363
|
def build_cmdline():
"""
creates OptionParser instance and populates command-line options
and returns OptionParser instance (cmd)
"""
cmd=optparse.OptionParser(version=__version__)
cmd.add_option('-c', '', dest='config_fname',type="string", help='WHM/WHMCS configuration file', metavar="FILE")
cmd.add_option('-s', '', dest="whm_section", type="string", help="WHM server to use. Specify section name. eg: -s ds01", metavar="SERVER")
cmd.add_option('','--search', action="store", dest='search', type="string", help="Search client by DNS domain name or cPanel username", metavar="STRING")
cmd.add_option('-d', '', dest='whmcs_deptid', type="int", help="WHMCS Department ID", metavar="INT")
cmd.add_option('-m', '', dest='whmcs_ticketmsg_fname', type="string", help="WHMCS abuse ticket template file", metavar='FILE')
cmd.add_option('-r', '', dest='whm_suspendmsg_fname', type="string", help='cPanel account suspension reason template file', metavar='FILE')
cmd.add_option('-f', '', dest='whmcs_proofmsg_fname', type="string", help='Abuse proof file which will be appended to abuse ticket message', metavar='FILE')
cmd.add_option('', '--subject', dest='whmcs_subject', type="string", help='Specify abuse ticket subject title.', metavar="STRING")
cmd.add_option('-y', '--allyes', dest='allyes', action="store_true", default=False, help='Assume yes as an answer to any question which would be asked')
return cmd
| 15,364
|
def get_repo_slugname(repo):
"""
>>> get_repo_slugname("https://build.frida.re")
build.frida.re
>>> get_repo_slugname("https://build.frida.re/./foo/bar")
build.frida.re
>>> get_repo_slugname("://build.frida.re")
build.frida.re
"""
parse_result = urllib.parse.urlparse(repo)
return parse_result.netloc
| 15,365
|
def check_asserttruefalse(logical_line, filename):
"""N328 - Don't use assertEqual(True/False, observed)."""
if 'ovn_octavia_provider/tests/' in filename:
if re.search(r"assertEqual\(\s*True,[^,]*(,[^,]*)?", logical_line):
msg = ("N328: Use assertTrue(observed) instead of "
"assertEqual(True, observed)")
yield (0, msg)
if re.search(r"assertEqual\([^,]*,\s*True(,[^,]*)?", logical_line):
msg = ("N328: Use assertTrue(observed) instead of "
"assertEqual(True, observed)")
yield (0, msg)
if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?", logical_line):
msg = ("N328: Use assertFalse(observed) instead of "
"assertEqual(False, observed)")
yield (0, msg)
if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?", logical_line):
msg = ("N328: Use assertFalse(observed) instead of "
"assertEqual(False, observed)")
yield (0, msg)
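
# Usage sketch (illustrative, not part of the original source): the check only
# fires for files under ovn_octavia_provider/tests/ and yields one
# (offset, message) tuple per offending assertion. Assumes the module-level
# `import re` that the function above relies on.
offending = "self.assertEqual(True, result)"
print(list(check_asserttruefalse(offending, "ovn_octavia_provider/tests/unit/test_foo.py")))
# -> [(0, 'N328: Use assertTrue(observed) instead of assertEqual(True, observed)')]
print(list(check_asserttruefalse(offending, "ovn_octavia_provider/common/utils.py")))
# -> [] (path is outside the tests tree, so the check is skipped)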
| 15,366
|
def run_file(file_path, globals_, script_dir=SCRIPT_DIR):
"""Execute the file at the specified path with the passed-in globals."""
script_name = os.path.basename(file_path)
if re.match(OAUTH_CLIENT_EXTRA_PATH_SCRIPTS, script_name):
extra_extra_paths = OAUTH_CLIENT_EXTRA_PATHS
elif re.match(GOOGLE_SQL_EXTRA_PATH_SCRIPTS, script_name):
extra_extra_paths = GOOGLE_SQL_EXTRA_PATHS
elif re.match(API_SERVER_EXTRA_PATH_SCRIPTS, script_name):
extra_extra_paths = API_SERVER_EXTRA_PATHS
else:
extra_extra_paths = []
fix_sys_path(extra_extra_paths)
script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)
script_dir = SCRIPT_DIR_EXCEPTIONS.get(script_name, script_dir)
script_path = os.path.join(script_dir, script_name)
execfile(script_path, globals_)
| 15,367
|
def create_znode(zookeeper_quorum, solr_znode, java64_home, retry=5, interval=10):
"""
  Create the znode if it does not exist; throws an exception if ZooKeeper is not accessible.
"""
solr_cli_prefix = __create_solr_cloud_cli_prefix(zookeeper_quorum, solr_znode, java64_home, True)
create_znode_cmd = format('{solr_cli_prefix} --create-znode --retry {retry} --interval {interval}')
Execute(create_znode_cmd)
| 15,368
|
def lowess(x, y, f=2. / 3., itera=3):
"""lowess(x, y, f=2./3., iter=3) -> yest
Lowess smoother: Robust locally weighted regression.
The lowess function fits a nonparametric regression curve to a scatterplot.
The arrays x and y contain an equal number of elements; each pair
(x[i], y[i]) defines a data point in the scatterplot. The function returns
the estimated (smooth) values of y.
The smoothing span is given by f. A larger value for f will result in a
    smoother curve. The number of robustifying iterations is given by itera. The
function will run faster with a smaller number of iterations.
"""
n = len(x)
r = int(ceil(f * n))
h = [np.sort(np.abs(x - x[i]))[r] for i in range(n)]
#h = [ (np.abs(x - x[i]))[r] for i in range(n)]
w = np.clip(np.abs((x[:, None] - x[None, :]) / h), 0.0, 1.0)
w = np.nan_to_num(w, nan=0.0)
w = (1 - w ** 3) ** 3
    s = np.diagonal(w)
yest = np.zeros(n)
delta = np.ones(n)
for iteration in range(itera):
for i in range(n):
weights = delta * w[:, i]
b = np.array([np.sum(weights * y), np.sum(weights * y * x)])
A = np.array([[np.sum(weights), np.sum(weights * x)],
[np.sum(weights * x), np.sum(weights * x * x)]])
beta = linalg.solve(A, b)
yest[i] = beta[0] + beta[1] * x[i]
residuals = y - yest
s = np.median(np.abs(residuals))
delta = np.clip(residuals / (6.0 * s), -1, 1)
delta = (1 - delta ** 2) ** 2
return yest
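
# Usage sketch (illustrative, not part of the original source): smooth a noisy
# sine curve with the lowess() defined above. Assumes the module-level imports
# the function relies on (numpy as np, math's ceil and scipy's linalg).
import numpy as np

rng = np.random.default_rng(0)
x_demo = np.linspace(0.0, 2.0 * np.pi, 100)
y_demo = np.sin(x_demo) + 0.3 * rng.standard_normal(100)
y_smooth = lowess(x_demo, y_demo, f=0.25, itera=3)
print(y_smooth[:5])  # smoothed estimates at the first five x positions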
| 15,369
|
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up a config entry."""
tibber_connection = tibber.Tibber(
access_token=entry.data[CONF_ACCESS_TOKEN],
websession=async_get_clientsession(hass),
time_zone=dt_util.DEFAULT_TIME_ZONE,
)
hass.data[DOMAIN] = tibber_connection
async def _close(event):
await tibber_connection.rt_disconnect()
entry.async_on_unload(hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close))
try:
await tibber_connection.update_info()
except asyncio.TimeoutError as err:
raise ConfigEntryNotReady from err
except aiohttp.ClientError as err:
_LOGGER.error("Error connecting to Tibber: %s ", err)
return False
except tibber.InvalidLogin as exp:
_LOGGER.error("Failed to login. %s", exp)
return False
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
# set up notify platform, no entry support for notify component yet,
# have to use discovery to load platform.
hass.async_create_task(
discovery.async_load_platform(
hass, "notify", DOMAIN, {CONF_NAME: DOMAIN}, hass.data[DATA_HASS_CONFIG]
)
)
return True
| 15,370
|
def scenario_mask_vulnerable(plot=plt, show=False):
"""
creates scenario with different groups that are more or less vulnerable
Args:
plot: plot to show
show (bool): variable if graphic should be shown
Returns:
plot: plot to show
ani_humans: animation of the humans
ani_stack: animation of the stackplot
"""
# variables that influence the simulation
prob, infection_radius, number_of_humans, temperature, number_vulnerable_humans, number_humans_with_mask = ask_for_different_input()
number_standard_humans = number_of_humans - \
number_vulnerable_humans - number_humans_with_mask
# plot setup
fig = plot.figure(figsize=(10, 4))
# for healthy and vulnerable humans
plot_humans = fig.add_subplot(1, 2, 1)
plot_humans.axes.xaxis.set_visible(False)
plot_humans.axes.yaxis.set_visible(False)
# for stackplot
plot_stack = fig.add_subplot(1, 2, 2)
plot_stack.set_frame_on(False)
plot_stack.axes.xaxis.set_visible(False)
plot_stack.axes.yaxis.set_visible(False)
# setting up the list of humans
global_humans, energy = init.init_sys(
temperature,
prob,
number_of_humans,
infection_radius=infection_radius,
world_limit=world_limit,
)
global_humans = init.make_vulnerable(
global_humans, number_of_humans, number_vulnerable_humans, infection_radius, prob)
global_humans = init.wear_mask(
global_humans, number_of_humans, number_humans_with_mask, infection_radius, prob)
inf = []
suc = []
rec = []
inf_mask = []
suc_mask = []
rec_mask = []
inf_vulnerable = []
suc_vulnerable = []
rec_vulnerable = []
steps = []
# animation of the movement of humans
ani_humans = animation.FuncAnimation(
fig,
scenario_basic_animation,
fargs=[global_humans, plot_humans, time_step, energy],
interval=plot_refresh_rate,
)
# animation of the stackplot
ani_stack = animation.FuncAnimation(
fig,
stack_animation_mask_vulnerable,
fargs=[
global_humans,
plot_stack,
time_step,
inf_vulnerable, inf, inf_mask,
rec_vulnerable, rec, rec_mask,
suc_vulnerable, suc, suc_mask,
steps,
number_of_humans,
infection_radius],
interval=plot_refresh_rate)
if show:
plot.show()
return plot, ani_humans, ani_stack
| 15,371
|
def _operator_parser(expr, first, current):
"""This method parses the expression string and substitutes
the temporal operators with numerical values.
Supported operators for relative and absolute time are:
- td() - the time delta of the current interval in days
and fractions of days or the unit in case of relative time
- start_time() - The start time of the interval from the begin of the
time series in days and fractions of days or the unit
in case of relative time
- end_time() - The end time of the current interval from the begin of
the time series in days and fractions of days or the
unit in case of relative time
Supported operators for absolute time:
- start_doy() - Day of year (doy) from the start time [1 - 366]
- start_dow() - Day of week (dow) from the start time [1 - 7],
the start of the week is monday == 1
- start_year() - The year of the start time [0 - 9999]
- start_month() - The month of the start time [1 - 12]
- start_week() - Week of year of the start time [1 - 54]
- start_day() - Day of month from the start time [1 - 31]
- start_hour() - The hour of the start time [0 - 23]
- start_minute() - The minute of the start time [0 - 59]
- start_second() - The second of the start time [0 - 59]
- end_doy() - Day of year (doy) from the end time [1 - 366]
- end_dow() - Day of week (dow) from the end time [1 - 7],
the start of the week is monday == 1
- end_year() - The year of the end time [0 - 9999]
- end_month() - The month of the end time [1 - 12]
- end_week() - Week of year of the end time [1 - 54]
- end_day() - Day of month from the end time [1 - 31]
- end_hour() - The hour of the end time [0 - 23]
- end_minute() - The minute of the end time [0 - 59]
- end_second() - The minute of the end time [0 - 59]
The modified expression is returned.
"""
is_time_absolute = first.is_time_absolute()
expr = _parse_td_operator(expr, is_time_absolute, first, current)
expr = _parse_start_time_operator(expr, is_time_absolute, first, current)
expr = _parse_end_time_operator(expr, is_time_absolute, first, current)
expr = _parse_start_operators(expr, is_time_absolute, current)
expr = _parse_end_operators(expr, is_time_absolute, current)
return expr
| 15,372
|
def _read_data(filename):
"""
    Read the script and return it as a string
:param filename:
:return:
"""
javascript_path = _get_data_absolute_path(filename)
with open(javascript_path) as javascript:
return javascript.read()
| 15,373
|
def xonshconfig(env):
"""Ensures and returns the $XONSHCONFIG"""
xcd = env.get("XONSH_CONFIG_DIR")
xc = os.path.join(xcd, "config.json")
return xc
| 15,374
|
def file_open(filename, mode='r', encoding='utf8'):
"""Open file with implicit gzip/bz2 support
Uses text mode by default regardless of the compression.
In write mode, creates the output directory if it does not exist.
"""
if 'w' in mode and not os.path.isdir(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
if filename.endswith('.bz2'):
if mode in {'r', 'w', 'x', 'a'}:
mode += 't'
return bz2.open(filename, mode=mode, encoding=encoding)
if filename.endswith('.xz'):
if mode in {'r', 'w', 'x', 'a'}:
mode += 't'
return lzma.open(filename, mode=mode, encoding=encoding)
if filename.endswith('.gz'):
if mode in {'r', 'w', 'x', 'a'}:
mode += 't'
return gzip.open(filename, mode=mode, encoding=encoding)
return open(filename, mode=mode, encoding=encoding)
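
# Usage sketch (illustrative, not part of the original source): write and
# re-read a gzip-compressed text file; in write mode the output directory is
# created automatically. The path below is a throwaway temporary location.
import os
import tempfile

demo_path = os.path.join(tempfile.mkdtemp(), "subdir", "demo.txt.gz")
with file_open(demo_path, mode="w") as fh:
    fh.write("hello world\n")
with file_open(demo_path) as fh:
    print(fh.read())  # -> hello world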
| 15,375
|
def schoollist():
"""
Return all the schools.
    Return an empty list if there are no schools.
:return:
"""
items = get_schools()
if items:
return response_for_schools_list(get_schools_json_list(items))
return response_for_schools_list([])
| 15,376
|
def getNumNullops(duration, max_sample=1.0):
"""Return number of do-nothing loop iterations."""
for amount in [2**x for x in range(100)]: # 1,2,4,8,...
begin = datetime.now()
        for ii in range(amount): pass
elapsed = (datetime.now() - begin).total_seconds()
if elapsed > max_sample:
break
return int(amount/elapsed*duration)
| 15,377
|
def get_dependencies_from_wheel_cache(ireq):
"""Retrieves dependencies for the given install requirement from the wheel cache.
:param ireq: A single InstallRequirement
:type ireq: :class:`~pip._internal.req.req_install.InstallRequirement`
:return: A set of dependency lines for generating new InstallRequirements.
:rtype: set(str) or None
"""
if ireq.editable or not is_pinned_requirement(ireq):
return
matches = WHEEL_CACHE.get(ireq.link, name_from_req(ireq.req))
if matches:
matches = set(matches)
if not DEPENDENCY_CACHE.get(ireq):
DEPENDENCY_CACHE[ireq] = [format_requirement(m) for m in matches]
return matches
return
| 15,378
|
def hbp_fn():
"""Create a ReLU layer with HBP functionality."""
return HBPReLU()
| 15,379
|
def plot_models_results(results, names, figsize=(16, 14), save=False, prefix_name_fig=None, folder='Charts'):
"""Compare the models plotting their results in a boxplot
Arguments --> the model results, their names, the figure size, a boolean to indicate if the plot has to be saved or not, the prefix name for the saved file and the folder where to save the chart
"""
fig = plt.figure(figsize=figsize)
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
    if save:
prefix_name_fig = prefix_name_fig + '_' if prefix_name_fig is not None else ''
plt.savefig(folder + '/' + prefix_name_fig + '.png')
| 15,380
|
def xtrans(r):
"""RBDA Tab. 2.2, p. 23:
Spatial coordinate transform (translation of origin).
Calculates the coordinate transform matrix from A to B coordinates
for spatial motion vectors, in which frame B is translated by an
amount r (3D vector) relative to frame A.
"""
r1,r2,r3 = r
return matrix.sqr((
1, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0,
0, r3, -r2, 1, 0, 0,
-r3, 0, r1, 0, 1, 0,
r2, -r1, 0, 0, 0, 1))
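
# Illustrative cross-check (not part of the original source): the same spatial
# translation transform built with plain numpy instead of scitbx's matrix.sqr,
# to make the RBDA block structure [[1, 0], [-rx, 1]] explicit (rx is the
# skew-symmetric cross-product matrix of r).
import numpy as np

def xtrans_np(r):
    r1, r2, r3 = r
    rx = np.array([[0.0, -r3, r2],
                   [r3, 0.0, -r1],
                   [-r2, r1, 0.0]])
    X = np.eye(6)
    X[3:, :3] = -rx
    return X

print(xtrans_np((1.0, 2.0, 3.0)))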
| 15,381
|
def get_chop_flux(obs, chunk_method="nanmedian", method="nanmean",
err_type="internal", weight=None, on_off=True):
"""
Calculate the flux in chopped data. The data will first be processed in each
chop chunk by chunk_method, unless the chunk_method is set to None or
'none' and the data will be left as it is. Then the data will be separated
into on-chop and off-chop part, by which the difference is the flux. The
function supports two ways to calculate error: if err_type is 'internal',
the difference between mean of all on and off chop data is the flux, and
the combined error of the two parts of the data is the final error; if
err_type is 'external', then the difference of each on-off pair will be
taken in the first step, and then the mean and error of these differences
is used. The method of calculating mean in this step is denoted by the
variable method, which supports 'mean', 'nanmean', 'median', 'nanmedian'.
:param obs: Obs or ObsArray object containing data and chop_
:type obs: Obs or ObsArray
:param str chunk_method: str, method parameter passed to chunk_proc() to
chunk the data as the first step. If set to None or 'none', the data
will skip the chunk step and the flux will be extracted from the raw
data
:param str method: str, the method parameter passed to
weighted_proc_along_axis() to calculate the flux and error, suggested
values are "nanmean" or "nanmedian"
:param str err_type: str, allowed values are 'internal' and 'external'
:param weight: Obs or ObsArray object containing weight, should of the same
type as obs. If left None, will treat all data point as the same weight.
:type weight: Obs or ObsArray
:param bool on_off: bool flag of flux calculation using on chop - off chop,
if False, flux is off chop - on chop
:return: tuple of (flux, error, weight) objects of the same type as input
obs
:rtype: tuple
:raises TypeError: invalid input type
:raises ValueError: invalid method value
"""
if not isinstance(obs, Obs):
raise TypeError("Invalid input type for obs, expect Obs/ObsArray.")
obs = obs.copy()
mean_obs = obs.proc_along_time(method="nanmean")
if obs.empty_flag_ or obs.chop_.empty_flag_:
raise ValueError("obs data_ or chop_ is empty.")
if weight is None:
weight = obs.replace(arr_in=np.ones(obs.shape_))
weight = weight.copy()
weight.fill_by_mask(mask=np.isnan(obs.data_), fill_value=np.nan)
if (chunk_method is None) or chunk_method.strip().lower() == "none":
obs_chunk_on = obs.take_by_flag_along_time(chop=True)
obs_chunk_off = obs.take_by_flag_along_time(chop=False)
wt_chunk_on = weight.take_by_flag_along_time(flag_arr=obs.chop_.data_)
wt_chunk_off = weight.take_by_flag_along_time(flag_arr=~obs.chop_.data_)
else:
obs_chunk = obs.chunk_proc(method=chunk_method)
obs_chunk_on = obs_chunk.take_by_flag_along_time(chop=True)
obs_chunk_off = obs_chunk.take_by_flag_along_time(chop=False)
wt_chunk_method = "nansum" if chunk_method.strip().lower()[:3] == "nan" \
else "sum"
wt_chunk = weight.chunk_proc(chunk_edge_idxs=obs.chop_.chunk_edge_idxs_,
method=wt_chunk_method)
wt_chunk_on = wt_chunk.take_by_flag_along_time(
flag_arr=obs_chunk.chop_.data_)
wt_chunk_off = wt_chunk.take_by_flag_along_time(
flag_arr=~obs_chunk.chop_.data_)
if err_type.strip().lower()[0] == "i":
obs_chunk_on_mean, obs_chunk_on_err, obs_chunk_on_wt = \
weighted_proc_along_axis(obs=obs_chunk_on, method=method,
weight=wt_chunk_on, axis=-1)
obs_chunk_off_mean, obs_chunk_off_err, obs_chunk_off_wt = \
weighted_proc_along_axis(obs=obs_chunk_off, method=method,
weight=wt_chunk_off, axis=-1)
obs_flux = obs_chunk_on_mean - obs_chunk_off_mean
obs_err = np.sqrt(obs_chunk_on_err ** 2 + obs_chunk_off_err ** 2)
obs_wt = obs_chunk_on_wt + obs_chunk_off_wt
elif err_type.strip().lower()[0] == "e":
flag_arr1, flag_arr2 = get_match_phase_flags(
chop1=obs_chunk_on.chop_, chop2=obs_chunk_off.chop_,
match_same_phase=False)
if (len(flag_arr1) != 0) and (len(flag_arr2) != 0):
obs_chunk_on_match = obs_chunk_on.take_by_flag_along_time(
flag_arr=flag_arr1)
obs_chunk_off_match = obs_chunk_off.take_by_flag_along_time(
flag_arr=flag_arr2)
wt_chunk_on_match = wt_chunk_on.take_by_flag_along_time(
flag_arr=flag_arr1)
wt_chunk_off_match = wt_chunk_off.take_by_flag_along_time(
flag_arr=flag_arr2)
obs_chunk_diff = obs_chunk_on_match - obs_chunk_off_match
wt_chunk_diff = 1 / (1 / wt_chunk_on_match + 1 / wt_chunk_off_match)
wt_chunk_diff.fill_by_mask(mask=~np.isfinite(wt_chunk_diff.data_),
fill_value=np.nan)
obs_flux, obs_err, obs_wt = weighted_proc_along_axis(
obs=obs_chunk_diff, method=method, weight=wt_chunk_diff,
axis=-1)
else:
obs_flux, obs_err, obs_wt = (
mean_obs.replace(
arr_in=np.full(mean_obs.shape_, fill_value=np.nan)),
mean_obs.replace(
arr_in=np.full(mean_obs.shape_, fill_value=np.nan)),
mean_obs.replace(
arr_in=np.full(mean_obs.shape_, fill_value=0)))
else:
raise ValueError("Invalid value for err_type.")
if not on_off:
obs_flux *= -1
obs_flux = mean_obs.replace(arr_in=obs_flux.data_)
obs_err = mean_obs.replace(arr_in=obs_err.data_)
obs_wt = mean_obs.replace(arr_in=obs_wt.data_)
return obs_flux, obs_err, obs_wt
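
# Simplified numpy sketch (illustrative only; it does not use the Obs/ObsArray
# classes expected by get_chop_flux): the core on - off chop logic with the
# 'internal' and 'external' error estimates on a 1-d time stream that has an
# alternating chop flag.
import numpy as np

rng = np.random.default_rng(1)
chop = np.tile([True, False], 50)                  # alternating on/off phases
data = 5.0 * chop + rng.normal(0.0, 1.0, chop.size)

on, off = data[chop], data[~chop]
flux = on.mean() - off.mean()
# 'internal': combine the errors of the on and off means in quadrature
err_internal = np.sqrt(on.std(ddof=1) ** 2 / on.size +
                       off.std(ddof=1) ** 2 / off.size)
# 'external': scatter of the per-pair on - off differences
diff = on - off
err_external = diff.std(ddof=1) / np.sqrt(diff.size)
print(flux, err_internal, err_external)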
| 15,382
|
def my_example_embeddings_method(paths, embedding_size, default_value=1):
"""
:param paths: (list) a list of BGP paths; a BGP path is a list of integers (ASNs)
:param embedding_size: (int) the size of the embedding
:param default_value: (int) the value for the embeddings
:return: (pandas dataframe object) a dataframe with index the ASN numbers included in the paths where each row has <embedding_size> embeddings all with the same <default_value>
"""
unique_ASNs = set()
for path in paths:
unique_ASNs.update(path)
columns = ['embedding_' + str(i) for i in range(embedding_size)]
data = pd.DataFrame(default_value, index=unique_ASNs, columns=columns)
return data
| 15,383
|
def process_prompt_choice(value, prompt_type):
"""Convert command value to business value."""
if value is not None:
idx = prompt_type(value)
return idx
raise CommandError("The choice is not exist, please choice again.")
| 15,384
|
def summarise_input(rawimg, labelimg):
"""This function takes as input: 'rawimg' (the data) and 'labelimg' (the cell boundary cartoon)
Then using the z=1, channel=1 frame, produces a summary table for inspection.
It also calculates which label is the background, assuming it is the largest cell.
It returns the following:
    For example, if there are three labels named 17, 20 and 41, where "20" is the background, this function will return:
1. A list of all the labels (e.g. [17,20,41])
2. The number of labels (e.g. 3)
3. The index of the background (e.g. 1)
4. The label name of the background (e.g. 20)
"""
#Take a snapshot of the image at z=1 and c=1 for this analysis
inputimg=Duplicator().run(rawimg, 1, 1, 1, 1, 1, 1); #ImagePlus imp, int firstC, int lastC, int firstZ, int lastZ, int firstT, int lastT)
results = ArrayList()
im = IntensityMeasures( inputimg, labelimg )
results.add( im.getMean() )
results.add( im.getStdDev() )
results.add( im.getNumberOfVoxels())
results.add( im.getMin() )
results.add( im.getMax() )
results.add( im.getMedian() )
results.add( im.getMode() )
mergedTable = ResultsTable()
numLabels = results.get(0).getCounter()
###Create a dictionary to store data###
d={}
d["label"]=[]
for i in xrange(results.size()): #for each heading (mean, std. dev. etc.)
measure = results.get( i ).getColumnHeading( 0 )
d[measure]=[]
######################################
for i in xrange(numLabels):
mergedTable.incrementCounter()
label = results.get( 0 ).getLabel( i ) #obtains the 0-indexed ith label, regardless of its string-name.
d["label"].append(label)
mergedTable.addLabel(label)
for j in xrange(results.size()):
measure = results.get( j ).getColumnHeading( 0 )
value = results.get( j ).getValue( measure, i )
mergedTable.addValue( measure, value )
d[measure].append(value)
if show_table:
mergedTable.show( inputimg.getShortTitle() +"-intensity-measurements" )
###Ensure labels file is in the correct format: ###
#Labels sometimes have gaps (e.g. labels=[4,40,82] is possible).
#The Python script stores them in a python list, and accesses them by “python indexes” (i.e. their order, starting with 0)
#In this example, label 4 would have a python index of 0 and label 40 would have a python index of 1 etc.
tmp=map(int, d["label"]) #convert label numbers (strings) to integers
assert sorted(tmp) == tmp, "FATAL ERROR: The labels provided are not in numerical order, \
whereas this script was written assuming they are. \
If this error occurs, it means the script needs editing"
###################################################
if manually_assign_backgroundlayer_to_label:
background_label_index=tmp.index(manually_assign_backgroundlayer_to_label)
print("The background has been manually selected as label {} (i.e. python index {})".format(manually_assign_backgroundlayer_to_label, background_label_index))
else:
background_label_index, background_number_of_voxels = max(enumerate(d["NumberOfVoxels"]), key=operator.itemgetter(1))
print("The auto-selected background is at label {} (i.e. python index {})".format(d["label"][background_label_index], background_label_index))
return d["label"], numLabels, background_label_index, d["label"][background_label_index]
| 15,385
|
def create_inchi_from_ctfile_obj(ctf, **options):
"""Create ``InChI`` from ``CTfile`` instance.
:param ctf: Instance of :class:`~ctfile.ctfile.CTfile`.
:type ctf: :class:`~ctfile.ctfile.CTfile`
:return: ``InChI`` string.
:rtype: :py:class:`str`
"""
# apply fixed hydrogen layer when atom charges are present
atom_charges = [atom.charge for atom in ctf.atoms if atom.charge != '0']
if atom_charges:
options.update({'fixedH': '-xF'})
with tempfile.NamedTemporaryFile(mode='w') as moltempfh, tempfile.NamedTemporaryFile(mode='r') as inchitempfh:
moltempfh.write(ctf.writestr(file_format='ctfile'))
moltempfh.flush()
openbabel.convert(input_file_path=moltempfh.name,
output_file_path=inchitempfh.name,
input_format='mol',
output_format='inchi',
**options)
inchi_result = inchitempfh.read()
return inchi_result.strip()
| 15,386
|
def get_info(df, verbose = None,max_cols = None, memory_usage = None, null_counts = None):
""" Returns the .info() output of a dataframe
"""
assert type(df) is pd.DataFrame
buffer = io.StringIO()
df.info(verbose, buffer, max_cols, memory_usage, null_counts)
return buffer.getvalue()
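
# Usage sketch (illustrative, not part of the original source): capture the
# .info() summary of a small DataFrame as a string instead of printing it to
# stdout. Assumes the module-level `io` and `pandas as pd` imports used above.
import pandas as pd

df_demo = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", None]})
print(get_info(df_demo))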
| 15,387
|
def get_directory_size(directory):
"""" Get directory disk usage in MB"""
directory_size = 0
for (path, dirs, files) in os.walk(directory):
for file in files:
directory_size += os.path.getsize(os.path.join(path, file))
return directory_size / (1024 * 1024.0)
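
# Usage sketch (illustrative, not part of the original source): report the
# size of a throwaway temporary directory containing a single 512 KiB file.
import os
import tempfile

demo_dir = tempfile.mkdtemp()
with open(os.path.join(demo_dir, "blob.bin"), "wb") as fh:
    fh.write(b"\0" * (512 * 1024))
print("%.2f MB" % get_directory_size(demo_dir))  # -> 0.50 MB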
| 15,388
|
def iwave_modes(N2, dz, k=None):
"""
    Calculates the eigenvalues and eigenfunctions of the internal wave eigenvalue problem:
    $$
    \frac{d^2 \phi}{dz^2} + \frac{N^2}{c^2} \phi = 0
    $$
    with boundary conditions \phi = 0 at the top and bottom of the water column.
"""
nz = N2.shape[0] # Remove the surface values
dz2 = 1/dz**2
# Construct the LHS matrix, A
A = np.diag(-1*dz2*np.ones((nz-1)),-1) + \
np.diag(2*dz2*np.ones((nz,)),0) + \
np.diag(-1*dz2*np.ones((nz-1)),1)
# BC's
A[0,0] = -1.
A[0,1] = 0.
A[-1,-1] = -1.
A[-1,-2] = 0.
# Construct the RHS matrix i.e. put N^2 along diagonals
B = np.diag(N2,0)
# Solve... (use scipy not numpy)
w, phi = linalg.eig(A, b=B)
c = 1. / np.power(w, 0.5) # since term is ... + N^2/c^2 \phi
# Sort by the eigenvalues
idx = np.argsort(c)[::-1] # descending order
# Calculate the actual phase speed
cn = np.real( c[idx] )
return phi[:,idx], cn
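
# Usage sketch (illustrative, not part of the original source): constant
# stratification on a uniform grid, where the mode-n phase speed should be
# close to the analytic value c_n = N * H / (n * pi). Assumes the module-level
# `numpy as np` and `scipy.linalg as linalg` imports used by iwave_modes.
import numpy as np

N = 0.01                      # buoyancy frequency [rad/s]
H = 1000.0                    # water depth [m]
nz = 200
dz = H / (nz - 1)
N2 = np.full(nz, N ** 2)
phi, cn = iwave_modes(N2, dz)
print(cn[0], N * H / np.pi)   # first-mode phase speed vs. analytic ~3.18 m/s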
| 15,389
|
def test_hollowsphere_degenerate_neighborhood():
"""Test either we sustain empty neighborhoods
"""
hs = ne.HollowSphere(1, inner_radius=0, element_sizes=(3,3,3))
assert_equal(len(hs((1,1,1))), 0)
| 15,390
|
def run_sql_migration(config, migration):
"""
Returns bool
Runs all statements in a SQL migration file one-at-a-time. Uses get_statements as a generator in a loop.
"""
conn = config['conn']
write_log(config, "SQL migration from file '{}'".format(migration['filename']))
with open(migration['filename'], 'r') as sqlFile:
for stmt in get_statements(sqlFile):
write_log(config, "Executing statement:\n{}".format(stmt))
pre_statement(config, migration, stmt)
with conn.cursor() as cur:
cur.execute(stmt)
post_statement(config, migration, stmt)
return True
| 15,391
|
def get_variable_type(n: int, data: Dict[str, Any]) -> str:
"""Given an index n, and a set of data,
return the type of a variable with the same index."""
if n in data[s.BOOL_IDX]:
return VariableTypes.BINARY
elif n in data[s.INT_IDX]:
return VariableTypes.INTEGER
return VariableTypes.CONTINUOUS
| 15,392
|
def draw_boxes_on_image(image_path: str,
boxes: np.ndarray,
scores: np.ndarray,
labels: np.ndarray,
label_names: list,
score_thresh: float = 0.5,
save_path: str = 'result'):
"""Draw boxes on images."""
image = np.array(PIL.Image.open(image_path))
plt.figure()
_, ax = plt.subplots(1)
ax.imshow(image)
image_name = image_path.split('/')[-1]
print("Image {} detect: ".format(image_name))
colors = {}
for box, score, label in zip(boxes, scores, labels):
if score < score_thresh:
continue
if box[2] <= box[0] or box[3] <= box[1]:
continue
label = int(label)
if label not in colors:
colors[label] = plt.get_cmap('hsv')(label / len(label_names))
x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
rect = plt.Rectangle((x1, y1), x2 - x1, y2 - y1, fill=False, linewidth=2.0, edgecolor=colors[label])
ax.add_patch(rect)
ax.text(
x1,
y1,
'{} {:.4f}'.format(label_names[label], score),
verticalalignment='bottom',
horizontalalignment='left',
bbox={
'facecolor': colors[label],
'alpha': 0.5,
'pad': 0
},
fontsize=8,
color='white')
print("\t {:15s} at {:25} score: {:.5f}".format(label_names[int(label)], str(list(map(int, list(box)))), score))
image_name = image_name.replace('jpg', 'png')
plt.axis('off')
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.savefig("{}/{}".format(save_path, image_name), bbox_inches='tight', pad_inches=0.0)
plt.cla()
plt.close('all')
| 15,393
|
def pretty_print_expr(expr, indent=0):
"""Print Expr in a pretty way by recursion and indentation.
Arguments:
expr {Expr} -- the Expr to print
Keyword Arguments:
indent {int} -- spaces to indent (default: {0})
"""
if expr.op == 'id':
print(indent*'\t', expr.args[0])
else:
print(indent*'\t', expr.op)
for e in expr.args:
pretty_print_expr(e, indent+1)
| 15,394
|
def online_decompilation_main(result_path,path):
"""
    :param result_path: base folder for the outputs; results are written to its
        "result/", "source_code_path/", "opcode_path/" and "html_path/" subfolders
        (contract metadata, decompiled source code, opcodes and the crawled html
        pages, respectively)
    :param path: path to the JSON file that stores all contract address information
    :return: the total number of contracts processed, the number of timeouts, and
        the list of addresses that timed out
"""
# url = input("please input the contract tx:")
# url = sys.argv[0]
online_decompiler_result_save_file = result_path +"result/"
solidity_code_result = result_path + "source_code_path/"
opcode_result = result_path + "opcode_path/"
html_path = result_path + "html_path/"
f = open(path, )
data = json.load(f) # data is a list, and each list is a dictionary, which forms the json format
all_num = 0
time_out = 0
list = []
l1 = path.split("/")
list2 = []
result_json_name = l1[-1]
for i in data:
print(all_num,end=' ')
all_num = all_num+1
url = i.get("address")
dict = {"address":url}
dict["tx_count"] = i.get("tx_count")
dict["parse_lose"] = False
dict["parse_timeout_information"] = ""
start = time.time()
try:
http_get(url,html_path) # Get the address of the contract, crawl the content of the contract at that address, and then store the web page in the address of a folder in html_path
except Exception as e:
time_out = time_out + 1
list2.append(url)
print(e)
pass
continue
# dict["parsetime"] = 0
# dict["size"]
str1, str2 = parsehtml(url,html_path) # Parse the html file corresponding to the contract
if(str1==""):
dict["parse_lose"] = True
dict["parse_information"] = "parse html fail~!"
end = time.time()
dict["parsetime"] = end - start
dict["size"] = len(str1)
# print("url",url)
# print(end-start)
save_to_file(solidity_code_result + url + ".sol", str1)
save_to_file(opcode_result + url + ".txt", str2)
list.append(dict) # Save the acquired contract information in the list, and then save the list in a file
write_list_to_json(list,result_json_name ,online_decompiler_result_save_file)
return all_num,time_out,list2
# Write the list into a file, the list contains all the information obtained by the parsed contract, and then save it in a folder named result.json
| 15,395
|
def filter_unit_name(merged_df:pd.DataFrame)->pd.DataFrame:
"""
Iteratively selects names that are close together based
    on the Levenshtein distance (number of inserted, deleted or
    substituted letters needed to make two strings identical).
TODO: this iterative approach is very inefficient and would
not scale. Future work would speed up this algorithm.
TODO: the max_dist parameter has been manually tuned to be 6.
In future, some thought would be put into how to calculate this
programmatically.
"""
# accepted string distance between names
max_dist = 6
filter_df = pd.DataFrame(columns=merged_df.columns)
for dist in range(max_dist):
for index, row in merged_df.iterrows():
            # this checks if the unit_name is already in
# filtered_df, if so skip the row
# unit_name_entso is row index 4
if not any(filter_df.unit_name.isin([row[4]])):
# UNIT_platts is index 10
if editdistance.eval(row[4], row[10]) < dist:
filter_df = filter_df.append(row)
return filter_df
| 15,396
|
def calculate_snr(
Efield: np.ndarray,
freqRange: tuple,
h_obs: float = 525.0,
Nants: int = 1,
gain: float = 10.0,
) -> np.ndarray:
"""
given a peak electric field in V/m and a frequency range, calculate snr
Parameters
Efield: np.ndarray
peak electric field in V/m
freqRange: float
tuple with low and high end of frequency band in MHz
h_obs: float
height in km above the earth surface of your observer (default = 525km)
Nants: int
number of antennas phased together (default = 1)
gain: float
gain of the antenna(s) in dBi
Returns
SNR for each trial
"""
df = (
10.0 # efields made with 10 MHz bins, would need to redo for different bin size
)
freqs = np.arange(freqRange[0], freqRange[1], df) + df / 2.0
V_sig = Nants * voltage_from_field(Efield, freqs, gain)
V_noise = np.sqrt(Nants * np.sum(noise_voltage(freqs, h_obs) ** 2.0))
V_sigsum = np.sum(V_sig, axis=1)
# print(V_sigsum.mean())
# print(V_noise)
return V_sigsum / V_noise
| 15,397
|
def retrieve_obj_indices(batch_cls: np.ndarray, num_classes: int):
"""Helper function to save the object indices for later.
E.g. a batch of 3 samples with varying number of objects (1, 3, 1) will
produce a mapping [[0], [1,2,3], [4]]. This will be needed later on in the
bipartite matching.
Parameters
----------
batch_cls : np.ndarray
Batch class targets of shape [Batch Size, #Queries, 1].
num_classes : int
Number of target classes.
Returns
-------
obj_indices : list
Object indices indicating for each sample at which position the
associated objects are.
"""
obj_indices = []
batch_size = batch_cls.shape[0]
for idx in np.arange(0, batch_size, dtype=np.int32):
sample = batch_cls[idx]
object_indices = np.where(sample != num_classes)[0]
num_objects_in_sample = len(object_indices)
if idx == 0:
sample_obj_indices = np.arange(0, num_objects_in_sample, dtype=np.int32)
obj_indices.append(sample_obj_indices.tolist())
last_num_objects = num_objects_in_sample
else:
start, upto = last_num_objects, last_num_objects + num_objects_in_sample
sample_obj_indices = np.arange(start, upto, dtype=np.int32)
obj_indices.append(sample_obj_indices.tolist())
last_num_objects = upto
return obj_indices
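
# Usage sketch (illustrative, not part of the original source): reproduce the
# docstring mapping for a batch of 3 samples with (1, 3, 1) objects, treating
# class id `num_classes` as the "no object" padding class.
import numpy as np

num_classes = 5
batch_cls = np.full((3, 4, 1), num_classes)   # [Batch, #Queries, 1], all padding
batch_cls[0, 0] = 0                           # sample 0: one object
batch_cls[1, :3] = 2                          # sample 1: three objects
batch_cls[2, 1] = 4                           # sample 2: one object
print(retrieve_obj_indices(batch_cls, num_classes))   # -> [[0], [1, 2, 3], [4]]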
| 15,398
|
def get_code_type(code):
"""
    Determine which type a code belongs to; currently only ['fund', 'stock'] are supported.
    :return: str, the code type: 'fund' for a fund, 'stock' for a stock
"""
if code.startswith(('00', '30', '60')):
return 'stock'
return 'fund'
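
# Usage sketch (illustrative, not part of the original source): codes starting
# with '00', '30' or '60' are treated as stocks, everything else as funds.
print(get_code_type('600036'))   # -> 'stock'
print(get_code_type('161725'))   # -> 'fund'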
| 15,399
|