content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def set_season():
    """Prompt the user for the current season and return it as a lowercase
    string ("spring", "summer" or "fall").

    Re-prompts with an error message until the user enters 1, 2 or 3.
    """
    choices = ["Spring", "Summer", "Fall"]
    bad_input = False
    while True:
        clear_screen()
        print("What season is it?")
        # Show the numbered menu of choices.
        print_options(choices)
        if bad_input:
            print("Sorry I didn't understand. Please enter either 1, 2, or 3")
            bad_input = False
        else:
            print("")
        answer = input()
        if answer.isdigit() and 1 <= int(answer) <= 3:
            return choices[int(answer) - 1].lower()
        bad_input = True
import http
def extract_rooms_or_global(req, admin=True):
    """
    Extracts the rooms / global parameters from the request body checking them for validity and
    expanding them as appropriate.
    Throws a flask abort on failure, returns (rooms, global) which will be either ([list of Rooms],
    None) for a room operation or (None, True) for a global operation.
    admin specifies whether we require admin permission (if True) or just moderator permission,
    either in all rooms specified, or globally. (Similarly it affects what `rooms=['*']` expands
    to).
    """
    # NOTE(review): `http` here presumably refers to a project status-code module; the
    # stdlib `http` module exposes these names on http.HTTPStatus, not directly -- confirm.
    if not isinstance(req, dict):
        app.logger.warning(f"Invalid request: expected a JSON object body, not {type(req)}")
        abort(http.BAD_REQUEST)
    room_tokens, global_ = req.get('rooms'), req.get('global', False)
    # 'rooms' and 'global' are mutually exclusive, and exactly one must be given.
    if room_tokens and not isinstance(room_tokens, list):
        app.logger.warning("Invalid request: rooms must be a list")
        abort(http.BAD_REQUEST)
    if room_tokens and global_:
        app.logger.warning("Invalid moderator request: cannot specify both 'rooms' and 'global'")
        abort(http.BAD_REQUEST)
    if not room_tokens and not global_:
        app.logger.warning("Invalid moderator request: neither 'rooms' nor 'global' specified")
        abort(http.BAD_REQUEST)
    if room_tokens:
        # The wildcard '*' must stand alone; it expands to "all rooms with permission"
        # (signalled to get_rooms_with_permission by passing tokens=None).
        if len(room_tokens) > 1 and '*' in room_tokens:
            app.logger.warning("Invalid moderator request: room '*' must be the only rooms value")
            abort(http.BAD_REQUEST)
        if room_tokens == ['*']:
            room_tokens = None
        try:
            rooms = mroom.get_rooms_with_permission(
                g.user, tokens=room_tokens, moderator=True, admin=True if admin else None
            )
        except Exception as e:
            # This is almost certainly a bad room token passed in:
            app.logger.warning(f"Cannot get rooms for adding a moderator: {e}")
            abort(http.NOT_FOUND)
        if room_tokens:
            # Explicit tokens: permission is required in *every* requested room.
            if len(rooms) != len(room_tokens):
                abort(http.FORBIDDEN)
        elif not rooms:
            # Wildcard: permission is required in at least one room.
            abort(http.FORBIDDEN)
        return (rooms, None)
    # Global operation: needs global moderator, plus global admin when admin=True.
    if not g.user.global_moderator or (admin and not g.user.global_admin):
        abort(http.FORBIDDEN)
    return (None, True)
def info(msg):
    """Log *msg* at the "info" level.

    :param msg: the message to print
    :returns: True -- so the call can be used inside an ``assert`` statement
    """
    return log("info", msg, logger)
from typing import Counter
def error_correct_BC_or_UMI(records, key, threshold=1):
    """Greedily collapse barcodes/UMIs that lie within ``threshold`` edit
    distance of a more frequent one.

    :param records: list of records, all from the same gene; each record is a
        mapping that contains ``key``
    :param key: 'BC' or 'UMI' -- which field to error-correct
    :param threshold: maximum edit distance for merging two sequences
    :return: dict mapping each absorbed sequence to the (more frequent)
        sequence it was merged into
    """
    assert key in ('BC', 'UMI')
    counts = Counter(r[key] for r in records)
    # Candidate sequences, most frequent first.
    ranked = [seq for seq, _count in counts.most_common()]
    merge_map = {}
    i = 0
    while i < len(ranked) - 1:
        j = i + 1
        while j < len(ranked):
            if edit_distance(ranked[i], ranked[j]) <= threshold:
                # Absorb the rarer sequence j into the more frequent i.
                merge_map[ranked[j]] = ranked[i]
                del ranked[j]
            else:
                j += 1
        i += 1
    return merge_map
def add_category():
    """Render the "add category" form.

    Supplies the plant-type and shade-tolerance collections from MongoDB so
    the template can populate its select inputs.
    """
    plant_types = mongo.db.plant_types.find()
    shade_tolerance = mongo.db.shade_tolerance.find()
    return render_template(
        'addcategory.html',
        plant_types=plant_types,
        shade_tolerance=shade_tolerance,
    )
def load_seed_from_file(seed_path):
    """Load the URL seed list stored at *seed_path*."""
    return urls.load_urls_from_file(seed_path)
def loads(csvdoc, columns=None, cls=Row, delimiter=",", quotechar='"',
          typetransfer=False, csv_size_max=None, newline="\n"):
    """Parse a CSV document held in a Python string.

    Note: due to the way Python's internal csv library works, identical
    headers will overwrite each other.
    """
    return _loads(
        csvdoc,
        columns=columns,
        cls=cls,
        delimiter=delimiter,
        quotechar=quotechar,
        typetransfer=typetransfer,
        csv_size_max=csv_size_max,
        newline=newline,
    )
import warnings
def cluster_association_test(res, y_col='cmember', method='fishers'):
    """Use output of cluster tallies to test for enrichment of traits within a cluster.
    Use Fisher's exact test (test='fishers') to detect enrichment/association of the neighborhood
    with one variable.
    Tests the 2 x 2 table for each clone:
    +----+--------+-------+--------+
    |    |        | Neighborhood   |
    |    |        +-------+--------+
    |    |        |   Y   |   N    |
    +----+--------+-------+--------+
    |    | 0 (+)  |   a   |   b    |
    | X  +--------+-------+--------+
    |    | 1 (-)  |   c   |   d    |
    +----+--------+-------+--------+
    Note that the first level of an x_col (defined by sort order in the pd.DataFrame index) will
    be encoded as "+" in the output. Similarly, cluster membership indicate by a value of 1
    Use the chi-squared test (test='chi2') to detect association across multiple variables.
    Note that with sparse neighborhoods Chi-squared tests are unreliable.
    Use the Cochran-Mantel-Haenszel test (test='chm') to test stratified 2 x 2 tables:
    one X-var vs. neighborhood (Y), over several strata defined in other X variables.
    Use x_cols[0] as the primary (binary) variable and other x_cols for the categorical
    strata-defining variables. This tests the overall null that OR = 1 for x_cols[0].
    A test is also performed for homogeneity of the ORs among the strata (Breslow-Day test).
    Parameters
    ----------
    res : pd.DataFrame
        Result from one of the "tally" functions
    y_col : col in res
        Column indicating cluster membership. Almost certainly is 'cmember' if used a "tally" function
    method : str
        Method for testing: fishers, chi2, chm"""
    # Number of ct_/val_ column pairs present in the tally result.
    n = np.max([int(k.split('_')[1]) for k in res.columns if 'val_' in k]) + 1
    ct_cols = ['ct_%d' % i for i in range(n)]
    val_cols = ['val_%d' % i for i in range(n)]
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        if method == 'fishers':
            if n != 4:
                raise ValueError("Number of ct_cols must equal 4 (2x2) to use Fisher's exact test")
            out = _fisherNBR(res, ct_cols=['ct_%d' % i for i in range(4)])
            res = res.assign(**out)
        else:
            if method in ['chisq', 'chi2']:
                tmp = {'chisq': np.nan * np.zeros(res.shape[0]),
                       'pvalue': np.nan * np.zeros(res.shape[0])}
                for i in range(res.shape[0]):
                    # Reshape the flat counts into an (n/2) x 2 contingency table.
                    tab = res[ct_cols].iloc[i].values.reshape((len(ct_cols) // 2, 2))
                    tmp['chisq'][i], tmp['pvalue'][i] = _chi2NBR(tab)
                res = res.assign(**tmp)
            elif method in ['chm', 'cmh']:
                """Need to figure out how to efficiently refactor this test that wants the counts gby from tally"""
                tmp = []
                for i in range(res.shape[0]):
                    counts = _dict_to_nby2(res[ct_cols + val_cols + ['ct_columns', 'levels']].iloc[i].to_dict())
                    """Flip columns so that (0, 0) is cluster member (1)"""
                    counts = counts.unstack(y_col)[['MEM+', 'MEM-']]
                    tables = []
                    # NOTE(review): this inner loop reuses `i`, shadowing the row
                    # index above; harmless (the row index is not used again inside
                    # the iteration), but worth renaming.
                    for i, gby in counts.groupby(level=counts.index.names[1:]):
                        if gby.shape == (2, 2):
                            tmp_tab = gby.values
                            """Flip the rows of each table so that (0, 0) is X+ (second value of X)"""
                            # tmp_tab = tmp_tab[::-1, :]
                            tables.append(tmp_tab)
                    tmp.append(_CMH_NBR(tables))
                tmp = pd.DataFrame(tmp)
                res = pd.concat((res, tmp), axis=1)
    # Multiple-testing adjustment for every p-value column: Holm-corrected (FWERp)
    # and Benjamini-Hochberg-corrected (FDRq) columns are added alongside.
    for c in [c for c in res.columns if 'pvalue' in c]:
        res = res.assign(**{c.replace('pvalue', 'FWERp'): fishersapi.adjustnonnan(res[c].values, method='holm'),
                            c.replace('pvalue', 'FDRq'): fishersapi.adjustnonnan(res[c].values, method='fdr_bh')})
    return res
def parse_collection_page(wikitext):
    """Parse wikitext of a MediaWiki collection page created by the Collection
    extension for MediaWiki.
    @param wikitext: wikitext of a MediaWiki collection page
    @type mwcollection: unicode
    @returns: metabook.collection
    @rtype: metabook.collection
    """
    mb = metabook.collection()
    # Parser state: `summary` is True right after a template line (so that
    # following summary text is attached); `noTemplate` is True until any
    # template has been seen.
    summary = False
    noTemplate = True
    for line in wikitext.splitlines():
        line = line.strip()
        if not line:
            continue
        res = alltogether_rex.search(line)
        if not res:
            continue
        # look for initial templates and summaries
        # multiline templates need different handling to those that fit into one line
        if res.group('template_end') or res.group('template'):
            summary = True
            noTemplate = False
        elif res.group('template_start'):
            noTemplate = False
        elif res.group('summary'):
            pass
        else:
            summary = False
            noTemplate = False
        # Dispatch on which named group of the combined regex matched.
        if res.group('title'):
            mb.title = res.group('title').strip()
        elif res.group('subtitle'):
            mb.subtitle = res.group('subtitle').strip()
        elif res.group('chapter'):
            mb.items.append(metabook.chapter(title=res.group('chapter').strip()))
        elif res.group('article'):
            mb.append_article(res.group('article'), res.group('displaytitle'))
        elif res.group('oldarticle'):
            mb.append_article(title=res.group('oldarticle'), displaytitle=res.group('olddisplaytitle'), revision=res.group('oldid'))
        elif res.group('summary') and (noTemplate or summary):
            # Summary text is only accepted before any template, or directly
            # after a template line.
            mb.summary += res.group('summary') + " "
    return mb
import logging
import shlex
import subprocess
import re
def get_track_info(srcpath, job):
    """Use HandBrake to get information on all tracks on the disc and update
    the Track class.

    srcpath = Path to disc
    job = Job instance
    Returns -1 when the scan output cannot be decoded at all.
    """
    charset_found = False
    logging.info("Using HandBrake to get information on all the tracks on the disc. This will take a few minutes...")
    # Scan every title (-t 0 --scan) without ripping anything.
    cmd = '{0} -i {1} -t 0 --scan'.format(
        cfg["HANDBRAKE_CLI"],
        shlex.quote(srcpath)
    )
    logging.debug(f"Sending command: {cmd}")
    try:
        hb = subprocess.check_output(
            cmd,
            stderr=subprocess.STDOUT,
            shell=True
        ).decode('utf-8', 'ignore').splitlines()
    except subprocess.CalledProcessError as hb_error:
        logging.error("Couldn't find a valid track. Try running the command manually to see more specific errors.")
        logging.error(f"Specific error is: {hb_error}")
    else:
        charset_found = True
    if not charset_found:
        # Retry the scan decoding with the cp437 codepage.
        try:
            hb = subprocess.check_output(
                cmd,
                stderr=subprocess.STDOUT,
                shell=True
            ).decode('cp437').splitlines()
        except subprocess.CalledProcessError as hb_error:
            logging.error("Couldn't find a valid track. Try running the command manually to see more specific errors.")
            logging.error(f"Specific error is: {hb_error}")
            # If it doesnt work now we either have bad encoding or HB has ran into issues
            return -1
    t_pattern = re.compile(r'.*\+ title *')
    pattern = re.compile(r'.*duration\:.*')
    # Per-title accumulators, flushed via utils.put_track whenever a new
    # "+ title N:" header is seen (and once more after the loop).
    seconds = 0
    t_no = 0
    fps = float(0)
    aspect = 0
    result = None
    mainfeature = False
    for line in hb:
        # get number of titles
        if result is None:
            if job.disctype == "bluray":
                result = re.search('scan: BD has (.*) title\(s\)', line)  # noqa: W605
            else:
                result = re.search('scan: DVD has (.*) title\(s\)', line)  # noqa: W605
            if result:
                titles = result.group(1)
                titles = titles.strip()
                logging.debug(f"Line found is: {line}")
                logging.info(f"Found {titles} titles")
                job.no_of_titles = titles
                db.session.commit()
        if (re.search(t_pattern, line)) is not None:
            # New title header: flush the previous title (t_no == 0 means none yet).
            if t_no == 0:
                pass
            else:
                utils.put_track(job, t_no, seconds, aspect, fps, mainfeature, "handbrake")
            mainfeature = False
            t_no = line.rsplit(' ', 1)[-1]
            t_no = t_no.replace(":", "")
        if (re.search(pattern, line)) is not None:
            # Duration line: "... duration: HH:MM:SS".
            t = line.split()
            h, m, s = t[2].split(':')
            seconds = int(h) * 3600 + int(m) * 60 + int(s)
        if (re.search("Main Feature", line)) is not None:
            mainfeature = True
        if (re.search(" fps", line)) is not None:
            fps = line.rsplit(' ', 2)[-2]
            aspect = line.rsplit(' ', 3)[-3]
            aspect = str(aspect).replace(",", "")
    # Flush the last title seen in the scan.
    utils.put_track(job, t_no, seconds, aspect, fps, mainfeature, "handbrake")
from typing import List
def _ensure_hms(inner_result: ParsedDate, remain_tokens: List[str]) -> ParsedDate:
    """
    Extract the hour, minute and second values from the remaining tokens.
    Parameters
    ----------
    inner_result
        parse result that already carries year, month and day
    remain_tokens
        leftover tokens from which the time-of-day is parsed
    """
    result = deepcopy(inner_result)
    time_str = remain_tokens[0]
    # Detect an am/pm marker; a pm marker (checked second) wins if both occur.
    time_tokens = []
    is_pm = False
    for marker in AM:
        if marker in time_str:
            time_tokens = split(time_str, AM)
            break
    for marker in PM:
        if marker in time_str:
            is_pm = True
            time_tokens = split(time_str, PM)
            break
    if len(time_tokens) == 0:
        time_tokens = split(time_str, [":"])
    else:
        time_tokens = split(time_tokens[0], [":"])
    # A pm time shifts the parsed hour by 12.
    return _ensure_pm(result, time_tokens, 12 if is_pm else 0)
from . import auth, tally
from .auth.models import User
from .tally.models import Bill, Category
def create_app(config=Config):
    """Application factory: build, configure and return the Flask app."""
    app = Flask(__name__)
    app.config.from_object(config)
    # Wire up the shared extensions.
    db.init_app(app)
    migrate.init_app(app, db)
    bcrypt.init_app(app)
    login_manager.init_app(app)
    # Register the feature blueprints.
    app.register_blueprint(auth.bp)
    app.register_blueprint(tally.bp)

    @app.shell_context_processor
    def make_shell_context():  # pylint:disable=unused-variable
        """Expose common objects inside the "flask shell" CLI tool."""
        return {"db": db, "User": User, "Category": Category, "Bill": Bill}

    return app
def helper(n, largest_digit):
    """Recursively find the largest decimal digit of ``n``.

    :param n: int, the number being examined
    :param largest_digit: int, the largest digit seen so far
    :return: int, the largest digit
    """
    if n == 0:
        # Base case: every digit has been consumed.
        return largest_digit
    if n < 0:
        # Work on the absolute value so ``%`` yields real digits.
        n = -n
    last = n % 10
    if last > largest_digit:
        largest_digit = last
    return helper(int(n / 10), largest_digit)
def peak_compare_data(request, peak_compare_list):
    """Serve the AJAX table data for the Peak Explorer comparison page.

    :param request: Request for the peak data for the Peak Explorer page
    :param peak_compare_list: "All" or a comma-separated list of Peak ids
    :return: JsonResponse carrying the table rows under the 'data' key
    """
    analysis = Analysis.objects.get(name='Tissue Comparisons')
    if peak_compare_list == "All":
        peaks = Peak.objects.all()
    else:
        ids = peak_compare_list.split(',')
        peaks = Peak.objects.filter(id__in=list(ids))
    compare_df, _, _, _ = get_peak_compare_df(analysis, peaks)
    sorted_df = sort_df_and_headers(compare_df, analysis)
    # Replace NaNs so they render as a dash in the table.
    rows = sorted_df.fillna("-").values.tolist()
    logger.info("returning the peak comparison data")
    return JsonResponse({'data': rows})
def _generic_filtering_element(F, Q, H, R, y):
"""
Equation 10 in "GPR in Logarithmic Time"
"""
S = H @ (Q @ H.T) + R
chol = cho_factor(S)
Kt = cho_solve(chol, H @ Q)
A = F - (Kt.T @ H) @ F
b= Kt.T @ y
C = Q - (Kt.T @ H) @ Q
HF = H @ F
eta = HF.T @ np.squeeze(cho_solve(chol, np.expand_dims(y, 1)))
J = HF.T @ cho_solve(chol, HF)
return A, b, C, J, eta | a74f198fc1afcd65f4cc668f9b8aa54b48de1132 | 3,628,914 |
def getStrFromAngles(angles):
    """
    Converts all angles of a JointState() to a printable string
    :param angles (sensor_msgs.msg.JointState): JointState() angles to be converted
    :return (string): string of the angles
    """
    as_dict = getDictFromAngles(angles)
    return str(dict(as_dict.items()))
from typing import Callable
import types
def jit_user_function(func: Callable, nopython: bool, nogil: bool, parallel: bool):
    """
    JIT the user's function given the configurable arguments.
    """
    numba = import_optional_dependency("numba")
    # NOTE(review): `numba.targets.registry` is the pre-0.49 module layout; newer
    # numba exposes it as numba.core.registry -- confirm the supported version.
    if isinstance(func, numba.targets.registry.CPUDispatcher):
        # Don't jit a user passed jitted function
        numba_func = func
    else:

        @numba.generated_jit(nopython=nopython, nogil=nogil, parallel=parallel)
        def numba_func(data, *_args):
            # If the user function is a numpy function (e.g. np.sum) or another
            # builtin, call it directly; otherwise jit it here with the same flags.
            if getattr(np, func.__name__, False) is func or isinstance(
                func, types.BuiltinFunctionType
            ):
                jf = func
            else:
                jf = numba.jit(func, nopython=nopython, nogil=nogil)

            def impl(data, *_args):
                return jf(data, *_args)

            return impl

    return numba_func
def aggregate_on_dns(ip_values, ip_fqdns, is_numeric=True):
    """
    Aggregates the values in ip_values based on domains accessed from
    ip_fqdns. Values from same ip_addresses same domain names are combines
    Args:
        ip_values (dictionary): maps ip address to some computed value
            host has already been removed from ip_values
        ip_fqdns (dictionary): maps ip address to fqdns
        is_numeric (boolean): are the values numeric? If so, add the values,
            else create a list of values.
    Returns:
        a dictionary mapping tld domains to the values in ip_values,
        aggregated as sums or as a list.
    """
    # Coalesce fqdn packet counts using ip values dict
    fqdn_domain_values = {}
    fqdn_domain_values[UNKNOWN] = 0 if is_numeric else []
    # Maps each domain to the set of domains observed together with it
    # (i.e. resolving from the same IP) -- treated as aliases of one another.
    fqdn_domain_aliases = {}
    fqdn_domain_aliases[UNKNOWN] = {UNKNOWN}
    for ip, value in ip_values.items():
        fqdns = ip_fqdns.get(ip, None)
        if fqdns:
            # Reduce every fqdn to its registered (TLD-level) domain.
            domain_set = set()
            for fqdn in fqdns:
                res = get_tld(fqdn, as_object=True, fail_silently=True,
                              fix_protocol=True)
                domain_set.add(str(res))
            domain_set = list(domain_set)
            # Add domains to domain counts, only adding first entry if multiple
            domain = domain_set[0]
            if domain in fqdn_domain_values:
                if is_numeric:
                    fqdn_domain_values[domain] += value
                else:
                    fqdn_domain_values[domain].append(value)
            else:
                if is_numeric:
                    fqdn_domain_values[domain] = value
                else:
                    fqdn_domain_values[domain] = [value]
            # Add aliases for domains
            for domain1 in domain_set:
                if domain1 not in fqdn_domain_aliases:
                    fqdn_domain_aliases[domain1] = {domain1}
                for domain2 in domain_set:
                    if domain2 not in fqdn_domain_aliases[domain1]:
                        if (domain2 in fqdn_domain_aliases):
                            # Merge domain2's alias group into domain1's (and
                            # propagate to the other members).
                            for domain in fqdn_domain_aliases[domain2]:
                                if domain != domain1:
                                    if (domain in fqdn_domain_aliases):
                                        fqdn_domain_aliases[domain] = fqdn_domain_aliases[domain].union(fqdn_domain_aliases[domain1])
                                    fqdn_domain_aliases[domain1] = fqdn_domain_aliases[domain].union(fqdn_domain_aliases[domain1])
                        fqdn_domain_aliases[domain1].add(domain2)
        else:
            # No DNS information for this IP: fold the value into UNKNOWN.
            if is_numeric:
                fqdn_domain_values[UNKNOWN] += value
            else:
                fqdn_domain_values[UNKNOWN].append(value)
    # Collapse alias groups: values of domains sharing an alias set are merged
    # under a single comma-joined, sorted key.
    fqdn_alias_count = {}
    for domain in fqdn_domain_values:
        alias_list = list(fqdn_domain_aliases[domain])
        alias_list.sort()
        alias_name = ", ".join(alias_list)
        if alias_name in fqdn_alias_count:
            fqdn_alias_count[alias_name] += fqdn_domain_values[domain]
        else:
            fqdn_alias_count[alias_name] = fqdn_domain_values[domain]
    return fqdn_alias_count
def get_users():
    """
    Fetch a dictionary of username and their IDs from Slack.

    :return: dict of {username: user_id}, or None when the API call failed.
    """
    slack_api_client = connect()
    api_call = slack_api_client.api_call('users.list')
    if api_call.get('ok'):
        # Build {name: id} from the member records.
        return {member['name']: member['id'] for member in api_call['members']}
    # Fix: the original used a Python 2 print *statement*, which is a
    # SyntaxError on Python 3.
    print('Error Fetching Users')
    return None
import random
def topological_sort(graph):
    # type: Dict[str, List[str]] -> Optional[List[Tuple[Union[str, int]]]]
    """Produce a linear ordering of the vertices of a directed graph.

    Returns a list of (node, position) pairs, or None when a cycle is found.
    https://leetcode.com/problems/course-schedule (analogous)
    """
    ordering = []
    remaining = len(graph)
    # Start a random walk from an arbitrary vertex.
    current = random.choice(list(graph.keys()))
    seen = {current}
    while True:
        if not graph[current]:
            # Sink vertex: remove it from every adjacency list...
            for successors in graph.values():
                if current in successors:
                    successors.remove(current)
            # ...and from the graph itself.
            del graph[current]
            # Record it with the highest remaining position.
            ordering.append((current, remaining))
            remaining -= 1
            if remaining == 0:
                # All vertices placed.
                break
            # Restart the walk from a fresh random vertex.
            current = random.choice(list(graph.keys()))
            seen = set()
        else:
            # Follow a random outgoing edge; revisiting a vertex means a cycle.
            current = random.choice(graph[current])
            if current in seen:
                return None
            seen.add(current)
    return ordering
def lower_bound(expressions):
    """Create an `Expression` that lower bounds the given expressions.

    A trainable slack variable is introduced, together with constraints forcing
    it to lie below every element of `expressions`; the slack variable is then
    returned. If you are going to lower-bound or maximize the result, you can
    think of it as the `min` of its arguments. Never minimize or upper-bound the
    result: that would merely push the slack variable down without affecting the
    contents of the expressions list.

    Args:
      expressions: list of `Expression`s, the quantities to lower-bound.

    Returns:
      An `Expression` representing an lower bound on the given expressions.

    Raises:
      ValueError: if the expressions list is empty.
      TypeError: if the expressions list contains a non-`Expression`.
    """
    if not expressions:
        raise ValueError("lower_bound cannot be given an empty expression list")
    if not all(isinstance(ee, expression.Expression) for ee in expressions):
        raise TypeError(
            "lower_bound expects a list of rate Expressions (perhaps you need to "
            "call wrap_rate() to create an Expression from a Tensor?)")
    # float32 with auto_cast=True is used because the predictions' dtype may be
    # unknown here (e.g. in eager mode); auto-casting reconciles them later.
    slack = deferred_tensor.DeferredVariable(
        0.0,
        trainable=True,
        name="tfco_lower_bound",
        dtype=tf.float32,
        auto_cast=True)
    slack_basic = basic_expression.BasicExpression([term.TensorTerm(slack)])
    slack_expression = expression.ExplicitExpression(
        penalty_expression=slack_basic,
        constraint_expression=slack_basic)
    constraints = [ee >= slack_expression for ee in expressions]
    # The BoundedExpression wrapper makes any attempt to minimize or
    # upper-bound the result raise an error.
    return expression.BoundedExpression(
        lower_bound=expression.ConstrainedExpression(
            expression.ExplicitExpression(
                penalty_expression=slack_basic,
                constraint_expression=slack_basic),
            extra_constraints=constraints),
        upper_bound=expression.InvalidExpression(
            "the result of a call to lower_bound() can only be maximized or "
            "lower-bounded; it *cannot* be minimized or upper-bounded"))
from typing import List
import os
from sys import version
def _version_tuple(version_str: str) -> tuple:
    """Parse a dotted version string into a tuple of ints ("2.5.1" -> (2, 5, 1)).

    Parsing stops at the first component with no leading digits, and only the
    numeric prefix of each component is used ("2.5.0rc1" -> (2, 5, 0)).
    """
    parts = []
    for token in version_str.split("."):
        match = re.match(r"\d+", token)
        if not match:
            break
        parts.append(int(match.group()))
    return tuple(parts)


def get_tensorboard_args(tb_version: str, tfevents_dir: str, add_args: List[str]) -> List[str]:
    """Build tensorboard startup args.

    Args are added and deprecated at the mercy of tensorboard; all of the below
    are necessary to support versions 1.14, 2.4, and 2.5:
    - Tensorboard 2+ no longer exposes all ports. Must pass in "--bind_all" to
      expose localhost.
    - Tensorboard 2.5.0 introduces an experimental feature (default
      load_fast=true) which prevents multiple plugins from loading correctly.

    Reads DET_TASK_ID and TENSORBOARD_PORT from the environment.
    """
    task_id = os.environ["DET_TASK_ID"]
    port = os.environ["TENSORBOARD_PORT"]
    tensorboard_args = [
        "tensorboard",
        f"--port={port}",
        f"--path_prefix=/proxy/{task_id}",
        *add_args,
    ]
    # Version dependant args.
    # Fix: the original called version.parse() after `from sys import version`;
    # sys.version is a plain str with no .parse attribute (packaging.version was
    # intended). Compare numeric version tuples with the stdlib instead.
    parsed = _version_tuple(tb_version)
    if parsed >= (2,):
        tensorboard_args.append("--bind_all")
    if parsed >= (2, 5):
        tensorboard_args.append("--load_fast=false")
    tensorboard_args.append(f"--logdir={tfevents_dir}")
    return tensorboard_args
import networkx
def do_to_networkx(do):
    """Return a networkx representation of do."""
    dox = networkx.MultiDiGraph()
    dox.add_nodes_from(t for t in do.get_terms() if not t.obsolete)
    # One edge per relationship, keyed by the relationship type.
    for source in dox:
        for typedef, target_id, _name in source.relationships:
            dox.add_edge(source, do.get_term(target_id), key=typedef)
    assert networkx.is_directed_acyclic_graph(dox)
    return dox
from typing import List
from typing import Optional
from pathlib import Path
import os
def resolve_executable(name_variants: List[str], env_override: Optional[str] = None) -> str:
    """Resolve platform-specific path to given executable.
    Args:
        name_variants: List of executable names to look for.
        env_override: Environment variable name to use as override if available.
    Returns:
        Path to executable and platform-specific command to execute.
    """
    plat = Platform.get()
    # Per-platform install root and executable-path template.
    app_map = {
        Platform.LINUX: Path("/usr/bin"),
        Platform.WINDOWS: Path(r"C:\Program Files"),
        Platform.DARWIN: Path("/Applications"),
    }
    app_ext = {
        Platform.LINUX: "{name}",
        Platform.WINDOWS: "{name}.exe",
        Platform.DARWIN: "{name}.app/Contents/MacOS/{name}",
    }
    # oh, look at me. fancy walrus operator :)
    if env_override and (override_path := os.environ.get(env_override)):
        override_path = Path(override_path.strip())
        if override_path.exists():
            logger.info("using binary path override from %s: %s", env_override, override_path)
            # allow use of env vars to override paths in case of resolution failure.
            return override_path

    def _resolve(names: List[str]) -> Path:
        # Generator over (name, path) candidates: first the conventional
        # install location, then a PATH search via utils.locate_program.
        for name in names:
            _app_root = app_map.get(plat, app_map[Platform.LINUX])
            _app_ext = app_ext.get(plat, app_ext[Platform.LINUX]).format(name=name)
            _app_path = _app_root / _app_ext
            if _app_path.exists():
                yield name, _app_path
            try:
                bin_path = utils.locate_program(name)
            except Exception:
                pass
            else:
                if bin_path is not None:
                    yield name, bin_path

    # Take the first candidate; no candidate at all is a hard error.
    try:
        name, exec_path = next(_resolve(name_variants))
    except StopIteration as e:
        raise RuntimeError(
            f"Could not locate {', '.join(name_variants)} binary! You can manually provide a path with the {env_override} env "
            "variable. "
        ) from e
    else:
        logger.info("resolved executable for: %s @ %s", name, exec_path)
        return exec_path
def _prepare_data(data):
"""Takes the raw data from the database and prepares it for a sklearn workflow"""
# Get the number of turbines
n_turb = data[0]["lat"].size
# Split the data into the prediction and learning sets
data_learn = [d for d in data if not np.isnan(d["power"])]
data_new = [d for d in data if np.isnan(d["power"])]
# Get the timestamps for each data set
t_learn = [d["time"] for d in data_learn]
t_new = [d["time"] for d in data_new]
# Initialize arrays for the prepared data
X_learn = np.zeros((len(data_learn), 2 * n_turb))
X_new = np.zeros((len(data_new), 2 * n_turb))
y_learn = np.zeros(len(data_learn))
# Populate the learning data arrays
for i, d in enumerate(data_learn):
# Compute the wind velocity
vel = np.hypot(d["u"], d["v"])
# Add the turbine data
X_learn[i, :] = np.hstack((vel, d["T"]))
y_learn[i] = d["power"]
# Populate the prediction data arrays
for i, d in enumerate(data_new):
# Compute the wind velocity
vel = np.hypot(d["u"], d["v"])
# Add the turbine data
X_new[i, :] = np.hstack((vel, d["T"]))
# Return the prepared data
return t_learn, t_new, X_learn, y_learn, X_new | b1397ae90a725958242ac8257db8cccdf299a61f | 3,628,924 |
from .common import parse_gset_format as redirect_func
import warnings
def parse_gset_format(filename):
    """Parse a gset-format file (deprecated forwarding shim)."""
    # pylint: disable=import-outside-toplevel
    warnings.warn("parse_gset_format function has been moved to "
                  "qiskit.optimization.ising.common, "
                  "the method here will be removed after Aqua 0.7+",
                  DeprecationWarning)
    return redirect_func(filename)
def find_largest(line: str) -> int:
    """Return the largest value in line, which is a whitespace-delimited string
    of integers that each end with a '.'.
    >>> find_largest('1. 3. 2. 5. 2.')
    5
    """
    best = -1
    for token in line.split():
        # Strip the trailing period before converting.
        number = int(token[:-1])
        best = max(best, number)
    return best
def get_arch(bv):
    """Return the Arch object giving access to architecture-specific
    functionality for the binary view's architecture."""
    arch_classes = {
        "x86_64": AMD64Arch,
        "x86": X86Arch,
        "aarch64": AArch64Arch,
    }
    name = bv.arch.name
    arch_cls = arch_classes.get(name)
    if arch_cls is None:
        raise UnhandledArchitectureType(
            "Missing architecture object type for architecture '{}'".format(name)
        )
    return arch_cls()
import re
from re import DEBUG
def magic_auth(request, magic_token=None):
    """Passwordless ("magic token") authentication view.

    POST: validate the submitted email, find (or, for aalto.fi addresses,
    create) the user, refresh their magic token and email it to them;
    returns a JSON response describing the outcome.
    Otherwise: authenticate with the magic_token from the URL and log the
    user in, or redirect to the front page on failure.
    """
    if request.method == 'POST':
        # Validate form
        magic_auth_form = MagicAuthForm(request.POST)
        if magic_auth_form.is_valid():
            # Try to find the user or create a new one
            try:
                user = User.objects.get(email=magic_auth_form.cleaned_data['email'].lower())
            except User.DoesNotExist:
                email = magic_auth_form.cleaned_data['email'].lower()
                m = re.match("^(.*)@aalto.fi$", email)
                if m:
                    username = m.group(1)
                    # New users get a random throwaway password; login happens
                    # via the magic token only.
                    user = User.objects.create_user(username,
                                                    email=email,
                                                    password=b64encode(sha256(urandom(56)).digest()))
                else:
                    return JsonResponse({'modalMessage': 'Email not found or its not aalto email.'})
            user.account.update_magic_token()
            current_site = get_current_site(request)
            # Send mail to user
            mail = EmailMultiAlternatives(
                subject="Namubufferi - Login",
                body=("Hello. Authenticate to Namubufferi using this code. It's valid for 15 minutes.\n"
                      + str(user.account.magic_token)),
                to=[user.email]
            )
            try:
                mail.send()
                print("Mail sent")
            except:
                print("Mail not sent")
            # NOTE(review): DEBUG is imported from `re` at the top of the file
            # (re.DEBUG, a truthy flag constant), so this branch is effectively
            # always taken -- settings.DEBUG was almost certainly intended.
            if DEBUG:
                return JsonResponse({'modalMessage': '<br>login with ' + str(user.account.magic_token) + ' (Shown when DEBUG)'})
            else:
                return JsonResponse({'modalMessage': 'Check your email for the token.'})
        else:
            return HttpResponse('{"errors":' + magic_auth_form.errors.as_json() + '}', content_type="application/json")
    else:
        user = authenticate(magic_token=str(magic_token))
        if user:
            login(request, user)
            return home(request)
        else:
            return redirect('/')
def _volume_1_atm(_T, ranged=True):
    """m**3 / mol"""
    # Molar volume is the reciprocal of the molar density at 1 atm.
    density = _ro_one_atm(_T, ranged)
    return 1 / density
def isAmdDevice(device):
    """Return whether the specified device is an AMD device or not.

    Parameters:
    device -- DRM device identifier
    """
    # '0x1002' is AMD's PCI vendor id.
    return getSysfsValue(device, 'vendor') == '0x1002'
def pre_sif_mean_inner(mat, freqs, a, dtype=None):
    """SIF-weighted mean of the (row) embeddings in ``mat``.

    From *A Simple but Tough-to-Beat Baseline for Sentence Embeddings*
    https://openreview.net/forum?id=SyK00v5xx
    https://github.com/PrincetonML/SIF
    """
    # Step 1: normalize the embeddings.
    mat = normalize(mat)
    # Step 2: reweight each row by a / (a + word frequency).
    _n_rows, _n_cols = mat.shape
    for row, freq in enumerate(freqs):
        mat[row, :] = (a / (a + freq)) * mat[row, :]
    return mat.mean(axis=0, dtype=dtype)
import json
def v1() -> Response:
    """
    Handle Endpoint: /a2j/v1/
    :return: HTTP Response.
    :rtype: Response
    """
    payload = {"endpoints": ["parse", "clean"]}
    return Response(json.dumps(payload), mimetype="application/json")
def apply_phase(signal, phase, frequency, fs):
    """Apply phase fluctuations to a pressure signal.

    :param signal: Pressure signal.
    :param phase: Phase fluctuations.
    :param frequency: Frequency of tone.
    :param fs: Sample frequency.

    The fluctuations are applied through a resampling: the phase is first
    converted to a per-sample delay, which is then applied to the signal.
    """
    delay = delay_fluctuations(phase, frequency)
    return apply_delay(signal, delay, fs)
def __load_aug_img__(path, img_size, img_aug):
    """Load the image at ``path`` resized to ``img_size`` and, when
    ``img_aug`` is given, apply one random augmentation to it."""
    arr = image.img_to_array(image.load_img(path, target_size=img_size))
    if img_aug is None:
        return arr
    return img_aug.random_transform(arr)
def _shot_id_to_int(shot_id):
"""
Returns: shot id to integer
"""
tokens = shot_id.split(".")
return int(tokens[0]) | 59d0ecabf874841d616a72ebea1ebac6e6dc3947 | 3,628,935 |
import ctypes
def get_native_pointer_type(pointer_size: int):
    """
    :param pointer_size: pointer width in bytes (4 or 8)
    :return: A ctypes integer type wide enough to represent a pointer.
    :raises KeyError: for unsupported sizes.
    """
    size_to_type = {
        ctypes.sizeof(ctypes.c_uint32): ctypes.c_uint32,
        ctypes.sizeof(ctypes.c_uint64): ctypes.c_uint64,
    }
    return size_to_type[pointer_size]
def get_bootinfo():
	"""Build and return the boot info sent to the desk client at login.

	Aggregates user, system, module/workspace, print, and notification data
	into a single ``vmraid._dict`` and lets installed apps extend it through
	the ``boot_session`` hook.
	"""
	vmraid.set_user_lang(vmraid.session.user)
	bootinfo = vmraid._dict()
	hooks = vmraid.get_hooks()
	doclist = []

	# user
	get_user(bootinfo)

	# system info
	bootinfo.sitename = vmraid.local.site
	bootinfo.sysdefaults = vmraid.defaults.get_defaults()
	bootinfo.server_date = vmraid.utils.nowdate()

	if vmraid.session['user'] != 'Guest':
		bootinfo.user_info = get_user_info()
		bootinfo.sid = vmraid.session['sid']

	# desk modules, workspaces and doctype metadata
	bootinfo.modules = {}
	bootinfo.module_list = []
	load_desktop_data(bootinfo)
	bootinfo.letter_heads = get_letter_heads()
	bootinfo.active_domains = vmraid.get_active_domains()
	bootinfo.all_domains = [d.get("name") for d in vmraid.get_all("Domain")]
	add_layouts(bootinfo)
	bootinfo.module_app = vmraid.local.module_app
	bootinfo.single_types = [d.name for d in vmraid.get_all('DocType', {'issingle': 1})]
	bootinfo.nested_set_doctypes = [d.parent for d in vmraid.get_all('DocField', {'fieldname': 'lft'}, ['parent'])]
	add_home_page(bootinfo, doclist)
	bootinfo.page_info = get_allowed_pages()
	load_translations(bootinfo)
	add_timezone_info(bootinfo)
	load_conf_settings(bootinfo)
	load_print(bootinfo, doclist)
	doclist.extend(get_meta_bundle("Page"))
	bootinfo.home_folder = vmraid.db.get_value("File", {"is_home_folder": 1})
	bootinfo.navbar_settings = get_navbar_settings()
	bootinfo.notification_settings = get_notification_settings()

	# ipinfo
	if vmraid.session.data.get('ipinfo'):
		bootinfo.ipinfo = vmraid.session['data']['ipinfo']

	# add docs
	bootinfo.docs = doclist

	# allow installed apps to extend/modify bootinfo via the boot_session hook
	for method in hooks.boot_session or []:
		vmraid.get_attr(method)(bootinfo)

	if bootinfo.lang:
		# text_type is presumably a py2/3 str-compat alias (e.g. six) — confirm
		bootinfo.lang = text_type(bootinfo.lang)
	bootinfo.versions = {k: v['version'] for k, v in get_versions().items()}
	bootinfo.error_report_email = vmraid.conf.error_report_email
	bootinfo.calendars = sorted(vmraid.get_hooks("calendars"))
	bootinfo.treeviews = vmraid.get_hooks("treeviews") or []
	bootinfo.lang_dict = get_lang_dict()
	bootinfo.success_action = get_success_action()
	bootinfo.update(get_email_accounts(user=vmraid.session.user))
	bootinfo.energy_points_enabled = is_energy_point_enabled()
	bootinfo.website_tracking_enabled = is_tracking_enabled()
	bootinfo.points = get_energy_points(vmraid.session.user)
	bootinfo.frequently_visited_links = frequently_visited_links()
	bootinfo.link_preview_doctypes = get_link_preview_doctypes()
	bootinfo.additional_filters_config = get_additional_filters_from_hooks()
	bootinfo.desk_settings = get_desk_settings()
	bootinfo.app_logo_url = get_app_logo()
	return bootinfo | 1eb4060aa184f351b42a3b8fcad4e38e91efab2e | 3,628,937 |
def monkey_all_handler():
    """
    @api {get} /v1/monkey/all Query the Monkey test list
    @apiName GetMonkeyAll
    @apiGroup Automated Testing
    @apiDescription Query all monkey test information
    @apiParam {int} [page_size] Pagination - number of items per page
    @apiParam {int} [page_index] Pagination - page number
    @apiParam {int} [user_id] User ID; fetch monkey test information for this user
    @apiParam {int} [id] Monkey ID; fetch monkey information by ID
    @apiParam {int} [test_type] Test type 1: monkey, 2: performance
    @apiParamExample {json} Request-Example:
    {
        "page_index": 1,
        "page_size": 10,
        "user_id": 1,
        "test_type": 1
    }
    @apiSuccessExample {json} Success-Response:
     HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [
            {
                "actual_run_time": "",
                "app_default_activity": "com.mengtuiapp.mall.SplashActivity",
                "app_id": 86,
                "app_install_required": 1,
                "app_name": "萌推",
                "app_oss_url": "http://tcloud-static.ywopt.com/static/00c43b89-f68d-4348-940d-f4dc36979f47.apk",
                "app_package_name": "com.mengtuiapp.mall",
                "app_picture": "iVBORw0KGgoAAAANSUhEUgAAACwAAAAsCAMAAAApWqozAAABCFBMVEX/AEj/6YT/9fj/7PH/9/n/8/b/6O",
                "app_size": "14.43",
                "app_version": "2.4.7",
                "begin_time": "2019-07-24 11:40:42",
                "cancel_status": 1,
                "creation_time": "2019-07-24 11:40:42",
                "download_app_status": 1,
                "end_time": "Wed, 24 Jul 2019 13:44:15 GMT",
                "id": 111,
                "jenkins_url": "http://ci.automancloud.com/job/monkey_autotest/325/",
                "login_password": "",
                "login_required": 0,
                "login_username": "",
                "mobile_ids": "Y2J5T17410004213",
                "monkey_device_status": [
                    {
                        "activity_all": "[]",
                        "activity_count": 0,
                        "activity_tested": "[]",
                        "activity_tested_count": 0,
                        "anr_count": 3,
                        "begin_time": "2019-07-24 11:40:54",
                        "cancel_status": 1,
                        "crash_count": 0,
                        "current_stage": "通过",
                        "device_connect_status": 1,
                        "end_time": "2019-07-24 13:41:51",
                        "exception_count": 0,
                        "exception_run_time": 0,
                        "id": 178,
                        "login_app_status": 1,
                        "mobile_id": 43,
                        "mobile_model": "HUAWEI VKY-AL00",
                        "mobile_resolution": "2560 x 1440",
                        "mobile_serial": "Y2J5T17410004213",
                        "mobile_use_times": 16,
                        "mobile_version": "7.0",
                        "monkey_id": 111,
                        "process": 100,
                        "run_time": 120,
                        "running_error_reason": "",
                        "running_status": 1,
                        "screen_lock_status": 1,
                        "setup_install_app_status": 1,
                        "setup_uninstall_app_status": 1,
                        "start_app_status": 1,
                        "teardown_uninstall_app_status": 1
                    }
                ],
                "package_name": "com.mengtuiapp.mall",
                "parameters": "{'system_device': 0, 'app': {'user_id': 93, 'app_id': 86}, 'login': {
                'required': 0, 'username': '', 'password': ''}}",
                "process": 100,
                "report_url": "",
                "run_time": 0,
                "status": 0,
                "system_device": 0,
                "type_id": 1,
                "user_id": 93,
                "user_nickname": "孟伟"
            }
        ],
        "message": "ok",
        "page_index": 1,
        "page_size": 10,
        "total": 66
    }
    """
    # Pagination with defaults: 10 items per page, starting at page 1.
    page_size, page_index = parse_list_args2()
    page_size = page_size or 10
    page_index = page_index or 1
    # Optional filters from the query string.
    user_id = request.args.get('user_id')
    id = request.args.get('id')  # NOTE: shadows the builtin `id` inside this handler
    test_type = request.args.get('test_type', 1)
    monkeys, count = MonkeyBusiness.get_all_monkeys(id, user_id, page_size, page_index, test_type=test_type)
    return {
        "code": 0,
        "data": monkeys,
        "page_size": page_size,
        "page_index": page_index,
        "total": count
    } | 212e41f66a363e7a325df8c6459cd7d8a418b78f | 3,628,938 |
from datetime import datetime
def get_all_schedules(db_cur, server_id, is_async):
    """Extract all candidate schedules for a server.

    Returns the ids of enabled (or ad-hoc) schedules that are due to run in
    the current minute, according to their crontab-style interval mask:

    +--------- minute (0 - 59)
    | +--------- hour (0 - 23)
    | | +--------- day of the month (1 - 31)
    | | | +--------- month (1 - 12)
    | | | | +--------- day of the week (0 - 7) (Sunday to Saturday; 7 is also Sunday)
    * * * * *

    :param db_cur: open database cursor used to run the schedule query.
    :param server_id: numeric id of the server whose schedules are considered.
    :param is_async: 0/1 flag selecting synchronous or asynchronous schedules.
    :return: list of schedule_id strings due this minute.
    """
    # The module imports the `datetime` class (``from datetime import
    # datetime``), so the previous `datetime.datetime.*` / `datetime.timedelta`
    # attribute accesses raised AttributeError at runtime. Import timedelta
    # explicitly and use the class directly.
    from datetime import timedelta

    sqlquery = f"""
    SELECT
        schedule_id,
        interval_mask
    FROM
    (   /* bar */
        SELECT
            schedule_id,
            interval_mask,
            exec_command,
            parameters,
            adhoc_execute,
            is_async,
            is_running
        FROM
        (   /* foo */
            (SELECT
                schedule_id,
                interval_mask,
                exec_command,
                COALESCE(adhoc_parameters, parameters, '') AS parameters,
                adhoc_execute,
                is_async,
                is_running,
                schedule_order
            FROM schedules
            INNER JOIN servers USING (server_id)
            WHERE adhoc_execute = 0
                AND server_id = {str(server_id)}
                AND schedules.is_enabled = 1
                AND servers.is_enabled = 1
                AND now() >= first_run_date
                AND now() <= last_run_date
            )
            UNION
            (SELECT
                schedule_id,
                '* * * * *' AS interval_mask,
                exec_command,
                COALESCE(adhoc_parameters, parameters, '') AS parameters,
                adhoc_execute,
                is_async,
                is_running,
                schedule_order
            FROM schedules
            INNER JOIN servers USING (server_id)
            WHERE adhoc_execute = 1
                AND server_id = {str(server_id)}
                AND servers.is_enabled = 1
            )
        ) foo
        ORDER BY schedule_order, schedule_id
    ) bar
    WHERE is_running = 0
        AND is_async = {str(is_async)}
    """
    db_cur.execute(sqlquery)
    obj_schedules = []
    # Current time truncated to the minute, for comparison with cron steps.
    now_minute = datetime.strptime(
        datetime.now().strftime("%Y-%m-%d %H:%M:00"), "%Y-%m-%d %H:%M:%S"
    )
    for row in db_cur.fetchall():
        schedule_id = str(row[0])
        interval_mask = str(row[1])
        # Skip entries with a bad interval_mask
        if not croniter.is_valid(interval_mask):
            utils.send_slack_message(
                f":warning: *WARNING* invalid interval_mask on schedule_id *'{schedule_id}'*",
                f"```schedule_id : {schedule_id}\ninterval_mask : {interval_mask}```",
                "warning",
            )
        else:
            # Start one minute back so a mask matching *this* minute is the
            # iterator's next hit.
            iteration = croniter(
                interval_mask, now_minute - timedelta(minutes=1)
            )
            next_iter = iteration.get_next(datetime)
            if now_minute == next_iter:
                obj_schedules.append(schedule_id)
    return obj_schedules
    # | 781697a3d98a6d9bdcad50b7bbb10d42f0385149 | 3,628,939 |
def predicted_win(board: Board, draws: list[int]) -> Prediction:
    """
    Simulate the drawn numbers against the board and predict when it wins.

    Returns a Prediction, a tuple of two numbers:
    - the (zero-based) turn on which the board wins
    - the score of the board at that moment

    Raises ValueError if the board never wins.
    """
    lines = board_lines(board)
    for turn, number in enumerate(draws):
        for line in lines:
            if number in line:
                line.remove(number)
        if not all(lines):
            return (turn, get_score(board, draws[: turn + 1]))
    raise ValueError
    # | 5f45de9ef755eea432e9a80dd6a9109c6673b183 | 3,628,940 |
from typing import Union
from typing import Sequence
from typing import Optional
from typing import Iterable
from typing import Tuple
from typing import List
def quantity_data_frame(bundle: Union[InstanceBundle, Sequence[InstanceBundle]],
                        quantity_name: str,
                        us: Optional[Iterable[float]] = None,
                        steps: Optional[Iterable[int]] = None,
                        simulated: bool = False
                        ) -> Tuple[pd.DataFrame, type, List[InstanceBundle]]:
    """Extracts given quantity from InstanceBundle(s) as Panda's data frame.
    Args:
        bundle: Bundle, or iterable of bundles to plot the data from.
        quantity_name: Name of quantity to plot, the list of supported names
            can be retrieved by listing keys of quantities property of
            InstanceBundle class.
        us: List of interaction strengths U to plot. If not provided, all
            bundles will be plotted.
        steps: List of Trotter steps to plot. If not provided, the steps will
            be extracted from bundles provided that all of them have the same
            range.
        simulated: When true, extracts the exact numerical simulation that
            matches the instance problem parameters instead of the bundle
            experimental data.
    Returns:
        Tuple of:
        - Panda's data frame with accumulated data. The columns of returned
          data frame depends whether the quantity is of AggregatedQuantity or
          PerSiteQuantity.
        - Type of selected quantity, either AggregatedQuantity or
          PerSiteQuantity.
        - List of InstanceBundles used, when us list selects a subset of
          provided bundles.
    """
    def create_aggregated_data(data: List[Tuple[float,
                                                int,
                                                int,
                                                float,
                                                float,
                                                Optional[float],
                                                Optional[float]]]
                               ) -> pd.DataFrame:
        """Wraps accumulated aggregated-quantity rows into a DataFrame."""
        return pd.DataFrame(data, columns=[
            'u', 'chain', 'step', 'time', 'value', 'std_error', 'std_dev'])
    def generate_aggregated_entries(dt: float,
                                    u: float,
                                    quantity: AggregatedQuantity,
                                    step: int,
                                    data_step: int
                                    ) -> Iterable[Tuple[float,
                                                        int,
                                                        int,
                                                        float,
                                                        float,
                                                        Optional[float],
                                                        Optional[float]]]:
        """Yields one row per chain for a single Trotter step."""
        for chain in range(quantity.chains_count):
            if quantity.std_error is not None:
                std_error = float(quantity.std_error[chain][step])
            else:
                std_error = None
            if quantity.std_dev is not None:
                std_dev = float(quantity.std_dev[chain][step])
            else:
                std_dev = None
            yield (u,
                   chain,
                   data_step,
                   data_step * dt,
                   float(quantity.average[chain][step]),
                   std_error,
                   std_dev)
    def create_per_site_data(data: List[Tuple[float,
                                              int,
                                              int,
                                              int,
                                              float,
                                              float,
                                              Optional[float],
                                              Optional[float]]]
                             ) -> pd.DataFrame:
        """Wraps accumulated per-site rows into a DataFrame (adds a site column)."""
        return pd.DataFrame(data, columns=[
            'u', 'chain', 'step', 'time', 'site', 'value', 'std_error',
            'std_dev'])
    def generate_per_site_entries(dt: float,
                                  u: float,
                                  quantity: PerSiteQuantity,
                                  step: int,
                                  data_step: int
                                  ) -> Iterable[Tuple[float,
                                                      int,
                                                      int,
                                                      int,
                                                      float,
                                                      float,
                                                      Optional[float],
                                                      Optional[float]]]:
        """Yields one row per (chain, site) pair for a single Trotter step."""
        for chain in range(quantity.chains_count):
            for site in range(len(quantity.average[chain][step])):
                if quantity.std_error is not None:
                    std_error = quantity.std_error[chain][step][site]
                else:
                    std_error = pd.NA
                if quantity.std_dev is not None:
                    std_dev = quantity.std_dev[chain][step][site]
                else:
                    std_dev = pd.NA
                yield (u,
                       chain,
                       data_step,
                       data_step * dt,
                       site + 1,
                       quantity.average[chain][step][site],
                       std_error,
                       std_dev)
    # Normalize the input to a sequence of bundles.
    if isinstance(bundle, InstanceBundle):
        bundles = bundle,
    else:
        bundles = bundle
    if not len(bundles):
        raise ValueError('At least one InstanceBundle is mandatory')
    # Default to the step range shared by all bundles (must be consistent).
    if not steps:
        steps = bundles[0].steps
        for bundle in bundles[1:]:
            if steps != bundle.steps:
                raise ValueError(
                    'Unequal Trotter steps between experiment bundles')
    else:
        steps = list(steps)
    if not us:
        us = [bundle.u for bundle in bundles]
    else:
        us = list(us)
    # Position lookups used to filter and re-index steps/us below.
    steps_map = {step: index for index, step in enumerate(steps)}
    us_map = {u: index for index, u in enumerate(us)}
    us_bundles = []
    data = []
    quantity_type = None
    missing_u = set(us)
    for bundle in bundles:
        if bundle.u not in us_map:
            continue
        if simulated:
            # Swap in the exact-numerics counterpart of this bundle.
            bundle = bundle.exact_numerics_bundle()
        us_bundles.append(bundle)
        missing_u.remove(bundle.u)
        quantity = bundle.quantities[quantity_name]()
        if quantity_type is None:
            if isinstance(quantity, AggregatedQuantity):
                quantity_type = AggregatedQuantity
            elif isinstance(quantity, PerSiteQuantity):
                quantity_type = PerSiteQuantity
            else:
                raise ValueError(f'Unknown quantity type {quantity_type}')
        else:
            assert isinstance(quantity, quantity_type), (
                f'Inconsistent quantity types {type(quantity)} and '
                f'{quantity_type}')
        missing_steps = set(steps)
        for s, step in enumerate(bundle.steps):
            if step not in steps_map:
                continue
            missing_steps.remove(step)
            step_index = steps_map[step]
            if quantity_type == AggregatedQuantity:
                data += generate_aggregated_entries(
                    bundle.dt, bundle.u, quantity, s, step_index)
            elif quantity_type == PerSiteQuantity:
                data += generate_per_site_entries(
                    bundle.dt, bundle.u, quantity, s, step_index)
            else:
                raise RuntimeError(f'Unexpected quantity type {quantity_type}')
        if missing_steps:
            raise ValueError(
                f'No experiments to cover Trotter steps {missing_steps}')
    if missing_u:
        raise ValueError(f'No experiments to cover U values {missing_u}')
    if quantity_type == AggregatedQuantity:
        pandas = create_aggregated_data(data)
    elif quantity_type == PerSiteQuantity:
        pandas = create_per_site_data(data)
    else:
        raise RuntimeError(f'Unexpected quantity type {quantity_type}')
    return pandas, quantity_type, us_bundles | 1b9787d25ce20bebd73dc9a08743153266e08d06 | 3,628,941 |
from typing import Union
from typing import List
def trim_name(names: Union[List[str], str]) -> Union[List[str], str]:
    """Trim a device name (or list of names) from the web API (IFTTT).

    Lowercases the name and removes the article "the" as a standalone word,
    e.g. "The Kitchen Light" -> "kitchen light".

    :param names: a single name, or a list of names (each trimmed in turn).
    :return: the trimmed name(s); "" for unsupported input types.
    """
    # Single name
    if isinstance(names, str):
        # Drop only whole "the" tokens: the previous substring replace
        # mangled words containing "the" (e.g. "weather" -> "wear").
        words = [word for word in names.lower().split() if word != "the"]
        return " ".join(words).strip()
    elif isinstance(names, list):
        return [str(trim_name(name)) for name in names]
    # Best-effort fallback for unexpected input types.
    return ""
    # | ee0bf0dcb9353fcad1b4af5f50bf2ff208c1dbe2 | 3,628,942 |
def concat(*streams, **kwargs):
    """Concatenate audio and video streams, joining them together one after the other.

    The filter works on segments of synchronized video and audio streams. All segments
    must have the same number of streams of each type, and that will also be the number
    of streams at output.

    Args:
        unsafe: Activate unsafe mode: do not fail if segments have a different format.

    Related streams do not always have exactly the same duration, for various reasons
    including codec frame size or sloppy authoring. For that reason, related
    synchronized streams (e.g. a video and its audio track) should be concatenated at
    once. The concat filter will use the duration of the longest stream in each segment
    (except the last one), and if necessary pad shorter audio streams with silence.
    For this filter to work correctly, all segments must start at timestamp 0.
    All corresponding streams must have the same parameters in all segments; the
    filtering system will automatically select a common pixel format for video streams,
    and a common sample format, sample rate and channel layout for audio streams, but
    other settings, such as resolution, must be converted explicitly by the user.
    Different frame rates are acceptable but will result in variable frame rate at
    output; be sure to configure the output file to handle it.

    Official documentation: `concat <https://ffmpeg.org/ffmpeg-filters.html#concat>`__
    """
    # Per-segment stream counts come from the 'v' (video) and 'a' (audio) kwargs.
    n_video = kwargs.get('v', 1)
    n_audio = kwargs.get('a', 0)
    per_segment = n_video + n_audio
    n_segments, remainder = divmod(len(streams), per_segment)
    if remainder != 0:
        raise ValueError(
            'Expected concat input streams to have length multiple of {} (v={}, a={}); got {}'.format(
                per_segment, n_video, n_audio, len(streams)
            )
        )
    # The filter's 'n' option is the number of segments being joined.
    kwargs['n'] = n_segments
    return FilterNode(streams, concat.__name__, kwargs=kwargs, max_inputs=None).stream()
    # | a6fe1278191f85c0496ae133c4f36ccf814d6ed6 | 3,628,943 |
def proj(ax, s, ds,
         axis='z', title='', vmin=1e0, vmax=1e3, dat=None, **kwargs):
    """Draw projection plot at given snapshot number

    Args:
        ax: axes to draw.
        s: LoadSim object.
        ds: AthenaDataSet object.
        axis: axis to project (default:'z').
        title: axes title (default:'').
        vmin: minimum imshow color level (default:1e0).
        vmax: maximum imshow color level (default:1e3).
        dat: optional pre-loaded xarray Dataset; a fresh empty Dataset is
            created per call when omitted (avoids a shared mutable default).
    Returns:
        img: the image that goes into plt.colorbar
    Example:
        surf = proj(ax, s, ds, dat=dat, add_colorbar=False)
        plt.colorbar(surf, cax=cax, label=r'$\Sigma\,[M_\odot\,{\rm pc}^{-2}]$')
    """
    if dat is None:
        dat = xr.Dataset()
    # set plot attributes
    x, y, xlabel, ylabel, xlim, ylim = _set_xy_axis(s, axis)
    # load data
    if 'density' not in dat:
        dat = dat.merge(ds.get_field(['density']))
    dx = dat[axis][1] - dat[axis][0]
    # column density: integrate density along the projection axis
    surf = (dat.density * u.Msun * dx).sum(dim=axis)
    # draw; attach the default colorbar label only when a colorbar will be
    # drawn, and let an explicit caller-supplied cbar_kwargs take precedence
    # (the old duplicated branches crashed on a user-provided cbar_kwargs).
    if kwargs.get('add_colorbar', True):
        kwargs.setdefault(
            'cbar_kwargs', {'label': r'$\Sigma\,[M_\odot\,{\rm pc}^{-2}]$'})
    img = surf.plot.imshow(ax=ax, norm=LogNorm(vmin, vmax), cmap='pink_r',
                           **kwargs)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    ax.set_aspect('equal')
    return img
    # | 0b329c8d0173944d4f037795d3b1c5a008c83c44 | 3,628,944 |
from typing import Dict
import aiohttp
from datetime import datetime
import logging
import json
import asyncio
async def fetch_network_node_health(
    network_name: str,
    time_s: int,
    interval_s: int,
    node_stats: Dict,
    session: aiohttp.ClientSession,
) -> None:
    """Fetch health metric for all nodes from network test service.

    Queries the latest finished ``sequential_node`` test execution in the
    window ``[time_s - interval_s, time_s]`` and writes each node's health in
    place into ``node_stats[network_name][node_name]["node_health"]``.
    Errors are logged and swallowed; nothing is returned.
    """
    try:
        # Service base URL is configurable via the NETWORK_TEST_URL env var.
        url = f"{environ.get('NETWORK_TEST_URL', 'http://network_test:8080')}/execution"
        start_dt_iso = datetime.fromtimestamp(time_s - interval_s).isoformat()
        end_dt_iso = datetime.fromtimestamp(time_s).isoformat()
        params = {
            "network_name": network_name,
            "test_type": "sequential_node",
            "partial": "false",
            "status": "finished",
            "start_dt": start_dt_iso,
        }
        async with session.get(url, params=params) as resp:
            if resp.status != 200:
                logging.error(f"Request to {url} failed: {resp.reason} ({resp.status})")
                return None
            executions = json.loads(await resp.read())
        if not executions or not executions["executions"]:
            logging.error(
                f"Network test - No network test execution data for {network_name} "
                + f"between {start_dt_iso} and {end_dt_iso}."
            )
            return None
        # Most recent execution wins; fetch its per-node results.
        latest_execution_id = max(row["id"] for row in executions["executions"])
        url = f"{url}/{latest_execution_id}"
        async with session.get(url) as resp:
            if resp.status != 200:
                logging.error(f"Request to {url} failed: {resp.reason} ({resp.status})")
                return None
            results = json.loads(await resp.read())
        for result in results["results"]:
            if result["health"] is None or result["health"] == "MISSING":
                logging.warning(
                    f'Network test - Node health of {result["asset_name"]} '
                    f'for {network_name} is {result["health"]}.'
                )
                continue
            node_name = result["asset_name"]
            # Health is a module-level enum mapping health names to values.
            node_stats[network_name][node_name]["node_health"] = Health[
                result["health"]
            ].value
    except (aiohttp.ClientError, asyncio.TimeoutError) as err:
        logging.error(f"Request to {url} for {network_name} failed: {err}") | 4fe082f36bf6c403357268a54a01310cc249ad53 | 3,628,945 |
def get_ingress_address(endpoint_name):
    """Return the first ingress-address for the named endpoint, falling back
    to private-address when no ingress-address is available."""
    addresses = get_ingress_addresses(endpoint_name)
    return addresses[0]
    # | 1b584e20a8b281c2739df1dd24a3c5cc9409a1e0 | 3,628,946 |
import torch
def evaluate_sfp(logits_cls, labels, flag):
    """Evaluate same-family prediction.

    Returns a dict with the sample count ("n"), the BCE-with-logits loss
    ("avg_loss"), and — when flag["acc"] is set — the number of correct
    predictions ("correct").
    """
    # F is expected to be torch.nn.functional from the module scope.
    result = {}
    result["n"] = len(logits_cls)
    result["avg_loss"] = F.binary_cross_entropy_with_logits(logits_cls, labels.float())
    if flag["acc"]:
        # NOTE(review): thresholding raw logits at 0.5 corresponds to a
        # probability of sigmoid(0.5) ~= 0.62, not 0.5 — confirm intended.
        samefamily_hat = logits_cls > 0.5
        result["correct"] = torch.sum((samefamily_hat == labels)).item()
    return result | b244f57d3d6c74b468d3608d6d2972dab4ee408f | 3,628,947 |
def round(x):
    """Round ``x`` (scalar or array) to the nearest integer, with ties going
    to the nearest even value (banker's rounding), as ``np.round`` does."""
    rounded = np.round(x)
    return rounded
    # | c022ac98db8345ec1fe544772746d501c837cb7a | 3,628,948 |
from typing import Any
def regnet_y_8gf(
    pretrained: bool = False, progress: bool = True, **kwargs: Any
) -> RegNet:
    """
    Build a RegNetY-8GF model from
    `"Designing Network Design Spaces" <https://arxiv.org/abs/2003.13678>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet. Default: ``False``
        progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``

    For example:

    .. code-block:: python

        >>> import flowvision
        >>> regnet_y_8gf = flowvision.models.regnet_y_8gf(pretrained=False, progress=True)
    """
    # Design-space parameters defining the RegNetY-8GF architecture.
    block_params = BlockParams.from_init_params(
        depth=17, w_0=192, w_a=76.82, w_m=2.19, group_width=56, se_ratio=0.25, **kwargs
    )
    return _regnet("regnet_y_8gf", block_params, pretrained, progress, **kwargs)
    # | 83c84bb2706d33c54cce4a9ce0c23eed3fc0f0e6 | 3,628,949 |
def fit_single_univariate_sample(samples):
    """Fit and return a univariate Gaussian model estimated from ``samples``."""
    return UnivariateGaussian().fit(samples)
    # | ab18a34972146d4cd506aaf59014cee0b3d8c4ae | 3,628,950 |
def get_durations_from_alignment(batch_alignments, mels, phonemes, weighted=False, binary=False, fill_gaps=False,
                                 fix_jumps=False, fill_mode='max'):
    """
    Extract per-phoneme integer durations from attention alignments.

    :param batch_alignments: attention weights from autoregressive model.
    :param mels: mel spectrograms.
    :param phonemes: phoneme sequence.
    :param weighted: if True use weighted average of durations of heads, best head if False.
    :param binary: if True take maximum attention peak, sum if False.
    :param fill_gaps: if True fills zeros durations with ones.
    :param fix_jumps: if True, tries to scan alingments for attention jumps and interpolate.
    :param fill_mode: used only if fill_gaps is True. Is either 'max' or 'next'. Defines where to take the duration
        needed to fill the gap. Next takes it from the next non-zeros duration value, max from the sequence maximum.
    :return: durations, unpadded mels, unpadded phonemes, and the final alignments.
    """
    assert (binary is True) or (fix_jumps is False), 'Cannot fix jumps in non-binary attention.'
    mel_pad_mask = create_mel_padding_mask(mels)
    phon_pad_mask = create_encoder_padding_mask(phonemes)
    durations = []
    # remove start end token or vector
    unpad_mels = []
    unpad_phonemes = []
    final_alignment = []
    for i, al in enumerate(batch_alignments):
        # Unpadded lengths derived from the padding masks.
        mel_len = int(mel_pad_mask[i].shape[-1] - np.sum(mel_pad_mask[i]))
        phon_len = int(phon_pad_mask[i].shape[-1] - np.sum(phon_pad_mask[i]))
        unpad_alignments = al[:, 1:mel_len - 1, 1:phon_len - 1]  # first dim is heads
        unpad_mels.append(mels[i, 1:mel_len - 1, :])
        unpad_phonemes.append(phonemes[i, 1:phon_len - 1])
        # weight_mask presumably scores diagonality of the attention — see its
        # definition elsewhere in this module.
        alignments_weights = weight_mask(unpad_alignments[0])
        heads_scores = []
        scored_attention = []
        for _, attention_weights in enumerate(unpad_alignments):
            score = np.sum(alignments_weights * attention_weights)
            scored_attention.append(attention_weights / score)
            heads_scores.append(score)
        if weighted:
            ref_attention_weights = np.sum(scored_attention, axis=0)
        else:
            # Lowest score is treated as the best head here.
            best_head = np.argmin(heads_scores)
            ref_attention_weights = unpad_alignments[best_head]
        if binary:  # pick max attention for each mel time-step
            binary_attn, binary_score = binary_attention(ref_attention_weights)
            if fix_jumps:
                binary_attn = fix_attention_jumps(
                    binary_attn=binary_attn,
                    alignments_weights=alignments_weights,
                    binary_score=binary_score)
            integer_durations = binary_attn.sum(axis=0)
        else:  # takes actual attention values and normalizes to mel_len
            attention_durations = np.sum(ref_attention_weights, axis=0)
            normalized_durations = attention_durations * ((mel_len - 2) / np.sum(attention_durations))
            integer_durations = np.round(normalized_durations)
            tot_duration = np.sum(integer_durations)
            duration_diff = tot_duration - (mel_len - 2)
            # Nudge rounded durations until they sum exactly to the unpadded
            # mel length, correcting the largest rounding errors first.
            while duration_diff != 0:
                rounding_diff = integer_durations - normalized_durations
                if duration_diff > 0:  # duration is too long -> reduce highest (positive) rounding difference
                    max_error_idx = np.argmax(rounding_diff)
                    integer_durations[max_error_idx] -= 1
                elif duration_diff < 0:  # duration is too short -> increase lowest (negative) rounding difference
                    min_error_idx = np.argmin(rounding_diff)
                    integer_durations[min_error_idx] += 1
                tot_duration = np.sum(integer_durations)
                duration_diff = tot_duration - (mel_len - 2)
        if fill_gaps:  # fill zeros durations
            integer_durations = fill_zeros(integer_durations, take_from=fill_mode)
        assert np.sum(integer_durations) == mel_len - 2, f'{np.sum(integer_durations)} vs {mel_len - 2}'
        new_alignment = duration_to_alignment_matrix(integer_durations.astype(int))
        best_head = np.argmin(heads_scores)
        best_attention = unpad_alignments[best_head]
        # Overlay the duration-derived alignment on the best head's attention.
        final_alignment.append(best_attention.T + new_alignment)
        durations.append(integer_durations)
    return durations, unpad_mels, unpad_phonemes, final_alignment | 192155b6968a276c308290c250f1487a4be5c1e7 | 3,628,951 |
import functools
def argparser_course_required_wrapper(with_argparser):
    """
    Decorator for Clanvas ``do_x`` commands that take argparser opts.

    Resolves ``opts.course`` (falling back to the current cc'd course when
    not supplied) into a course object, strips the attribute from ``opts``,
    and passes the course as an explicit leading argument to the wrapped
    command. When no course can be resolved, prints guidance and returns
    False without calling the wrapped command.

    :param with_argparser: the cmd2.with_argparser method to be wrapped.
    :return: the wrapping function.
    """
    @functools.wraps(with_argparser)
    def inject_argparser(self, opts, *args, **kwargs):
        course = course_query_or_cc(self, opts.course)
        if course is None:
            get_outputter().poutput('Please specify a course to use this command.')
            get_outputter().poutput_verbose('Use the cc command or the -c option.')
            return False
        delattr(opts, 'course')
        return with_argparser(self, course, opts, *args, **kwargs)
    return inject_argparser
    # | b1862167e95480eda5fe746dcd739f31292aebde | 3,628,952 |
from pathlib import Path
from typing import Optional
def read_csv_and_filter_prediction_target(csv: Path, prediction_target: str,
                                          crossval_split_index: Optional[int] = None,
                                          data_split: Optional[ModelExecutionMode] = None,
                                          epoch: Optional[int] = None) -> pd.DataFrame:
    """
    Given one of the CSV files written during inference time, read it and select only those rows which belong to the
    given prediction_target. Also check that the final subject IDs are unique.
    :param csv: Path to the metrics CSV file. Must contain at least the following columns (defined in the LoggingColumns
    enum): LoggingColumns.Patient, LoggingColumns.Hue.
    :param prediction_target: Target ("hue") by which to filter.
    :param crossval_split_index: If specified, filter rows only for the respective run (requires
    LoggingColumns.CrossValidationSplitIndex).
    :param data_split: If specified, filter rows by Train/Val/Test (requires LoggingColumns.DataSplit).
    :param epoch: If specified, filter rows for given epoch (default: last epoch only; requires LoggingColumns.Epoch).
    :return: Filtered dataframe.
    :raises ValueError: if a required column is missing, if epoch is given but absent
        from the CSV, or if subject IDs are not unique after filtering.
    """
    def check_column_present(dataframe: pd.DataFrame, column: LoggingColumns) -> None:
        """Raise ValueError if the given logging column is absent from the dataframe."""
        if column.value not in dataframe:
            raise ValueError(f"Missing {column.value} column.")
    df = pd.read_csv(csv)
    df = df[df[LoggingColumns.Hue.value] == prediction_target]  # Filter by prediction target
    df = df[~df[LoggingColumns.Label.value].isna()]  # Filter missing labels
    # Filter by crossval split index
    if crossval_split_index is not None:
        check_column_present(df, LoggingColumns.CrossValidationSplitIndex)
        df = df[df[LoggingColumns.CrossValidationSplitIndex.value] == crossval_split_index]
    # Filter by Train/Val/Test
    if data_split is not None:
        check_column_present(df, LoggingColumns.DataSplit)
        df = df[df[LoggingColumns.DataSplit.value] == data_split.value]
    # Filter by epoch
    if LoggingColumns.Epoch.value in df:
        # In a FULL_METRICS_DATAFRAME_FILE, the epoch column will be BEST_EPOCH_FOLDER_NAME (string) for the Test split.
        # Here we cast the whole column to integer, mapping BEST_EPOCH_FOLDER_NAME to -1.
        epochs = df[LoggingColumns.Epoch.value].apply(lambda e: -1 if e == BEST_EPOCH_FOLDER_NAME else int(e))
        if epoch is None:
            epoch = epochs.max()  # Take last epoch if unspecified
        df = df[epochs == epoch]
    elif epoch is not None:
        raise ValueError(f"Specified epoch {epoch} but missing {LoggingColumns.Epoch.value} column.")
    if not df[LoggingColumns.Patient.value].is_unique:
        raise ValueError(f"Subject IDs should be unique, but found duplicate entries "
                         f"in column {LoggingColumns.Patient.value} in the csv file.")
    return df | b2034440fc8c198001054edb69edcb9067cae861 | 3,628,953 |
def compare(Target, Population):
    """
    This function takes in two picture objects and compares them.

    :param Target: target image
    :type Target: Picture object
    :param Population: The population of the current generations
    :type Population: A list of picture objects
    :return: Two best members of the population (fewer if the population is smaller)
    :rtype: A list of picture objects
    """
    target_rep = Target.get_rep()
    target_len = len(target_rep)
    correctness_list = []
    for member in Population:
        correct = 0
        member_rep = member.get_rep()
        if target_len != len(member_rep):
            # Mismatched sizes score 0 but still occupy a slot, keeping
            # correctness_list aligned with Population.
            print("Error: Images must be of the same size")
        else:
            correct = sum(1 for t, m in zip(target_rep, member_rep) if t == m)
        correctness_list.append(correct)
    # Rank population indices by score; Python's stable sort keeps the earlier
    # member on ties, matching the old "first maximum wins" behaviour. The
    # previous implementation deleted the best score and re-ran argmax, which
    # produced a stale index (it could even re-select the best member)
    # whenever the second-best score appeared after the best one.
    ranked = sorted(range(len(Population)),
                    key=lambda idx: correctness_list[idx], reverse=True)
    best_pop = [Population[idx] for idx in ranked[:2]]
    return best_pop
    # | 4456141d1c980c5ca008d614ced05e1ec2efc062 | 3,628,954 |
def normalize_AE_state(states, noSOC=True): # return normalized states for AE (no pred, soc)
    """
    Normalize raw states for the autoencoder, optionally dropping SOC features.

    :param states: (9, seq, 27)
    :param noSOC: when True, the last two feature groups (SOC) are dropped.
    :return: (9, seq, 19)
    """
    # SPLIT_IDX and func_callbacks are module-level: the feature axis is split
    # into chunks, each normalized by its matching callback.
    state_list = np.split(states, SPLIT_IDX, -1)
    result_list = []
    for state, func in zip(state_list, func_callbacks):
        result_list.append(func(state))
    # swap Hour and DayLight Saving Status
    result_list[2], result_list[3] = result_list[3], result_list[2]
    # discard prediction states (keep only the first column of groups 4..7)
    for i in range(4, 8):
        result_list[i] = result_list[i][:, 0:1]
    # discard soc states
    # NOTE(review): the documented (..., 19) output applies to noSOC=True;
    # with noSOC=False the two SOC groups are kept and the output is wider.
    if noSOC:
        return np.concatenate(result_list[:-2], -1)
    else:
        return np.concatenate(result_list, -1) | 53d0f3d9f23bbc22ab4a3a978a6893b8f1a2238c | 3,628,955 |
def load(image_file):
    """Load a paired image file and split it into (input, real) halves.

    The file stores two images side by side: the real image on the left
    half and the input image on the right half. Both halves are returned
    as float32 tensors.
    """
    image = tf.image.decode_jpeg(tf.io.read_file(image_file))
    half_width = tf.shape(image)[1] // 2
    real_image = tf.cast(image[:, :half_width, :], tf.float32)
    input_image = tf.cast(image[:, half_width:, :], tf.float32)
    return input_image, real_image
    # | c64f19c3779703dff0b08fc22740cf04ec0561b9 | 3,628,956 |
def get_corpus_directory():
    """Return the path of the Data/Corpus directory."""
    return get_data_directory() / "Corpus"
    # | ecb42b88390a09eb3994034b4982164fa07fc037 | 3,628,957 |
def reconstruction(freq, nfreq, A, d, damping='hysteretic', type='a', residues=False, LR=0, UR=0):
    """generates a FRF from modal parameters.
    There is option to consider the upper and lower residues (Ewins, D.J.
    and Gleeson, P. T.: A method for modal identification of lightly
    damped structures)
    #TODO: check the correctness of the viscous damping reconstruction
    Arguments:
        A - Modal constants of the considering FRF.
        nfreq - natural frequencies in Hz
        c - damping loss factor or damping ratio
        damp - type of damping:
                -'hysteretic'
                -'viscous'
        LR - lower residue
        UR - upper residue
        residues - if 'True' the residues of lower and higher residues
                   are taken into account. The lower and upper residues
                   are first and last component of A, respectively.
        type - type of FRF function:
                              -'d'- receptance
                              -'v'- mobility
                              -'a'- accelerance
    """
    A = np.array(A, dtype='complex')
    d=np.array(d)
    # Angular frequencies (rad/s) of the evaluation grid and the modes.
    om = np.array(2 * np.pi * freq)
    nom = np.array(2 * np.pi * nfreq)
    #in the case of single mode the 1D arrays have to be created
    if A.shape==():
        A_=A; d_=d ; nom_=nom
        A=np.zeros((1),dtype='complex'); d=np.zeros(1) ; nom=np.zeros(1)
        A[0]=A_ ; d[0]=d_
        nom[0]=nom_
    if residues:
        LR = np.array(LR)
        UR = np.array(UR)
    H = np.zeros(len(freq), dtype=complex)
    if damping == 'hysteretic':
        #calculation of a FRF
        for i in range(0, len(freq)):
            for k in range(0, len(nom)):
                H[i] = H[i] + A[k] / (nom[k] ** 2 - om[i] ** 2 + d[k] * 1j * nom[k] ** 2)
        if residues:
            # NOTE(review): the residue loop starts at index 1, presumably to
            # skip om[0] (often 0 Hz) and avoid division by zero — confirm.
            for i in range(1, len(freq)):
                H[i] = H[i] + LR / om[i] ** 2 - UR
    if damping == 'viscous':
        # H is re-zeroed here, discarding any hysteretic accumulation (the
        # two damping branches are mutually exclusive in practice).
        H = np.zeros(len(freq), dtype=complex)
        for i in range(0, len(freq)):
            for k in range(0, len(nom)):
                H[i]=H[i]+ A[k] / (nom[k] ** 2 - om[i] ** 2 + 2.j * om[i] * nom[k] * d[k])
    # Convert the receptance ('d') to the requested FRF type.
    H = convert_frf(H, om, 'd' ,type)
    return H | f082b9b09c8aed79dc7426f510298753a57a7041 | 3,628,958 |
from typing import Optional
from typing import Sequence
from typing import Union
def multilateral_methods(
    df: pd.DataFrame,
    price_col: str = 'price',
    quantity_col: str = 'quantity',
    date_col: str='month',
    product_id_col: str='id',
    characteristics: Optional[Sequence[str]] = None,
    groups: Optional[Sequence[str]] = None,
    method: str = 'all',
    bilateral_method: str = 'tornqvist',
    td_engine: str = 'numpy',
    reference_month: Union[int, str] = 1,
    plot: bool = False,
) -> pd.DataFrame:
    """
    Calculate all the multilateral indices.
    Currently supported: GEKS (geks), Geary-Khamis (gk), Time Product Dummy
    (tpd) and Time Hedonic Dummy (tdh).
    Parameters
    ----------
    df: pandas DataFrame
        Contains price and quantity columns, a time series column, and a product
        ID column as a minimum. A characteristics column should also be present
        for hedonic methods.
    price_col: str, defaults to 'price'
        User-defined name for the price column.
    quantity_col: str, defaults to 'quantity'
        User-defined name for the quantity column.
    date_col: str, defaults to 'month'
        User-defined name for the date column.
    product_id_col: str, defaults to 'id'
        User-defined name for the product ID column.
    characteristics: list of str, defaults to None
        The names of the characteristics columns.
    groups: list of str, defaults to None
        The names of the groups columns.
    method: str, defaults to 'all'
        Options: {'all', 'geks', gk', 'tpd', 'tdh'}
        The multilateral method to apply. The 'all' option uses the
        GEKS paired with a bilateral, GK and TPD index.
    bilateral_method: str, defaults to 'tornqvist'
        Options: {'carli', 'jevons', 'dutot', 'laspeyres', 'paasche',
        'geom_laspeyres', 'geom_paasche', 'drobish', 'marshall_edgeworth',
        'palgrave', 'fisher', 'tornqvist', 'walsh', 'sato_vartia', 'lowe',
        'geary_khamis_b', 'rothwell'}
        The bilateral method to pair with `method='geks'`.
    td_engine: str, defaults to 'numpy'
        Options: {'numpy', 'statsmodels', 'sklearn', 'pyspark'}
        Engine to use for wls computation with `method='tpd'`.
    reference_month: int or str, defaults to 1
        The month to use as the reference month for the multilateral methods. An
        integer specifies the position while a string species the month in the
        format 'YYYY-MM'.
    plot: bool, defaults to False
        Boolean parameter on whether to plot the resulting timeseries for price
        indices.
    Returns
    -------
    pd.DataFrame
        Dataframe containing the timeseries and index values.
    Raises
    ------
    ValueError
        If `method` (or, for GEKS, `bilateral_method`) is not a valid option,
        or if `method='tdh'` is requested without `characteristics`.
    """
    # Normalise user input so the option checks are case-insensitive.
    method, bilateral_method = method.lower(), bilateral_method.lower()
    valid_methods = {'all', 'geks', 'gk', 'tpd', 'tdh'}
    valid_bilateral_methods = {
        'carli', 'jevons', 'dutot', 'laspeyres', 'lowe',
        'paasche', 'geom_laspeyres', 'geom_paasche', 'drobish',
        'marshall_edgeworth', 'palgrave', 'fisher', 'tornqvist',
        'walsh', 'sato_vartia', 'geary_khamis_b', 'tpd', 'rothwell'
    }
    if method not in valid_methods:
        raise ValueError("Invalid option, please select a valid method.")
    if method in {'all', 'geks'} and bilateral_method not in valid_bilateral_methods:
        raise ValueError("Invalid option, please select a valid bilateral method for GEKS.")
    # Positional args shared by all index functions below.
    args = (price_col, quantity_col, date_col, product_id_col)
    # Obtain unique time periods present in the data.
    periods = sorted(df[date_col].unique())
    if isinstance(reference_month, str):
        # Translate a 'YYYY-MM' string into its 1-based position.
        reference_month = periods.index(reference_month) + 1
    if groups:
        # Recurse once per group and stack the per-group results.
        return (
            df
            .groupby(groups)
            .apply(
                lambda df_group: multilateral_methods(
                    df_group,
                    *args,
                    characteristics=characteristics,
                    method=method,
                    bilateral_method=bilateral_method,
                    td_engine=td_engine,
                    reference_month=reference_month,
                    plot=plot
                )
            )
            .reset_index()
            .rename({'level_1': 'month'}, axis=1)
        )
    if quantity_col not in df.columns:
        # Unweighted data: treat every observation as a single unit.
        df[quantity_col] = 1
    if bilateral_method not in ('jevons', 'carli', 'dutot'):
        # Calculate weights for each item in each period.
        df = _weights_calc(df, *args)
    if method == 'all':
        index_vals = {
            f'index_value_geks': geks(df, *args, bilateral_method),
            'index_value_gk': geary_khamis(df, *args),
            'index_value_td': time_dummy(df, *args, characteristics, engine=td_engine)
        }
    elif method == 'geks':
        index_vals = geks(df, *args, bilateral_method)
    elif method == 'gk':
        index_vals = geary_khamis(df, *args)
    elif method == 'tpd':
        # TPD is the time-dummy model without characteristics.
        index_vals = time_dummy(df, *args, None, engine=td_engine)
    elif method == 'tdh':
        if not characteristics:
            raise ValueError("Characteristics required for TDH.")
        else:
            index_vals = time_dummy(df, *args, characteristics, engine=td_engine)
    output_df = (
        pd.DataFrame(
            index_vals,
            index=periods
        )
        .rename({0: 'index_value'}, axis=1)
    )
    output_df.sort_index(inplace=True)
    if reference_month != 1:
        # Rebase the series so the chosen reference month equals 1.
        output_df = output_df / output_df.iloc[reference_month - 1]
    if plot:
        sns.set(rc={'figure.figsize':(11, 4)})
        (output_df * 100).plot(linewidth=2)
    return output_df
import torch
def causal_fftconv(
    signal: torch.Tensor,
    kernel: torch.Tensor,
    bias: torch.Tensor = None,
) -> torch.Tensor:
    """Causal convolution of a batched signal with a kernel, computed via FFT.

    Args:
        signal: (batch, in_channels, length) input tensor.
        kernel: (out_channels, in_channels, taps) filter tensor.
        bias: optional (out_channels,) bias added to the output.

    Returns:
        (batch, out_channels, length) convolved tensor (float32).
    """
    # Force an odd kernel length by left-padding with one zero tap.
    if kernel.shape[-1] % 2 == 0:
        kernel = f.pad(kernel, [1, 0], value=0.0)
    taps = kernel.shape[-1]
    # Left-pad the signal so the output at time t only sees samples <= t.
    padded_signal = f.pad(signal, (taps - 1, 0), value=0.0)
    # Zero-pad the kernel on the right to the padded signal length.
    padded_kernel = f.pad(kernel, [0, padded_signal.size(-1) - taps])
    # Transform both operands in double precision for accuracy.
    sig_fr = torch.fft.rfft(padded_signal.double(), dim=-1)
    ker_fr = torch.conj(torch.fft.rfft(padded_kernel.double(), dim=-1))
    # Broadcast (batch, 1, Cin, F) * (1, Cout, Cin, F), contract over Cin.
    out_fr = (sig_fr.unsqueeze(1) * ker_fr.unsqueeze(0)).sum(2)
    # Back to the time domain; drop the extra padded samples.
    out = torch.fft.irfft(out_fr, dim=-1).float()
    out = out[..., : signal.shape[-1]]
    if bias is not None:
        out = out + bias.view(1, -1, 1)
    return out
def sell_at_loss_switch(value, exchange):
    """Toggle the 'sellatloss' flag in the exchange config from the request value.

    Sets the flag to 1 when "sellatloss" appears in ``value``, otherwise 0.
    Returns a pair of booleans mirroring the resulting off/on state.
    """
    cfg = tg_wrapper.helper.config[exchange]["config"]
    cfg.update({"sellatloss": 0})
    if "sellatloss" not in value:
        return True, True
    cfg.update({"sellatloss": 1})
    return False, False
def strify(iterable_struct, delimiter=','):
    """Join the items of an iterable into a delimiter-separated string.

    :param iterable_struct: any iterable; items are converted with str()
    :param delimiter: separator placed between items, default comma
    :return: the joined string
    """
    pieces = (str(item) for item in iterable_struct)
    return delimiter.join(pieces)
import copy
def load_shifting_multiple_tech(
        fueltypes,
        enduse_techs,
        technologies,
        fuel_yh,
        param_lf_improved_cy
    ):
    """Shift demand for an enduse served by multiple technologies.

    For every fueltype, the summed load of all its technologies is
    peak-shifted as a whole; the removed (peak) and added (off-peak)
    demand is then attributed back to each technology in proportion to
    its share of the demand shifted out of the peak hours.

    Arguments
    ---------
    fueltypes : iterable
        Fueltypes to process.
    enduse_techs : list
        Technologies of the enduse.
    technologies : dict
        Technology definitions, used to look up which technologies belong
        to a fueltype (via ``tech_related.get_technologies_per_fueltype``).
    fuel_yh : dict
        Per-technology demand arrays of shape (365, 24).
    param_lf_improved_cy : float
        Load-factor improvement parameter forwarded to ``load_shifting``.

    Returns
    -------
    dict
        Per-technology shifted demand arrays of shape (365, 24). Note that
        only technologies of the processed fueltypes appear in the result.
    """
    fuel_tech_shifted = {}
    for fueltype in fueltypes:
        # Get technologies of fueltype
        technologies_fueltype = tech_related.get_technologies_per_fueltype(
            enduse_techs, technologies, fueltype)
        if technologies_fueltype != []:
            # Sum across al technologies
            sum_all_techs = np.zeros((365, 24))
            for tech in technologies_fueltype:
                sum_all_techs += fuel_yh[tech]
            # Shift demand for enduse (summed demand of technologies)
            sum_all_techs_shifted = load_shifting(
                sum_all_techs,
                mode_constrained=True,
                param_lf_improved_cy=param_lf_improved_cy,
                make_all_flat=False)
            # Shift demand of peak (removed demand)
            removed_demand = sum_all_techs - sum_all_techs_shifted
            removed_demand = removed_demand.reshape(8760)
            removed_demand[removed_demand < 0] = 0  # keep only hours that lost demand
            tot_removed_demand = sum(removed_demand)
            # Normalised hourly shape of the removed demand.
            shape_removed = (1 / tot_removed_demand) * removed_demand
            shape_removed[np.isnan(shape_removed)] = 0
            peak_hour_positions = list(np.where(removed_demand > 0)[0])
            # Demand in off-peak (demand added)
            added_demand = sum_all_techs - sum_all_techs_shifted
            added_demand = added_demand.reshape(8760)
            added_demand[added_demand > 0] = 0  # keep only hours that gained demand
            added_demand = abs(added_demand)
            tot_added_demand = sum(added_demand)
            # Normalised hourly shape of the added demand.
            shape_added = (1 / tot_added_demand) * added_demand
            shape_added[np.isnan(shape_added)] = 0
            # Energy conservation: what left the peak must reappear off-peak.
            assert round(np.sum(tot_removed_demand), 6) == round(tot_added_demand, 6)
            for tech in technologies_fueltype:
                fuel_tech_8760 = copy.copy(fuel_yh[tech].reshape(8760))
                sum_all_techs_8760 = sum_all_techs.reshape(8760)
                # Calculate share of technology specific demand to overall enduse demand per hour before shift
                h_contribution_before = (1 / sum_all_techs_8760) * fuel_tech_8760
                h_contribution_before[np.isnan(h_contribution_before)] = 0
                # Get absolute demand in peak hours
                tech_peak_shifted_demand = 0
                for peak_hour_pos in peak_hour_positions:
                    tech_peak_shifted_demand += h_contribution_before[peak_hour_pos] * removed_demand[peak_hour_pos]
                # Percentage of total shifted demand of technology
                p_abs_tech_shifted = (1.0 / tot_removed_demand) * tech_peak_shifted_demand
                if np.isnan(p_abs_tech_shifted):
                    p_abs_tech_shifted = 0
                # Add shifted demand to total demand
                fuel_tech_8760 += (p_abs_tech_shifted * tot_removed_demand) * shape_added
                # Substract peak from total demand
                fuel_tech_8760 -= (p_abs_tech_shifted * tot_removed_demand) * shape_removed
                # Per-technology total demand is unchanged by the shift.
                assert round(np.sum(fuel_tech_8760), 6) == round(np.sum(fuel_yh[tech]), 6)
                fuel_tech_shifted[tech] = fuel_tech_8760.reshape(365, 24)
    return fuel_tech_shifted
def configure_node(
    cluster,
    node,
    certnkey,
    dataset_backend_configuration,
    provider,
    logging_config=None
):
    """
    Configure flocker-dataset-agent on a node,
    so that it could join an existing Flocker cluster.
    :param Cluster cluster: Description of the cluster.
    :param Node node: The node to configure.
    :param CertAndKey certnkey: The node's certificate and key.
    :param dataset_backend_configuration: Backend-specific configuration
        forwarded to ``task_configure_flocker_agent``.
    :param bytes provider: provider of the nodes - aws. rackspace or managed.
    :param dict logging_config: A Python logging configuration dictionary,
        following the structure of PEP 391.
    :return: the value of ``run_remotely`` — presumably an effect/deferred
        that fires when the remote command sequence completes (confirm
        against ``run_remotely``'s documentation).
    """
    # Managed nodes may already run an agent, so restart instead of start.
    setup_action = 'start'
    if provider == "managed":
        setup_action = 'restart'
    # The commands below execute in order as root on the target node.
    return run_remotely(
        username='root',
        address=node.address,
        commands=sequence([
            # Install cluster + node certificates before anything that
            # needs TLS.
            task_install_node_certificates(
                cluster.certificates.cluster.certificate,
                certnkey.certificate,
                certnkey.key),
            task_install_api_certificates(
                cluster.certificates.user.certificate,
                cluster.certificates.user.key),
            task_enable_docker(node.distribution),
            # Only touch the firewall on distributions that have one.
            if_firewall_available(
                node.distribution,
                open_firewall_for_docker_api(node.distribution),
            ),
            task_configure_flocker_agent(
                control_node=cluster.control_node.address,
                dataset_backend=cluster.dataset_backend,
                dataset_backend_configuration=(
                    dataset_backend_configuration
                ),
                logging_config=logging_config,
            ),
            task_enable_docker_plugin(node.distribution),
            task_enable_flocker_agent(
                distribution=node.distribution,
                action=setup_action,
            ),
        ]),
    )
import torch
def unsubdivide(P, T, iter=1):
    """
    Unsubdivides the given mesh n times
    In order to work, the mesh is intended subdivided using the method 'subdivide'.
    Parameters
    ----------
    P : Tensor
        the input points set
    T : LongTensor
        the topology tensor
    iter : int (optional)
        the number of times to unsubdivide the input mesh (default is 1)
    Returns
    -------
    (Tensor,LongTensor,LongTensor)
        the new points set, the new topology, and the indices of the original points
    """
    i = T.clone()
    for n in range(0, iter):
        # Each 'subdivide' step produced 4 child faces per parent face;
        # regrouping indices in blocks of 4 undoes one subdivision level.
        # (col/istri/row/unique/reindex are project helpers — behavior
        # assumed from their names; confirm against their definitions.)
        i = torch.t(torch.reshape(i[0], (col(i)//4, 4)))
        if istri(T):
            # Triangle meshes keep only the first three index rows.
            i = i[0:3]
    if row(i) == 1:
        i = torch.t(i)
    # Surviving original vertex indices after the unsubdivision.
    j = unique(i)[0]
    p = P[j]
    # Remap topology indices into the compacted point set.
    t = reindex(i)
    i = j
    return p, t, i
def table2sparse(data, shape, order, m_type=lil_matrix):
    """Build a 2D sparse matrix from an Orange.data.Table.

    The two columns selected by ``order`` provide the (row, col)
    coordinates, while ``data.Y`` provides the stored values.

    Args:
        data: Orange.data.Table
        shape: (int, int)
            Shape of the resulting matrix.
        order: (int, int)
            Indices of the columns holding the row/col coordinates.
        m_type: scipy.sparse.*
            Matrix class to build (csr_matrix, lil_matrix, ...).

    Returns:
        matrix: scipy.sparse
    """
    row_idx, col_idx = order
    return sparse_matrix_2d(
        row=data.X[:, row_idx],
        col=data.X[:, col_idx],
        data=data.Y,
        shape=shape,
        m_type=m_type,
    )
from typing import OrderedDict
def get_s_bi_status(c: analyze.CZSC) -> OrderedDict:
    """Signal describing the surface/interior relation of the last stroke (bi).

    :param c: CZSC object
    :return: signal dictionary
    """
    freq: Freq = c.freq
    s = OrderedDict()
    # Default signal: "other" until the last stroke can be classified.
    v = Signal(k1=str(freq.value), k2="倒1笔", k3="表里关系", v1="其他", v2='其他', v3='其他')
    s[v.key] = v.value
    if c.bi_list:
        # Definition of the surface/interior relation, see:
        # http://blog.sina.com.cn/s/blog_486e105c01007wc1.html
        min_ubi = min([x.low for x in c.bars_ubi])
        max_ubi = max([x.high for x in c.bars_ubi])
        last_bi = c.bi_list[-1]
        v = None
        if last_bi.direction == Direction.Down:
            if min_ubi < last_bi.low:
                # Unfinished bars go below the last down stroke: extending.
                v = Signal(k1=str(freq.value), k2="倒1笔", k3="表里关系", v1="向下延伸")
            else:
                # Bottom fractal completed.
                v = Signal(k1=str(freq.value), k2="倒1笔", k3="表里关系", v1="底分完成")
        if last_bi.direction == Direction.Up:
            if max_ubi > last_bi.high:
                # Unfinished bars go above the last up stroke: extending.
                v = Signal(k1=str(freq.value), k2="倒1笔", k3="表里关系", v1="向上延伸")
            else:
                # Top fractal completed.
                v = Signal(k1=str(freq.value), k2="倒1笔", k3="表里关系", v1="顶分完成")
        if v and "其他" not in v.value:
            s[v.key] = v.value
    return s
def shuffle_code_book(encode_book):
    """Return a new code book whose codes and words are independently permuted.

    :param encode_book: mapping from code to word
    :return: dict pairing randomly permuted codes with randomly permuted words
    """
    entries = np.array(list(encode_book.items()))
    n = entries.shape[0]
    # Two independent permutations break the original code->word pairing.
    shuffled_codes = entries[np.random.permutation(n), 0]
    shuffled_words = entries[np.random.permutation(n), 1]
    return {code: word for code, word in zip(shuffled_codes, shuffled_words)}
def default_handler(data: pd.Series, *args, **kwargs) -> pd.Series:
    """Flag which elements of the given series are strings.

    Parameters
    ----------
    data: pd.Series
        The data to process.

    Returns
    -------
    pd.Series: Boolean series, True where the element is a ``str``.
    """
    return data.map(lambda value: isinstance(value, str))
def load_original_data(data, load_dirty=False):
    """Load the original dataframe with missing values replaced by np.nan.

    When ``load_dirty`` is True, the dirty dataset of a cleaning experiment
    is read; otherwise the default clean dataset is loaded.
    """
    source = data.dirty_data_path if load_dirty else data.data_path
    frame = pd.read_csv(source,
                        sep=data.original_separator,
                        header=None)
    return frame.replace(data.missing_value_token, np.nan)
import os
def merge_cpr_fr24_data(date, *, max_speed=DEFAULT_MAX_SPEED,
                        distance_accuracy=DEFAULT_DISTANCE_ACCURACY):
    """
    Match, merge and clean refined CPR and FR24 ADS-B data for the given date.
    Parameters
    ----------
    date: string
        The date in ISO8601 format, e.g. 2017-08-16
    max_speed: string
        The maximum ground speed permitted between adjacent positions [Knots],
        default: 750 Knots.
    distance_accuracy: string
        The maximum distance between positions at the same time [Nautical Miles],
        default: 0.25 NM.
    Returns
    -------
    True if succesful, False otherwise.
    """
    # Reject invalid dates up front.
    if not is_valid_iso8601_date(date):
        log.error("The date is not valid: %s", date)
        return False
    os.chdir(REFINED_DIR)
    tasks.match_cpr_adsb_trajectories_on(date)
    tasks.merge_cpr_adsb_trajectories_on(date)
    return tasks.clean_raw_positions_data(CPR_FR24, date, float(max_speed),
                                          float(distance_accuracy))
import torch
from registration_pt import (device, reg_l2_rigid)
from images import gaussian_filter_2d_pt, gaussian_cov
# Overhead
dev = device()  # torch device used for tensors created in this module
prec = precision()  # default floating-point dtype for this module
# Transposed 2D rotation matrix for angle phi (a 0-d torch tensor):
# [[cos, sin], [-sin, cos]] == R(phi).T == R(-phi).
rot_mtx_T = lambda phi: torch.stack((
    torch.stack((torch.cos(phi),torch.sin(phi))),
    torch.stack((-torch.sin(phi),torch.cos(phi)))
))
def bfsw_detector_pt(Y, X, mask, image_type='textured', sigma=3,
        sigma_scene=1.5, nu=1, stride_u=16, stride_v=16, max_iter=1024,
        thresh=0, step=tensor(1e0, device=device(), dtype=precision()),
        articulation_pt=None):
    """Brute Force Sliding Window optimization-based detector
    Search for a motif X in a scene Y by "brute force": given striding
    parameters param_dict['stride_u'], param_dict['stride_v'], run a
    registration algorithm placed at each stride point in Y to obtain a final
    loss and final transformation parameters, and use those data to generate a
    detection occurrence map ("rolloff" controlled by param_dict['nu']) with
    coordinates the same as the input scene Y which determines where and "to
    what intensity" the motif X was detected at each point in the image. The
    idea being, that when the stride parameters are set sufficiently small
    (i.e. dense searching), we will find all occurrences of the motif; and by
    using registration, we can do the detection in an invariant/potentially
    efficient fashion.
    The "detector" terminology is a slight misnomer, as the main output here is
    an occurrence map -- in other words, detection is left to be done by
    external algorithms (no thresholding/etc is done here other than rolloff
    control via param_dict['nu'] mentioned above).
    Inputs:
    ------------
    Y - (C, M, N) torch tensor
        Input scene, strided over to search for X
    X - (C, m, n) torch tensor
        Input motif, to be detected in the scene Y
    mask - (1, M, N) torch tensor, or None
        Either the mask for the motif, or anything with image_mode='spike' (it
        gets ignored)
    Other parameters: see registration_pt documentation
    Outputs:
    ------------
    spike map, coord map, errors map, transformation map
    """
    #torch.tensor(((torch.cos(phi),
    #-torch.sin(phi)),(torch.sin(phi), torch.cos(phi))), device=dev,
    #dtype=prec)
    # Main tasks:
    C, M, N = Y.shape
    c, m, n = X.shape
    # 1. Process the strides
    start_u = 0
    stop_u = M
    start_v = 0
    stop_v = N
    # Clamp the stride range so the motif always fits inside the scene.
    start_u_chk = int(np.maximum(0, start_u))
    start_v_chk = int(np.maximum(0, start_v))
    stop_u_chk = int(np.minimum(M - m + 1, stop_u))
    stop_v_chk = int(np.minimum(N - n + 1, stop_v))
    anchors_u = torch.arange(start_u_chk, stop_u_chk, stride_u, device=dev,
            dtype=precision())
    anchors_v = torch.arange(start_v_chk, stop_v_chk, stride_v, device=dev,
            dtype=precision())
    # add the end indices too, to avoid a "near zero bias"...
    if anchors_u[-1] < stop_u_chk-1:
        anchors_u = torch.cat((anchors_u, torch.tensor((stop_u_chk-1,),
            device=dev, dtype=precision())), dim=-1)
    if anchors_v[-1] < stop_v_chk-1:
        anchors_v = torch.cat((anchors_v, torch.tensor((stop_v_chk-1,),
            device=dev, dtype=precision())), dim=-1)
    # 2. Do the registration with batched reg_l2 (create locs)
    # locs is the cartesian product of anchors_u x anchors_v, one (u, v)
    # anchor per row, built with kron against all-ones vectors.
    locs = torch.cat(
            (torch.kron(anchors_u, torch.ones(len(anchors_v), device=dev,
                dtype=precision()))[:,None],
                torch.kron(torch.ones(len(anchors_u), device=dev,
                    dtype=precision()), anchors_v)[:,None]), dim=-1)
    print(f'Total work to do: {locs.shape[0]} strides, '
            f'at {X.shape[1]} by {X.shape[2]} motif, '
            f'in {Y.shape[1]} by {Y.shape[2]} scene.')
    if image_type == 'textured':
        # Textured motif call: two rounds of multiscale
        phi, b, errors0 = reg_l2_rigid(Y, X, mask, locs, step=step,
                max_iter=max_iter, sigma=sigma, sigma_scene=sigma_scene)
        # Second round multiscale
        phi, b, errors2 = reg_l2_rigid(Y, X, mask, locs, step=step,
                max_iter=max_iter, sigma=0.1, sigma_scene=1e-6, init_data=(phi, b),
                erode=False)
        # Concatenate errors
        errors = torch.cat((errors0, errors2), -1)
    else:
        # spike motif call: don't need multiscale
        phi, b, errors = reg_l2_rigid(Y, X, mask, locs, step=step,
                max_iter=max_iter, sigma=sigma, sigma_scene=sigma_scene,
                image_type='spike', rejection_thresh=0.25)
    # 3. Create the spike map (do this on the GPU)
    # ctr: center of the motif (in motif coordinates).
    ctr = torch.tensor(((m-1)/2, (n-1)/2), device=dev,
            dtype=precision())[None, :]
    if articulation_pt is None:
        articulation_pt = ctr
    Q = rot_mtx_T(phi[:, 0])
    # Rotate the articulation offset by each registration's angle.
    affine_part = torch.tensordot(articulation_pt - ctr, Q, dims=1)[0, ...].T
    #spike_locs = torch.min(torch.max(
    #    ctr + locs + torch.flip(b, (-1,)),
    #    torch.tensor((0,0),device=dev,dtype=precision())),
    #    torch.tensor((M,N),device=dev,dtype=precision()))
    # Clamp the detected locations into the scene bounds [0, M] x [0, N].
    spike_locs = torch.min(torch.max(
        affine_part + ctr + locs + torch.flip(b, (-1,)),
        torch.tensor((0,0),device=dev,dtype=precision())),
        torch.tensor((M,N),device=dev,dtype=precision()))
    spike_map = torch.zeros((1, M, N), device=dev, dtype=precision())
    for idx in range(spike_locs.shape[0]):
        # Weight decays exponentially (rate nu) with the final loss above thresh.
        weight = torch.exp(-nu * torch.maximum(torch.zeros((1,), device=dev,
            dtype=precision()), errors[idx, -1] - thresh))
        spike_interp = gaussian_filter_2d_pt(M, N, sigma_u=0.5,
                offset_u=spike_locs[idx,0], offset_v=spike_locs[idx,1])
        spike_map = torch.maximum(spike_map, weight * spike_interp /
                torch.sum(spike_interp))
    # 4. Prepare the output struct (need spike map and transformation
    # parameters, errors for debug).
    output = {
            'spike_locs': spike_locs,
            'errors': errors,
            'phi': phi,
            'b': b
            }
    return spike_map, output
from sys import path
def _full_path_to(folder_name: str, i_f_r: str):
    """
    Helper method to find the full path to a folder in Data.

    :param folder_name: Name of the folder in Data
    :param i_f_r: 'i' (Input), 'f' (Files) or 'r' (Results). For results, a
        second character selects the subfolder: 'rl' (Labs), 'rt' (Tables),
        'rf' (Figures) and 'rv' (LabVisualisations)
    :return: Full path to the folder in Data
    """
    # NOTE(review): the module-level `from sys import path` shadows os.path
    # (sys.path has no `join`); import the correct module locally.
    from os import path
    if i_f_r == 'i':
        parent_folder = 'Input'
    elif i_f_r == 'f':
        parent_folder = 'Files'
    else:
        # Bug fix: the subfolder character was previously read unconditionally
        # via i_f_r[1], which raised IndexError for the single-character
        # inputs 'i' and 'f'. Only Results has subfolders; anything other
        # than 'l'/'t'/'f' falls through to LabVisualisations, as before.
        subfolders = {'l': 'Labs', 't': 'Tables', 'f': 'Figures'}
        sub_key = i_f_r[1] if len(i_f_r) > 1 else ''
        parent_folder = path.join('Results',
                                  subfolders.get(sub_key, 'LabVisualisations'))
    return path.join(DATA_PATH, parent_folder, folder_name)
def mapped_col_index_nb(mapped_arr, col_arr, n_cols):
    """Identical to `record_col_index_nb`, but for mapped arrays.

    Returns an (n_cols, 2) array whose rows hold the [start, end) index
    range of each column's entries in `mapped_arr`; columns without
    entries keep the sentinel value -1. `col_arr` must be sorted.
    """
    # Plain loop kept deliberately simple so the function stays numba-friendly.
    out = np.full((n_cols, 2), -1, dtype=np.int_)
    n = mapped_arr.shape[0]
    last_col = -1
    for i in range(n):
        cur_col = col_arr[i]
        if cur_col < last_col:
            raise ValueError("col_arr must be sorted")
        if cur_col != last_col:
            if last_col != -1:
                out[last_col, 1] = i  # close the previous column's range
            out[cur_col, 0] = i  # open the current column's range
            last_col = cur_col
        if i == n - 1:
            out[cur_col, 1] = i + 1  # close the final open range
    return out
from typing import Set
from typing import Dict
from typing import OrderedDict
def get_greedy_advanced(data: OrderedDictType[_T1, Set[_T2]], unit_counts: Dict[_T1, int], mode: SelectionMode) -> OrderedSet[_T1]:
    """Greedily select keys from `data` until their unit sets cover all units.

    `data` must be ordered so results are reproducible; ties between equally
    good candidates are broken by `select_key` using `unit_counts` and `mode`.
    """
    assert isinstance(data, OrderedDict)
    logger = getLogger(__name__)
    target_units = {unit for units in data.values() for unit in units}
    remaining = data.copy()
    covered_units: Set[_T2] = set()
    selection: OrderedSet[_T1] = OrderedSet()
    while covered_units != target_units:
        # Count the not-yet-covered units each remaining key would add.
        new_counts = get_new_unit_counts(remaining, covered_units)
        candidates = get_most_new_units_keys(new_counts)
        if len(candidates) > 1:
            logger.info(f"Found {len(candidates)} candidates for the current iteration.")
        chosen = select_key(candidates, unit_counts, mode)
        selection.add(chosen)
        covered_units |= data[chosen]
        remaining.pop(chosen)
    return selection
def generate_pattern_eq_ipv4(value):
    """Build a STIX pattern matching the given IPv4 address string."""
    prefix = "ipv4-addr:value = '"
    suffix = "'"
    return prefix + value + suffix
import os
import csv
from fmpy.cross_check import get_vendor_ids
def generate_result_tables(repo_dir, data_dir):
    """ Generate the cross-check result tables

    Walks the vendor results under ``repo_dir``, builds one CSV matrix per
    (FMI version, type, platform) combination under ``data_dir/cross-check``,
    and finally writes an aggregated ``result.csv`` marking which tools pass.

    :param repo_dir: root of the cross-check repository ('results'/'fmus')
    :param data_dir: output directory, must contain 'tools.csv' and a
        'cross-check' subdirectory
    """
    combinations = []  # all permutations of FMI version, type and platform
    for fmi_version in ['1.0', '2.0']:
        for fmi_type in ['cs', 'me']:
            for platform in ['c-code', 'darwin64', 'linux32', 'linux64', 'win32', 'win64']:
                combinations.append((fmi_version, fmi_type, platform))
    tools_csv = os.path.join(data_dir, 'tools.csv')
    vendors = get_vendor_ids(tools_csv)
    tools = {}  # tool_id -> tool_name
    for tool_infos in vendors.values():
        for tool_id, tool_name in tool_infos:
            tools[tool_id] = tool_name
    def split_path(path):
        # Split a path into all of its segments (portable alternative to
        # str.split(os.sep)).
        segments = []
        while True:
            path, segment = os.path.split(path)
            if not segment:
                break
            segments.insert(0, segment)
        return segments
    def collect_results():
        # Gather one tuple per passed result directory; entries are
        # (fmi_version, fmi_type, platform, importing_tool, importing_version,
        #  exporting_tool, exporting_version, model_name).
        results = []
        vendor_repo = os.path.join(repo_dir, 'results')
        for root, dirs, files in os.walk(vendor_repo):
            if 'passed' not in files:
                continue  # ignore
            segments = split_path(root)
            fmi_version, fmi_type, platform, _, _, exporting_tool_name, exporting_tool_version, model_name = segments[-8:]
            # Skip FMUs flagged as not compliant with the latest rules.
            not_compliant_file = os.path.join(repo_dir, 'fmus', fmi_version, fmi_type, platform, exporting_tool_name, exporting_tool_version, model_name, 'notCompliantWithLatestRules')
            if os.path.isfile(not_compliant_file):
                continue  # ignore
            results.append(segments[-8:])
        return results
    def build_matrix(results, fmi_version, fmi_type, platform):
        """ Build the result matrix for an FMI version, type and platform """
        importing_tools = set()
        exporting_tools = set()
        filtered = []
        # get the tools
        for fmi_version_, fmi_type_, platform_, importing_tool_name, importing_tool_version, exporting_tool_name, exporting_tool_version, model_name in results:
            if fmi_version_ != fmi_version or fmi_type_ != fmi_type or platform_ != platform:
                continue
            importing_tools.add(importing_tool_name)
            exporting_tools.add(exporting_tool_name)
            filtered.append((importing_tool_name, importing_tool_version, exporting_tool_name, exporting_tool_version, model_name))
        # build matrix (rows: importing tools, columns: exporting tools,
        # cells: number of passed results)
        importing_tools = sorted(importing_tools, key=lambda s: s.lower())
        exporting_tools = sorted(exporting_tools, key=lambda s: s.lower())
        matrix = []
        for importing_tool in importing_tools:
            row = []
            for exporting_tool in exporting_tools:
                count = 0
                for r in filtered:
                    if r[0] == importing_tool and r[2] == exporting_tool:
                        count += 1
                row.append(count)
            matrix.append(row)
        return importing_tools, exporting_tools, matrix
    results = collect_results()
    # filter tool IDs (keep only tools registered in tools.csv)
    results = [r for r in results if r[3] in tools and r[5] in tools]
    matrices = {}
    for combination in combinations:
        matrices[combination] = build_matrix(results, *combination)
    # Write one CSV matrix per combination, e.g. fmi2-cs-win64.csv.
    for fmi_version, fmi_type, platform in combinations:
        importing_tools, exporting_tools, matrix = matrices[(fmi_version, fmi_type, platform)]
        importing_tools = [tools[tool_id] for tool_id in importing_tools]
        exporting_tools = [tools[tool_id] for tool_id in exporting_tools]
        csv_filename = 'fmi1' if fmi_version == '1.0' else 'fmi2'
        csv_filename += '-'
        csv_filename += fmi_type
        csv_filename += '-'
        csv_filename += platform + '.csv'
        with open(os.path.join(data_dir, 'cross-check', csv_filename), 'w') as f:
            f.write(','.join([''] + exporting_tools) + '\n')
            for importing_tool, row in zip(importing_tools, matrix):
                f.write(','.join([importing_tool] + list(map(str, row))) + '\n')
    participants = set()
    # generate the tools file with cross-check results
    # export_passed / import_passed:
    #   (tool, fmi_version, fmi_type) -> {partner tool -> set of model names}
    export_passed = {}
    import_passed = {}
    for fmi_version, fmi_type, platform, importing_tool_name, importing_tool_version, exporting_tool_name, exporting_tool_version, model_name in results:
        key = (exporting_tool_name, fmi_version, fmi_type)
        participants.add(exporting_tool_name)
        participants.add(importing_tool_name)
        if key not in export_passed:
            export_passed[key] = {}
        if importing_tool_name not in export_passed[key]:
            export_passed[key][importing_tool_name] = {model_name}
        else:
            export_passed[key][importing_tool_name].add(model_name)
        key = (importing_tool_name, fmi_version, fmi_type)
        if key not in import_passed:
            import_passed[key] = {}
        if exporting_tool_name not in import_passed[key]:
            import_passed[key][exporting_tool_name] = {model_name}
        else:
            import_passed[key][exporting_tool_name].add(model_name)
    # aggregate the results
    participants = sorted(participants, key=lambda s: s.lower())
    rows = [participants]
    def check_passed(key, d):
        # A tool passes when at least 3 partner tools each contributed
        # at least 3 passed models.
        cnt = 0
        for vendor, models in d.get(key, {}).items():
            if len(models) >= 3:
                cnt += 1
        return cnt >= 3
    for results in [export_passed, import_passed]:
        for i, c in enumerate([('cs', '1.0'), ('cs', '2.0'), ('me', '1.0'), ('me', '2.0')]):
            fmi_type, fmi_version = c
            row = []
            for tool_id in participants:
                key = (tool_id, fmi_version, fmi_type)
                row.append('passed' if check_passed(key, results) else '')
            rows.append(row)
    with open(os.path.join(data_dir, 'cross-check', 'result.csv'), 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerows(rows)
import os
def get_dump_date():
    """Return the date of the newest labs dump whose dump file exists, or None."""
    dump_dirs = next(os.walk(DIRECTORY))[1]
    # Newest first: directory names are dates, so reverse lexical order works.
    for candidate in sorted(dump_dirs, reverse=True):
        if os.path.isfile(FILENAME.format(date=candidate)):
            return candidate
    return None
def get_pub_velocity_cmd_vel(**kvargs):
    """
    Returns publisher for :setpoint_velocity: plugin, :cmd_vel: topic

    Extra keyword arguments (e.g. queue_size, latch) are forwarded to
    rospy.Publisher.
    """
    # Resolve the mavros-namespaced topic name and publish TwistStamped on it.
    return rospy.Publisher(mavros.get_topic('setpoint_velocity', 'cmd_vel'), TwistStamped, **kvargs)
def get_point_information(form):
    """Build the JSON payload describing the analytics polygon for a point.

    :param form: form from the POST request carrying the point coordinates
        and the six main filters
    :return: dict with the polygon coordinates and its associated information
    """
    info = generate_point_information(form)
    geometry = info['geometry']
    polygon = []
    if not geometry.is_empty:
        # Swap each coordinate pair from (x, y) to (y, x) for the map client.
        polygon = [[pt[1], pt[0]] for pt in geometry.exterior.coords]
    zones = info['typeZones']
    zones = zones.split(', ') if zones else []
    services = info['typeServs']
    services = services.split(', ') if services else []
    return {
        'totalAreaOfSportsZones': info['totalArea'],
        'typesOfSportsZones': zones,
        'typesOfSportsServices': services,
        'polygonList': polygon
    }
def __kspack(ks):
    """Pack a kset into an 8-bit integer, one bit per entry of __make_ks()."""
    reference = __make_ks()
    packed = 0
    for bit in range(8):
        if reference[bit] in ks:
            packed |= 1 << bit
    return packed
import warnings
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1', r2postfix='2'):
    """
    Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
    field names -- if *key* is a string it is assumed to be a single
    attribute name. If *r1* and *r2* have equal values on all the keys
    in the *key* tuple, then their fields will be merged into a new
    record array containing the intersection of the fields of *r1* and
    *r2*.
    *r1* (also *r2*) must not have any duplicate keys.
    The *jointype* keyword can be 'inner', 'outer', 'leftouter'. To
    do a rightouter join just reverse *r1* and *r2*.
    The *defaults* keyword is a dictionary filled with
    ``{column_name:default_value}`` pairs.
    The keywords *r1postfix* and *r2postfix* are postfixed to column names
    (other than keys) that are both in *r1* and *r2*.
    """
    if is_string_like(key):
        key = (key, )
    # Validate that every key field exists in both arrays.
    for name in key:
        if name not in r1.dtype.names:
            raise ValueError('r1 does not have key field %s'%name)
        if name not in r2.dtype.names:
            raise ValueError('r2 does not have key field %s'%name)
    def makekey(row):
        return tuple([row[name] for name in key])
    # Map each key tuple to its row index (keys are assumed unique per array).
    r1d = dict([(makekey(row),i) for i,row in enumerate(r1)])
    r2d = dict([(makekey(row),i) for i,row in enumerate(r2)])
    r1keys = set(r1d.keys())
    r2keys = set(r2d.keys())
    common_keys = r1keys & r2keys
    r1ind = np.array([r1d[k] for k in common_keys])
    r2ind = np.array([r2d[k] for k in common_keys])
    common_len = len(common_keys)
    left_len = right_len = 0
    if jointype == "outer" or jointype == "leftouter":
        left_keys = r1keys.difference(r2keys)
        left_ind = np.array([r1d[k] for k in left_keys])
        left_len = len(left_ind)
    if jointype == "outer":
        right_keys = r2keys.difference(r1keys)
        right_ind = np.array([r2d[k] for k in right_keys])
        right_len = len(right_ind)
    def key_desc(name):
        """If *name* is a byte-string key, use the larger itemsize of r1 or
        r2 before merging so neither side's key values get truncated."""
        dt1 = r1.dtype[name]
        # np.string_ was removed in NumPy 2.0; np.bytes_ is the same type
        # on both old and new NumPy versions.
        if dt1.type != np.bytes_:
            return (name, dt1.descr[0][1])
        # Bug fix: this used to read ``r1.dtype[name]`` a second time, so
        # r2's string width was never consulted (narrow r1 keys truncated
        # r2 values), and the ``.num`` type-number comparison could never
        # reflect string width -- compare itemsize instead.
        dt2 = r2.dtype[name]
        if dt1.itemsize >= dt2.itemsize:
            return (name, dt1.descr[0][1])
        else:
            return (name, dt2.descr[0][1])
    keydesc = [key_desc(name) for name in key]
    def mapped_r1field(name):
        """
        The column name in *newrec* that corresponds to the column in *r1*.
        """
        if name in key or name not in r2.dtype.names: return name
        else: return name + r1postfix
    def mapped_r2field(name):
        """
        The column name in *newrec* that corresponds to the column in *r2*.
        """
        if name in key or name not in r1.dtype.names: return name
        else: return name + r2postfix
    r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr if desc[0] not in key]
    r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr if desc[0] not in key]
    newdtype = np.dtype(keydesc + r1desc + r2desc)
    newrec = np.recarray((common_len + left_len + right_len,), dtype=newdtype)
    if defaults is not None:
        # Warn about default keys that can never be applied.
        for thiskey in defaults:
            if thiskey not in newdtype.names:
                warnings.warn('rec_join defaults key="%s" not in new dtype names "%s"'%(
                    thiskey, newdtype.names))
    # Zero-initialize numeric columns so unmatched rows are deterministic.
    for name in newdtype.names:
        dt = newdtype[name]
        if dt.kind in ('f', 'i'):
            newrec[name] = 0
    if jointype != 'inner' and defaults is not None: # fill in the defaults enmasse
        newrec_fields = newrec.dtype.fields.keys()
        for k, v in defaults.items():
            if k in newrec_fields:
                newrec[k] = v
    # Fill common rows first, then the left-only rows (outer/leftouter).
    for field in r1.dtype.names:
        newfield = mapped_r1field(field)
        if common_len:
            newrec[newfield][:common_len] = r1[field][r1ind]
        if (jointype == "outer" or jointype == "leftouter") and left_len:
            newrec[newfield][common_len:(common_len+left_len)] = r1[field][left_ind]
    # r2's non-key columns for common rows, plus right-only rows (outer).
    for field in r2.dtype.names:
        newfield = mapped_r2field(field)
        if field not in key and common_len:
            newrec[newfield][:common_len] = r2[field][r2ind]
        if jointype == "outer" and right_len:
            newrec[newfield][-right_len:] = r2[field][right_ind]
    newrec.sort(order=key)
    return newrec
# Bug fix: typing.OrderedDict is only a generic alias -- instantiating it
# raises "TypeError: Type OrderedDict cannot be instantiated". The runtime
# class lives in collections.
from collections import OrderedDict


def cf(data):
    """AFF Community Facts

    Build CEDSCI search parameters from an AFF Community Facts record.
    :param data: sequence of values aligned with the module-level ``aff_cf``
        field-name list (must include ``geo_type`` and ``geo_name``)
    :raises KeyError: for zipcode geographies, which CEDSCI does not support
    """
    # AFF linked to Community Facts by place name
    # CEDSCI links to Community Profiles by GEOID, but we can get around
    # that by using search instead
    raw_data = OrderedDict(zip(aff_cf, data))
    if raw_data["geo_type"] == "zip":
        raise KeyError("CEDSCI does not support profiles for zipcodes")
    new_data = OrderedDict(target="profile", q=raw_data["geo_name"])
    return new_data
import os
import pickle
def perspective_transform(img):
    """
    Applies the perspective transformation to the image. If the pickle file
    does not exist, the transformation is determined first and saved in the
    pickle file.
    :param img: The image to be transformed
    :return: The warped/transformed image and the transformation Matrix
    """
    if not os.path.isfile('M_pickle.p'):
        M = determine_perspective_transform_matrix()
        # Context manager so the handle is closed even on error; the
        # original left the open() file object dangling.
        with open('M_pickle.p', 'wb') as fh:
            pickle.dump(M, fh)
    else:
        with open('M_pickle.p', 'rb') as fh:
            M = pickle.load(fh)
    # OpenCV expects (width, height), hence the swapped shape order.
    img_size = (img.shape[1], img.shape[0])
    warped = cv2.warpPerspective(img, M, img_size)
    return warped, M
import requests
import sys
def print_server_info(ip, user, password):
    """
    Fetch and print servers info
    @params:
        ip       - Required  : the ip of the server (Str)
        user     - Required  : the administrator username (Str)
        password - Required  : The administrator password (Str)
    """
    try:
        # NOTE(review): verify=False disables TLS certificate checking --
        # presumably for self-signed panel certificates; confirm.
        r = requests.get(f'https://{ip}:8443/api/v2/server', auth=(user, password), verify=False)
        if r.status_code != 200:
            raise Exception(f"Invalid response from plesk api. Response code: {r.status_code}")
        data = r.json()
        return print(f"{'='*100}\nServer info: {data['hostname']}, platform: {data['platform']}, panel version: {data['panel_version']} ({data['panel_revision']})\n{'='*100}\n")
    except Exception as err:
        # Narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit) and surface the actual cause
        # instead of a generic message; also fixes the "occured" typo.
        sys.exit(f"Error occurred while trying to get server info: {err}")
def bytes_string(text, encode="utf-8"):
    """Return a bytes object on Python 3 and a str object on Python 2"""
    if PY3:
        # Already bytes: pass through untouched; otherwise encode the str.
        return text if isinstance(text, bytes) else bytes(text, encode)
    # Python 2: encode unicode values, leave str values as-is.
    if isinstance(text, unicode):  # pylint: disable=undefined-variable
        return text.encode(encode)
    return text
def log_gaussian_prior(map_data, sigma, ps_map):
    """ Gaussian prior on the power spectrum of the map
    """
    # 2-D FFT normalized by the map's first dimension, then the power
    # (squared modulus) of each mode weighted by the expected spectrum.
    normalized_ft = jnp.fft.fft2(map_data) / map_data.shape[0]
    mode_power = jnp.real(normalized_ft * jnp.conj(normalized_ft))
    return -0.5 * jnp.sum(mode_power / (ps_map + sigma**2))
def calculate_cdf(data):
    """Calculate CDF given data points
    Parameters
    ----------
    data : array-like
        Input values
    Returns
    -------
    cdf : series
        Cumulative distribution function calculated at indexed points
    """
    # NOTE(review): assumes a default 0..n-1 integer index (the docstring
    # says the input is array-like) -- confirm before passing a pre-indexed
    # Series.
    data = pd.Series(data)
    data = data.fillna(0)
    total = np.nansum(data)
    index = data.index.tolist()
    # Entry for label i holds the mass strictly below i, normalized by the
    # total. This replaces the original O(n^2) re-summing loop with a single
    # cumulative sum; values are identical for the default integer index.
    running = data.cumsum().shift(1, fill_value=0) / total
    # As before, the first label is dropped and the final element's mass is
    # never included, so the series stops short of 1.0. Unlike the original,
    # empty input now yields an empty series instead of raising ValueError.
    return pd.Series(running.values[:len(index) - 1], index=index[1:])
from typing import Any
from typing import Optional
from typing import Set
from typing import Deque
from typing import List
def get_special_size(obj: Any, ids: Optional[Set[int]] = None) -> SpecialTuple:
    """
    Handles size requests for classes and data structures
    :param obj: object to calculate size
    :param ids: set of already seen ids
    :return: Tuple of the size in bytes of the special data structures of that object, and the integer keys associated
        with those objects and their special contents
    """
    seen = set() if ids is None else ids
    if obj is None:
        # pyre-fixme[6]: Expected `int` for 1st param but got `None`.
        seen.add(obj)
        return SpecialTuple(size=0, seen_ids=seen)
    # Objects that know how to report their own special memory usage.
    if isinstance(obj, SpecialMemoryProperties):
        return obj.special_memory_size(seen)
    # Container types are delegated to the recursive helper.
    if isinstance(obj, (Deque, Set, memoryview, List)):
        return _get_special_size_helper(obj, seen)
    return SpecialTuple(size=0, seen_ids=seen)
def sanitize_df(df, d_round=2, **options):
    """All dataframe cleaning and standardizing logic goes here."""
    # Round every float-typed column to d_round decimals, in place.
    for column, dtype in df.dtypes.items():
        if dtype == float:
            df[column] = df[column].round(d_round)
    return df
from typing import Any
from typing import Dict
def bind_args(func: FunctionType, *args: Any, **kwargs: Any) -> Dict[str, Any]:
    """Bind values from `args` and `kwargs` to corresponding arguments of `func`
    :param func: function to be inspected
    :param args: positional arguments to be bound
    :param kwargs: keyword arguments to be bound
    :return: `dict[argument_name] = argument_value` if binding was successful,
        raise TypeError with one of `ERR_*` error descriptions otherwise
    """
    # --- Introspect the signature from the code object. ---
    # co_varnames lists positional parameters first, then keyword-only ones,
    # then (if present) the *args and **kwargs catch-all names.
    function_code = func.__code__
    pos_args_amount = func.__code__.co_argcount
    named_args_amount = func.__code__.co_kwonlyargcount
    arg_names = func.__code__.co_varnames
    pos_only_args = func.__code__.co_posonlyargcount
    pos_args = list(arg_names[:pos_args_amount])
    named_args = list(arg_names[pos_args_amount:pos_args_amount + named_args_amount])
    # Does the function declare *args / **kwargs?
    args_flag = bool(function_code.co_flags & CO_VARARGS)
    kwargs_flag = bool(function_code.co_flags & CO_VARKEYWORDS)
    # `kek` mirrors kwargs_flag: when **kwargs exists, positional-only names
    # passed by keyword land in **kwargs instead of being an error.
    kek: bool = False
    if args_flag:
        args_name = arg_names[pos_args_amount + named_args_amount]
    if kwargs_flag:
        kek = True
        kwargs_name = arg_names[pos_args_amount + named_args_amount + args_flag]
    # Mutable working copies of the supplied arguments.
    given_args = [*args]
    given_kwargs = {**kwargs}
    # --- Collect declared default values. ---
    # __defaults__ aligns with the *last* positional parameters, hence the
    # reversed slice followed by reverse() to restore declaration order.
    pos_defaults: Dict[str, Any] = {}
    if func.__defaults__:
        pos_defaults_names = list(pos_args[-1: -len(func.__defaults__) - 1: -1])
        pos_defaults_names.reverse()
        pos_defaults = dict(zip(pos_defaults_names, func.__defaults__))
    named_defaults: Dict[str, Any] = {}
    if func.__kwdefaults__:
        named_defaults = func.__kwdefaults__
    result_dict: Dict[str, Any] = {}
    # Number of positional parameters that have no default value.
    delta: int = pos_args_amount - len(pos_defaults)
    # `flag` marks the "positional-only params exist but no positional
    # values were given" case, re-checked against defaults further below.
    flag: bool = False
    if pos_only_args != 0 and len(given_args) == 0:
        flag = True
    # --- Bind positional arguments: three cases by argument count. ---
    if len(given_args) > len(pos_args):
        # More positionals than parameters: only legal with *args.
        if not args_flag:
            raise TypeError(ERR_TOO_MANY_POS_ARGS)
        else:
            if len(given_args) < min(pos_only_args, delta) or pos_only_args - len(given_args) > delta:
                raise TypeError(ERR_MISSING_POS_ARGS)
            # Positional-only names must not also arrive by keyword
            # (unless **kwargs exists to absorb them).
            idx: int = 0
            while idx < pos_only_args:
                if pos_args[idx] in given_kwargs and not kek:
                    raise TypeError(ERR_POSONLY_PASSED_AS_KW)
                idx += 1
            result_dict.update(dict(zip(pos_args, given_args[:len(pos_args)])))
            # The overflow goes into *args.
            result_dict.setdefault(args_name, tuple(given_args[len(pos_args):]))
    elif len(given_args) == len(pos_args):
        # Exact fit: every positional parameter gets a given value.
        if len(given_args) < min(pos_only_args, delta):
            for el in named_args:
                if el in given_kwargs:
                    raise TypeError(ERR_POSONLY_PASSED_AS_KW)
            raise TypeError(ERR_MISSING_POS_ARGS)
        idx = 0
        while idx < pos_only_args:
            if pos_args[idx] in given_kwargs and not kek:
                raise TypeError(ERR_POSONLY_PASSED_AS_KW)
            idx += 1
        result_dict.update(dict(zip(pos_args, given_args)))
        if args_flag:
            result_dict.setdefault(args_name, tuple())
    else:
        # Fewer positionals than parameters: the rest must come from
        # keywords or defaults.
        if args_flag:
            result_dict.setdefault(args_name, tuple())
        if len(given_args) < min(pos_only_args, delta):
            for el in named_args:
                if el in given_kwargs and not kek:
                    raise TypeError(ERR_POSONLY_PASSED_AS_KW)
            idx = 0
            while idx < pos_only_args:
                if pos_args[idx] in given_kwargs and not kek:
                    raise TypeError(ERR_POSONLY_PASSED_AS_KW)
                idx += 1
        result_dict.update(dict(zip(pos_args[:len(given_args)], given_args)))
        cur_pointer = len(given_args)
        if flag:
            # No positional values at all: every positional-only parameter
            # must be covered by a default. (NOTE: the variable is spelled
            # "eroor_flag" in the original -- kept as-is.)
            idx = 0
            eroor_flag: bool = False
            while idx < pos_only_args:
                if pos_args[idx] in pos_defaults:
                    eroor_flag = True
                idx += 1
            if not eroor_flag:
                raise TypeError(ERR_MISSING_POS_ARGS)
        # Fill remaining positional parameters from keywords, then defaults.
        # The first try detects a parameter already bound positionally that
        # is about to be bound again.
        while cur_pointer < len(pos_args):
            try:
                if not kek:
                    value = result_dict[pos_args[cur_pointer]]
                    raise TypeError(ERR_MULT_VALUES_FOR_ARG)
            except KeyError:
                pass
            try:
                value = given_kwargs[pos_args[cur_pointer]]
                result_dict.setdefault(pos_args[cur_pointer], value)
                del given_kwargs[pos_args[cur_pointer]]
            except KeyError:
                try:
                    value = pos_defaults[pos_args[cur_pointer]]
                    result_dict.setdefault(pos_args[cur_pointer], value)
                except KeyError:
                    raise TypeError(ERR_MISSING_POS_ARGS)
            cur_pointer += 1
    # Any positional name still present in the keywords means it was passed
    # twice (keywords matching bound positionals were deleted above).
    for arg in pos_args:
        try:
            if not kek:
                value = given_kwargs[arg]
                raise TypeError(ERR_MULT_VALUES_FOR_ARG)
        except KeyError:
            continue
    # Without **kwargs there may be no surplus keyword arguments.
    if len(given_kwargs) > len(named_args) and not kwargs_flag:
        raise TypeError(ERR_TOO_MANY_KW_ARGS)
    # --- Bind keyword-only parameters from keywords, then defaults. ---
    cur_pointer = 0
    while cur_pointer < len(named_args):
        try:
            if not kek:
                value = result_dict[named_args[cur_pointer]]
                raise TypeError(ERR_MULT_VALUES_FOR_ARG)
        except KeyError:
            pass
        try:
            value = given_kwargs[named_args[cur_pointer]]
            result_dict.setdefault(named_args[cur_pointer], value)
            del given_kwargs[named_args[cur_pointer]]
        except KeyError:
            try:
                value = named_defaults[named_args[cur_pointer]]
                result_dict.setdefault(named_args[cur_pointer], value)
            except KeyError:
                raise TypeError(ERR_MISSING_KWONLY_ARGS)
        cur_pointer += 1
    # Whatever keywords remain are collected by **kwargs.
    if kwargs_flag:
        result_dict.setdefault(kwargs_name, given_kwargs)
    return result_dict
def register_user(request):
    """
    ---REGISTER USER---
    Validate the submitted registration form, verify the neighborhood /
    permission-code pair against an existing House, then create the User
    (with a hashed password) and its UserProfile.
    :param request: incoming HttpRequest
    """
    registered = False
    if request.method == 'POST':
        # Using forms to collect new user data
        user_form = UserForm(request.POST)
        if user_form.is_valid():
            # filter().first() instead of get(): an unknown neighborhood name
            # now yields the "no match" response below instead of an
            # unhandled DoesNotExist (HTTP 500).
            neighborhood = Neighborhood.objects.filter(
                division_title=request.POST['neighborhood_text']).first()
            house = None
            if neighborhood is not None:
                # Bug fix: take a single House with .first() -- the original
                # assigned the whole QuerySet to the UserProfile FK below,
                # which Django rejects with a ValueError.
                house = House.objects.filter(
                    neighborhood=neighborhood,
                    permission_code=request.POST['perm_code_text']).first()
            if house is not None:
                # We have checked that the forms are valid now save the user
                user = user_form.save()
                # Now we hash the password with the set_password method.
                # Once hashed, we can update the user object.
                user.set_password(user.password)
                user.save()
                user_profile = UserProfile(user=user, house=house)
                user_profile.save()
                return HttpResponseRedirect('/')
            else:
                # no house object was returned, invalid info provided
                return HttpResponse("The neighborhood and permission code you have entered do not match any existing neighborhood.")
    # Not a HTTP POST, so we render our form using a blank ModelForm
    # instance, ready for user input.
    else:
        user_form = UserForm()
    # Render the template depending on the context.
    return render(request, 'accounts/register.html', {'user_form': user_form,
                                                      'registered': registered})
def trans_quarter(string):
    """Transform from (not lexicographic friendly) {quarter}Q{year} to a datetime object.
    >>> trans_quarter('4Q2019')
    datetime.datetime(2019, 10, 1, 0, 0)
    """
    quarter, year = qy_parser.str_to_tuple(string)
    # First calendar month of the quarter, via the module-level lookup table.
    first_month = month_of_quarter[quarter]
    return dt(year, first_month, 1)
def edit_post(post_id):
    """EDIT-POST page helps in editing of blog post.

    Looks up the post by id; on a valid form submission the title/body are
    saved and the user is redirected to the post view, otherwise the edit
    form is rendered. Unknown ids flash a message and bounce back to the
    referring page (or home).
    """
    post = Post.query.get(post_id)
    if not post:
        # Guard clause: unknown id -> flash and return to the referrer.
        next_page = request.headers.get("Referer")
        flash("Blog post not found.")
        return redirect(next_page or url_for("app.home"))
    form = PostForm(obj=post)
    if form.validate_on_submit():
        post.title = form.data.get("title")
        post.body = form.data.get("body")
        db.session.add(post)
        db.session.commit()
        # Typo fix in the user-facing flash: "sucessfully" -> "successfully".
        flash("Post successfully edited.")
        return redirect(url_for("app.view_post", post_id=post_id))
    return render_template(
        "post.html",
        user=current_user,
        post=post,
        form=form,
        page_title="EDIT-POST",
        page_color="purple",
    )
def shutdown(at_time=None):
    """
    Shutdown a running system
    at_time
        The wait time in minutes before the system will be shutdown.
    CLI Example:
    .. code-block:: bash
        salt '*' system.shutdown 5
    """
    # The BSDs don't power off by default when halted, so use -p there.
    is_bsd = (
        salt.utils.platform.is_freebsd()
        or salt.utils.platform.is_netbsd()
        or salt.utils.platform.is_openbsd()
    )
    flag = "-p" if is_bsd else "-h"
    when = "{0}".format(at_time) if at_time else "now"
    cmd = ["shutdown", flag, when]
    return __salt__["cmd.run"](cmd, python_shell=False)
def _query(
    db,
    keyword,
    person,
    album,
    uuid,
    title,
    no_title,
    description,
    no_description,
    ignore_case,
    edited,
    external_edit,
    favorite,
    not_favorite,
    hidden,
    not_hidden,
    missing,
    not_missing,
    shared,
    not_shared,
    isphoto,
    ismovie,
    uti,
    burst,
    not_burst,
    live,
    not_live,
    cloudasset,
    not_cloudasset,
    incloud,
    not_incloud,
):
    """Run a query against PhotosDB to extract photos matching user-supplied criteria.

    Used by the query and export commands; arguments must be passed in the
    same order those commands define them -- if either is modified, ensure
    all three functions are updated together.
    """
    # TODO: this is getting too hairy -- need to change to named args
    photosdb = osxphotos.PhotosDB(dbfile=db)
    photos = photosdb.photos(
        keywords=keyword,
        persons=person,
        albums=album,
        uuid=uuid,
        images=isphoto,
        movies=ismovie,
    )
    if title:
        # search title field for text; if more than one value, keep photos
        # whose title contains all of them
        if ignore_case:
            for t in title:
                t = t.lower()
                photos = [p for p in photos if p.title and t in p.title.lower()]
        else:
            for t in title:
                photos = [p for p in photos if p.title and t in p.title]
    elif no_title:
        photos = [p for p in photos if not p.title]
    if description:
        # search description field for text; if more than one value, keep
        # photos whose description contains all of them
        if ignore_case:
            for d in description:
                d = d.lower()
                photos = [
                    p for p in photos if p.description and d in p.description.lower()
                ]
        else:
            for d in description:
                photos = [p for p in photos if p.description and d in p.description]
    elif no_description:
        photos = [p for p in photos if not p.description]
    if edited:
        photos = [p for p in photos if p.hasadjustments]
    if external_edit:
        photos = [p for p in photos if p.external_edit]
    if favorite:
        photos = [p for p in photos if p.favorite]
    elif not_favorite:
        photos = [p for p in photos if not p.favorite]
    if hidden:
        photos = [p for p in photos if p.hidden]
    elif not_hidden:
        photos = [p for p in photos if not p.hidden]
    if missing:
        photos = [p for p in photos if p.ismissing]
    elif not_missing:
        photos = [p for p in photos if not p.ismissing]
    # Bug fix: this shared/not_shared filter appeared twice in a row; the
    # redundant copy was removed (the filter is idempotent, so behavior is
    # unchanged).
    if shared:
        photos = [p for p in photos if p.shared]
    elif not_shared:
        photos = [p for p in photos if not p.shared]
    if uti:
        photos = [p for p in photos if uti in p.uti]
    if burst:
        photos = [p for p in photos if p.burst]
    elif not_burst:
        photos = [p for p in photos if not p.burst]
    if live:
        photos = [p for p in photos if p.live_photo]
    elif not_live:
        photos = [p for p in photos if not p.live_photo]
    if cloudasset:
        photos = [p for p in photos if p.iscloudasset]
    elif not_cloudasset:
        photos = [p for p in photos if not p.iscloudasset]
    if incloud:
        photos = [p for p in photos if p.incloud]
    elif not_incloud:
        photos = [p for p in photos if not p.incloud]
    return photos
def list_products(website, category, search):
    """
    There are 3 ways to list the products, 1 by category, 2 by the search bar, 3 by accessing the homepage.
    """
    base = Products.objects
    if search:
        matches = base.filter(websites=website, is_available=True,
                              title__icontains=search)
        return matches.order_by('position')
    if category:
        selected = get_object_or_404(Categories, websites=website, slug=category)
        return base.filter(categories=selected, is_available=True).order_by('position')
    # Homepage: only products flagged to show there.
    return base.filter(websites=website, is_available=True,
                       show_on_home=True).order_by('position')
def get_feature_columns(num_hash_buckets, embedding_dimension):
    """Creates sequential input columns to `RNNEstimator`.
    Args:
      num_hash_buckets: `int`, number of embedding vectors to use.
      embedding_dimension: `int`, size of embedding vectors.
    Returns:
      List of `tf.feature_column` ojects.
    """
    # Hash each string token into one of num_hash_buckets ids, then embed
    # the ids into dense vectors of the requested dimension.
    hashed_tokens = feature_column.sequence_categorical_column_with_hash_bucket(
        constants.TOKENS, num_hash_buckets, dtype=tf.string)
    embedded_tokens = tf.feature_column.embedding_column(
        hashed_tokens, dimension=embedding_dimension)
    return [embedded_tokens]
def afsluitmiddel_soort(damo_gdf=None, obj=None):
    """Map each SOORTAFSLUITMIDDEL name onto its attribute value.

    Returns a pandas Series aligned with the index of *damo_gdf*.
    """
    mapped = [_afsluitmiddel_soort(label) for label in damo_gdf['SOORTAFSLUITMIDDEL']]
    return pd.Series(data=mapped, index=damo_gdf.index)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.