| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def process_addr():
"""Process the bridge IP address/hostname."""
server_addr = request.form.get('server_addr')
session['server_addr'] = server_addr
try:
leap_response = get_ca_cert(server_addr)
session['leap_version'] = leap_response['Body'] \
['PingResponse']['LEAPVersion']
except ConnectionRefusedError:
flash("A connection to %s could not be established. Please check "
"the IP address and try again." % server_addr, 'danger')
return redirect(url_for('wizard'))
| 10,900
|
def get_bkk_list(request):
"""板块课(通识选修课)"""
myconfig = Config.objects.all().first()
year = (myconfig.nChoose)[0:4]
term = (myconfig.nChoose)[4:]
if term == "1":
term = "3"
elif term == "2":
term = "12"
if myconfig.apichange:
data = {
'xh':request.POST.get("xh"),
'pswd':request.POST.get("pswd"),
'bkk':request.POST.get("bkk")
}
res = requests.post(url=myconfig.otherapi+"/choose/bkk",data=data)
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
if myconfig.maintenance:
return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if request.method == 'POST':
if request.POST:
xh = request.POST.get("xh")
pswd = request.POST.get("pswd")
bkk = request.POST.get("bkk")
else:
return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if not Students.objects.filter(studentId=int(xh)):
content = ('【%s】[%s]未登录访问板块课' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
stu = Students.objects.get(studentId=int(xh))
try:
bkk = "1" if bkk=="2" else "2"
startTime = time.time()
print('【%s】查看了板块课' % stu.name)
JSESSIONID = str(stu.JSESSIONID)
route = str(stu.route)
cookies_dict = {
'JSESSIONID': JSESSIONID,
'route': route
}
cookies = requests.utils.cookiejar_from_dict(cookies_dict)
person = Xuanke(base_url=base_url, cookies=cookies, year=year, term=term)
bkk_list = person.get_bkk_list(bkk)
endTime = time.time()
spendTime = endTime - startTime
if spendTime > 30:
ServerChan = config["ServerChan"]
text = "板块课超时"
if ServerChan == "none":
return HttpResponse(json.dumps({'err':'板块课超时'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
requests.get(ServerChan + 'text=' + text)
return HttpResponse(json.dumps({'err':'板块课超时'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
content = ('【%s】[%s]访问了板块课,耗时%.2fs' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name, spendTime))
writeLog(content)
return HttpResponse(json.dumps(bkk_list, ensure_ascii=False), content_type="application/json,charset=utf-8")
except Exception as e:
print(e)
content = ('【%s】[%s]访问板块课出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
writeLog(content)
if myconfig.isKaptcha:
return get_kaptcha(xh)
else:
sta = update_cookies(request)
person = Xuanke(base_url=base_url, cookies=sta, year=year, term=term)
bkk_list = person.get_bkk_list(bkk)
return HttpResponse(json.dumps(bkk_list, ensure_ascii=False), content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
| 10,901
|
def mkmonth(year, month, dates, groups):
"""Make an array of data for the year and month given.
"""
cal = calendar.monthcalendar(int(year), month)
for row in cal:
for index in range(len(row)):
day = row[index]
if day == 0:
row[index] = None
else:
                date = '%04d-%02d-%02d' % (year, month, day)
items = dates.get(date, ())
grp = 0
len_items = len(items)
if len_items > 0:
while grp < len(groups):
grp += 1
if len_items <= groups[grp - 1]:
break
row[index] = [day, grp, items, date]
while len(cal) < 6:
cal.append([None] * 7)
return dict(name=calendar.month_name[month], weeks=cal,
                startdate='%04d-%02d' % (year, month))
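# A minimal usage sketch (hypothetical data; assumes `calendar` is imported as the
# function above requires, and treats `groups` as cumulative item-count thresholds):
example_dates = {'2021-03-05': ('event-a', 'event-b')}
example_month = mkmonth(2021, 3, example_dates, groups=[1, 5, 10])
print(example_month['name'])        # 'March'
print(len(example_month['weeks']))  # 6: the calendar is always padded to six rows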
| 10,902
|
def test_scoring_card():
"""## Scoring card
    Each scoring card has a victory point value and a set of required resources.
    You need to provide enough resources in order to obtain the
    scoring card.
"""
r = ScoringCard("YYYBBB (3)")
assert r.victory_point == 3
assert r.target == Resource("BBBYYY")
assert r.check_enough(Resource("YYBB")) is False
assert r.check_enough(Resource("YYYYBBBBG")) is True
| 10,903
|
def test_is_interface_physical():
# pylint: disable=C0121
"""
Test is_interface_physical
"""
assert is_interface_physical("GigabitEthernet0/0/2") is True
assert is_interface_physical("GigabitEthernet0/0/2.890") is False
assert is_interface_physical("GigabitEthernet0/0/2.1") is False
assert is_interface_physical("Ethernet0.1") is False
assert is_interface_physical("Ethernet1") is True
assert is_interface_physical("Serial0/1/0:15") is True
assert is_interface_physical("Service-Engine0/1/0") is True
assert is_interface_physical("Service-Engine0/1/0.152") is False
assert is_interface_physical("GigabitEthernet0") is True
assert is_interface_physical("ge-0/0/0") is True
assert is_interface_physical("ge-0/0/0.10") is False
assert is_interface_physical("lo0.0") is False
assert is_interface_physical("Loopback1") is False
assert is_interface_physical("Vlan108") is False
assert is_interface_physical("ae0.100") is False
assert is_interface_physical("Management0/0") is True
| 10,904
|
def is_extension(step_str):
"""Return true if step_str is an extension or Any.
Args:
step_str: the string to evaluate
Returns:
True if step_str is an extension
Raises:
ValueError: if step_str is not a valid step.
"""
if not is_valid_step(step_str):
raise ValueError('Not a valid step in a path: "' + step_str + '"')
return step_str[0] == "("
| 10,905
|
def create_file(path: Union[str, Path]) -> None:
"""
Creates an empty file at the given location.
Args:
path (Union[str, Path]): Path where the file should be created.
"""
_path = _get_path(path)
_path.touch()
| 10,906
|
def diff_list(first, second):
"""
Get difference of lists.
"""
second = set(second)
return [item for item in first if item not in second]
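# Quick usage check: the order of `first` is preserved and duplicates in `first` are kept.
assert diff_list([1, 2, 1, 3], [2]) == [1, 1, 3]
assert diff_list(["a", "b"], []) == ["a", "b"]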
| 10,907
|
def run(arguments=None):
"""This function is the main entrypoint for ale. It takes an optional argument which
if passed makes the parser read those arguments instead of argv from the command line.
This function parses the contents of the given config file and passes these options
to the functions which actually do calculations or visualizations.
The argument parser takes a flag which enables
or disables the use of asap on the current run with the flags '--asap' for
enabling it and '--no-asap' to disable it.
Passing this flag is to avoid getting the error 'illegal instruction (core dumped)'
in the terminal since some machines cannot run the current version of ASAP which
is used in this project. """
# Create command line parser with callbacks for the respective subcommands
parser = CreateParser(
default=default,
multi=multi,
simulate=simulate,
analyze=analyze,
visualize=visualize,
)
if arguments:
# parse arguments from the arguments parameter
args = parser.parse_args(arguments.split(" "))
else:
# parse arguments from argv from command line
args = parser.parse_args()
parsed_config_file = parse_config(args.config_file)
options = parsed_config_file
options['use_asap'] = args.use_asap
# Set traj_file_name to that given in command line, or config, or default
if args.traj_file_name:
options['traj_file_name'] = args.traj_file_name
else:
# default to <symbol>.traj
default_traj_file_name = f"{options['symbol']}.traj"
options['traj_file_name'] = default_traj_file_name
print("Given configuration:")
pp.pprint(options)
print()
# Sets the file name of the output file.
if args.out_file_name:
options['out_file_name'] = args.out_file_name
else:
# Defaults to <symbol>.json if not provided in the command line.
default_out_file_name = f"{options['symbol']}_out.json"
options['out_file_name'] = options.get("out_file_name", default_out_file_name)
# Sets the output directory of the output file.
if args.out_dir:
output_dir = args.out_dir
options['out_dir'] = output_dir
else:
# Defaults to current working directory if not present in options
options['out_dir'] = options.get('out_dir', os.getcwd())
# Executes the given subcommand, defaults to calling default
args.sub_command(options, args)
| 10,908
|
def validate_dissolution_statement_type(filing_json, legal_type) -> Optional[list]:
"""Validate dissolution statement type of the filing."""
msg = []
dissolution_stmt_type_path = '/filing/dissolution/dissolutionStatementType'
dissolution_stmt_type = get_str(filing_json, dissolution_stmt_type_path)
if legal_type == Business.LegalTypes.COOP.value:
if not dissolution_stmt_type:
msg.append({'error': _('Dissolution statement type must be provided.'),
'path': dissolution_stmt_type_path})
return msg
if not DissolutionStatementTypes.has_value(dissolution_stmt_type):
msg.append({'error': _('Invalid Dissolution statement type.'),
'path': dissolution_stmt_type_path})
return msg
return None
| 10,909
|
def parse_args():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser(
usage='%(prog)s [options] <target path> <image> [image] ...')
parser.add_argument(
'-c', '--captions',
dest='captions',
action='store_true',
default=False,
help='read image captions from text files ("<IMAGE_NAME>.txt")')
parser.add_argument(
'--no-resize',
dest='no_resize',
action='store_true',
default=False,
help='do not resize images, just copy them')
parser.add_argument(
'-s', '--size',
dest='max_image_size',
type=parse_dimension_arg,
default=ARGS_DEFAULT_MAX_IMAGE_SIZE,
help='set maximum image size [default: {}]'
.format(ARGS_DEFAULT_MAX_IMAGE_SIZE))
parser.add_argument(
'-t', '--thumbnail-size',
dest='max_thumbnail_size',
type=parse_dimension_arg,
default=ARGS_DEFAULT_MAX_THUMBNAIL_SIZE,
help='set maximum thumbnail size [default: {}]'
.format(ARGS_DEFAULT_MAX_THUMBNAIL_SIZE))
parser.add_argument(
'--title',
dest='title',
help='set gallery title on the website')
parser.add_argument(
'--lightbox',
dest='lightbox',
action='store_true',
default=False,
help='Enable lightbox effect on the website. This disables sub-page creation per picture.')
parser.add_argument(
'--html-only',
dest='html_only',
action='store_true',
default=False,
help='Only generate HTML. Do not process images. This is useful if already processed once but need to re-create html pages.')
parser.add_argument(
'--no-optimize-image',
dest='no_optimize_image',
action='store_true',
default=False,
        help='do not optimize images (skip size reduction and metadata removal)')
# First positional argument.
parser.add_argument('destination_path')
# Remaining positional arguments (at least one), as a list.
parser.add_argument('full_image_filenames', nargs='+')
return parser.parse_args()
| 10,910
|
def weave(left: List[int], right: List[int]) -> List[List[int]]:
""" Gives all possible combinations of left and right
keeping the original order on left and right """
if not left or not right:
return [left] if left else [right]
left_result: List[List[int]] = weave_helper(left, right)
right_result: List[List[int]] = weave_helper(right, left)
return left_result + right_result
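# Worked example (assumes weave_helper produces the usual interleavings, as in the
# classic CtCI "weave" routine): weave([1, 2], [3, 4]) yields the six orderings of
# [1, 2, 3, 4] in which 1 stays before 2 and 3 stays before 4, e.g. [1, 3, 2, 4].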
| 10,911
|
def test_rgb_to_hsl_part_18():
"""Test rgb to hsl color function"""
# assert rgb_to_hsl(0, 0, 0) == (300, 100, 0)
assert rgb_to_hsl(51, 0, 51) == (300, 100, 10)
assert rgb_to_hsl(102, 0, 102) == (300, 100, 20)
assert rgb_to_hsl(153, 0, 153) == (300, 100, 30)
assert rgb_to_hsl(204, 0, 204) == (300, 100, 40)
assert rgb_to_hsl(255, 0, 255) == (300, 100, 50)
assert rgb_to_hsl(255, 51, 255) == (300, 100, 60)
assert rgb_to_hsl(255, 102, 255) == (300, 100, 70)
assert rgb_to_hsl(255, 153, 255) == (300, 100, 80)
assert rgb_to_hsl(255, 204, 255) == (300, 100, 90)
# assert rgb_to_hsl(255, 255, 255) == (300, 100, 100)
| 10,912
|
def compute_rel_attn_value(p_attn, rel_mat, emb, ignore_zero=True):
"""
Compute a part of *attention weight application* and *query-value product*
in generalized RPE.
(See eq. (10) - (11) in the MuseBERT paper.)
Specifically,
- We use distributive law on eq. (11). The function computes the
second term:
$ sum_j (alpha_{ij} * sum_a Emb_a^K(r_{ij}^a)) $
Here,
- b for batch size, h for n_head, vs for vocabulary size.
- dtype is torch.float unless specified.
    :param p_attn: (b, h, L_q, L_k)
:param rel_mat: (b, Lq, Lk)
:param emb: (h, vs, d)
:param ignore_zero: bool. Whether to exclude the first vocab.
:return: (b, h, Lq, d)
"""
vs = emb.size(-2)
# bool_relmat: (b, Lq, vs - 1, Lk), dtype: torch.float
bool_relmat = compute_bool_rel_mat(rel_mat, vs, ignore_zero=ignore_zero)
    # p_attn: -> (b, h, Lq, 1, 1, Lk)
    # bool_relmat: -> (b, 1, L_q, vs - 1, L_k, 1)
    # acmlt_p_attn: (b, h, Lq, vs - 1, 1, 1) -> (b, h, Lq, vs - 1)
acmlt_p_attn = \
torch.matmul(p_attn.unsqueeze(-2).unsqueeze(-2),
bool_relmat.unsqueeze(1).unsqueeze(-1)
).squeeze(-1).squeeze(-1)
    # acmlt_p_attn: -> (b, h, Lq, 1, vs - 1)
# emb: -> (1, h, 1, vs, d)
# rel_scores: (b, h, Lq, 1, d) -> (b, h, Lq, d)
start_ind = 1 if ignore_zero else 0
rel_values = \
torch.matmul(acmlt_p_attn.unsqueeze(-2),
emb[:, start_ind:].unsqueeze(0).unsqueeze(-3)
).squeeze(-2)
return rel_values
| 10,913
|
def check_input(args):
"""Checks whether to read from stdin/file and validates user input/options.
"""
# Defaults
option = None
fh = sys.stdin # file handle
if not len(args):
# Reading from pipe with default option
if sys.stdin.isatty():
sys.stderr.write(__doc__)
sys.exit(1)
elif len(args) == 1:
# One of two options: option & Pipe OR file & default option
if args[0].startswith('-'):
option = args[0][1:]
if sys.stdin.isatty(): # ensure the PDB data is streamed in
emsg = 'ERROR!! No data to process!\n'
sys.stderr.write(emsg)
sys.stderr.write(__doc__)
sys.exit(1)
else:
if not os.path.isfile(args[0]):
emsg = 'ERROR!! File not found or not readable: \'{}\'\n'
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
fh = open(args[0], 'r')
elif len(args) == 2:
# Two options: option & File
if not args[0].startswith('-'):
emsg = 'ERROR! First argument is not an option: \'{}\'\n'
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
if not os.path.isfile(args[1]):
emsg = 'ERROR!! File not found or not readable: \'{}\'\n'
sys.stderr.write(emsg.format(args[1]))
sys.stderr.write(__doc__)
sys.exit(1)
option = args[0][1:]
fh = open(args[1], 'r')
else: # Whatever ...
sys.stderr.write(__doc__)
sys.exit(1)
# Validate option
if option is not None and option != 'multi':
emsg = 'ERROR!! You provided an invalid option: \'{}\'\n'
sys.stderr.write(emsg.format(option))
sys.stderr.write(__doc__)
sys.exit(1)
return (option, fh)
| 10,914
|
def mock_real_galaxy():
"""Mock real galaxy."""
dm = np.loadtxt(TEST_DATA_REAL_PATH / "dark.dat")
s = np.loadtxt(TEST_DATA_REAL_PATH / "star.dat")
g = np.loadtxt(TEST_DATA_REAL_PATH / "gas_.dat")
gal = core.Galaxy(
m_s=s[:, 0] * 1e10 * u.M_sun,
x_s=s[:, 1] * u.kpc,
y_s=s[:, 2] * u.kpc,
z_s=s[:, 3] * u.kpc,
vx_s=s[:, 4] * (u.km / u.s),
vy_s=s[:, 5] * (u.km / u.s),
vz_s=s[:, 6] * (u.km / u.s),
m_dm=dm[:, 0] * 1e10 * u.M_sun,
x_dm=dm[:, 1] * u.kpc,
y_dm=dm[:, 2] * u.kpc,
z_dm=dm[:, 3] * u.kpc,
vx_dm=dm[:, 4] * (u.km / u.s),
vy_dm=dm[:, 5] * (u.km / u.s),
vz_dm=dm[:, 6] * (u.km / u.s),
m_g=g[:, 0] * 1e10 * u.M_sun,
x_g=g[:, 1] * u.kpc,
y_g=g[:, 2] * u.kpc,
z_g=g[:, 3] * u.kpc,
vx_g=g[:, 4] * (u.km / u.s),
vy_g=g[:, 5] * (u.km / u.s),
vz_g=g[:, 6] * (u.km / u.s),
)
return gal
| 10,915
|
def lend(request):
"""
Lend view.
It receives the data from the lend form, process and validates it,
and reloads the page if everything is OK
Args:
- request (HttpRequest): the request
Returns:
"""
logged_user = get_logged_user(request)
if logged_user is not None and logged_user.user_role == UserRole.LENDER:
d = dict(request.POST)
d['lender_input'] = logged_user.id
errors = Loan.objects.basic_validator(d)
if len(errors) > 0:
for key, value in errors.items():
messages.error(request, value)
else:
borrower = request.POST.get('borrower_input', 0)
amount = request.POST.get('amount_input', 0)
new_loan = Loan.objects.create(
borrower=User.objects.get(id=borrower),
lender=logged_user,
amount=int(amount)
)
messages.info(request, 'Loan executed successfully')
return redirect('lender', id=logged_user.id)
else:
request.session.clear()
return redirect('/')
| 10,916
|
def test_all_current_members():
"""all_current_members works as expected
"""
r = niaopendata.all_current_members()
_check_valid_list_response(r)
| 10,917
|
def main():
""" Spit out a string regardless of input """
args = getArguments().parse_args()
if len(sys.argv) == 2:
stronk.generate_random_keys(
args.keyAmount, args.keyLength)
elif len(sys.argv) >= 3:
stronk.generate_random_keys(
args.keyAmount, args.keyLength, args.identify)
else:
stronk.generate_random_keys()
| 10,918
|
def deprecated() -> None:
"""Run the command and print a deprecated notice."""
LOG.warning("c2cwsgiutils_coverage_report.py is deprecated; use c2cwsgiutils-coverage-report instead")
return main()
| 10,919
|
def build_syscall_Linux(syscall, arg_list, arch_bits, constraint=None, assertion = None, clmax=SYSCALL_LMAX, optimizeLen=False):
"""
arch_bits = 32 or 64 :)
"""
# Check args
if( syscall.nb_args() != len(arg_list)):
error("Error. Expected {} arguments, got {}".format(len(syscall.arg_types), len(arg_list)))
return None
# Check args length
for i in range(0,len(arg_list)):
if( not verifyArgType(arg_list[i], syscall.arg_types[i])):
error("Argument error for '{}': expected '{}', got '{}'".format(arg_list[i], syscall.arg_types[i], type(arg_list[i])))
return None
# Check constraint and assertion
if( constraint is None ):
constraint = Constraint()
if( assertion is None ):
assertion = getBaseAssertion()
# Check if we have the function !
verbose("Trying to call {}() function directly".format(syscall.def_name))
func_call = build_call(syscall.function(), arg_list, constraint, assertion, clmax=clmax, optimizeLen=optimizeLen)
if( not isinstance(func_call, str) ):
verbose("Success")
return func_call
else:
if( not constraint.chainable.ret ):
verbose("Coudn't call {}(), try direct syscall".format(syscall.def_name))
else:
verbose("Couldn't call {}() and return to ROPChain".format(syscall.def_name))
return None
# Otherwise do syscall directly
# Set the registers
args = [(Arch.n2r(x[0]), x[1]) for x in zip(syscall.arg_regs, arg_list) + syscall.syscall_arg_regs]
chain = popMultiple(args, constraint, assertion, clmax-1, optimizeLen=optimizeLen)
if( not chain ):
verbose("Failed to set registers for the mprotect syscall")
return None
# Int 0x80
if( arch_bits == 32 ):
syscall_gadgets = search(QueryType.INT80, None, None, constraint, assertion)
# syscall
elif( arch_bits == 64):
syscall_gadgets = search(QueryType.SYSCALL, None, None, constraint, assertion)
if( not syscall_gadgets ):
verbose("Failed to find an 'int 0x80' OR 'syscall' gadget")
return None
else:
chain.addChain(syscall_gadgets[0])
verbose("Success")
return chain
| 10,920
|
def gamma(x):
"""Diffusion error (normalized)"""
CFL = x[0]
kh = x[1]
return (
1.
/ (-2)
* (
4. * CFL ** 2 / 3
- 7. * CFL / 3
+ (-23. * CFL ** 2 / 12 + 35 * CFL / 12) * np.cos(kh)
+ (2. * CFL ** 2 / 3 - 2 * CFL / 3) * np.cos(2 * kh)
+ (-CFL ** 2 / 12 + CFL / 12) * np.cos(3 * kh)
)
)
| 10,921
|
def copylabel(original_name):
"""create names/labels with the sequence (Copy), (Copy 2), (Copy 3), etc."""
copylabel = pgettext_lazy("this is a copy", "Copy")
copy_re = f"\\({copylabel}( [0-9]*)?\\)"
match = re.search(copy_re, original_name)
if match is None:
label = f"{original_name} ({copylabel})"
elif match.groups()[0] is None:
label = re.sub(copy_re, f"({copylabel} 2)", original_name)
else:
n = int(match.groups()[0].strip()) + 1
label = re.sub(copy_re, f"({copylabel} {n})", original_name)
return label
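# Usage sketch (assumes an active Django translation in which the label renders as "Copy"):
# copylabel("Report")          -> "Report (Copy)"
# copylabel("Report (Copy)")   -> "Report (Copy 2)"
# copylabel("Report (Copy 2)") -> "Report (Copy 3)"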
| 10,922
|
def load_opts_from_mrjob_confs(runner_alias, conf_paths=None):
"""Load a list of dictionaries representing the options in a given
list of mrjob config files for a specific runner. Returns
``[(path, values), ...]``. If a path is not found, use ``(None, {})`` as
its value.
If *conf_paths* is ``None``, look for a config file in the default
locations (see :py:func:`find_mrjob_conf`).
:type runner_alias: str
:param runner_alias: String identifier of the runner type, e.g. ``emr``,
``local``, etc.
:type conf_paths: list or ``None``
    :param conf_paths: locations of the files to load
This will only load each config file once, even if it's referenced
from multiple paths due to symlinks.
"""
if conf_paths is None:
results = load_opts_from_mrjob_conf(runner_alias)
else:
# don't include conf files that were loaded earlier in conf_paths
already_loaded = []
# load configs in reversed order so that order of conf paths takes
# precedence over inheritance
results = []
for path in reversed(conf_paths):
results = load_opts_from_mrjob_conf(
runner_alias, path, already_loaded=already_loaded) + results
if runner_alias and not any(conf for path, conf in results):
log.warning('No configs specified for %s runner' % runner_alias)
return results
| 10,923
|
def clut8_rgb888(i):
"""Reference CLUT for wasp-os.
Technically speaking this is not a CLUT because the we lookup the colours
algorithmically to avoid the cost of a genuine CLUT. The palette is
designed to be fairly easy to generate algorithmically.
The palette includes all 216 web-safe colours together 4 grays and
36 additional colours that target "gaps" at the brighter end of the web
safe set. There are 11 greys (plus black and white) although two are
fairly close together.
:param int i: Index (from 0..255 inclusive) into the CLUT
:return: 24-bit colour in RGB888 format
"""
if i < 216:
rgb888 = ( i % 6) * 0x33
rg = i // 6
rgb888 += (rg % 6) * 0x3300
rgb888 += (rg // 6) * 0x330000
elif i < 252:
i -= 216
rgb888 = 0x7f + (( i % 3) * 0x33)
rg = i // 3
rgb888 += 0x4c00 + ((rg % 4) * 0x3300)
rgb888 += 0x7f0000 + ((rg // 4) * 0x330000)
else:
i -= 252
rgb888 = 0x2c2c2c + (0x101010 * i)
return rgb888
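# A few spot checks of the palette layout; the values follow directly from the branches above.
assert clut8_rgb888(0) == 0x000000    # web-safe black
assert clut8_rgb888(215) == 0xffffff  # web-safe white
assert clut8_rgb888(252) == 0x2c2c2c  # first of the four extra greys
assert clut8_rgb888(255) == 0x5c5c5c  # brightest extra grey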
| 10,924
|
def test_retry_change(settings):
""" Attempts to change retry property """
with pytest.raises(DeadlinksSettingsChange):
settings.retry = 8
with pytest.raises(AttributeError):
del settings.retry
| 10,925
|
def test_wf_st_6(plugin):
""" workflow with two tasks, outer splitter and combiner for the workflow"""
wf = Workflow(name="wf_st_6", input_spec=["x", "y"])
wf.add(multiply(name="mult", x=wf.lzin.x, y=wf.lzin.y))
wf.add(add2(name="add2", x=wf.mult.lzout.out))
wf.split(["x", "y"], x=[1, 2, 3], y=[11, 12])
wf.combine("x")
wf.set_output([("out", wf.add2.lzout.out)])
wf.plugin = plugin
with Submitter(plugin=plugin) as sub:
sub(wf)
results = wf.result()
assert results[0][0].output.out == 13
assert results[0][1].output.out == 24
assert results[0][2].output.out == 35
assert results[1][0].output.out == 14
assert results[1][1].output.out == 26
assert results[1][2].output.out == 38
# checking all directories
assert wf.output_dir
for odir in wf.output_dir:
assert odir.exists()
| 10,926
|
def assert_table_headers(id_or_elem, headers):
"""Assert the headers of a table.
The headers are the `<th>` tags.
:argument id_or_elem: The identifier of the element, or its element object.
:argument headers: A sequence of the expected headers.
:raise: AssertionError if the element doesn't exist, or if its headers are
not the expected.
"""
logger.debug('Checking headers for %r' % (id_or_elem,))
elem = _get_elem(id_or_elem)
if not elem.tag_name == 'table':
_raise('Element %r is not a table.' % (id_or_elem,))
header_elems = elem.find_elements_by_tag_name('th')
header_text = [get_text(e) for e in header_elems]
if not header_text == headers:
        msg = ('Expected headers: %r. Actual headers: %r\n' %
(headers, header_text))
_raise(msg)
| 10,927
|
def get_file_from_rcsb(pdb_id,data_type='pdb'):
""" (file_name) -> file_path
fetch pdb or structure factor file for pdb_id from the RCSB website
Args:
        pdb_id (str): the PDB ID of the entry to fetch
data_type (str):
'pdb' -> pdb
'xray' -> structure factor
Returns:
        a file path for the fetched file, or an empty string if the fetch failed
"""
try:
file_name = fetch.get_pdb(pdb_id,data_type,mirror='rcsb',log=null_out())
except Sorry:
file_name = ''
return file_name
| 10,928
|
def write_conll_prediction_file(
out_file: str,
examples: List[Example],
y_preds: List[TAG_SEQUENCE]) -> None:
"""Writes a text output with predictions for a collection of Examples in
CoNLL evaluation format, one token per line:
TOKEN GOLD-TAG PRED-TAG
Distinct example outputs are separated by a blank line.
Args:
out_file: the path of the output CoNLL prediction file.
examples: list of Example instances with associated tokens and gold
tag labels.
y_preds: list of predicted tag sequences for each example.
Raises:
AssertionError: if (a) the lengths of y_preds and examples are not
equal, or (b) there is a mismatch in length of tokens, labels or
predicted tags for any example.
"""
assert len(y_preds) == len(examples)
with smart_open(out_file) as fd:
for example, pred_tag in zip(examples, y_preds):
tokens = example.doc_tokens
labels = example.labels
assert len(tokens) == len(labels)
assert len(labels) == len(pred_tag)
for token, label, pred in zip(tokens, labels, pred_tag):
fd.write('{} {} {}\n'.format(str(token.text), label, pred))
# Separate examples by line break
fd.write('\n')
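# Illustration (hypothetical example): for tokens ["John", "lives"], gold labels
# ["B-PER", "O"] and predictions ["B-PER", "O"], the output file contains:
#
#   John B-PER B-PER
#   lives O O
#
# followed by a blank line separating this example from the next one.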
| 10,929
|
def parse_events(fobj):
"""Parse a trace-events file into {event_num: (name, arg1, ...)}."""
def get_argnames(args):
"""Extract argument names from a parameter list."""
return tuple(arg.split()[-1].lstrip('*') for arg in args.split(','))
events = {dropped_event_id: ('dropped', 'count')}
event_num = 0
for line in fobj:
m = event_re.match(line.strip())
if m is None:
continue
disable, name, args = m.groups()
events[event_num] = (name,) + get_argnames(args)
event_num += 1
return events
| 10,930
|
def enu2ECEF(phi, lam, x, y, z, t=0.0):
""" Convert ENU local coordinates (East, North, Up) to Earth centered - Earth fixed (ECEF) Cartesian,
correcting for Earth rotation if needed.
ENU coordinates can be transformed to ECEF by two rotations:
1. A clockwise rotation over east-axis by an angle (90 - phi) to align the up-axis with the z-axis.
2. A clockwise rotation over the z-axis by and angle (90 + lam) to align the east-axis with the x-axis.
Source: http://www.navipedia.net/index.php/Transformations_between_ECEF_and_ENU_coordinates
Arguments:
phi: [float] east-axis rotation angle
lam: [float] z-axis rotation angle
x: [float] ENU x coordinate
y: [float] ENU y coordinate
z: [float] ENU z coordinate
Keyword arguments:
t: [float] time in seconds, 0 by default
Return:
(x_ecef, y_ecef, z_ecef): [tuple of floats] ECEF coordinates
"""
# Calculate ECEF coordinate from given local coordinates
x_ecef = -np.sin(lam)*x - np.sin(phi)*np.cos(lam)*y + np.cos(phi)*np.cos(lam)*z
y_ecef = np.cos(lam)*x - np.sin(phi)*np.sin(lam)*y + np.cos(phi)*np.sin(lam)*z
z_ecef = np.cos(phi) *y + np.sin(phi) *z
# Calculate time correction (in radians)
tau = 2*np.pi/(23.0*3600.0 + 56.0*60.0 + 4.09054) # Earth rotation in rad/s
yaw = -tau*t
x_temp = x_ecef
y_temp = y_ecef
# Apply time correction
x_ecef = np.cos(yaw)*x_temp + np.sin(yaw)*y_temp
y_ecef = -np.sin(yaw)*x_temp + np.cos(yaw)*y_temp
return x_ecef, y_ecef, z_ecef
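# A minimal sketch (hypothetical values): with t=0, the local "up" unit vector
# (E=0, N=0, U=1) maps onto the ECEF radial direction at latitude phi, longitude lam.
import numpy as np

phi, lam = np.radians(45.0), np.radians(10.0)
x_e, y_e, z_e = enu2ECEF(phi, lam, 0.0, 0.0, 1.0)
# Expect (cos(phi)*cos(lam), cos(phi)*sin(lam), sin(phi)) up to floating point error.
print(x_e, y_e, z_e)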
| 10,931
|
def _load_container_by_name(container_name, version=None):
""" Try and find a container in a variety of methods.
Returns the container or raises a KeyError if it could not be found
"""
for meth in (database.load_container, # From the labware database
_load_weird_container): # honestly don't know
log.debug(
f"Trying to load container {container_name} via {meth.__name__}")
try:
container = meth(container_name)
if meth == _load_weird_container:
container.properties['type'] = container_name
log.info(f"Loaded {container_name} from {meth.__name__}")
break
except (ValueError, KeyError) as e:
log.debug(f"{container_name} not in {meth.__name__} ({repr(e)})")
else:
log.debug(
f"Trying to load container {container_name} version {version}"
f"from v2 labware store")
container = load_new_labware(container_name, version=version)
return container
| 10,932
|
def get_login(discord_id):
"""Get login info for a specific user."""
discord_id_str = str(discord_id)
logins = get_all_logins()
if discord_id_str in logins:
return logins[discord_id_str]
return None
| 10,933
|
def labels(repo):
"""Setup the RadiaSoft labels for ``repo``.
Will add "radiasoft/" to the name if it is missing.
Args:
repo (str): will add https://github.com/radiasoft if missing
"""
r = _repo_arg(repo)
for x in ('inprogress', 'c5def5'), ('1', 'b60205'), ('2', 'fbca04'):
try:
r.create_label(*x)
except github3.exceptions.UnprocessableEntity:
# 422 Validation Failed: happens because already exists
pass
| 10,934
|
def to_json_dict(json_data):
"""Given a dictionary or JSON string; return a dictionary.
:param json_data: json_data(dict, str): Input JSON object.
:return: A Python dictionary/OrderedDict with the contents of the JSON object.
:raises TypeError: If the input object is not a dictionary or string.
"""
if isinstance(json_data, dict):
return json_data
elif isinstance(json_data, str):
return json.loads(json_data, object_hook=OrderedDict)
else:
raise TypeError(f"'json_data' must be a dict or valid JSON string; received: {json_data!r}")
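# Quick usage check (assumes the module-level `json` and `OrderedDict` imports that the
# function above relies on):
assert to_json_dict({"a": 1}) == {"a": 1}
assert to_json_dict('{"b": 2, "a": 1}') == OrderedDict([("b", 2), ("a", 1)])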
| 10,935
|
def get_keypoints():
"""Get the COCO keypoints and their left/right flip coorespondence map."""
# Keypoints are not available in the COCO json for the test split, so we
# provide them here.
keypoints = [
'nose',
'neck',
'right_shoulder',
'right_elbow',
'right_wrist',
'left_shoulder',
'left_elbow',
'left_wrist',
'right_hip',
'right_knee',
'right_ankle',
'left_hip',
'left_knee',
'left_ankle',
'right_eye',
'left_eye',
'right_ear',
'left_ear']
return keypoints
| 10,936
|
def has_mtu_mismatch(iface: CoreInterface) -> bool:
"""
Helper to detect MTU mismatch and add the appropriate OSPF
mtu-ignore command. This is needed when e.g. a node is linked via a
GreTap device.
"""
if iface.mtu != DEFAULT_MTU:
return True
if not iface.net:
return False
    for net_iface in iface.net.get_ifaces():
        if net_iface.mtu != iface.mtu:
            return True
return False
| 10,937
|
def phrase_boxes_alignment(flatten_boxes, ori_phrases_boxes):
""" align the bounding boxes with corresponding phrases. """
phrases_boxes = list()
ori_pb_boxes_count = list()
for ph_boxes in ori_phrases_boxes:
ori_pb_boxes_count.append(len(ph_boxes))
    start_point = 0
for pb_boxes_num in ori_pb_boxes_count:
sub_boxes = list()
        for i in range(start_point, start_point + pb_boxes_num):
sub_boxes.append(flatten_boxes[i])
        start_point += pb_boxes_num
phrases_boxes.append(sub_boxes)
pb_boxes_count = list()
for ph_boxes in phrases_boxes:
pb_boxes_count.append(len(ph_boxes))
assert pb_boxes_count == ori_pb_boxes_count
return phrases_boxes
| 10,938
|
def editor(initial_contents=None, filename=None, editor=None):
"""
Open a text editor, user edits, return results
ARGUMENTS
initial_contents
If not None, this string is written to the file before the editor
is opened.
filename
If not None, the name of the file to edit. If None, a temporary file
is used.
editor
The path to an editor to call. If None, use editor.default_editor()
"""
editor = editor or default_editor()
if not filename:
with tempfile.NamedTemporaryFile(mode='r+', suffix='.txt') as fp:
if initial_contents is not None:
fp.write(initial_contents)
fp.flush()
subprocess.call([editor, fp.name])
fp.seek(0)
return fp.read()
path = Path(filename)
if initial_contents is not None:
path.write_text(initial_contents)
subprocess.call([editor, filename])
return path.read_text()
| 10,939
|
def dismiss_notification(request):
""" Dismisses a notification
### Response
    * Status code 200 (When the notification is successfully dismissed)
{
"success": <boolean: true>
}
* `success` - Whether the dismissal request succeeded or not
* Status code 400 (When the notification ID cannot be found)
{
"success": <boolean: false>,
"message": <string: "notification_not_found">
}
* `message` - Error message, when success is false
"""
response = {'success': False}
data = request.data
try:
notif = Notification.objects.get(id=data['notificationId'])
notif.dismissed_by.add(request.user)
response['success'] = True
resp_status = status.HTTP_200_OK
except Notification.DoesNotExist:
resp_status = status.HTTP_400_BAD_REQUEST
response['message'] = 'notification_not_found'
return Response(response, status=resp_status)
| 10,940
|
def remove_objects(*, objects: tuple = ()) -> None:
"""
Removes files or folders.
>>> from snakypy.helpers.os import remove_objects
>>> remove_objects(objects=("/tmp/folder", "/tmp/file.txt"))
Args:
objects (tuple): It must receive the path of the object, folders or files.
Returns:
None
"""
with suppress(FileNotFoundError):
for item in objects:
rmtree(item, ignore_errors=True) if isdir(item) else remove(item)
| 10,941
|
def writeTreeStructure(skillList, path: str, **args) -> None:
"""Output a file with a tree structure definition."""
data = [{
'define': 'tree',
'tree': skillList.buildTree()
}]
dumpJsonFile(data, path, **args)
| 10,942
|
def switchToCameraCenter(cameraName, editor):
"""
Additional wrapper layer around switchToCamera. This function switches
to the current camera and also toggles the view mode to be 'center'
"""
pass
| 10,943
|
def log_at_level(logger, message_level, verbose_level, msg):
"""
    writes to log if message_level <= verbose_level
Returns anything written in case we might want to drop down and output at a
lower log level
"""
if message_level <= verbose_level:
logger.info(msg)
return True
return False
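# Quick usage sketch with the standard logging module:
import logging

logging.basicConfig(level=logging.INFO)
demo_logger = logging.getLogger("demo")
assert log_at_level(demo_logger, message_level=1, verbose_level=2, msg="emitted") is True
assert log_at_level(demo_logger, message_level=3, verbose_level=2, msg="suppressed") is False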
| 10,944
|
def datafile(tmp_path_factory):
"""Make a temp HDF5 Ocat details file within 60 arcmin of 3c273 for obsids
before 2021-Nov that persists for the testing session."""
datafile = str(tmp_path_factory.mktemp('ocat') / 'target_table.h5')
update_ocat_local(datafile, target_name='3c273', resolve_name=True, radius=60,
startDate=DATE_RANGE)
return datafile
| 10,945
|
def _collect_scalars(values):
"""Given a list containing scalars (float or int) collect scalars
into a single prefactor. Input list is modified."""
prefactor = 1.0
for i in range(len(values)-1, -1, -1):
if isinstance(values[i], (int, float)):
prefactor *= values.pop(i)
return prefactor
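# Quick usage check: scalars are folded into one prefactor and removed from the list in place.
example_values = [2, "x", 3.0, "y"]
assert _collect_scalars(example_values) == 6.0
assert example_values == ["x", "y"]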
| 10,946
|
def register_handler(pid):
"""
Registers the given Elixir process as the handler
for this library.
This function must be called first - and the client
module must define a function with the same prototype which
simply calls this function.
"""
global _msg_handling_pid
_msg_handling_pid = pid
| 10,947
|
def reduce(p):
"""
This recipe does a quick reduction of GMOS nod and shuffle data.
The data is left in its 2D form, and only a sky correction is done.
The seeing from the spectra cross-section is measured when possible.
Parameters
----------
p : PrimitivesBASE object
A primitive set matching the recipe_tags.
"""
p.prepare()
p.addDQ(static_bpm=None)
p.addVAR(read_noise=True)
p.overscanCorrect()
p.biasCorrect()
p.ADUToElectrons()
p.addVAR(poisson_noise=True)
p.findAcquisitionSlits()
p.skyCorrectNodAndShuffle()
p.measureIQ(display=True)
p.writeOutputs()
return
| 10,948
|
def create_output_directory(validated_cfg: ValidatedConfig) -> Path:
"""
Creates a top level download directory if it does not already exist, and returns
the Path to the download directory.
"""
download_path = validated_cfg.output_directory / f"{validated_cfg.version}"
download_path.mkdir(parents=True, exist_ok=True)
return download_path
| 10,949
|
def presentation_logistique(regression,sig=False):
"""
Mise en forme des résultats de régression logistique
Paramètres
----------
regression: modèle de régression de statsmodel
sig: optionnel, booléen
Retours
-------
DataFrame : tableau de la régression logistique
"""
# Passage des coefficients aux Odds Ratio
df = np.exp(regression.conf_int())
df['odd ratio'] = round(np.exp(regression.params), 2)
df["p-value"] = round(regression.pvalues, 3)
df["IC"] = df.apply(lambda x : "%.2f [%.2f-%.2f]" \
% (x["odd ratio"],x[0],x[1]),axis=1)
    # Add significance markers
if sig:
df["p-value"] = df["p-value"].apply(significativite)
df = df.drop([0,1], axis=1)
return df
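# Usage sketch (assumes a fitted statsmodels Logit model, e.g.
# model = sm.Logit(y, sm.add_constant(X)).fit()):
# table = presentation_logistique(model, sig=False)
# table[["odd ratio", "p-value", "IC"]] then holds the formatted odds ratios,
# p-values and confidence intervals.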
| 10,950
|
def __check_count_freq(count_freq, flink_window_type):
"""
校验窗口配置项:统计频率
:param count_freq: 统计频率
:param flink_window_type: 窗口类型
"""
# 校验统计频率(单位:s)
if flink_window_type in [TUMBLING, SLIDING, ACCUMULATE] and count_freq not in [
30,
60,
180,
300,
600,
]:
raise StreamWindowConfigCheckError(
_("窗口类型[%s] 属性[%s] 目前只支持 %s") % ("scroll、slide、accumulate", "count_freq", "[30, 60, 180, 300, 600]")
)
| 10,951
|
def test_L1DecayRegularizer():
"""
test L1DecayRegularizer
:return:
"""
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(
main_program=main_program, startup_program=startup_program):
data = fluid.layers.data(name='X', shape=[1], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
initializer = fluid.initializer.Constant(value=0.5)
param_attrs = fluid.ParamAttr(initializer=initializer)
y_predict = fluid.layers.fc(name="fc",
input=data,
size=10,
param_attr=param_attrs)
loss = fluid.layers.cross_entropy(input=y_predict, label=label)
avg_loss = fluid.layers.mean(loss)
optimizer = fluid.optimizer.Adagrad(
learning_rate=0.1,
regularization=fluid.regularizer.L1DecayRegularizer(
regularization_coeff=0.1))
optimizer.minimize(avg_loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
compiled_prog = fluid.compiler.CompiledProgram(main_program)
x = np.ones(shape=(10, 1)).astype('float32')
y = np.zeros(shape=(10, 1)).astype('int64')
for i in range(10):
res = exe.run(compiled_prog,
feed={"X": x,
"label": y},
fetch_list=[avg_loss])[0][0]
res1 = res
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(
main_program=main_program, startup_program=startup_program):
data = fluid.layers.data(name='X', shape=[1], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
initializer = fluid.initializer.Constant(value=0.5)
param_attrs = fluid.ParamAttr(initializer=initializer)
y_predict = fluid.layers.fc(name="fc",
input=data,
size=10,
param_attr=param_attrs)
loss = fluid.layers.cross_entropy(input=y_predict, label=label)
avg_loss = fluid.layers.mean(loss)
optimizer = fluid.optimizer.Adagrad(learning_rate=0.1, )
optimizer.minimize(avg_loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
compiled_prog = fluid.compiler.CompiledProgram(main_program)
x = np.ones(shape=(10, 1)).astype('float32')
y = np.zeros(shape=(10, 1)).astype('int64')
for i in range(10):
res = exe.run(compiled_prog,
feed={"X": x,
"label": y},
fetch_list=[avg_loss])[0][0]
res2 = res
print(res1)
print(res2)
tools.compare(res1, -0.20955022, delta=1e-3)
tools.compare(res2, -0.2250646, delta=1e-3)
| 10,952
|
def handle_colname_collisions(df: pd.DataFrame, mapper: dict, protected_cols: list) -> (pd.DataFrame, dict, dict):
"""
Description
-----------
Identify mapper columns that match protected column names. When found,
update the mapper and dataframe, and keep a dict of these changes
to return to the caller e.g. SpaceTag.
Parameters
----------
df: pd.DataFrame
submitted data
mapper: dict
a dictionary for the schema mapping (JSON) for the dataframe.
protected_cols: list
protected column names i.e. timestamp, country, admin1, feature, etc.
Output
------
pd.DataFame:
The modified dataframe.
dict:
The modified mapper.
dict:
key: new column name e.g. "day1month1year1" or "country_non_primary"
value: list of old column names e.g. ['day1','month1','year1'] or ['country']
"""
# Get names of geo fields that collide and are not primary_geo = True
non_primary_geo_cols = [d["name"] for d in mapper["geo"] if d["name"] in protected_cols and ("primary_geo" not in d or d["primary_geo"] == False)]
# Get names of date fields that collide and are not primary_date = True
non_primary_time_cols = [d['name'] for d in mapper['date'] if d["name"] in protected_cols and ('primary_date' not in d or d['primary_date'] == False)]
# Only need to change a feature column name if it qualifies another field,
# and therefore will be appended as a column to the output.
feature_cols = [d["name"] for d in mapper['feature'] if d["name"] in protected_cols and "qualifies" in d and d["qualifies"]]
# Verbose build of the collision_list, could have combined above.
collision_list = non_primary_geo_cols + non_primary_time_cols + feature_cols
# Bail if no column name collisions.
if not collision_list:
return df, mapper, {}
# Append any collision columns with the following suffix.
suffix = "_non_primary"
# Build output dictionary and update df.
renamed_col_dict = {}
for col in collision_list:
df.rename(columns={col: col + suffix}, inplace=True)
renamed_col_dict[col + suffix] = [col]
# Update mapper
for k, vlist in mapper.items():
for dct in vlist:
if dct["name"] in collision_list:
dct["name"] = dct["name"] + suffix
elif "qualifies" in dct and dct["qualifies"]:
# change any instances of this column name qualified by another field
dct["qualifies"] = [w.replace(w, w + suffix) if w in collision_list else w for w in dct["qualifies"] ]
elif "associated_columns" in dct and dct["associated_columns"]:
# change any instances of this column name in an associated_columns dict
dct["associated_columns"] = {k: v.replace(v, v + suffix) if v in collision_list else v for k, v in dct["associated_columns"].items() }
return df, mapper, renamed_col_dict
| 10,953
|
def process_file(filename):
"""Read a file from disk and parse it into a structured dict."""
try:
with codecs.open(filename, encoding='utf-8', mode='r') as f:
file_contents = f.read()
except IOError as e:
log.info('Unable to index file: %s, error :%s', filename, e)
return
data = json.loads(file_contents)
sections = []
title = ''
body_content = ''
if 'current_page_name' in data:
path = data['current_page_name']
else:
log.info('Unable to index file due to no name %s', filename)
return None
if 'body' in data and data['body']:
body = PyQuery(data['body'])
body_content = body.text().replace(u'¶', '')
sections.extend(generate_sections_from_pyquery(body))
else:
log.info('Unable to index content for: %s', filename)
if 'title' in data:
title = data['title']
if title.startswith('<'):
title = PyQuery(data['title']).text()
else:
log.info('Unable to index title for: %s', filename)
return {'headers': process_headers(data, filename),
'content': body_content, 'path': path,
'title': title, 'sections': sections}
| 10,954
|
def revive(grid: Grid, coord: Point) -> Grid:
"""Generates a set of all cells which can be revived near coord"""
revives = set()
for offset in NEIGHBOR_OFFSETS:
possible_revive = addpos(coord, offset)
if possible_revive in grid: continue
active_count = live_around(grid, possible_revive)
if active_count == 3:
revives.add(possible_revive)
return revives
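# Worked example (a horizontal blinker; assumes Grid is a set of (x, y) points and
# NEIGHBOR_OFFSETS covers the eight surrounding cells):
# revive({(0, 1), (1, 1), (2, 1)}, (1, 1)) -> {(1, 0), (1, 2)}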
| 10,955
|
def test_icons() -> None:
"""Test `icons` command."""
run(
COMMAND_LINES["icons"],
b"INFO Icons are written to out/icons_by_name and out/icons_by_id.\n"
b"INFO Icon grid is written to out/icon_grid.svg.\n"
b"INFO Icon grid is written to doc/grid.svg.\n",
)
assert (Path("out") / "icon_grid.svg").is_file()
assert (Path("out") / "icons_by_name").is_dir()
assert (Path("out") / "icons_by_id").is_dir()
assert (Path("out") / "icons_by_name" / "Röntgen apple.svg").is_file()
assert (Path("out") / "icons_by_id" / "apple.svg").is_file()
| 10,956
|
def test_verify_returns_permissions_on_active_collections_only(token, testapp):
"""Get user details and token expiry."""
inactive_collection = CollectionFactory(is_active=False)
permission1 = PermissionFactory(user=token.user, collection=inactive_collection,
registrant=True, cataloger=False)
permission1.save()
active_collection = CollectionFactory(is_active=True)
permission2 = PermissionFactory(user=token.user, collection=active_collection,
registrant=False, cataloger=True)
permission2.save()
res = testapp.get(url_for('oauth.verify'),
headers={'Authorization': str('Bearer ' + token.access_token)})
assert len(res.json_body['user']['permissions']) == 1
returned_permission = res.json_body['user']['permissions'][0]
assert returned_permission['code'] == active_collection.code
assert returned_permission['registrant'] == permission2.registrant
assert returned_permission['cataloger'] == permission2.cataloger
assert returned_permission['friendly_name'] == active_collection.friendly_name
| 10,957
|
def extract_dated_images(filename, output, start_time=None, end_time=None, interval=None, ocr=False):
"""
Read a video, check metadata to understand real time and then extract images into dated files
"""
if start_time:
vi = ManualTimes(filename, real_start_time=start_time, real_interval=interval, real_end_time=end_time)
else:
vi = VideoInfo(filename)
the_call = ["ffmpeg", "-hide_banner", "-loglevel", "verbose", "-y"] # -y : overwrite
the_call.extend(["-i", filename])
# frame_pts is new and unavailable - use real_timestamps instead of:
# the_call.extend(['-frame_pts', 'true'])
the_call.extend(['-qscale:v', '2']) # jpeg quality: 2-5 is good : https://stackoverflow.com/questions/10225403/how-can-i-extract-a-good-quality-jpeg-image-from-an-h264-video-file-with-ffmpeg
the_call.extend(['%06d.jpg'])
run_and_capture(the_call) # throw on fail
rx = re.compile(r'\d\d\d\d\d\d\.jpg') # glob can't match this properly
image_filenames = [f for f in Path(".").glob("*.jpg") if rx.match(str(f)) is not None]
last_ts = vi.real_start
try:
for f in sorted(image_filenames):
if ocr:
im = Image.open(f)
im_iv = ImageOps.grayscale(im)
im_iv = ImageOps.invert(im_iv)
im_iv = im_iv.crop((50, im.height - 100, 300, im.height))
im_iv.save("invert.jpg")
text = image_to_string(im_iv, config="digits")
text = image_to_string(im_iv, lang='eng', config="-c tessedit_char_whitelist=0123456789 -oem 0")
ts = str2dt(text, throw=False) or (last_ts + interval)
LOGGER.debug(f"file: {f} text:{text} ts:{ts}")
raise NotImplementedError("tesseract cannot see digits")
else:
ts = vi.real_timestamps[int(f.stem) - 1]
day_dir = Path(output) / Path(dt2str(ts.date()))
day_dir.mkdir(exist_ok=True)
new_filename = dt2str(ts) + f.suffix
new_path = day_dir / new_filename
LOGGER.debug(f"file: {f} ts:{ts} new:{new_path}")
shutil.move(str(f), str(new_path))
last_ts = ts
except KeyError as exc:
KeyError(f"{exc}: cannot find metadata in {filename}?")
| 10,958
|
def process_table_creation_surplus(region, exchanges_list):
"""Add docstring."""
ar = dict()
ar["@type"] = "Process"
ar["allocationFactors"] = ""
ar["defaultAllocationMethod"] = ""
ar["exchanges"] = exchanges_list
ar["location"] = location(region)
ar["parameters"] = ""
ar["processDocumentation"] = process_doc_creation()
ar["processType"] = "UNIT_PROCESS"
ar["name"] = surplus_pool_name + " - " + region
ar[
"category"
] = "22: Utilities/2211: Electric Power Generation, Transmission and Distribution"
ar["description"] = "Electricity surplus in the " + str(region) + " region."
ar["description"]=(ar["description"]
+ " This process was created with ElectricityLCI "
+ "(https://github.com/USEPA/ElectricityLCI) version " + elci_version
+ " using the " + model_specs.model_name + " configuration."
)
ar["version"] = make_valid_version_num(elci_version)
return ar
| 10,959
|
def upload_event():
"""
Expect a well formatted event data packet (list of events)
Verify the access token in the request
Verify the packet
if verified then send to event hub
else return a failure message
"""
# authenticate the access token with okta api call
# get user id from okta and make sure the ids match
auth_token = request.headers['Authorization']
auth_headers = {"Authorization": "{}".format(auth_token)}
print(auth_headers)
print("sending request to: {}".format(IDENTITY_SERVER_SETTINGS['PERSONICLE_AUTH_API_ENDPOINT']))
auth_response = requests.get(IDENTITY_SERVER_SETTINGS['PERSONICLE_AUTH_API_ENDPOINT'], headers=auth_headers)
print(auth_response.text, auth_response.status_code)
if auth_response.status_code != requests.codes.ok or json.loads(auth_response.text).get("message", False)== False:
return Response("Unauthorised access token", 401)
try:
user_id = json.loads(auth_response.text)['user_id']
except KeyError as e:
return Response("Incorrect response from auth server", 401)
content_type = request.headers.get('Content-Type')
if (content_type == 'application/json'):
event_data_packet = request.json
else:
return Response('Content-Type not supported!', 415)
if type(event_data_packet) != type([]):
return Response("Array of events expected", 422)
# verify the event packet by making the data dictionary api call
send_records = []
send_summary = {}
for event in event_data_packet:
data_dict_params = {"data_type": "event"}
data_dict_response = requests.post(DATA_DICTIONARY_SERVER_SETTINGS['HOST_URL']+"/validate-data-packet",
json=event, params=data_dict_params)
print(data_dict_response.text)
if data_dict_response.status_code == requests.codes.ok and json.loads(data_dict_response.text).get("schema_check", False):
if user_id == event.get("individual_id", ""):
send_summary[event['event_name']] = send_summary.get(event['event_name'], 0) + 1
send_records.append(event)
else:
send_summary['incorrect_user_id'] = send_summary.get('incorrect_user_id', 0) + 1
else:
send_summary['incorrectly_formatted_events'] = send_summary.get('incorrectly_formatted_events', 0) + 1
# send the data to azure event hub
schema_file = os.path.join(AVRO_SCHEMA_LOC, "event_schema.avsc")
if len(send_records)> 0:
send_records_azure.send_records_to_eventhub(schema_file, send_records, EVENTHUB_CONFIG['EVENTHUB_NAME'])
return jsonify({"message": "Sent {} records to database".format(len(send_records)),
"summary": send_summary
})
| 10,960
|
def makepath_coupled(model_hybrid,T,h,ode_method,sample_rate):
""" Compute paths of coupled exact-hybrid model using CHV ode_method. """
voxel = 0
# make copy of model with exact dynamics
model_exact = copy.deepcopy(model_hybrid)
for e in model_exact.events:
e.hybridType = SLOW
# setup integrator
path = np.zeros((Nt,2*model_hybrid.dimension))
path[0][0:model_hybrid.dimension] = model_hybrid.getstate(0)
path[0][model_hybrid.dimension:2*model_hybrid.dimension] = model_exact.getstate(0)
clock = np.zeros(Nt)
k = 0
tj = ode(chvrhs_coupled).set_integrator(ode_method,atol = h,rtol = h)
tj.set_f_params(model_hybrid,model_exact,sample_rate)
y0 = np.zeros(2*model_hybrid.dimension+1)
while (k+1<Nt) and (clock[k]<T):
k = k+1
s1 = tryexponential(1)
# solve
y0[0:model_hybrid.dimension] = model_hybrid.getstate(0)
y0[model_hybrid.dimension:2*model_hybrid.dimension] = model_exact.getstate(0)
y0[2*model_hybrid.dimension] = 0.
tj.set_initial_value(y0,0)
tj.integrate(s1)
ys1 = tj.y
for i in range(model_hybrid.dimension):
model_hybrid.systemState[i].value[0] = ys1[i]
for i in range(model_hybrid.dimension):
model_exact.systemState[i].value[0] = ys1[i+model_hybrid.dimension]
t_next = tj.y[2*model_hybrid.dimension]
for e in model_hybrid.events:
e.updaterate()
for e in model_exact.events:
e.updaterate()
# update slow species
r = np.random.rand()
agg_rate = 0.
for i in range(len(model_hybrid.events)):
if model_hybrid.events[i].hybridType == SLOW:
hybrid_rate = model_hybrid.events[i].rate
exact_rate = model_exact.events[i].rate
agg_rate = agg_rate + res(hybrid_rate,exact_rate )
agg_rate = agg_rate + res(exact_rate,hybrid_rate )
agg_rate = agg_rate + min(hybrid_rate,exact_rate )
else:
agg_rate = agg_rate + model_exact.events[i].rate
#agg_rate = agg_rate + model_hybrid.events[i].rate
#else:
# print("PROBLEM")
# find reaction
if r>sample_rate/(agg_rate+sample_rate):
firing_event_hybrid,firing_event_exact = findreaction_coupled(model_hybrid.events,model_exact.events,agg_rate,r)
if isinstance(firing_event_hybrid,Reaction):
firing_event_hybrid.react()
if isinstance(firing_event_exact,Reaction):
firing_event_exact.react()
clock[k] = clock[k-1] + t_next
path[k][0:model_hybrid.dimension] = model_hybrid.getstate(0)
path[k][model_hybrid.dimension:2*model_hybrid.dimension] = model_exact.getstate(0)
return path[0:k+1],clock[0:k+1]
| 10,961
|
def _django_setup_unittest(request, _django_cursor_wrapper):
"""Setup a django unittest, internal to pytest-django."""
if django_settings_is_configured() and is_django_unittest(request):
request.getfuncargvalue('_django_test_environment')
request.getfuncargvalue('_django_db_setup')
_django_cursor_wrapper.enable()
cls = request.node.cls
_restore_class_methods(cls)
cls.setUpClass()
_disable_class_methods(cls)
def teardown():
_restore_class_methods(cls)
cls.tearDownClass()
_django_cursor_wrapper.restore()
request.addfinalizer(teardown)
| 10,962
|
def commands():
"""Manage your blobs."""
pass
| 10,963
|
def process_image_keypoints(img, keypoints, input_res=224):
"""Read image, do preprocessing and possibly crop it according to the bounding box.
If there are bounding box annotations, use them to crop the image.
If no bounding box is specified but openpose detections are available, use them to get the bounding box.
"""
normalize_img = Normalize(mean=constants.IMG_NORM_MEAN, std=constants.IMG_NORM_STD)
img = img[:,:,::-1].copy() # PyTorch does not support negative stride at the moment
center, scale, bbox = bbox_from_keypoints(keypoints, imageHeight = img.shape[0])
if center is None:
return None, None, None, None, None
img, boxScale_o2n, bboxTopLeft = crop_bboxInfo(img, center, scale, (input_res, input_res))
# viewer2D.ImShow(img, name='cropped', waitTime=1) #224,224,3
if img is None:
return None, None, None, None, None
# unCropped = uncrop(img, center, scale, (input_res, input_res))
# if True:
# viewer2D.ImShow(img)
img = img.astype(np.float32) / 255.
img = torch.from_numpy(img).permute(2,0,1)
norm_img = normalize_img(img.clone())[None]
# return img, norm_img, img_original, boxScale_o2n, bboxTopLeft, bbox
bboxInfo ={"center": center, "scale": scale, "bboxXYWH":bbox}
return img, norm_img, boxScale_o2n, bboxTopLeft, bboxInfo
| 10,964
|
def update_output(
list_of_contents,
list_of_names,
list_of_dates,
initiate_pipeline_n_clicks,
clear_pipeline_n_clicks,
append_uploads_n_clicks,
refresh_uploads_n_clicks,
clear_uploads_n_clicks,
memory,
user_login_n_clicks,
session_data,
workflow,
initiate_pipeline_timestamp,
clear_pipeline_timestamp,
user_login_timestamp,
refresh_uploads_timestamp,
clear_uploads_timestamp,
):
"""Primary APP Pipeline function, as triggered by 'Initiate
[APP] Pipeline' UI button (located in the "Step 2 (2/2)"
section).
Parameters
----------
list_of_contents
<list of str>
Array containing user-uploaded ABI raw contents as
binary strings (thus requiring decoding)
list_of_names
<list of str>
Array containing user-uploaded ABI filenames
(does not include the full path for security reasons)
list_of_dates
<list of int>
Array containing user-uploaded ABI last modified timestamps
(integers as seconds since 1970)
initiate_pipeline_n_clicks
<int>
Total count of UI button clicks
clear_pipeline_n_clicks
<int>
Total count of UI button clicks
append_uploads_n_clicks
<int>
Total count of UI button clicks
refresh_uploads_n_clicks
<int>
Total count of UI button clicks
clear_uploads_n_clicks
<int>
Total count of UI button clicks
memory
Dash.dcc.Store(type='session')
user_login_n_clicks
<int>
Total count of UI button clicks
session_data
Dash.dcc.Store(type='session')
workflow
<type>
initiate_pipeline_timestamp
<type>
clear_pipeline_timestamp
<type>
user_login_timestamp
<type>
refresh_uploads_timestamp
<type>
clear_uploads_timestamp
<type>
"""
def show_list_of_names(USER, list_of_names):
"""Display the filenames for all successfully received
USER-uploaded ABI files.
Args:
USER: <str>
Active user
list_of_names: <list>
List of user-uploaded ABI filenames
Returns:
<html.Div([...])>
Reactive response to display after processing upload
"""
if not all([fn.endswith(tuple([".csv",".xlsx"])) for fn in list_of_names]):
return html.Div(
[
html.Br(),
html.Code(
f"⚠ UPLOAD ERROR: Not all of the {len(list_of_names)} files are CSV or Excel files !",
style={"color": "red"},
),
html.Br(),
html.Code(
f"⛔ | Please reset this upload & then perform a fresh upload of either .csv or .xlsx files."
),
]
)
return html.Div(
[
html.Br(),
html.Code(
f"✔ UPLOAD SUCCESSFUL (N={len(list_of_names)})", style={"color": "green"}
),
html.Br(),
html.Br(),
html.Details(
[
html.Summary(
html.H3(
f"File(s) received (click to expand)",
style={"textAlign": "left", "fontSize": "120%"},
)
),
html.Div(
[
html.Li(f"{'{:02d}'.format(i+1)})\t{abi}")
for (i, abi) in enumerate(sorted(list_of_names))
],
id="files-received",
style={
"textAlign": "left",
"fontSize": "60%",
"columnCount": "3",
"paddingBottom": "2%",
"fontFamily": "'Roboto Mono', monospace",
},
),
html.Hr(
style={
"borderTop": "1px solid",
"animation": "pact-gradient-text-flow 3s infinite linear",
"borderRadius": "5px",
"opacity": "0.67",
"width": "50%",
"marginLeft": "25%",
}
),
]
),
html.Br(),
html.Span(className="fader-line-short", style={"marginBottom": "20px"}),
],
style={"width": "80%", "marginLeft": "10%"},
)
not_signed_in_msg = html.Div(
[html.H6("Please log in to release the pipeline as ready for activation.")]
)
try:
if session_data: # ["user_logged_in"] == "True":
RUN_ID = session_data["RUN_ID"]
SESSION_OUTPUT_DIR = session_data["PATH_TO_SESSION_OUTPUT"]
LOG_FILE = session_data["session_log_file"]
USER = session_data["user_proper"]
UUID = session_data["UUID"]
if len(app.logger.handlers) < 1:
app.logger.info(
f"Number logger handlers = {len(app.logger.handlers)}->{logger.handlers}"
)
app.logger.info("Adding log FileHandler...")
fh = logging.FileHandler(LOG_FILE)
fh.setLevel(logging.INFO)
app.logger.addHandler(fh)
app.logger.info(
f"Number logger handlers = {len(app.logger.handlers)}->{logger.handlers}"
)
else:
return not_signed_in_msg
except KeyError as e:
app.logger.error(f"No user appears to be logged in (KeyError: {e})")
return not_signed_in_msg
### UPON USER FILE UPLOAD(S):
if list_of_contents is not None:
if initiate_pipeline_n_clicks >= 1:
init_t_elapse = tns() / 1e9 - initiate_pipeline_timestamp / 1e3
app.logger.info(f"init_t_elapse = {init_t_elapse}; ")
if init_t_elapse < 30:
if (
clear_pipeline_n_clicks > 0
and refresh_uploads_n_clicks <= clear_pipeline_n_clicks
):
if all(
clear_pipeline_timestamp > ts
for ts in [initiate_pipeline_timestamp, user_login_timestamp]
):
return [
html.H3(
f"Thanks, {USER}; the previous pipeline results have been cleared."
),
html.H4(f"Current analysis output folder: {RUN_ID}"),
html.H5(
html.Div(
[
html.Span(f"Launch a new analysis."),
html.Br(),
]
)
),
]
elif clear_pipeline_n_clicks > 0:
if clear_pipeline_timestamp > initiate_pipeline_timestamp:
if refresh_uploads_n_clicks > 0:
if refresh_uploads_timestamp > clear_pipeline_timestamp:
return show_list_of_names(USER, list_of_names)
return html.Div(
html.H5(
f"(Pipeline results [{RUN_ID}] CLEARED)", style={"color": "red"}
)
)
app.logger.info(
f"📟📶⌁⌁⌁📠Using the following as pipeline data input. \n{len(list_of_names)} USER UPLOADED FILE(S) : \n"
+ "\n 📊⇢🧬 ".join(
[
"{:>03d})\t{:>50s}".format(i + 1, abi)
for i, abi in enumerate(sorted(list_of_names))
]
)
)
app.logger.info(
f"INITIALIZING NEW PIPELINE LAUNCH:\n\n\t\t{SESSION_OUTPUT_DIR}"
)
start_time = tns()
            children = []
            report = None  # defined up-front so the summary check below cannot raise a NameError
parsed_upload_children = [
html.Details(
[
parse_contents(c, n, d, SESSION_OUTPUT_DIR, session_log_file=LOG_FILE)
for c, n, d in zip(list_of_contents, list_of_names, list_of_dates)
]
)
]
# Generate (single!) TCR alpha/beta chain pair combinations
# base pipeline reference files (e.g., agg'd fq, designated master
# reference 'genome', DataFrames, log, etc.)
try:
pipeline_output = ljoin(
[
r
for r in pipeline.run_pipeline(
RUN_ID,
SESSION_OUTPUT_DIR,
workflow=workflow,
session_log_file=LOG_FILE,
)
]
)
args = [(*(x), i + 1) for i, x in enumerate(pipeline_output)]
except Exception as e:
logs = []
report = None
with open(LOG_FILE, "r+") as log_file:
for line in log_file.readlines():
logs.append(line)
stderr = [
dcc.Textarea(
placeholder="(Main Sequence -- logger placeholder)",
value="\n".join(logs),
style={
"height": "400px",
"width": "50%",
"fontSize": "0.7rem",
"lineHeight": "0.9rem",
"fontFamily": "'Roboto Mono', monospace",
},
className="logger-text",
name="organization",
readOnly=True,
)
]
fatal_crash = "⚠ ALERT: ERROR IN MAIN PIPELINE SEQUENCE"
app.logger.error(f"{fatal_crash}: \n\n{e}")
log_exc(app.logger)
return html.Div(
[
html.H2(fatal_crash, style={"color": "red"}),
html.P(f"App runtime was: {gtt(start_time)}"),
html.Code(f"Primary error message for crash:\n{e}"),
html.H4("See [end of] AUDIT LOG (below) for failure reason."),
html.H5(f"WEB SERVER SYSTEM LOG:", style={"color": "red"}),
html.Div(stderr),
]
)
### # # # # # # # # # #### # # # # # # # # # ###
children.append(
html.Div(
[
html.Hr(),
html.Br(),
html.H4("All files analyzed in most recent upload:"),
]
)
)
""" ~ ◮ ~
S U M M A R Y
a n a l y s i s
~ ~ ~
~ ◮ ~
"""
if report:
summary_report = [
html.Div(
[
html.Br(),
html.H2(
"Pipeline Output Summary",
style={
"fontSize": "80%",
"letterSpacing": "1.33rem",
"fontFamily": "Cinzel",
"animation": "anim-text-flow-keys 120s infinite linear",
},
),
html.Hr(),
],
style={"width": "90%", "marginLeft": "5%"},
)
]
else:
summary_report = [html.Div([html.H4(f"No final output found.")])]
html_out = f"{SESSION_OUTPUT_DIR}{RUN_ID}_HTMLprops.tsv"
pd.DataFrame(
[str(c.to_plotly_json()) for c in children], columns=["DashHTMLDivComponents"]
).to_csv(html_out, encoding="utf-8", sep="\t")
app.logger.info("Processed & analzyed input files were:")
app.logger.debug(parsed_upload_children)
app.logger.info(",".join([str(type(x)) for x in parsed_upload_children]))
total_exec_time = gtt(start_time)
app.logger.info(
f"———COMPLETE——-\n\n \t ☆☆☆ Total EXECUTION TIME Required ☆☆☆\n\n \t\t = {total_exec_time} s \n\n"
)
show_exec_time = [
html.Div(
[
html.Hr(),
html.H3(
f"* ゚(>͂ ͡͡︒ ͜ ʖ ͡︒)>-。゚☆* :・゚.☆ * ・ "
),
html.H4(f"Total Execution Time Required = {total_exec_time} s"),
html.Hr(),
html.Br(),
]
)
]
if len(children) > 50:
full_report = [
html.Div(
[
html.H2(
f"NOTICE: Due to an unusually large number of results in this analysis (N={len(children)}), full report display has been automatically disabled."
)
]
)
]
else:
full_report = children
children = (
show_exec_time
+ TOC
+ summary_report
+ full_report
+ parsed_upload_children
+ [html.Div(html.Hr())]
)
app.logger.debug(",".join([str(type(x)) for x in children]))
app.logger.debug(f"Number of html.Div elements in final layout: {len(children)}")
return children
elif initiate_pipeline_n_clicks > 15:
return html.Div(
[
html.H4(
"⚠ | ALERT ! : Un𝒇ortunately, you have over-activated the pipeline submissions check system. Please re𝒇resh the page, re-log in, and re-upload the set o𝒇 ABI 𝒇iles you would like analyzed. 🛠⎆ "
),
html.H6("↺ Please Re𝒇resh the page. ↺"),
]
)
if clear_uploads_n_clicks > 0:
t_elapsed = tns() / 1e9 - clear_uploads_timestamp / 1e3
if t_elapsed < 2:
for tcr_dir in os.listdir(SESSION_OUTPUT_DIR):
grouped_clone_fqs = f"{SESSION_OUTPUT_DIR}{tcr_dir}"
if os.path.isdir(grouped_clone_fqs):
shutil.rmtree(grouped_clone_fqs)
return html.Div(
[
html.Code(f"UPLOADS CLEARED", style={"color": "red"}),
html.H5(
f'To continue, submit at least one new upload & click "✥ Append".'
),
]
)
if append_uploads_n_clicks > 0 or clear_uploads_n_clicks > 0:
if len(list_of_names) > 0 and len(memory.items()) > 0:
all_uploads = (
memory[f"{RUN_ID}-list_of_names"]
if len(memory[f"{RUN_ID}-list_of_names"]) > 0
else list_of_names
)
return show_list_of_names(USER, all_uploads)
elif len(memory.items()) == 0:
return html.Div(html.Code("NONE"))
else:
app.logger.info(
f"{USER} uploaded the following {len(list_of_names)} file(s):"
+ "\n\t ◇ 📄 "
+ "\n\t ◇ 📄 ".join(sorted(list_of_names))
+ ".\n"
)
return show_list_of_names(USER, list_of_names)
else:
return html.Div(
[html.Br(), html.H5(f"Logged in as: {USER}", style={"color": "rgb(32,92,188)"})]
)
| 10,965
|
def _load_outputs(dict_: Dict) -> Iterable[Union[HtmlOutput, EbookConvertOutput]]:
"""Translates a dictionary into a list of output objects.
The dictionary is assumed to have the following structure::
        {
            'outputs': [{ 'path': '...', 'stylesheet': '...' },
                        { 'path': '...', 'ebookconvert_params': ... }]
        }
If the key 'outputs' is not present in the dictionary or if there are no output
sub-dictionaries, an empty list is returned instead.
The type of the output is inferred from the file name provided as a value of the 'path' key
of the output sub-dictionary.
A file name ending in the file type '.html' will produce an HtmlOutput. '.epub', '.mobi' or
any other file type excluding '.html' will produce an EbookConvertOutput.
Note that a local stylesheet *replaces* the global stylesheet, but local ebookconvert_params
are *added* to the global ebookconvert_params if present.
Args:
dict_: The dictionary.
Returns:
        The list of output objects, or an empty list if no output sub-dictionaries
        are present in the encapsulating dictionary or if the 'outputs' key itself
        is missing.
"""
outputs = []
global_stylesheet = None
global_ec_params = []
if 'stylesheet' in dict_:
global_stylesheet = dict_['stylesheet']
if 'ebookconvert_params' in dict_:
global_ec_params = _load_ebookconvert_params(dict_)
    for output in dict_.get('outputs', []):
path = output['path']
file_type = path.split('.')[-1]
if 'stylesheet' not in output and global_stylesheet:
output['stylesheet'] = global_stylesheet
if file_type == 'html':
outputs.append(HtmlOutput(**output))
else:
if 'ebookconvert_params' in output:
local_ec_params = _load_ebookconvert_params(output)
output['ebookconvert_params'] = global_ec_params + local_ec_params
else:
output['ebookconvert_params'] = global_ec_params
outputs.append(EbookConvertOutput(**output))
return outputs
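# Hedged usage sketch: HtmlOutput, EbookConvertOutput and _load_ebookconvert_params
# come from the surrounding module; the file names below are made up for illustration.
config = {
    'stylesheet': 'global.css',
    'outputs': [
        {'path': 'book.html'},
        {'path': 'book.epub', 'stylesheet': 'epub.css'},
    ],
}
outputs = _load_outputs(config)
# -> [HtmlOutput(path='book.html', stylesheet='global.css'),
#     EbookConvertOutput(path='book.epub', stylesheet='epub.css', ebookconvert_params=[])]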
| 10,966
|
def _async_friendly_contextmanager(func):
"""
Equivalent to @contextmanager, except the resulting (non-async) context
manager works correctly as a decorator on async functions.
"""
@wraps(func)
def helper(*args, **kwargs):
return _AsyncFriendlyGeneratorContextManager(func, args, kwargs)
return helper
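# Hedged usage sketch: assumes _AsyncFriendlyGeneratorContextManager (defined alongside
# this helper) mirrors contextlib's generator context manager; the timer is illustrative.
import asyncio
import time
@_async_friendly_contextmanager
def timed(label):
    start = time.monotonic()
    try:
        yield
    finally:
        print(f"{label} took {time.monotonic() - start:.3f}s")
@timed("fetch")  # decorating an async function is exactly the case this helper fixes
async def fetch():
    await asyncio.sleep(0.1)
asyncio.run(fetch())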
| 10,967
|
def all_inputs(n):
"""
returns an iterator for all {-1,1}-vectors of length `n`.
"""
return itertools.product((-1, +1), repeat=n)
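# Quick check of the iterator for n = 2; the order follows itertools.product.
assert list(all_inputs(2)) == [(-1, -1), (-1, 1), (1, -1), (1, 1)]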
| 10,968
|
def brightness(df: pd.DataFrame, gain: float = 1.5) -> pd.DataFrame:
"""
Enhance image brightness.
Parameters
----------
    df
        The dataset as a dataframe.
    gain
        Brightness enhancement factor passed to PIL's ImageEnhance.Brightness;
        1.0 leaves the image unchanged. Defaults to 1.5.
    Returns
    -------
    df
        A new dataframe with the following changes:
        * 'filename', overwritten with the new brightened image filenames.
"""
logging.info('Brightening images ...')
df_out = df.copy()
new_filename_list = []
for index, row in df.iterrows():
filename = row['filename']
logging.debug(f'Brightening image {filename}')
img = Image.open(filename)
img = ImageEnhance.Brightness(img)
img = img.enhance(gain)
new_filename = make_filename(row, step='brightness')
new_filename_list.append(new_filename)
save_image(new_filename, img, dpi=(300, 300), engine='pil')
df_out['filename'] = new_filename_list
return df_out
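# Hedged usage sketch: make_filename and save_image are project helpers used above and
# are assumed importable; the dataframe may need extra columns depending on make_filename.
import pandas as pd
df = pd.DataFrame({'filename': ['imgs/scan_001.png', 'imgs/scan_002.png']})
bright_df = brightness(df, gain=1.5)
print(bright_df['filename'].tolist())  # filenames of the brightened copies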
| 10,969
|
def generate_winner_list(winners):
""" Takes a list of winners, and combines them into a string. """
return ", ".join(winner.name for winner in winners)
| 10,970
|
def test_get_alias_name_2():
"""Test alias from empty meta extraction"""
meta = { }
assert get_alias_names(meta) is None
| 10,971
|
def infer_chords_for_sequence(quantized_sequence,
chords_per_bar=None,
key_change_prob=0.001,
chord_change_prob=0.5,
chord_pitch_out_of_key_prob=0.01,
chord_note_concentration=100.0):
"""Infer chords for a quantized NoteSequence using the Viterbi algorithm.
This uses some heuristics to infer chords for a quantized NoteSequence. At
each chord position a key and chord will be inferred, and the chords will be
added (as text annotations) to the sequence.
Args:
quantized_sequence: The quantized NoteSequence for which to infer chords.
This NoteSequence will be modified in place.
chords_per_bar: The number of chords per bar to infer. If None, use a
default number of chords based on the time signature of
`quantized_sequence`.
key_change_prob: Probability of a key change between two adjacent frames.
chord_change_prob: Probability of a chord change between two adjacent
frames.
chord_pitch_out_of_key_prob: Probability of a pitch in a chord not belonging
to the current key.
chord_note_concentration: Concentration parameter for the distribution of
observed pitches played over a chord. At zero, all pitches are equally
likely. As concentration increases, observed pitches must match the
chord pitches more closely.
Raises:
SequenceAlreadyHasChordsException: If `quantized_sequence` already has
chords.
UncommonTimeSignatureException: If `chords_per_bar` is not specified and
`quantized_sequence` has an uncommon time signature.
NonIntegerStepsPerChordException: If the number of quantized steps per chord
is not an integer.
EmptySequenceException: If `quantized_sequence` is empty.
SequenceTooLongException: If the number of chords to be inferred is too
large.
"""
sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
for ta in quantized_sequence.text_annotations:
if ta.annotation_type == music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL:
raise SequenceAlreadyHasChordsException(
'NoteSequence already has chord(s): %s' % ta.text)
if chords_per_bar is None:
time_signature = (quantized_sequence.time_signatures[0].numerator,
quantized_sequence.time_signatures[0].denominator)
if time_signature not in _DEFAULT_TIME_SIGNATURE_CHORDS_PER_BAR:
raise UncommonTimeSignatureException(
'No default chords per bar for time signature: (%d, %d)' %
time_signature)
chords_per_bar = _DEFAULT_TIME_SIGNATURE_CHORDS_PER_BAR[time_signature]
# Determine the number of seconds (and steps) each chord is held.
steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(
quantized_sequence)
steps_per_chord_float = steps_per_bar_float / chords_per_bar
if steps_per_chord_float != round(steps_per_chord_float):
raise NonIntegerStepsPerChordException(
'Non-integer number of steps per chord: %f' % steps_per_chord_float)
steps_per_chord = int(steps_per_chord_float)
steps_per_second = sequences_lib.steps_per_quarter_to_steps_per_second(
quantized_sequence.quantization_info.steps_per_quarter,
quantized_sequence.tempos[0].qpm)
seconds_per_chord = steps_per_chord / steps_per_second
num_chords = int(math.ceil(quantized_sequence.total_time / seconds_per_chord))
if num_chords == 0:
raise EmptySequenceException('NoteSequence is empty.')
if num_chords > _MAX_NUM_CHORDS:
raise SequenceTooLongException(
'NoteSequence too long for chord inference: %d frames' % num_chords)
# Compute pitch vectors for each chord frame, then compute log-likelihood of
# observing those pitch vectors under each possible chord.
note_pitch_vectors = sequence_note_pitch_vectors(
quantized_sequence, seconds_per_frame=seconds_per_chord)
chord_frame_loglik = _chord_frame_log_likelihood(
note_pitch_vectors, chord_note_concentration)
# Compute distribution over chords for each key, and transition distribution
# between key-chord pairs.
key_chord_distribution = _key_chord_distribution(
chord_pitch_out_of_key_prob=chord_pitch_out_of_key_prob)
key_chord_transition_distribution = _key_chord_transition_distribution(
key_chord_distribution,
key_change_prob=key_change_prob,
chord_change_prob=chord_change_prob)
key_chord_loglik = np.log(key_chord_distribution)
key_chord_transition_loglik = np.log(key_chord_transition_distribution)
key_chords = _key_chord_viterbi(
chord_frame_loglik, key_chord_loglik, key_chord_transition_loglik)
# Add the inferred chord changes to the sequence, logging any key changes.
current_key_name = None
current_chord_name = None
for frame, (key, chord) in enumerate(key_chords):
if _PITCH_CLASS_NAMES[key] != current_key_name:
if current_key_name is not None:
tf.logging.info('Sequence has key change from %s to %s at %f seconds.',
current_key_name, _PITCH_CLASS_NAMES[key],
frame * seconds_per_chord)
current_key_name = _PITCH_CLASS_NAMES[key]
if chord == constants.NO_CHORD:
figure = constants.NO_CHORD
else:
root, kind = chord
figure = '%s%s' % (_PITCH_CLASS_NAMES[root], kind)
if figure != current_chord_name:
ta = quantized_sequence.text_annotations.add()
ta.time = frame * seconds_per_chord
ta.quantized_step = frame * steps_per_chord
ta.text = figure
ta.annotation_type = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL
current_chord_name = figure
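# Hedged usage sketch: assumes the Magenta/note_seq modules referenced above
# (music_pb2, sequences_lib) are importable here; pitches and timing are arbitrary.
melody = music_pb2.NoteSequence()
melody.tempos.add(qpm=120)
melody.notes.add(pitch=60, start_time=0.0, end_time=1.0, velocity=80)
melody.notes.add(pitch=64, start_time=1.0, end_time=2.0, velocity=80)
melody.notes.add(pitch=67, start_time=2.0, end_time=3.0, velocity=80)
melody.total_time = 3.0
quantized = sequences_lib.quantize_note_sequence(melody, steps_per_quarter=4)
infer_chords_for_sequence(quantized, chords_per_bar=1)
print([ta.text for ta in quantized.text_annotations])  # inferred chord symbols, e.g. ['C', 'C']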
| 10,972
|
def stampify_url():
"""The stampified version of the URL passed in args."""
url = request.args.get('url')
max_pages = request.args.get('max_pages')
enable_animations = bool(request.args.get('animations') == 'on')
if not max_pages:
max_pages = DEFAULT_MAX_PAGES
_stampifier = Stampifier(url, int(max_pages), enable_animations)
try:
return _stampifier.stampify().stamp_html
except StampifierError as err:
return render_template('error_screen.html',
message=err.message)
| 10,973
|
def pleasant_lgr_stand_alone_parent(pleasant_lgr_test_cfg_path, tmpdir):
"""Stand-alone version of lgr parent model for comparing with LGR results.
"""
# Edit the configuration file before the file paths within it are converted to absolute
# (model.load_cfg converts the file paths)
cfg = load(pleasant_lgr_test_cfg_path)
del cfg['setup_grid']['lgr']
cfg['simulation']['sim_ws'] = os.path.join(tmpdir, 'pleasant_lgr_just_parent')
# save out the edited configuration file
path, fname = os.path.split(pleasant_lgr_test_cfg_path)
new_file = os.path.join(path, 'pleasant_lgr_just_parent.yml')
dump(new_file, cfg)
# load in the edited configuration file, converting the paths to absolute
cfg = MF6model.load_cfg(new_file)
# add some stuff just for the tests
cfg['gisdir'] = os.path.join(cfg['simulation']['sim_ws'], 'gis')
m = MF6model.setup_from_cfg(cfg)
m.write_input()
return m
| 10,974
|
def twitter_channel():
"""
RESTful CRUD controller for Twitter channels
- appears in the administration menu
Only 1 of these normally in existence
@ToDo: Don't enforce
"""
#try:
# import tweepy
#except:
# session.error = T("tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!")
# redirect(URL(c="admin", f="index"))
tablename = "%s_%s" % (c, f)
table = s3db[tablename]
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Twitter account Details"),
title_list = T("Twitter accounts"),
label_create = T("Add Twitter account"),
title_update = T("Edit Twitter account"),
label_list_button = T("View Twitter accounts"),
msg_record_created = T("Twitter account added"),
msg_record_deleted = T("Twitter account deleted"),
msg_record_modified = T("Twitter account updated"),
msg_list_empty = T("No Twitter accounts currently defined"),
)
def prep(r):
oauth_consumer_key = settings.msg.twitter_oauth_consumer_key
oauth_consumer_secret = settings.msg.twitter_oauth_consumer_secret
if not (oauth_consumer_key and oauth_consumer_secret):
session.error = T("You should edit Twitter settings in models/000_config.py")
return True
oauth = tweepy.OAuthHandler(oauth_consumer_key,
oauth_consumer_secret)
if r.http == "GET" and r.method in ("create", "update"):
# We're showing the form
_s3 = session.s3
try:
_s3.twitter_oauth_url = oauth.get_authorization_url()
_s3.twitter_request_key = oauth.request_token.key
_s3.twitter_request_secret = oauth.request_token.secret
except tweepy.TweepError:
session.error = T("Problem connecting to twitter.com - please refresh")
return True
#table.pin.readable = True
#table.pin.label = T("PIN number from Twitter (leave empty to detach account)")
#table.pin.value = ""
table.twitter_account.label = T("Current Twitter account")
return True
else:
# Not showing form, no need for pin
#table.pin.readable = False
#table.pin.label = T("PIN") # won't be seen
#table.pin.value = "" # but let's be on the safe side
pass
return True
#s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"restrict": restrict_e,
"url": URL(args=["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Disable")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
if not s3task._is_alive():
# No Scheduler Running
s3.actions += [{"label": s3_str(T("Poll")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "poll"]),
"_class": "action-btn",
},
]
#if isinstance(output, dict):
# if r.http == "GET" and r.method in ("create", "update"):
# rheader = A(T("Collect PIN from Twitter"),
# _href = session.s3.twitter_oauth_url,
# _target = "_blank")
# output["rheader"] = rheader
return output
s3.postp = postp
return s3_rest_controller()
| 10,975
|
def init_time(p, **kwargs):
"""Initialize time data."""
time_data = {
'times': [p['parse']],
'slots': p['slots'],
}
time_data.update(**kwargs)
return time_data
| 10,976
|
def welcome():
"""
The code that executes at launch. It guides the user through menus and starts other functions from the other folders
    based on the user's choices.
"""
clear()
print(msg)
print(Style.RESET_ALL)
print("\n")
print("Welcome!")
input("Press ENTER key to begin!")
clear()
print(feesNotice)
input()
clear()
# Checks to see if the account information was stored on the local machine previously
if not os.path.isfile(os.path.join(CURRENT_DIR, "sav", "credentials.pickle")):
        # If the user hasn't successfully logged in before, it takes them to a menu sequence to log in, and saves the info locally
# for the next time the script is run.
login.login_interface()
user = login.Credentials()
print(f"You are logged in as: {user.username}")
instructions = """
Instructions:
This program takes in a csv file full of docket numbers and will automatically
populate 2 folders with the raw JSON data and all of the PDF documents associated
with that docket.
You will now select the path where your input csv is located.
Press ENTER to open the file browser.
"""
print(instructions)
input()
clear()
# Opens a graphical file browser and returns the path to the csv file that the user selected.
csvChoice = file_browser.browseCSVFiles()
# Assigns the choice to a global variable, so other modules can find the path that the user specified.
global_variables.CSV_INPUT_PATH = csvChoice
options = """
Type in one of the following numbers and press ENTER to specify your choice:
[1] Get all JSON files and PDF files.
[2] Get JSON files only.
[3] Get PDF files only.
( Only select 3 if you already have a directory full of JSON files. )
( The JSON files are needed to extract the download links from. )
[4] More options.
Enter your response below.[1/2/3/4]
"""
print(options)
def handle_input():
"""
Prompts the user for a choice and calls the function from the 'modules' folder that corresponds
with that choice.
"""
userChoice = input()
# Choice 1 is downloading all json and pdf files.
if userChoice == "1":
clear()
menus.select_paths_menu()
clear()
menus.specify_client_matter_menu()
print(msg)
get_json_and_pdfs()
        # Choice 2 is downloading only JSON files.
elif userChoice == "2":
clear()
menus.select_paths_menu(pdfOption=False)
menus.specify_client_matter_menu()
print(msg)
get_json.thread_download_json()
# Choice 3 is downloading only PDF files.
elif userChoice == "3":
clear()
menus.select_paths_menu()
menus.specify_client_matter_menu()
print(msg)
link_list = get_pdfs.get_urls("json-output")
# get_pdfs.multiprocess_download_pdfs(link_list)
get_pdfs.thread_download_pdfs(link_list)
elif userChoice == "4":
clear()
menus.other_options_menu()
# If the user enters anything other than a valid choice, then it tells them their choice is invalid and
# restarts this function, prompting them to make a choice again.
else:
print("Please Enter Valid input (1, 2 or 3)")
return handle_input()
handle_input()
try:
os.startfile(os.path.join(CURRENT_DIR, "log"))
except:
pass
print("\nDone.")
input()
| 10,977
|
def bsplslib_D0(*args):
"""
:param U:
:type U: float
:param V:
:type V: float
:param UIndex:
:type UIndex: int
:param VIndex:
:type VIndex: int
:param Poles:
:type Poles: TColgp_Array2OfPnt
:param Weights:
:type Weights: TColStd_Array2OfReal &
:param UKnots:
:type UKnots: TColStd_Array1OfReal &
:param VKnots:
:type VKnots: TColStd_Array1OfReal &
:param UMults:
:type UMults: TColStd_Array1OfInteger &
:param VMults:
:type VMults: TColStd_Array1OfInteger &
:param UDegree:
:type UDegree: int
:param VDegree:
:type VDegree: int
:param URat:
:type URat: bool
:param VRat:
:type VRat: bool
:param UPer:
:type UPer: bool
:param VPer:
:type VPer: bool
:param P:
:type P: gp_Pnt
:rtype: void
"""
return _BSplSLib.bsplslib_D0(*args)
| 10,978
|
def b_cross(self) -> tuple:
"""
Solve cross one piece at a time.
Returns
-------
tuple of (list of str, dict of {'CROSS': int})
Moves to solve cross, statistics (move count in ETM).
Notes
-----
The cube is rotated so that the white centre is facing down.
The four white cross pieces are moved to the yellow side (on top),
starting with the edge which is the fewest moves away from solved.
The edges are then moved down to the white centre in the fewest
number of moves.
"""
cube = self.cube
solve = []
edges = (1,0), (-1,1), (1,-1), (0,1)
cross = {
'L': (4,1,-1),
"L'": (2,1,0),
'F': (1,1,-1),
"F'": (3,1,0),
'R': (2,1,-1),
"R'": (4,1,0),
'B': (3,1,-1),
"B'": (1,1,0),
'L2': (5,1,0),
'F2': (5,0,1),
'R2': (5,1,-1),
'B2': (5,-1,1),
"L U' F": (1,0,1),
"L' U' F": (1,-1,1),
"F U' R": (2,0,1),
"F' U' R": (2,-1,1),
"R' U F'": (3,0,1),
"R U F'": (3,-1,1),
"B' U R'": (4,0,1),
"B U R'": (4,-1,1)
}
for s, side in enumerate(cube):
if side[1][1] == 'U':
break
if s != 5:
move = ('z2', "z'", "x'", 'z', 'x')[s]
self.move(move)
solve.append(move)
while not(all(cube[0][y][x] == 'U' for y, x in edges) or
all(cube[5][y][x] == 'U' for y, x in edges) and
all(side[-1][1] == side[1][1] for side in cube[1:5])):
for edge in cross:
if cube[cross[edge][0]][cross[edge][1]][cross[edge][-1]] == 'U':
break
slot = 'LFRB'.index(edge[0])
if cube[0][edges[slot][0]][edges[slot][1]] != 'U':
moves = edge.split()
elif cube[0][edges[slot-3][0]][edges[slot-3][1]] != 'U':
moves = ['U'] + edge.split()
elif cube[0][edges[slot-1][0]][edges[slot-1][1]] != 'U':
moves = ["U'"] + edge.split()
else:
moves = ['U2'] + edge.split()
self.move(moves)
solve.extend(moves)
while any(cube[5][y][x] != 'U' for y, x in edges):
if cube[1][0][1] == cube[1][1][1] and cube[0][1][0] == 'U':
self.move('L2')
solve.append('L2')
if cube[2][0][1] == cube[2][1][1] and cube[0][-1][1] == 'U':
self.move('F2')
solve.append('F2')
if cube[3][0][1] == cube[3][1][1] and cube[0][1][-1] == 'U':
self.move('R2')
solve.append('R2')
if cube[4][0][1] == cube[4][1][1] and cube[0][0][1] == 'U':
self.move('B2')
solve.append('B2')
if any(cube[s][0][1] == cube[(s + 2) % 4 + 1][1][1] and
cube[0][edges[s-1][0]][edges[s-1][1]] == 'U'
for s in range(1, 5)):
self.move('U')
solve.append('U')
elif any(cube[s][0][1] == cube[s % 4 + 1][1][1] and
cube[0][edges[s-1][0]][edges[s-1][1]] == 'U'
for s in range(1, 5)):
self.move("U'")
solve.append("U'")
elif any(cube[s][0][1] == cube[(s + 1) % 4 + 1][1][1] and
cube[0][edges[s-1][0]][edges[s-1][1]] == 'U'
for s in range(1, 5)):
self.move('U2')
solve.append('U2')
return solve, {'CROSS': len(solve)}
| 10,979
|
def get_db():
"""Creates a 'SQLAlchemy' instance.
    Creates a 'SQLAlchemy' instance and stores it as 'flask.g.db'.
    Flask's application context must exist before this function is called.
Returns:
a 'SQLAlchemy' instance.
"""
if 'db' not in g:
current_app.logger.debug('construct SQLAlchemy instance.')
db = SQLAlchemy(current_app)
g.db = db
return g.db
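# Hedged usage sketch: requires flask_sqlalchemy and an app with a database URI configured.
from flask import Flask
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'
with app.app_context():
    db = get_db()
    assert get_db() is db  # the same instance is reused within one application context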
| 10,980
|
def dsm_nifti(image, spatial_radius=0.01, dist_metric='correlation',
dist_params=dict(), y=None, n_folds=1, roi_mask=None,
brain_mask=None, n_jobs=1, verbose=False):
"""Generate DSMs in a searchlight pattern on Nibabel Nifty-like images.
DSMs are computed using a patch surrounding each voxel.
.. versionadded:: 0.4
Parameters
----------
image : 4D Nifti-like image
        The Nifti image data. The 4th dimension must contain the images
for each item.
spatial_radius : float
The spatial radius of the searchlight patch in meters. All source
points within this radius will belong to the searchlight patch.
Defaults to 0.01.
dist_metric : str
The metric to use to compute the DSM for the data. This can be
        any metric supported by the scipy.spatial.distance.pdist function. See
        also the ``dist_params`` parameter to specify additional parameters for
the distance function. Defaults to 'correlation'.
dist_params : dict
Extra arguments for the distance metric used to compute the DSMs.
Refer to :mod:`scipy.spatial.distance` for a list of all other metrics
and their arguments. Defaults to an empty dictionary.
y : ndarray of int, shape (n_items,) | None
For each source estimate, a number indicating the item to which it
belongs. When ``None``, each source estimate is assumed to belong to a
different item. Defaults to ``None``.
n_folds : int | None
Number of folds to use when using cross-validation to compute the
        evoked DSM metric. Specify ``None`` to use the maximum number of folds
possible, given the data.
Defaults to 1 (no cross-validation).
roi_mask : 3D Nifti-like image | None
When set, searchlight patches will only be generated for the subset of
voxels with non-zero values in the given mask. This is useful for
restricting the analysis to a region of interest (ROI). Note that while
the center of the patches are all within the ROI, the patch itself may
extend beyond the ROI boundaries.
Defaults to ``None``, in which case patches for all voxels are
generated.
brain_mask : 3D Nifti-like image | None
When set, searchlight patches are restricted to only contain voxels
        with non-zero values in the given mask. This is useful for making sure
only information from inside the brain is used. In contrast to the
`roi_mask`, searchlight patches will not use data outside of this mask.
Defaults to ``None``, in which case all voxels are included in the
analysis.
n_jobs : int
The number of processes (=number of CPU cores) to use. Specify -1 to
use all available cores. Defaults to 1.
verbose : bool
Whether to display a progress bar. In order for this to work, you need
the tqdm python module installed. Defaults to False.
Yields
------
dsm : ndarray, shape (n_items, n_items)
A DSM for each searchlight patch.
"""
if (not isinstance(image, tuple(nib.imageclasses.all_image_classes))
or image.ndim != 4):
raise ValueError('The image data must be 4-dimensional Nifti-like '
'images')
# Get data as (n_items x n_voxels)
X = image.get_fdata().reshape(-1, image.shape[3]).T
# Find voxel positions
voxels = np.array(list(np.ndindex(image.shape[:-1])))
voxel_loc = voxels @ image.affine[:3, :3]
voxel_loc /= 1000 # convert position from mm to meters
# Apply masks
result_mask = np.ones(image.shape[:3], dtype=bool)
if brain_mask is not None:
if brain_mask.ndim != 3 or brain_mask.shape != image.shape[:3]:
            raise ValueError('Brain mask must be a 3-dimensional Nifti-like '
'image with the same dimensions as the data '
'image')
brain_mask = brain_mask.get_fdata() != 0
result_mask &= brain_mask
brain_mask = brain_mask.ravel()
X = X[:, brain_mask]
voxel_loc = voxel_loc[brain_mask]
if roi_mask is not None:
if roi_mask.ndim != 3 or roi_mask.shape != image.shape[:3]:
            raise ValueError('ROI mask must be a 3-dimensional Nifti-like '
'image with the same dimensions as the data '
'image')
roi_mask = roi_mask.get_fdata() != 0
result_mask &= roi_mask
roi_mask = roi_mask.ravel()
if brain_mask is not None:
roi_mask = roi_mask[brain_mask]
roi_mask = np.flatnonzero(roi_mask)
# Compute distances between voxels
logger.info('Computing distances...')
from sklearn.neighbors import NearestNeighbors
nn = NearestNeighbors(radius=spatial_radius, n_jobs=n_jobs).fit(voxel_loc)
dist = nn.radius_neighbors_graph(mode='distance')
# Compute DSMs
patches = searchlight(X.shape, dist=dist, spatial_radius=spatial_radius,
temporal_radius=None, sel_series=roi_mask)
yield from dsm_array(X, patches, dist_metric=dist_metric,
dist_params=dist_params, y=y, n_folds=n_folds,
n_jobs=n_jobs, verbose=verbose)
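# Hedged usage sketch: random data standing in for real fMRI; 20 items on a small grid
# with a 1 mm isotropic affine (positions are converted to meters inside the function).
import nibabel as nib
import numpy as np
data = np.random.rand(8, 8, 8, 20)
img = nib.Nifti1Image(data, affine=np.eye(4))
for dsm in dsm_nifti(img, spatial_radius=0.003, dist_metric='correlation'):
    pass  # one DSM per searchlight patch (one patch per voxel here)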
| 10,981
|
def generate_probes(filename_input, filename_results, options,
conf=None, problem=None, probes=None, labels=None,
probe_hooks=None):
"""
Generate probe figures and data files.
"""
if conf is None:
required, other = get_standard_keywords()
conf = ProblemConf.from_file(filename_input, required, other)
opts = conf.options
if options.auto_dir:
output_dir = opts.get_('output_dir', '.')
filename_results = os.path.join(output_dir, filename_results)
output('results in: %s' % filename_results)
io = MeshIO.any_from_filename(filename_results)
step = options.step if options.step >= 0 else io.read_last_step()
all_data = io.read_data(step)
output('loaded:', all_data.keys())
output('from step:', step)
if options.only_names is None:
data = all_data
else:
data = {}
for key, val in all_data.iteritems():
if key in options.only_names:
data[key] = val
if problem is None:
problem = Problem.from_conf(conf,
init_equations=False, init_solvers=False)
if probes is None:
gen_probes = conf.get_function(conf.options.gen_probes)
probes, labels = gen_probes(problem)
if probe_hooks is None:
probe_hooks = {None : conf.get_function(conf.options.probe_hook)}
if options.output_filename_trunk is None:
options.output_filename_trunk = problem.ofn_trunk
filename_template = options.output_filename_trunk \
+ ('_%%d.%s' % options.output_format)
if options.same_dir:
filename_template = os.path.join(os.path.dirname(filename_results),
filename_template)
output_dir = os.path.dirname(filename_results)
for ip, probe in enumerate(probes):
output(ip, probe.name)
probe.set_options(close_limit=options.close_limit)
for key, probe_hook in probe_hooks.iteritems():
out = probe_hook(data, probe, labels[ip], problem)
if out is None: continue
if isinstance(out, tuple):
fig, results = out
            else:
                fig = out
                results = None
if key is not None:
filename = filename_template % (key, ip)
else:
filename = filename_template % ip
if fig is not None:
if isinstance(fig, dict):
for fig_name, fig_fig in fig.iteritems():
fig_filename = edit_filename(filename,
suffix='_' + fig_name)
fig_fig.savefig(fig_filename)
output('figure ->', os.path.normpath(fig_filename))
else:
fig.savefig(filename)
output('figure ->', os.path.normpath(filename))
if results is not None:
txt_filename = edit_filename(filename, new_ext='.txt')
write_results(txt_filename, probe, results)
output('data ->', os.path.normpath(txt_filename))
| 10,982
|
def retain_groundtruth(tensor_dict, valid_indices):
"""Retains groundtruth by valid indices.
Args:
tensor_dict: a dictionary of following groundtruth tensors -
fields.InputDataFields.groundtruth_boxes
fields.InputDataFields.groundtruth_classes
fields.InputDataFields.groundtruth_confidences
fields.InputDataFields.groundtruth_keypoints
fields.InputDataFields.groundtruth_instance_masks
fields.InputDataFields.groundtruth_is_crowd
fields.InputDataFields.groundtruth_area
fields.InputDataFields.groundtruth_label_types
fields.InputDataFields.groundtruth_difficult
valid_indices: a tensor with valid indices for the box-level groundtruth.
Returns:
a dictionary of tensors containing only the groundtruth for valid_indices.
Raises:
ValueError: If the shape of valid_indices is invalid.
ValueError: field fields.InputDataFields.groundtruth_boxes is
not present in tensor_dict.
"""
input_shape = valid_indices.get_shape().as_list()
if not (len(input_shape) == 1 or
(len(input_shape) == 2 and input_shape[1] == 1)):
raise ValueError('The shape of valid_indices is invalid.')
valid_indices = tf.reshape(valid_indices, [-1])
valid_dict = {}
if fields.InputDataFields.groundtruth_boxes in tensor_dict:
# Prevents reshape failure when num_boxes is 0.
num_boxes = tf.maximum(tf.shape(
tensor_dict[fields.InputDataFields.groundtruth_boxes])[0], 1)
for key in tensor_dict:
if key in [fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_confidences,
fields.InputDataFields.groundtruth_keypoints,
fields.InputDataFields.groundtruth_keypoint_visibilities,
fields.InputDataFields.groundtruth_instance_masks]:
valid_dict[key] = tf.gather(tensor_dict[key], valid_indices)
# Input decoder returns empty tensor when these fields are not provided.
# Needs to reshape into [num_boxes, -1] for tf.gather() to work.
elif key in [fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_label_types]:
valid_dict[key] = tf.reshape(
tf.gather(tf.reshape(tensor_dict[key], [num_boxes, -1]),
valid_indices), [-1])
# Fields that are not associated with boxes.
else:
valid_dict[key] = tensor_dict[key]
else:
raise ValueError('%s not present in input tensor dict.' % (
fields.InputDataFields.groundtruth_boxes))
return valid_dict
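# Hedged usage sketch: 'fields' is the object_detection standard_fields module referenced
# above; the box and class values are arbitrary.
import tensorflow as tf
tensor_dict = {
    fields.InputDataFields.groundtruth_boxes: tf.constant(
        [[0.0, 0.0, 0.5, 0.5], [0.2, 0.2, 0.9, 0.9], [0.1, 0.1, 0.3, 0.3]]),
    fields.InputDataFields.groundtruth_classes: tf.constant([1, 2, 3]),
}
retained = retain_groundtruth(tensor_dict, tf.constant([0, 2]))
# retained[fields.InputDataFields.groundtruth_boxes] keeps only rows 0 and 2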
| 10,983
|
def next_higher_number(some_integer_input):
"""
Given an integer, this function returns the next higher number which
has the exact same set of digits. If no higher number exists, return
the the integer provided as input.
For instance:
>>> next_higher_number(123)
132
>>> next_higher_number(1232)
1322
>>> next_higher_number(70)
70
"""
    digits = list(str(some_integer_input))
    # Find the rightmost digit that is smaller than the digit to its right.
    i = len(digits) - 2
    while i >= 0 and digits[i] >= digits[i + 1]:
        i -= 1
    if i < 0:
        # Digits are in non-increasing order; no higher number exists.
        return some_integer_input
    # Swap it with the smallest digit to its right that is still larger.
    j = len(digits) - 1
    while digits[j] <= digits[i]:
        j -= 1
    digits[i], digits[j] = digits[j], digits[i]
    # Reverse the suffix to get the smallest arrangement of the remaining digits.
    digits[i + 1:] = reversed(digits[i + 1:])
    return int(''.join(digits))
| 10,984
|
def test_url_call_succeeds_with_200(monkeypatch):
"""
Test the function will return once the HTTP Response is
200 OK.
"""
# Setup the mocks.
response = MagicMock(spec=urllib.request.addinfourl)
response.getcode.return_value = 200
mock_urlopen = MagicMock(return_value=response)
    # Patch urlopen so that we do not make an HTTP request.
monkeypatch.setattr('urllib.request.urlopen', mock_urlopen)
waitForURL('http://exmple.com/foo/bar')
assert mock_urlopen.call_count == 1
assert response.getcode.called
| 10,985
|
def cluster_list_node(realm, id):
""" this function add a cluster node """
cluster = Cluster(ES)
account = Account(ES)
account_email = json.loads(request.cookies.get('account'))["email"]
if account.is_active_realm_member(account_email, realm):
return Response(json.dumps(cluster.list_nodes(realm, id)))
else:
return Response({"failure": "account identifier and realm is not an active match"})
| 10,986
|
def validate_code(code):
"""
Ensure that the code provided follows python_variable_naming_syntax.
"""
regex = "^[a-z_][a-z0-9_]+$"
if not re.search(regex, code):
raise ValidationError("code must be in 'python_variable_naming_syntax'")
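# Quick checks: the regex above requires lowercase snake_case and at least two characters.
validate_code('total_count')  # passes silently
try:
    validate_code('TotalCount')
except ValidationError:
    pass  # rejected: contains uppercase characters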
| 10,987
|
def compute_features_for_audio_file(audio_file):
"""
Parameters
----------
audio_file: str
Path to the audio file.
Returns
-------
features: dict
Dictionary of audio features.
"""
# Load Audio
logging.info("Loading audio file %s" % os.path.basename(audio_file))
audio, sr = librosa.load(audio_file, sr=msaf.Anal.sample_rate)
# Compute harmonic-percussive source separation
logging.info("Computing Harmonic Percussive source separation...")
y_harmonic, y_percussive = librosa.effects.hpss(audio)
# Output features dict
features = {}
# Compute framesync features
features["mfcc"], features["hpcp"], features["tonnetz"], \
features["cqt"], features["gmt"] = compute_features(audio, y_harmonic)
# Estimate Beats
features["beats_idx"], features["beats"] = compute_beats(
y_percussive, sr=msaf.Anal.sample_rate)
# Compute Beat-sync features
features["bs_mfcc"], features["bs_hpcp"], features["bs_tonnetz"], \
features["bs_cqt"], features["bs_gmt"] = compute_beat_sync_features(features,
features["beats_idx"])
# Analysis parameters
features["anal"] = {}
features["anal"]["frame_rate"] = msaf.Anal.frame_size
features["anal"]["hop_size"] = msaf.Anal.hop_size
features["anal"]["mfcc_coeff"] = msaf.Anal.mfcc_coeff
features["anal"]["sample_rate"] = msaf.Anal.sample_rate
features["anal"]["window_type"] = msaf.Anal.window_type
features["anal"]["n_mels"] = msaf.Anal.n_mels
features["anal"]["dur"] = audio.shape[0] / float(msaf.Anal.sample_rate)
return features
| 10,988
|
def complete_json(input_data, ref_keys='minimal', input_root=None,
output_fname=None, output_root=None):
"""
Parameters
----------
input_data : str or os.PathLike or list-of-dict
Filepath to JSON with data or list of dictionaries with information
about annotations
ref_keys : {'minimal', 'info'}, optional
Which reference keys to check in `input_data`. Default: 'minimal'
input_root : str, optional
        If `input_data` is a filename, the key in the file containing data about
annotations. If not specified will be based on provided `ref_keys`.
Default: None
output_fname : str or os.PathLike, optional
Filepath where complete JSON should be saved. If not specified the
data are not saved to disk. Default: None
output_root : str, optional
If `output_fname` is not None, the key in the saved JSON where
completed information should be stored. If not specified will be based
on `input_root`. Default: None
Returns
-------
output : list-of-dict
Information about annotations from `input_data`
"""
valid_keys = ['minimal', 'info']
if ref_keys not in valid_keys:
raise ValueError(f'Invalid ref_keys: {ref_keys}. Must be one of '
f'{valid_keys}')
# this is to add missing fields to existing data
# could accept data dict list or filename as input
# set minimal vs info
if ref_keys == 'minimal':
ref_keys = MINIMAL_KEYS
if input_root is None:
input_root = 'annotations'
elif ref_keys == 'info':
ref_keys = INFO_KEYS
if input_root is None:
input_root = 'info'
# check input
if not isinstance(input_data, list):
input_data = parse_json(input_data, root=input_root)
# make output
output = []
for item in input_data:
output.append({
key: (item[key] if key in item else None)
for key in ref_keys
})
# write output
if output_fname is not None:
if output_root is None:
output_root = input_root
write_json(output, output_fname, root=output_root)
return output
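# Hedged usage sketch: MINIMAL_KEYS is a module-level constant of the surrounding package,
# so only the documented behaviour is shown here.
records = [{'annotation': 'example-annotation', 'source': 'manual'}]
completed = complete_json(records, ref_keys='minimal')
# each entry now contains exactly the MINIMAL_KEYS fields, copied from the input where
# present and set to None otherwise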
| 10,989
|
def BuildSystem(input_dir, info_dict, block_list=None):
"""Build the (sparse) system image and return the name of a temp
file containing it."""
return CreateImage(input_dir, info_dict, "system", block_list=block_list)
| 10,990
|
def add_upgrades(ws, cols, lnth):
"""
"""
for col in cols:
cell = "{}1".format(col)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[:2]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
ws[cell] = "='New_4G_Sites'!{}".format(cell)
for col in cols[2:]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
part1 = "=IFERROR(IF(Towers_non_4G_MNO!{}>0,IF(Towers_non_4G_MNO!{}>".format(cell,cell)
part2 = "New_4G_Sites!{},New_4G_Sites!{},New_4G_Sites!{}-Towers_non_4G_MNO!{}),0),0)".format(cell,cell,cell,cell)
ws[cell] = part1 + part2 #+ part3 + part4
columns = ['C','D','E','F','G','H','I','J','K','L']
ws = format_numbers(ws, columns, (1, 200), 'Comma [0]', 0)
set_border(ws, 'A1:L{}'.format(lnth-1), "thin", "000000")
return ws
| 10,991
|
def vae_loss(recon_x, x, mu, logvar, reduction="mean"):
"""
Effects
-------
Reconstruction + KL divergence losses summed over all elements and batch
See Appendix B from VAE paper:
Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
https://arxiv.org/abs/1312.6114
"""
BCE = F.binary_cross_entropy(recon_x, x, reduction=reduction)
    # KLD = -0.5 * mean(1 + log(sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp())
return BCE, KLD
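# Minimal sketch: random tensors standing in for a VAE forward pass on flattened
# 28x28 images with a 20-dimensional latent space.
import torch
x = torch.rand(8, 784)        # targets in [0, 1] for binary cross-entropy
recon_x = torch.rand(8, 784)  # decoder output, also in [0, 1]
mu, logvar = torch.zeros(8, 20), torch.zeros(8, 20)
bce, kld = vae_loss(recon_x, x, mu, logvar)
loss = bce + kld              # typical total objective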
| 10,992
|
def assert_balance(from_channel, balance, locked):
""" Assert the from_channel overall token values. """
assert balance >= 0
assert locked >= 0
distributable = balance - locked
channel_distributable = channel.get_distributable(
from_channel.our_state,
from_channel.partner_state,
)
assert channel.get_balance(from_channel.our_state, from_channel.partner_state) == balance
assert channel_distributable == distributable
assert channel.get_amount_locked(from_channel.our_state) == locked
amount_locked = channel.get_amount_locked(from_channel.our_state)
assert balance == amount_locked + distributable
| 10,993
|
def test_driver_update_fails_with_invalid_id():
"""
Tests updating a record fails if the record id is not found.
"""
with sqlite3.connect("index.sq3") as conn:
driver = SQLAlchemyIndexDriver("sqlite:///index.sq3")
did = str(uuid.uuid4())
baseid = str(uuid.uuid4())
rev = str(uuid.uuid4())[:8]
form = "object"
conn.execute(
"""
INSERT INTO index_record(did, baseid, rev, form, size) VALUES (?,?,?,?,?)
""",
(did, baseid, rev, form, None),
)
conn.commit()
with pytest.raises(NoRecordFound):
driver.update("some_record_that_does_not_exist", "some_record_version", rev)
| 10,994
|
def Or(*args):
"""Defines the three valued ``Or`` behaviour for a 2-tuple of
three valued logic values"""
def reduce_or(cmp_intervala, cmp_intervalb):
if cmp_intervala[0] is True or cmp_intervalb[0] is True:
first = True
elif cmp_intervala[0] is None or cmp_intervalb[0] is None:
first = None
else:
first = False
if cmp_intervala[1] is True or cmp_intervalb[1] is True:
second = True
elif cmp_intervala[1] is None or cmp_intervalb[1] is None:
second = None
else:
second = False
return (first, second)
return reduce(reduce_or, args)
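# Quick checks: each argument is a 2-tuple of three-valued logic values
# (True, False or None), combined pairwise.
assert Or((True, None), (False, False)) == (True, None)
assert Or((None, False), (False, False)) == (None, False)
assert Or((False, False), (False, True), (False, False)) == (False, True)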
| 10,995
|
def create_task():
"""Create a new task"""
data = request.get_json()
    # In a more advanced solution, generic validation should be done
    if TaskValidator._validate_title(data):
TaskPersistence.create(title=data['title'])
return {'success': True, 'message': 'Task has been saved'}
# Simple error response
return {'error': 'bad request', 'message': 'not valid data', 'status': 400}
| 10,996
|
def test_is_python_version_newer_than_3_6():
"""Test to check python version is 3.7 or later"""
assert util.is_python_version_newer_than_3_6() is not None
| 10,997
|
def move(timeout=10):
"""Send mouse X & Y reported data from the Pinnacle touch controller
until there's no input for a period of ``timeout`` seconds."""
if mouse is None:
raise OSError("mouse HID device not available.")
start = time.monotonic()
while time.monotonic() - start < timeout:
data = tpad.report() # only returns fresh data (if any)
if data: # is there fresh data?
mouse.send_report(data) # no scrolling or backward/forward
start = time.monotonic()
mouse.send_report(b'\x00' * 4)
| 10,998
|
def get_socket_with_reuseaddr() -> socket.socket:
"""Returns a new socket with `SO_REUSEADDR` option on, so an address
can be reused immediately, without waiting for TIME_WAIT socket
state to finish.
On Windows, `SO_EXCLUSIVEADDRUSE` is used instead.
This is because `SO_REUSEADDR` on this platform allows the socket
to be bound to an address that is already bound by another socket,
without requiring the other socket to have this option on as well.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if 'SO_EXCLUSIVEADDRUSE' in dir(socket):
sock.setsockopt(socket.SOL_SOCKET,
getattr(socket, 'SO_EXCLUSIVEADDRUSE'), 1)
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return sock
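# Minimal sketch: bind to an ephemeral local port with the reuse flag applied.
sock = get_socket_with_reuseaddr()
sock.bind(('127.0.0.1', 0))
sock.listen()
print(sock.getsockname())  # ('127.0.0.1', <ephemeral port>)
sock.close()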
| 10,999
|