hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
21f6044c6d47b23589ffcc79568157764ee5680f | 20,359 | py | Python | scripts/parse_cluster_realign.py | maojanlin/gAIRRsuite | 2c7b586624a8ec3f7321da787729bf5435b219d3 | [
"MIT"
] | 3 | 2021-05-20T06:05:40.000Z | 2022-03-07T03:05:30.000Z | scripts/parse_cluster_realign.py | maojanlin/gAIRRsuite | 2c7b586624a8ec3f7321da787729bf5435b219d3 | [
"MIT"
] | null | null | null | scripts/parse_cluster_realign.py | maojanlin/gAIRRsuite | 2c7b586624a8ec3f7321da787729bf5435b219d3 | [
"MIT"
] | 1 | 2022-03-04T02:29:27.000Z | 2022-03-04T02:29:27.000Z | import argparse
import pickle
import os
import numpy as np
#from parse_contig_realign import mark_edit_region, variant_link_graph, haplotyping_link_graph, output_contig_correction
from parse_contig_realign import variant_link_graph, output_contig_correction, parse_CIGAR, parse_MD, trim_dict, find_double_pos, get_farthest_ext
from utils import get_reverse_complement
import sys
def parse_args():
    """Parse and return the command-line arguments for this script.

    Returns:
        argparse.Namespace: Parsed arguments with attributes fn_sam,
            fn_cluster_contig, fo_report and fo_corrected_alleles.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-fs', '--fn_sam',
                        help='sam file of reads realign to contig')
    parser.add_argument('-fc', '--fn_cluster_contig',
                        help='cropped contig file, corrected or not')
    parser.add_argument('-for', '--fo_report',
                        help='output report file')
    parser.add_argument('-foc', '--fo_corrected_alleles',
                        help='output corrected alleles fasta file')
    return parser.parse_args()
def eprint(*args, **kwargs):
    """Like print(), but writes to stderr instead of stdout."""
    print(*args, file=sys.stderr, **kwargs)
def cluster_separate(fn_cluster_contig, fn_sam):
    """Group realigned SAM records by the contig they align to.

    Args:
        fn_cluster_contig (str): Path to the contig fasta file (corrected or not).
        fn_sam (str): Path to the SAM file of reads realigned to the contigs.

    Returns:
        dict: Maps contig_name to a list of
            [edit_histogram (np.ndarray), cover_histogram (np.ndarray),
             contig_SEQ (str), list_read_field (list of SAM field lists)].
            Both histograms have len(contig_SEQ) + 1 bins so they can be
            indexed directly with 1-based SAM coordinates.

    Raises:
        KeyError: If the SAM file references a contig absent from the fasta.
    """
    dict_contig = {}

    def _add_contig(name, seq):
        # Histograms are one element longer than the sequence so 1-based
        # SAM positions can index them without an offset.
        dict_contig[name] = [np.zeros(len(seq) + 1), np.zeros(len(seq) + 1), seq, []]

    # Initialize dict_contig from the fasta file.
    with open(fn_cluster_contig, 'r') as f_c:
        contig_name = ""
        contig_SEQ = ""
        for line in f_c:
            if line[0] == '>':
                if contig_name:
                    _add_contig(contig_name, contig_SEQ)
                # The contig ID is the first whitespace-delimited token after '>'.
                contig_name = line.strip()[1:].split()[0]
                contig_SEQ = ""
            else:
                contig_SEQ += line.strip()
        # Flush the final record; guard against an empty or headerless fasta,
        # which previously created a bogus "" contig entry.
        if contig_name:
            _add_contig(contig_name, contig_SEQ)

    # Attach each aligned SAM record to its contig.
    with open(fn_sam, 'r') as f_r:
        for line in f_r:
            if line[0] == '@':  # skip SAM header lines
                continue
            fields = line.split()
            if fields[2] == '*':  # unmapped read: RNAME is '*'
                continue
            dict_contig[fields[2]][3].append(fields)
    return dict_contig
def mark_edit_region(contig_name, contig_info, ignore_S=False):
    """Accumulate per-position coverage and edit histograms for one contig.

    Walks every SAM record attached to the contig, converts each read's
    mismatches (MD tag), insertions and accepted soft-clips into contig
    coordinates, and updates the histograms in place.

    Args:
        contig_name (str): Name of the contig the reads should align to.
        contig_info (list): [edit_histogram, cov_histogram, contig_SEQ,
            list_read_field] as built by cluster_separate(); the two
            numpy histograms are modified in place.
        ignore_S (bool): If True, treat reads whose soft-clipped end is at
            least 1/15 of the read length as unusable.

    Returns:
        tuple: (edit_histogram, cov_histogram, list_read_info) where
            list_read_info holds one tuple per read:
            (start_pos, end_pos, read_name, even_odd_flag, mis_region,
             cigar, read_SEQ). Unusable reads get (0, 0, ...) placeholders.
    """
    # contig_info = [edit_histogram, cov_histogram, contig_SEQ, list_read]
    edit_histogram = contig_info[0]
    cov_histogram = contig_info[1]
    # list_read_info: [ (start_pos, end_pos, read_name, even_odd_flag, mis_region) ]
    list_read_info = []
    # even_odd_flag alternates 1/2 so read pairs can later be distinguished.
    even_odd_flag = 1
    list_read_field = contig_info[3]
    for fields in list_read_field:
        read_name = fields[0]
        read_SEQ = fields[9]
        cigar = fields[5]
        sam_flag = int(fields[1])
        # if the alignment is a supplementary alignment, pass, it does not matter the even odd
        # read BWA manual "Supplementary Alignment" for more information
        # NOTE(review): supplementary is flag 2048; "> 1024" also skips any
        # record with the duplicate bit (1024) plus others set - confirm intent.
        if sam_flag > 1024:
            continue
        S_flag = False
        # number/operate: parallel lists of CIGAR lengths and operations
        # (assumed contract of parse_CIGAR - defined in parse_contig_realign).
        number, operate = parse_CIGAR(cigar)
        if ignore_S and 'S' in cigar:
            # Discard reads whose clip at either end is >= 1/15 of the read.
            if operate[0] == 'S':
                if number[0] >= len(read_SEQ)/15:
                    S_flag = True
            if operate[-1] == 'S':
                if number[-1] >= len(read_SEQ)/15:
                    S_flag = True
        # if cigar == '*', means alignment is bad, pass
        # if the read align to incorrect contigs, pass
        if cigar == '*' or contig_name != fields[2] or S_flag:
            # Record a placeholder so pairing via even_odd_flag stays in sync.
            list_read_info.append((0, 0, read_name, even_odd_flag, [], "", read_SEQ))
            if even_odd_flag == 1:
                even_odd_flag = 2
            else:
                even_odd_flag = 1
            continue
        edit_dist = int(fields[11].split(':')[2])  # NM:i:2 tag (currently unused)
        MD_tag = fields[12].split(':')[2]  # MD:Z:38G2A20
        start_pos = int(fields[3])

        mis_region_MD = parse_MD(MD_tag)
        mis_region_MD = [ele + start_pos - 1 for ele in mis_region_MD]  # change to ref coordinate

        mis_region_I = []  # insertion boundary region
        diff_len = 0       # len contribution of D, I, and S
        if 'I' in operate or 'D' in operate or 'S' in operate:
            idx_I = start_pos - 1  # index in reference
            for idx, op in enumerate(operate):
                if op == 'I':
                    # Insertions consume read but not reference; flag both
                    # flanking reference positions.
                    diff_len -= number[idx]
                    mis_region_I.append(idx_I)
                    mis_region_I.append(idx_I+1)
                else:
                    if op == 'S':
                        diff_len -= number[idx]
                    else:
                        idx_I += number[idx]
                    if op == 'D':
                        diff_len += number[idx]
        # Reference span of the alignment after accounting for I/D/S.
        end_pos = start_pos + len(fields[9]) + diff_len
        match_len = end_pos - start_pos

        # Try to "recover" short soft-clips by treating them as mismatches.
        mis_region_S = []
        recover_S_flag = False
        if operate[0] == 'S':
            # Cannot extend past the start of the contig.
            left_S_len = min(number[0], start_pos-1)
            if left_S_len < match_len/10:  # if S len is not too long, we accept it as mismatch
                mis_region_S = [pos for pos in range(start_pos-left_S_len,start_pos)]
                start_pos -= left_S_len
                operate[0] = 'M'
                if left_S_len != number[0]:
                    # Part of the clip remains clipped; split the CIGAR entry.
                    operate = ['S'] + operate
                    number = [number[0]-left_S_len] + number
                    number[1] = left_S_len
                recover_S_flag = True
        if operate[-1] == 'S':
            # Cannot extend past the end of the contig.
            right_S_len = min(number[-1], len(cov_histogram)-end_pos)
            if right_S_len < match_len/10:  # if S len is not to long, we accept it as mismatch
                mis_region_S += [pos for pos in range(end_pos,end_pos+right_S_len)]
                end_pos += right_S_len
                operate[-1] = 'M'
                if right_S_len != number[-1]:
                    operate = operate + ['S']
                    number = number + [number[-1]-right_S_len]
                    number[-2] = right_S_len
                recover_S_flag = True
        if recover_S_flag:
            # Rebuild the CIGAR string from the modified number/operate lists.
            cigar = ""
            for cigar_id, element in enumerate(number):
                cigar += str(element)
                cigar += operate[cigar_id]
        #print(read_name + '\t', start_pos, end_pos)
        cov_histogram[start_pos:end_pos] += 1
        mis_region = mis_region_MD + mis_region_I + mis_region_S
        mis_region.sort()
        # numpy fancy indexing: every listed position gets +1.
        edit_histogram[mis_region] += 1

        # record the reads information
        list_read_info.append((start_pos, end_pos, read_name, even_odd_flag, mis_region, cigar, read_SEQ))
        if even_odd_flag == 1:
            even_odd_flag = 2
        else:
            even_odd_flag = 1
    return edit_histogram, cov_histogram, list_read_info
def haplotyping_link_graph(dict_link_graph, dict_var_weight, dict_link_outward, dict_link_inward, edit_region):
    """Phase two haplotypes through the variant link graph.

    Greedily extends two haplotypes site-by-site using the read-link
    evidence collected by variant_link_graph(). Sites and links are trimmed
    with trim_dict() before use (helper from parse_contig_realign).

    Args:
        dict_link_graph (dict): (position, base) -> {link_info: weight} full link graph.
        dict_var_weight (dict): position -> {base: weight} candidate variants.
        dict_link_outward (dict): (position, base) -> outward link weights.
        dict_link_inward (dict): (position, base) -> inward link weights.
        edit_region: NOTE(review) - parameter is never used in this function.

    Returns:
        tuple: (haplotype_0, haplotype_1), each a list of (position, base)
            pairs; ([], []) when no variant site survives trimming.
    """
    # sort the potential variants on the interested site, can only use these variants bases
    list_pos_weight = []
    print("Trimming the significant bases at interested site:")
    print("Original site-base dict", dict_var_weight)
    for key in sorted(dict_var_weight.keys()):
        dict_part = dict_var_weight[key]
        trim_dict(dict_part, 10)
        # Bases at each site are ordered by descending weight.
        list_pos_weight.append((key, sorted(dict_part.items(), key=lambda pair:pair[1], reverse=True)))
    print("Final site-base list:", list_pos_weight)
    eprint("#### max site-base variant #", max([len(ele[1]) for ele in list_pos_weight]))

    if list_pos_weight == []:
        print("There is no variant detected!")
        return [], []

    print("+++++++++++++++++++", "dict_link_graph", "+++++++++++++++++++")
    for key in sorted(dict_link_graph.keys()):
        print(key, dict_link_graph[key])
    print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++")

    # initializing the haplotype list, the cursor, and the last_ext
    haplotype_0 = []       # record the (position, base) pair of the haplotype
    hap_cursor_0 = 0       # record the position got the linking information (still useless in this version)
    break_flag_0 = False   # the flag indicating of the haplotype is breaked
    haplotype_1 = []
    hap_cursor_1 = 0
    break_flag_1 = False
    pos_start_idx = 0
    # find the first variant site with two variants
    pos_start_idx, haplotype_0, haplotype_1, hap_cursor_0, hap_cursor_1 = find_double_pos(pos_start_idx, list_pos_weight, haplotype_0, haplotype_1, hap_cursor_0, hap_cursor_1)

    # haplotyping from list_pos_weight:
    for pos_idx in range(pos_start_idx, len(list_pos_weight)):
        pos_weight = list_pos_weight[pos_idx]
        position = pos_weight[0]
        list_pos_base = pos_weight[1]
        print("XXXXXXXXXXXXXX", position, "XXXXXXXXXXXXXXXX")

        # deal with haplotype_0's outward lin
        dict_outward_0 = {}
        if dict_link_outward.get(haplotype_0[hap_cursor_0]):
            dict_outward_0 = dict_link_outward[haplotype_0[hap_cursor_0]]
            trim_dict(dict_outward_0)
        # If no outward link reaches this position, haplotype 0 is broken here.
        if position > get_farthest_ext(dict_outward_0, haplotype_0[hap_cursor_0]):
            break_flag_0 = True
            eprint("Haplotype 0 has a break at", haplotype_0[hap_cursor_0], "to", position)
        print(dict_outward_0)

        # deal with haplotype_1's outward link
        print("--------------------")
        dict_outward_1 = {}
        if dict_link_outward.get(haplotype_1[hap_cursor_1]):
            dict_outward_1 = dict_link_outward[haplotype_1[hap_cursor_1]]
            trim_dict(dict_outward_1)
        if position > get_farthest_ext(dict_outward_1, haplotype_1[hap_cursor_1]):
            break_flag_1 = True
            eprint("Haplotype 1 has a break at", haplotype_1[hap_cursor_1], "to", position)
        print(dict_outward_1)

        # deal with position's inward link
        print("--------------------")
        dict_inward_0 = {}
        if dict_link_inward.get((position, list_pos_base[0][0])):
            dict_inward_0 = dict_link_inward[(position, list_pos_base[0][0])]
            trim_dict(dict_inward_0)
        print(dict_inward_0)
        #print(dict_link_graph[(position, list_pos_base[1][0])])
        if len(list_pos_base) > 1:
            print("--------------------")
            dict_inward_1 = {}
            if dict_link_inward.get((position, list_pos_base[1][0])):
                dict_inward_1 = dict_link_inward[(position, list_pos_base[1][0])]
                trim_dict(dict_inward_1)
            print(dict_inward_1)

        connect_info_0 = None
        connect_info_1 = None
        # There must be at least one kind of base in the position
        # Try to connect each haplotype to the strongest matching in/out link.
        for (outward_key, weight) in sorted(dict_outward_0.items(), key=lambda pair:pair[1], reverse=True):
            if dict_inward_0.get(outward_key):
                print("Potential Connect: ", outward_key, 0, 0)
                connect_info_0 = (dict_outward_0[outward_key], (position, outward_key[1][1]))
                break
        for (outward_key, weight) in sorted(dict_outward_1.items(), key=lambda pair:pair[1], reverse=True):
            if dict_inward_0.get(outward_key):
                print("Potential Connect: ", outward_key, 1, 0)
                connect_info_1 = (dict_outward_1[outward_key], (position, outward_key[1][1]))
                break
        # if there are two variants in the position
        if len(list_pos_base) > 1:
            for (outward_key, weight) in sorted(dict_outward_0.items(), key=lambda pair:pair[1], reverse=True):
                if dict_inward_1.get(outward_key):
                    print("Potential Connect: ", outward_key, 0, 1)
                    # Keep the heavier of the two candidate connections.
                    if connect_info_0 == None or connect_info_0[0] < weight:
                        connect_info_0 = (dict_outward_0[outward_key], (position, outward_key[1][1]))
                    break
            for (outward_key, weight) in sorted(dict_outward_1.items(), key=lambda pair:pair[1], reverse=True):
                if dict_inward_1.get(outward_key):
                    print("Potential Connect: ", outward_key, 1, 1)
                    if connect_info_1 == None or connect_info_1[0] < weight:
                        connect_info_1 = (dict_outward_1[outward_key], (position, outward_key[1][1]))
                    break

        # the case that two haplotypes may collapse into one
        if connect_info_0 and connect_info_1:
            if connect_info_0[1] == connect_info_1[1]:  # two haplotypes are collapsed
                record_info_0 = [connect_info_0[1]]
                record_info_1 = [connect_info_1[1]]
                # Scan forward for the next site with two variants and try to
                # re-split the haplotypes there using the full link graph.
                for redouble_idx in range(pos_idx, len(list_pos_weight)):
                    rd_pos_weight = list_pos_weight[redouble_idx]
                    rd_position = rd_pos_weight[0]
                    rd_list_pos_base = rd_pos_weight[1]
                    if(len(rd_list_pos_base)) >= 2:  # if there are two variants at the site
                        # call the potential connections
                        last_info_0 = haplotype_0[hap_cursor_0]
                        last_info_1 = haplotype_1[hap_cursor_1]
                        dict_info_0 = dict_link_graph[last_info_0]
                        dict_info_1 = dict_link_graph[last_info_1]
                        # connect them
                        rd_info_0 = None
                        rd_info_1 = None
                        for rd_link_info, rd_weight in sorted(dict_info_0.items(), key=lambda pair:pair[1], reverse=True):
                            variant_flag = False
                            for info_pair in rd_link_info[1]:
                                tmp_rd_info = []
                                if info_pair == connect_info_0[1]:
                                    variant_flag = True
                                    tmp_rd_info.append(info_pair)
                                if variant_flag:
                                    tmp_rd_info.append(info_pair)
                                    if info_pair[0] == rd_position:
                                        rd_info_0 = tmp_rd_info
                                        break
                            if rd_info_0:
                                break
                        # NOTE(review): unlike the loop above, variant_flag is
                        # not reset to False per rd_link_info here - it carries
                        # over from the previous loop; confirm this is intended.
                        for rd_link_info, rd_weight in sorted(dict_info_1.items(), key=lambda pair:pair[1], reverse=True):
                            for info_pair in rd_link_info[1]:
                                tmp_rd_info = []
                                if info_pair == connect_info_1[1]:
                                    variant_flag = True
                                    tmp_rd_info.append(info_pair)
                                if variant_flag:
                                    tmp_rd_info.append(info_pair)
                                    if info_pair[0] == rd_position:
                                        rd_info_1 = tmp_rd_info
                                        break
                            if rd_info_1:
                                break
                        print("connect_info_0", record_info_0)
                        print("connect_info_1", record_info_1)
                        print("rd_info_0", rd_info_0)
                        print("rd_info_1", rd_info_1)
                        if rd_info_0:
                            record_info_0 += rd_info_0
                        if rd_info_1:
                            record_info_1 += rd_info_1
                        # If only one haplotype can be extended, break the other.
                        if rd_info_0 != rd_info_1:
                            if rd_info_0:
                                pass
                            else:
                                break_flag_0 = True
                            if rd_info_1:
                                pass
                            else:
                                break_flag_1 = True
                        break
                haplotype_0 += record_info_0
                hap_cursor_0 += len(record_info_0)
                haplotype_1 += record_info_1
                hap_cursor_1 += len(record_info_1)
                print("Crossing the single base variant site...")
                continue

        # update the nodes if the connection is found
        if connect_info_0:
            haplotype_0.append(connect_info_0[1])
            hap_cursor_0 += 1
            # If haplotype 1 was broken, restart it on the other base here.
            if break_flag_1 and len(list_pos_base) >1:
                for idx in range(2):
                    potential_base = list_pos_base[idx][0]
                    if potential_base != connect_info_0[1][1]:
                        eprint("Link rebuilt on Haplotype 1 at", haplotype_1[hap_cursor_1] , "to", position)
                        haplotype_1.append((position, potential_base))
                        hap_cursor_1 += 1
                        break_flag_1 = False
                        break
        if connect_info_1:
            haplotype_1.append(connect_info_1[1])
            hap_cursor_1 += 1
            if break_flag_0 and len(list_pos_base) >1:
                for idx in range(2):
                    potential_base = list_pos_base[idx][0]
                    if potential_base != connect_info_1[1][1]:
                        eprint("Link rebuilt on Haplotype 0 at", haplotype_0[hap_cursor_0] , "to", position)
                        haplotype_0.append((position, potential_base))
                        hap_cursor_0 += 1
                        break_flag_0 = False
                        break
        # Both haplotypes broken: fall back to the weight-ordered guess.
        if break_flag_0 and break_flag_1:
            eprint("BREAKING LINKS FOR BOTH HAPLOTYPE AT", position, "!!!!")
            eprint("Breaking links cannot resloved, we guess...")
            haplotype_0.append((position, list_pos_base[0][0]))
            hap_cursor_0 += 1
            if len(list_pos_base) > 1:
                haplotype_1.append((position, list_pos_base[1][0]))
                hap_cursor_1 += 1

    print(haplotype_0)
    print(haplotype_1)
    return haplotype_0, haplotype_1
if __name__ == '__main__':
    args = parse_args()
    fn_sam = args.fn_sam                              # reads realigned to contigs
    fn_cluster_contig = args.fn_cluster_contig        # cropped contig fasta
    fo_report = args.fo_report                        # report path (currently unused below)
    fo_corrected_alleles = args.fo_corrected_alleles  # corrected alleles fasta output

    # Group the SAM records by contig.
    dict_contig = cluster_separate(fn_cluster_contig, fn_sam)
    for contig_name, contig_info in sorted(dict_contig.items()):
        #parse the sam file and generate
        edit_histogram, cov_histogram, list_read_info = mark_edit_region(contig_name, contig_info)

        #determine the region contains alternative flanking region
        edit_region = []
        for idx, ele in enumerate(edit_histogram):
            print(str(idx) + ':\t' + str(cov_histogram[idx]) + '\t' + str(ele))
            # A site is flagged when more than 1/4 of the covering reads disagree.
            if ele > cov_histogram[idx]/4:
                edit_region.append(idx)
        print(contig_name, edit_region)

        contig_SEQ = dict_contig[contig_name][2]
        interest_region = "0-" + str(len(contig_SEQ))
        interest_edit_region = edit_region
        # Correct only when there are variant sites and coverage exceeds 20x everywhere.
        if interest_edit_region != [] and min(cov_histogram[1:]) > 20:
            print("=========== allele correction ==============")
            eprint("CORRECT", contig_name.split('|')[1], min(cov_histogram[1:]), interest_edit_region)
            dict_link_graph, dict_var_weight, dict_link_outward, dict_link_inward = variant_link_graph(interest_edit_region, list_read_info)
            haplotype_0, haplotype_1 = haplotyping_link_graph(dict_link_graph, dict_var_weight, dict_link_outward, dict_link_inward, interest_region)

            #output_contig_correction(contig_SEQ, region_st, region_ed, haplotype_0, haplotype_1, contig_name, corrected_contig_output_file)
            output_contig_correction(contig_SEQ, 0, len(contig_SEQ), haplotype_0, haplotype_1, contig_name, fo_corrected_alleles, "/novel")
        elif interest_edit_region != []:
            # Variants found but coverage too low to trust a correction.
            eprint("DDeficient", contig_name.split('|')[1], min(cov_histogram[1:]), interest_edit_region)
            print("=== cov not efficient:", min(cov_histogram[1:]), "=======")
        else:
            eprint("No variant", contig_name.split('|')[1], min(cov_histogram[1:]), interest_edit_region)
            print("============ No novel allele ===============")
| 45.545861 | 175 | 0.554055 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,447 | 0.169311 |
21f660305f9eb4a0474e78b1bbb3a65006f2c75f | 30,257 | py | Python | src/huntsman/pocs/observatory.py | Physarah/huntsman-pocs | df013f71f99a7da17d6b44915ebf6f301aba10fc | [
"MIT"
] | null | null | null | src/huntsman/pocs/observatory.py | Physarah/huntsman-pocs | df013f71f99a7da17d6b44915ebf6f301aba10fc | [
"MIT"
] | null | null | null | src/huntsman/pocs/observatory.py | Physarah/huntsman-pocs | df013f71f99a7da17d6b44915ebf6f301aba10fc | [
"MIT"
] | null | null | null | import time
from contextlib import suppress, contextmanager
from astropy import units as u
from panoptes.utils import error
from panoptes.utils.utils import get_quantity_value
from panoptes.utils.time import current_time, wait_for_events, CountdownTimer
from panoptes.pocs.observatory import Observatory
from panoptes.pocs.scheduler.observation.bias import BiasObservation
from huntsman.pocs.utils.logger import get_logger
from huntsman.pocs.guide.bisque import Guide
from huntsman.pocs.archive.utils import remove_empty_directories
from huntsman.pocs.scheduler.observation.dark import DarkObservation
from huntsman.pocs.utils.flats import make_flat_field_sequences, make_flat_field_observation
from huntsman.pocs.utils.flats import get_cameras_with_filter
from huntsman.pocs.utils.safety import get_solar_altaz
from huntsman.pocs.camera.group import CameraGroup, dispatch_parallel
from huntsman.pocs.error import NotTwilightError
class HuntsmanObservatory(Observatory):
    def __init__(self, with_autoguider=True, hdr_mode=False, take_flats=True, logger=None,
                 *args, **kwargs):
        """Huntsman POCS Observatory

        Args:
            with_autoguider (bool, optional): If autoguider is attached, defaults to True.
            hdr_mode (bool, optional): If pics should be taken in HDR mode, defaults to False.
            take_flats (bool, optional): If flat field images should be taken, defaults to True.
            logger (logger, optional): The logger instance. If not provided, use default Huntsman
                logger.
            *args: Parsed to Observatory init function.
            **kwargs: Parsed to Observatory init function.
        """
        if not logger:
            logger = get_logger()

        super().__init__(logger=logger, *args, **kwargs)

        # Make a camera group
        self.camera_group = CameraGroup(self.cameras)

        self._has_hdr_mode = hdr_mode
        self._has_autoguider = with_autoguider

        self.flat_fields_required = take_flats

        # Focusing bookkeeping: time/temperature of the last focus runs plus
        # the configured re-focus intervals and temperature tolerances.
        self.last_coarse_focus_time = None
        self.last_coarse_focus_temp = None
        self._coarse_focus_interval = self.get_config('focusing.coarse.interval_hours', 1) * u.hour
        self._coarse_focus_filter = self.get_config('focusing.coarse.filter_name')
        self._coarse_focus_temptol = self.get_config('focusing.coarse.temp_tol_deg', 5) * u.Celsius

        self.last_fine_focus_time = None
        self.last_fine_focus_temp = None
        self._fine_focus_interval = self.get_config('focusing.fine.interval_hours', 1) * u.hour
        self._fine_focus_temptol = self.get_config('focusing.fine.temp_tol_deg', 5) * u.Celsius

        if self.has_autoguider:
            self.logger.info("Setting up autoguider")
            try:
                self._create_autoguider()
            except Exception as e:
                # Autoguider is optional: disable it rather than fail startup.
                self._has_autoguider = False
                self.logger.warning(f"Problem setting autoguider, continuing without: {e!r}")

        # Hack solution to the observatory not knowing whether it is safe or not
        # This can be overridden when creating the HuntsmanPOCS instance
        self._is_safe = None
# Properties
@property
def has_hdr_mode(self):
""" Does camera support HDR mode
Returns:
bool: HDR enabled, default False
"""
return self._has_hdr_mode
@property
def has_autoguider(self):
""" Does camera have attached autoguider
Returns:
bool: True if has autoguider
"""
return self._has_autoguider
@property
def coarse_focus_required(self):
""" Return True if we should do a coarse focus. """
return self._focus_required(coarse=True)
@property
def fine_focus_required(self):
""" Return True if we should do a fine focus. """
return self._focus_required()
@property
def is_past_midnight(self):
"""Check if it's morning, useful for going into either morning or evening flats."""
# Get the time of the nearest midnight to now
# If the nearest midnight is in the past, it's the morning
midnight = self.observer.midnight(current_time(), which='nearest')
return midnight < current_time()
@property
def is_twilight(self):
""" Return True if it is twilight, else False. """
return self.is_dark(horizon="twilight_max") and not self.is_dark(horizon="twilight_min")
@property
def temperature(self):
""" Return the ambient temperature. """
temp = None
try:
reading = self.db.get_current("weather")["data"]["ambient_temp_C"]
temp = get_quantity_value(reading, u.Celsius) * u.Celsius
except (KeyError, TypeError) as err:
self.logger.warning(f"Unable to determine temperature: {err!r}")
return temp
@property
def solar_altaz(self):
""" Return the current solar alt az. """
return get_solar_altaz(location=self.earth_location, time=current_time())
# Context managers
@contextmanager
def safety_checking(self, *args, **kwargs):
""" Check safety before and after the code block.
To be used with a "with" statement, e.g.:
with self.safety_checking():
print(x)
Args:
*args, **kwargs: Parsed to self._assert_safe
Raises:
RuntimeError: If not safe.
"""
self._assert_safe(*args, **kwargs)
try:
yield None
finally:
self._assert_safe(*args, **kwargs)
# Methods
def initialize(self):
"""Initialize the observatory and connected hardware """
super().initialize()
if self.has_autoguider:
self.logger.debug("Connecting to autoguider")
self.autoguider.connect()
def is_safe(self, park_if_not_safe=False, *args, **kwargs):
""" Return True if it is safe, else False.
Args:
*args, **kwargs: Parsed to self._is_safe. See panoptes.pocs.core.POCS.is_safe.
park_if_not_safe (bool): If True, park if safety fails. Default: False.
Returns:
bool: True if safe, else False.
"""
if self._is_safe is not None:
return self._is_safe(park_if_not_safe=park_if_not_safe, *args, **kwargs)
self.logger.warning("Safety function not set. Returning False")
return False
def remove_camera(self, cam_name):
""" Remove a camera from the observatory.
Args:
cam_name (str): The name of the camera to remove.
"""
super().remove_camera(cam_name)
with suppress(KeyError):
del self.camera_group.cameras[cam_name]
    def autofocus_cameras(self, coarse=False, filter_name=None, default_timeout=900,
                          blocking=True, **kwargs):
        """ Override autofocus_cameras to update the last focus time and move filterwheels.

        Args:
            coarse (bool, optional): Perform coarse focus? Default False.
            filter_name (str, optional): The filter name to focus with. If None (default), will
                attempt to get from config, by default using the coarse focus filter.
            default_timeout (int, optional): Fallback timeout in seconds if not in config.
            blocking (bool, optional): If True (default), wait for all focus events.
            *args, **kwargs: Parsed to `pocs.observatory.Observatory.autofocus_cameras`.

        Returns:
            threading.Event: The autofocus event.

        Raises:
            error.Timeout: If blocking and the focus sequences do not finish in time.
        """
        focus_type = "coarse" if coarse else "fine"

        # Choose the filter to focus with
        # TODO: Move this logic to the camera level
        if filter_name is None:
            if coarse:
                filter_name = self._coarse_focus_filter
            else:
                try:
                    filter_name = self.current_observation.filter_name
                except AttributeError:
                    # No current observation: fall back to the coarse filter.
                    filter_name = self._coarse_focus_filter
                    self.logger.warning("Unable to retrieve filter name from current observation."
                                        f" Defaulting to coarse focus filter ({filter_name}).")

        # Asyncronously dispatch autofocus calls
        with self.safety_checking(horizon="focus"):
            events = self.camera_group.autofocus(coarse=coarse, filter_name=filter_name, **kwargs)

        # Wait for sequences to finish
        if blocking:
            timeout = self.get_config(f"focusing.{focus_type}.timeout", default_timeout)
            if not wait_for_events(list(events.values()), timeout=timeout):
                raise error.Timeout(f"Timeout of {timeout} reached while waiting for fine focus.")

        # Update last focus time (last_coarse_focus_time / last_fine_focus_time)
        setattr(self, f"last_{focus_type}_focus_time", current_time())

        # Update last focus temperature
        setattr(self, f"last_{focus_type}_focus_temp", self.temperature)

        return events
def cleanup_observations(self, *args, **kwargs):
""" Override method to remove empty directories. Called in housekeeping state."""
super().cleanup_observations(*args, **kwargs)
self.logger.info("Removing empty directories in images directory.")
images_dir = self.get_config("directories.images")
remove_empty_directories(images_dir)
self.logger.info("Removing empty directories in archive directory.")
archive_dir = self.get_config("directories.archive")
remove_empty_directories(archive_dir)
    def take_flat_fields(self, cameras=None, **kwargs):
        """ Take flat fields for each camera in each filter, respecting filter order.

        Args:
            cameras (dict): Dict of cam_name: camera pairs. If None (default), use all cameras.
            **kwargs: Overrides config entries under `calibs.flat`.

        Raises:
            RuntimeError: If conditions are not safe for twilight flats.
        """
        if cameras is None:
            cameras = self.cameras

        # Load the flat field config, allowing overrides from kwargs
        flat_config = self.get_config('calibs.flat', default=dict())
        flat_config.update(kwargs)

        # Specify filter order
        filter_order = flat_config['filter_order'].copy()
        if self.is_past_midnight:  # If it's the morning, order is reversed
            filter_order.reverse()

        # Take flat fields in each filter
        for filter_name in filter_order:

            # Abort outright if conditions are not right for twilight flats.
            if not (self.is_safe(horizon="twilight_max") and self.is_twilight):
                raise RuntimeError("Not safe for twilight flats. Aborting.")

            # Get a dict of cameras that have this filter
            cameras_with_filter = get_cameras_with_filter(cameras, filter_name)

            # Go to next filter if there are no cameras with this one
            if not cameras_with_filter:
                self.logger.warning(f'No cameras found with {filter_name} filter.')
                continue

            # Get the flat field observation
            observation = make_flat_field_observation(self.earth_location, filter_name=filter_name)
            observation.seq_time = current_time(flatten=True)

            # Take the flats for each camera in this filter
            self.logger.info(f'Taking flat fields in {filter_name} filter.')
            autoflat_config = flat_config.get("autoflats", {})
            try:
                self._take_autoflats(cameras_with_filter, observation, **autoflat_config)
            # Break out of loop if no longer twilight
            # Catch the error so the state machine keeps running
            except NotTwilightError as err:
                self.logger.warning(f"{err!r}")
                break

        self.logger.info('Finished flat-fielding.')
def prepare_cameras(self, drop=True, *args, **kwargs):
""" Make sure cameras are all cooled and ready.
Args:
drop (bool): If True, drop cameras that do not become ready in time. Default: True.
*args, **kwargs: Parsed to self.camera_group.wait_until_ready.
"""
self.logger.info(f"Preparing {len(self.cameras)} cameras.")
failed_cameras = self.camera_group.wait_until_ready(*args, **kwargs)
# Remove cameras that didn't become ready in time
if drop:
for cam_name in failed_cameras:
self.logger.debug(f'Removing {cam_name} from {self} for not being ready.')
self.remove_camera(cam_name)
    def take_observation_block(self, observation, cameras=None, timeout=60 * u.second,
                               remove_on_error=False, do_focus=True, safety_kwargs=None,
                               do_slew=True):
        """ Macro function to take an observation block.

        This function will perform:
            - slewing (when necessary)
            - fine focusing (when necessary)
            - observation exposures
            - safety checking

        Args:
            observation (Observation): The observation object.
            cameras (dict, optional): Dict of cam_name: camera pairs. If None (default), use all
                cameras.
            timeout (float, optional): The timeout in addition to the exposure time. Default 60s.
            remove_on_error (bool, default False): If True, remove cameras that timeout. If False,
                an error.Timeout is raised by the wait; it is caught and logged here so the
                block continues.
            do_focus (bool, optional): If True (default), fine focus when required.
            do_slew (bool, optional): If True (default), slew the telescope to the observation
                field when it changes.
            safety_kwargs (dict, optional): Extra kwargs to be parsed to safety function.

        Raises:
            RuntimeError: If safety check fails.
        """
        if cameras is None:
            cameras = self.cameras

        safety_kwargs = {} if safety_kwargs is None else safety_kwargs
        # Fail fast before any hardware is commanded.
        self._assert_safe(**safety_kwargs)

        # Set the sequence time of the observation
        if observation.seq_time is None:
            observation.seq_time = current_time(flatten=True)

        headers = self.get_standard_headers(observation=observation)

        # Take the observation block
        self.logger.info(f"Starting observation block for {observation}")

        # The start new set flag is True before we enter the loop and is set to False
        # immediately inside the loop. This allows the loop to start a new set in case
        # the set_is_finished condition is already satisfied.
        start_new_set = True
        current_field = None
        while (start_new_set or not observation.set_is_finished):
            start_new_set = False  # We don't want to start another set after this one

            # Perform the slew if necessary
            slew_required = (current_field != observation.field) and do_slew
            if slew_required:
                with self.safety_checking(**safety_kwargs):
                    self.slew_to_observation(observation)
                current_field = observation.field

            # Fine focus the cameras if necessary (always before the first exposure)
            focus_required = self.fine_focus_required or observation.current_exp_num == 0
            if do_focus and focus_required:
                with self.safety_checking(**safety_kwargs):
                    self.autofocus_cameras(blocking=True, filter_name=observation.filter_name)

            # Set a common start time for this batch of exposures
            headers['start_time'] = current_time(flatten=True)

            # Start the exposures and get events
            with self.safety_checking(**safety_kwargs):
                events = self.camera_group.take_observation(observation, headers=headers)

            # Wait for the exposures (blocking)
            # TODO: Use same timeout as camera client
            try:
                self._wait_for_camera_events(events, duration=observation.exptime + timeout,
                                             remove_on_error=remove_on_error, **safety_kwargs)
            except error.Timeout as err:
                # Log and continue so one bad batch doesn't abort the block.
                self.logger.error(f"{err!r}")
                self.logger.warning("Continuing with observation block after error.")

            # Explicitly mark the observation as complete
            # (AttributeError suppressed for observation types without this method)
            with suppress(AttributeError):
                observation.mark_exposure_complete()

            self.logger.info(f"Observation status: {observation.status}")
    def take_dark_observation(self, bias=False, **kwargs):
        """ Take a dark (or bias) observation block on each camera (blocking).

        Args:
            bias (bool, optional): If True, take Bias observation instead of dark observation.
                Default: False.
            **kwargs: Parsed to `self.take_observation_block`.
        """
        # Move telescope to park position
        if not self.mount.is_parked:
            self.logger.info("Moving telescope to park position for dark observation.")
            self.mount.park()

        # Create the observation
        # Keep the mount where it is since we are just taking darks
        position = self.mount.get_current_coordinates()
        ObsClass = BiasObservation if bias else DarkObservation
        observation = ObsClass(position=position)

        # Dark observations don't care if it's dark or not
        safety_kwargs = {"ignore": ["is_dark"]}

        # Can ignore weather safety if dome is closed
        # (AttributeError suppressed in case there is no dome attached)
        with suppress(AttributeError):
            if self.dome.is_closed:
                self.logger.warning(f"Ignoring weather safety for {observation}.")
                safety_kwargs["ignore"].append("good_weather")

        # Take the observation (blocking); no slewing or focusing for darks.
        self.take_observation_block(observation, do_focus=False, do_slew=False,
                                    safety_kwargs=safety_kwargs, **kwargs)
    def slew_to_observation(self, observation, min_solar_alt=10 * u.deg):
        """ Slew to the observation field coordinates.

        If the Sun is above `min_solar_alt`, the filterwheels are moved before the
        slew (to protect the cameras) and restored afterwards.

        Args:
            observation (Observation): The observation object.
            min_solar_alt (astropy.Quantity, optional): The minimum solar altitude above which the
                FWs will be moved to their dark positions before slewing.
        Raises:
            RuntimeError: If the mount rejects the target coordinates.
        """
        self.logger.info(f"Slewing to target coordinates for {observation}.")
        if not self.mount.set_target_coordinates(observation.field.coord):
            raise RuntimeError(f"Unable to set target coordinates for {observation.field}.")
        # Move FWs to dark pos if Sun too high to minimise damage potential
        move_fws = self.solar_altaz.alt > get_quantity_value(min_solar_alt, u.deg) * u.deg
        if move_fws:
            self.logger.warning("Solar altitude above minimum for safe slew. Moving FWs to dark"
                                " positions.")
            # Record current positions so we can put them back after slew
            # NOTE: These positions could include the dark position so can't use last_light_position
            current_fw_positions = {}
            for cam_name, cam in self.cameras.items():
                if cam.has_filterwheel:
                    current_fw_positions[cam_name] = cam.filterwheel.current_filter
            # NOTE(review): despite the log message, this moves the FWs to their
            # *current* positions, not dark positions — verify whether something
            # like filterwheel_move_to(dark_position=True) was intended.
            self.camera_group.filterwheel_move_to(current_fw_positions)
        self.mount.slew_to_target()
        if move_fws:
            self.logger.info("Moving FWs back to last positions.")
            self.camera_group.filterwheel_move_to(current_fw_positions)
# Private methods
def _create_autoguider(self):
guider_config = self.get_config('guider')
guider = Guide(**guider_config)
self.autoguider = guider
def _take_autoflats(
self, cameras, observation, target_scaling=0.17, scaling_tolerance=0.05, timeout=60,
bias=32, remove_on_error=False, sleep_time=300, evening_initial_flat_exptime=0.01,
morning_initial_flat_exptime=1, **kwargs):
""" Take flat fields using automatic updates for exposure times.
Args:
cameras (dict): Dict of camera name: Camera pairs.
observation: The flat field observation. TODO: Integrate with FlatFieldSequence.
target_scaling (float, optional): Required to be between [0, 1] so
target_adu is proportionally between 0 and digital saturation level.
Default: 0.17.
scaling_tolerance (float, optional): The minimum precision on the average counts
required to keep the exposure, expressed as a fraction of the dynamic range.
Default: 0.05.
timeout (float): The timeout on top of the exposure time, default 60s.
bias (int): The bias to subtract from the frames. TODO: Use a real bias image!
remove_on_error (bool, default False): If True, remove cameras that timeout. If False,
raise a TimeoutError instead.
**kwargs: Parsed to FlatFieldSequence.
"""
# set the initial exposure time
if self.is_past_midnight:
initial_exptime = morning_initial_flat_exptime
else:
initial_exptime = evening_initial_flat_exptime
# Create a flat field sequence for each camera
sequences = make_flat_field_sequences(cameras, target_scaling, scaling_tolerance,
bias, initial_exposure_time=initial_exptime, **kwargs)
# Loop until sequence has finished
self.logger.info(f"Starting flat field sequence for {len(self.cameras)} cameras.")
while True:
if not self.is_twilight:
raise NotTwilightError("No longer twilight. Aborting flat fields.")
# Slew to field
with self.safety_checking(horizon="twilight_max"):
self.slew_to_observation(observation)
# Get standard fits headers
headers = self.get_standard_headers(observation=observation)
events = {}
exptimes = {}
filenames = {}
start_times = {}
# Define function to start the exposures
def func(cam_name):
seq = sequences[cam_name]
camera = cameras[cam_name]
# Get exposure time, filename and current time
exptimes[cam_name] = seq.get_next_exptime(past_midnight=self.is_past_midnight)
filenames[cam_name] = observation.get_exposure_filename(camera)
start_times[cam_name] = current_time()
try:
events[cam_name] = camera.take_observation(
observation, headers=headers, filename=filenames[cam_name],
exptime=exptimes[cam_name])
except error.PanError as err:
self.logger.error(f"{err!r}")
self.logger.warning("Continuing with flat observation after error.")
# Start the exposures in parallel
dispatch_parallel(func, list(cameras.keys()))
# Wait for the exposures
self.logger.info('Waiting for flat field exposures to complete.')
duration = get_quantity_value(max(exptimes.values()), u.second) + timeout
try:
self._wait_for_camera_events(events, duration, remove_on_error=remove_on_error,
horizon="twilight_max")
except error.Timeout as err:
self.logger.error(f"{err!r}")
self.logger.warning("Continuing with flat observation after timeout error.")
# Mark the current exposure as complete
observation.mark_exposure_complete()
# Update the flat field sequences with new data
for cam_name in list(sequences.keys()):
# Remove sequence for any removed cameras
if cam_name not in self.cameras:
del sequences[cam_name]
continue
# Attempt to update the exposure sequence for this camera.
# If the exposure failed, use info from the last successful exposure.
try:
sequences[cam_name].update(filename=filenames[cam_name],
exptime=exptimes[cam_name],
time_start=start_times[cam_name])
except (KeyError, FileNotFoundError) as err:
self.logger.warning(f"Unable to update flat field sequence for {cam_name}:"
f" {err!r}")
# Log sequence status
status = sequences[cam_name].status
status["filter_name"] = observation.filter_name
self.logger.info(f"Flat field status for {cam_name}: {status}")
# Check if sequences are complete
if all([s.is_finished for s in sequences.values()]):
self.logger.info("All flat field sequences finished.")
break
# Check if counts are ok
if self.is_past_midnight:
# Terminate if Sun is coming up and all exposures are too bright
if all([s.min_exptime_reached for s in sequences.values()]):
self.logger.info(f"Terminating flat sequence for {observation.filter_name}"
f" filter because min exposure time reached.")
break
# Wait if Sun is coming up and all exposures are too faint
elif all([s.max_exptime_reached for s in sequences.values()]):
self.logger.info(f"All exposures are too faint. Waiting for {sleep_time}s")
self._safe_sleep(sleep_time, horizon="twilight_max")
else:
# Terminate if Sun is going down and all exposures are too faint
if all([s.max_exptime_reached for s in sequences.values()]):
self.logger.info(f"Terminating flat sequence for {observation.filter_name}"
f" filter because max exposure time reached.")
break
# Wait if Sun is going down and all exposures are too bright
elif all([s.max_exptime_reached for s in sequences.values()]):
self.logger.info(f"All exposures are too bright. Waiting for {sleep_time}s")
self._safe_sleep(sleep_time, horizon="twilight_max")
def _wait_for_camera_events(self, events, duration, remove_on_error=False, sleep=1, **kwargs):
""" Wait for camera events to be set.
Args:
events (dict of camera_name: threading.Event): The events to wait for.
duration (float): The total amount of time to wait for (should include exptime).
remove_on_error (bool, default False): If True, remove cameras that timeout. If False,
raise a TimeoutError instead.
sleep (float): Sleep this long between event checks. Default 1s.
**kwargs: Parsed to self._assert_safe.
"""
self.logger.debug(f'Waiting for {len(events)} events with timeout of {duration}.')
timer = CountdownTimer(duration)
while not timer.expired():
# Check safety here
self._assert_safe(**kwargs)
# Check if all cameras have finished
if all([e.is_set() for e in events.values()]):
break
time.sleep(sleep)
# Make sure events are set
for cam_name, event in events.items():
if not event.is_set():
if remove_on_error:
self.logger.warning(f"Timeout while waiting for camera event on {cam_name}. "
"Removing from observatory.")
self.remove_camera(cam_name)
else:
raise error.Timeout(f"Timeout while waiting for camera event on {cam_name}.")
def _focus_required(self, coarse=False):
""" Check if a focus is required based on current conditions.
Args:
coarse (bool): If True, check if we need to do a coarse focus. Default: False.
Returns:
bool: True if focus required, else False.
"""
focus_type = "coarse" if coarse else "fine"
# If a long time period has passed then focus again
last_focus_time = getattr(self, f"last_{focus_type}_focus_time")
interval = getattr(self, f"_{focus_type}_focus_interval")
if last_focus_time is None: # If we haven't focused yet
self.logger.info(f"{focus_type} focus required because we haven't focused yet.")
return True
if current_time() - last_focus_time > interval:
self.logger.info(f"{focus_type} focus required because of time difference.")
return True
# If there has been a large change in temperature then we need to focus again
last_focus_temp = getattr(self, f"last_{focus_type}_focus_temp")
temptol = getattr(self, f"_{focus_type}_focus_temptol")
if (last_focus_temp is not None) and (self.temperature is not None):
if abs(last_focus_temp - self.temperature) > temptol:
self.logger.info(f"{focus_type} focus required because of temperature change.")
return True
return False
def _assert_safe(self, *args, **kwargs):
""" Raise a RuntimeError if not safe to continue.
TODO: Raise a custom error type indicating lack of safety.
Args:
*args, **kwargs: Parsed to self.is_safe.
"""
if not self.is_safe(*args, **kwargs):
raise RuntimeError("Safety check failed!")
def _safe_sleep(self, duration, interval=1, *args, **kwargs):
""" Sleep for a specified amount of time while ensuring safety.
A RuntimeError is raised if safety fails while waiting.
Args:
duration (float or Quantity): The time to wait.
interval (float): The time in between safety checks.
*args, **kwargs: Parsed to is_safe.
Raises:
RuntimeError: If safety fails while waiting.
"""
self.logger.debug(f"Safe sleeping for {duration}")
timer = CountdownTimer(duration)
while not timer.expired():
self._assert_safe(*args, **kwargs)
time.sleep(interval)
| 44.300146 | 100 | 0.623228 | 29,320 | 0.969032 | 498 | 0.016459 | 2,330 | 0.077007 | 0 | 0 | 13,188 | 0.435866 |
21f6ac953977adb43d4446f137f14e3d19478056 | 285 | py | Python | dist/urls.py | tfmt/netboot | abd690d463dfd14488b0d295512d61ce5c5bc97d | [
"Apache-2.0"
] | null | null | null | dist/urls.py | tfmt/netboot | abd690d463dfd14488b0d295512d61ce5c5bc97d | [
"Apache-2.0"
] | null | null | null | dist/urls.py | tfmt/netboot | abd690d463dfd14488b0d295512d61ce5c5bc97d | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url
from dist import views
urlpatterns = [
    # Category list (index), category creation, and single-category detail views.
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^add$', views.AddCategoryView.as_view(), name='add_category'),
    # cat_id: numeric id captured from the URL (presumably the category pk —
    # verify against views.CategoryView).
    url(r'^(?P<cat_id>\d+)/$', views.CategoryView.as_view(), name='category'),
]
| 28.5 | 78 | 0.663158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.22807 |
21f7222a7e651ad58c05783910d629bf0a7924e4 | 3,980 | py | Python | src/tensorforce/tensorforce/tests/test_optimizers.py | linus87/drl_shape_optimization | 39e6b66bd5b70dfce07e145aafe815071bc1b6fe | [
"MIT"
] | 17 | 2020-12-28T16:25:47.000Z | 2022-03-27T18:28:44.000Z | src/tensorforce/tensorforce/tests/test_optimizers.py | linus87/drl_shape_optimization | 39e6b66bd5b70dfce07e145aafe815071bc1b6fe | [
"MIT"
] | 2 | 2021-04-18T03:40:02.000Z | 2022-01-24T08:40:10.000Z | src/tensorforce/tensorforce/tests/test_optimizers.py | linus87/drl_shape_optimization | 39e6b66bd5b70dfce07e145aafe815071bc1b6fe | [
"MIT"
] | 8 | 2020-12-23T05:59:52.000Z | 2022-03-28T12:06:35.000Z | # Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
from tensorforce.agents import VPGAgent
from tensorforce.tests.unittest_base import UnittestBase
class TestOptimizers(UnittestBase, unittest.TestCase):
    """Unit tests covering the Tensorforce optimizer configurations.

    Every test runs the shared UnittestBase.unittest() harness with the same
    single-float state / 3-value int action specification and only varies the
    optimizer spec, so the per-test boilerplate is factored into
    _unittest_optimizer().
    """

    agent = VPGAgent
    config = dict(update_mode=dict(batch_size=2))

    def _unittest_optimizer(self, name, optimizer):
        """Run the shared test harness with the given optimizer spec."""
        states = dict(type='float', shape=(1,))
        actions = dict(type='int', shape=(), num_values=3)
        self.unittest(name=name, states=states, actions=actions, optimizer=optimizer)

    def test_adam(self):
        self._unittest_optimizer('adam', dict(type='adam', learning_rate=1e-3))

    def test_clipped_step(self):
        self._unittest_optimizer('clipped-step', dict(
            type='clipped_step', optimizer=dict(type='adam', learning_rate=1e-3),
            clipping_value=1e-2
        ))

    def test_evolutionary(self):
        self._unittest_optimizer('evolutionary', dict(type='evolutionary', learning_rate=1e-3))

    def test_meta_optimizer_wrapper(self):
        # NOTE: no explicit 'type' key here — this exercises the implicit
        # meta-optimizer wrapper spec.
        self._unittest_optimizer('meta-optimizer-wrapper', dict(
            optimizer='adam', learning_rate=1e-3, multi_step=5, subsampling_fraction=0.5,
            clipping_value=1e-2, optimized_iterations=3
        ))

    def test_multi_step(self):
        self._unittest_optimizer('multi-step', dict(
            type='multi_step', optimizer=dict(type='adam', learning_rate=1e-3), num_steps=10
        ))

    def test_natural_gradient(self):
        self._unittest_optimizer('natural-gradient',
                                 dict(type='natural_gradient', learning_rate=1e-3))

    def test_optimized_step(self):
        self._unittest_optimizer('optimized-step', dict(
            type='optimized_step', optimizer=dict(type='adam', learning_rate=1e-3)
        ))

    def test_subsampling_step(self):
        self._unittest_optimizer('subsampling-step', dict(
            type='subsampling_step', optimizer=dict(type='adam', learning_rate=1e-3),
            fraction=0.5
        ))
| 32.357724 | 98 | 0.61407 | 3,179 | 0.798744 | 0 | 0 | 0 | 0 | 0 | 0 | 1,015 | 0.255025 |
21f908150b5a25cbfb04d6621c761e9d94cfeb15 | 1,981 | py | Python | main.py | materoy/strobe_light_coms | 2d59be7fe1e6f80e531ee2b9375bae0acb249e2d | [
"MIT"
] | null | null | null | main.py | materoy/strobe_light_coms | 2d59be7fe1e6f80e531ee2b9375bae0acb249e2d | [
"MIT"
] | null | null | null | main.py | materoy/strobe_light_coms | 2d59be7fe1e6f80e531ee2b9375bae0acb249e2d | [
"MIT"
] | null | null | null | import cv2
import numpy
import time
import iir_filter
from scipy import signal
import math
import matplotlib.pylab as pl
# This program detects and measures the frequency of strobe lights
def main():
    """Detect point strobe lights in the webcam feed and report their frequency.

    Samples frames for `sampling_time` seconds; each frame is converted to
    grayscale, blurred, low-pass filtered, and its brightest point is compared
    against `point_light_threshold` to count strobe flashes. The flash count
    is printed once per second as a frequency in Hz.
    """
    capture = cv2.VideoCapture(0)
    prev_frame = None  # currently unused; kept for future frame differencing
    point_light_threshold = 200

    # Design the low-pass filter once: signal.butter is deterministic and
    # loop-invariant, so rebuilding it every frame (as before) wasted work
    # in the hot capture loop.
    sos = signal.butter(2, [0.45], 'lowpass', analog=False, output='sos', fs=30)

    time_now = time.time()        # timestamp of the previous frame (frame-time measurement)
    last_report_time = time_now   # timestamp of the last once-per-second Hz report
    strobe_count = 0

    # Sampling time in seconds
    sampling_time: int = 10
    start_sampling_time = time.time()
    sampling_freq_data = dict()

    # Collects samples for [sampling_time] amount of time
    while time.time() - start_sampling_time < sampling_time:
        # Capture video frame by frame
        ret, frame = capture.read()

        frame_time = time.time() - time_now
        # Sampling frequency 1 / T
        sampling_frequency = int(1 / frame_time)
        sampling_freq_data.setdefault(time.time() - start_sampling_time, sampling_frequency)
        time_now = time.time()

        # Unfiltered frame output
        cv2.imshow("Unfiltered output", frame)

        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame = cv2.blur(frame, (5, 5))
        # Brightest point is measured on the blurred (pre-filter) frame.
        min_val, max_val, min_index, max_index = cv2.minMaxLoc(frame)

        # A fresh filter object per frame preserves the original (stateless
        # per-frame) filtering behaviour.
        filter = iir_filter.IIR_filter(sos)
        frame = filter.filter(frame)

        if max_val > point_light_threshold:
            cv2.circle(frame, center=max_index, radius=20, color=(0, 255, 255))
            strobe_count += 1

        # Report the flash count once per second.
        # BUGFIX: this previously compared against time_now, which is reset on
        # every frame, so the report (almost) never fired.
        if time.time() - last_report_time >= 1:
            print(f"{strobe_count} Hz")
            last_report_time = time.time()
            strobe_count = 0

        # Display the resulting frame
        cv2.imshow('Video capture', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
21f9319e8b6fa0bf2f0d17cbb3ff738368d5fe28 | 358 | py | Python | advanced/react-django/APITestProject/api/migrations/0002_auto_20210110_0406.py | rocabrera/python-learning | 578b6f6f64a59039956e2ff8eca9eb486127722f | [
"MIT"
] | 3 | 2021-04-16T01:30:05.000Z | 2021-07-22T21:00:45.000Z | advanced/react-django/APITestProject/api/migrations/0002_auto_20210110_0406.py | rocabrera/python-learning | 578b6f6f64a59039956e2ff8eca9eb486127722f | [
"MIT"
] | null | null | null | advanced/react-django/APITestProject/api/migrations/0002_auto_20210110_0406.py | rocabrera/python-learning | 578b6f6f64a59039956e2ff8eca9eb486127722f | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-01-10 04:06
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration (see the "Generated by Django" header):
    # renames the misspelled Article field 'descripton' to 'description'.

    dependencies = [
        ('api', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='article',
            old_name='descripton',
            new_name='description',
        ),
    ]
| 18.842105 | 47 | 0.578212 | 273 | 0.76257 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.27933 |
21f9ac98a22385277ede92007595928086780eae | 687 | py | Python | easy/array/reverse_integer/reverse_integer.py | deepshig/leetcode-solutions | 1e99e0852b8329bf699eb149e7dfe312f82144bc | [
"MIT"
] | null | null | null | easy/array/reverse_integer/reverse_integer.py | deepshig/leetcode-solutions | 1e99e0852b8329bf699eb149e7dfe312f82144bc | [
"MIT"
] | null | null | null | easy/array/reverse_integer/reverse_integer.py | deepshig/leetcode-solutions | 1e99e0852b8329bf699eb149e7dfe312f82144bc | [
"MIT"
] | null | null | null | import numpy
class Solution(object):
    """LeetCode 7 — Reverse Integer."""

    # Signed 32-bit integer range; results outside it must be reported as 0.
    INT_MIN = -2 ** 31
    INT_MAX = 2 ** 31 - 1

    def reverse(self, x):
        """Return *x* with its decimal digits reversed, preserving the sign.

        :type x: int (assumed to fit in a signed 32-bit integer)
        :rtype: int — the reversed value, or 0 if the result does not fit
            in a signed 32-bit integer.
        """
        sign = -1 if x < 0 else 1
        reversed_int = sign * int(str(abs(x))[::-1])
        # Check against the full signed range. The original compared only
        # against INT_MAX for both signs, ignoring the asymmetric lower bound.
        if reversed_int < self.INT_MIN or reversed_int > self.INT_MAX:
            return 0
        return reversed_int
# Quick demo of Solution.reverse on a few sample inputs.
solver = Solution()
for index, value in enumerate((123, -123, 120), start=1):
    print(f"Solution {index} : ", solver.reverse(value))
| 19.628571 | 42 | 0.505095 | 536 | 0.780204 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.147016 |
21f9b7045b329df6f1fc9828f5e918547ef50c4d | 917 | py | Python | core/dbt/perf_utils.py | dcereijodo/dbt | 204fc25c2168710d2549515ffe4846880b89fdec | [
"Apache-2.0"
] | 1 | 2021-04-08T03:33:33.000Z | 2021-04-08T03:33:33.000Z | core/dbt/perf_utils.py | azhard/dbt | 9cd7cbc9e35e5a7c8c4f17a3d113263f4421ab55 | [
"Apache-2.0"
] | 1 | 2021-04-30T21:33:11.000Z | 2021-04-30T21:33:11.000Z | core/dbt/perf_utils.py | azhard/dbt | 9cd7cbc9e35e5a7c8c4f17a3d113263f4421ab55 | [
"Apache-2.0"
] | 1 | 2021-06-04T16:00:44.000Z | 2021-06-04T16:00:44.000Z | """A collection of performance-enhancing functions that have to know just a
little bit too much to go anywhere else.
"""
from dbt.adapters.factory import get_adapter
from dbt.parser.manifest import load_manifest
from dbt.contracts.graph.manifest import Manifest
from dbt.config import RuntimeConfig
def get_full_manifest(config: RuntimeConfig) -> Manifest:
    """Load the full manifest, using the adapter's internal manifest if it
    exists to skip parsing internal (dbt + plugins) macros a second time.

    Also, make sure that we force-load the adapter's manifest, so it gets
    attached to the adapter for any methods that need it.
    """
    adapter = get_adapter(config)  # type: ignore
    internal: Manifest = adapter.load_internal_manifest()

    def set_header(manifest: Manifest) -> None:
        # Callback used by load_manifest: attach the query header to the
        # adapter's connections once the manifest is available.
        adapter.connections.set_query_header(manifest)

    return load_manifest(config, internal, set_header)
| 38.208333 | 75 | 0.760087 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 419 | 0.456925 |
21fba08b9abc98fedd07afe43100cb26b94bd135 | 7,576 | py | Python | RoadDamageGAN/utils.py | ZhangXG001/RoadDamgeDetection | 2880545f595368e8076ede2015335f6537f44b2a | [
"MIT"
] | 7 | 2020-11-04T06:38:18.000Z | 2022-03-09T07:13:30.000Z | RoadDamageGAN/utils.py | ZhangXG001/RoadDamgeDetection | 2880545f595368e8076ede2015335f6537f44b2a | [
"MIT"
] | 1 | 2020-11-26T07:28:31.000Z | 2020-11-26T07:28:31.000Z | RoadDamageGAN/utils.py | ZhangXG001/RoadDamgeDetection | 2880545f595368e8076ede2015335f6537f44b2a | [
"MIT"
] | 1 | 2021-03-14T09:00:17.000Z | 2021-03-14T09:00:17.000Z | import tensorflow as tf
from tensorflow.contrib import slim
from scipy import misc
import os, random
import numpy as np
from glob import glob
from keras.utils import np_utils
try:
import xml.etree.cElementTree as ET #解析xml的c语言版的模块
except ImportError:
import xml.etree.ElementTree as ET
class ImageData:
    """Builds a labelled image dataset from a directory tree and provides a
    TensorFlow preprocessing function.

    Expects *data_path* to contain one sub-directory per class; every file in
    a class directory is treated as an image of that class.
    """

    def __init__(self, data_path, img_shape=(64,64,1), augment_flag=False, data_type='None', img_type='jpg', pad_flag=False, label_size=8):
        # data_path: root folder with one sub-directory per class.
        self.data_path = data_path
        self.data_type = data_type
        self.img_shape = img_shape
        self.img_h = img_shape[0]
        self.img_w = img_shape[1]
        self.channels = img_shape[2]
        self.augment_flag = augment_flag  # random left/right flip when True
        self.img_type = img_type  # one of 'jpg', 'png', 'bmp'
        self.pad_flag = pad_flag  # stored but not used within this class
        self.label_size = label_size  # length of the one-hot label vector

        # Class names are the sub-directory names of data_path.
        self.class_names = os.listdir(self.data_path)

        self.train_dataset = []
        self.train_label = []
        images = []  # unused
        for cl_name in self.class_names:
            img_names = os.listdir(os.path.join(self.data_path, cl_name))
            for img_name in img_names:
                self.train_dataset.append(os.path.abspath(os.path.join(self.data_path, cl_name, img_name)))
                hot_cl_name = self.get_class_one_hot(cl_name)
                self.train_label.append(hot_cl_name)
        # NOTE(review): this reshape assumes label_size == len(self.class_names)
        # (the one-hot vectors have len(class_names) entries) — verify.
        self.train_label = np.reshape(self.train_label, (len(self.train_label), self.label_size))

    def get_class_one_hot(self, class_str):
        """Return the one-hot label vector for the class named *class_str*."""
        label_encoded = self.class_names.index(class_str)
        label_hot = np_utils.to_categorical(label_encoded, len(self.class_names))
        label_hot = label_hot  # no-op assignment (kept as-is)
        return label_hot

    def image_processing(self, filename, label):
        """Read, decode, resize and scale one image to [-1, 1] (TF graph op).

        Intended for dataset mapping: takes (filename, label) and returns
        (image, label).
        """
        x = tf.read_file(filename)
        if self.img_type == 'jpg':
            x_decode = tf.image.decode_jpeg(x, channels=self.channels)
        if self.img_type == 'png':
            x_decode = tf.image.decode_png(x, channels=self.channels)
        if self.img_type == 'bmp':
            # decode_bmp has no channels argument, so convert afterwards.
            x_decode = tf.image.decode_bmp(x)
            if self.channels == 1 :
                x_decode = tf.image.rgb_to_grayscale(x_decode)
        img = tf.image.resize_images(x_decode, [self.img_h, self.img_w])
        img = tf.reshape(img, [self.img_h, self.img_w, self.channels])
        # Scale uint8 [0, 255] to float [-1, 1].
        img = tf.cast(img, tf.float32) / 127.5 - 1

        if self.augment_flag :
            # Apply augmentation with probability 0.5.
            img = tf.cond(pred=tf.greater_equal(tf.random_uniform(shape=[], minval=0.0, maxval=1.0), 0.5),
                          true_fn=lambda: augmentation(img),
                          false_fn=lambda: img)

        return img, label
def one_hot(batch_size, mask_size, location):
    """Return a (batch_size, mask_size) tensor whose rows are all the one-hot
    encoding of index *location* (TensorFlow graph op)."""
    l = tf.constant([location])
    m = tf.one_hot(l,mask_size,1.,0.)
    # Repeat the single one-hot row for every item in the batch.
    m = tf.tile(m,[batch_size,1])
    return m
def load_test_data(image_path, size_h=256, size_w=256):
    """Load a single RGB image, resize it, and scale it to [-1, 1] with a
    leading batch dimension of 1.

    NOTE(review): scipy.misc.imread/imresize were removed from modern SciPy
    (>= 1.2/1.3); this code needs an old SciPy with Pillow installed —
    consider migrating to imageio/PIL.
    """
    img = misc.imread(image_path, mode='RGB')
    img = misc.imresize(img, [size_h, size_w])
    img = np.expand_dims(img, axis=0)
    img = preprocessing(img)

    return img
def preprocessing(x):
    """Scale pixel values from [0, 255] to the range [-1, 1]."""
    return x / 127.5 - 1
def augmentation(image):
    """Randomly flip *image* left/right (TF graph op).

    Brightness/contrast/saturation variants are left commented out below.
    """
    seed = random.randint(0, 2 ** 31 - 1)
    image = tf.image.random_flip_left_right(image, seed=seed)
    # image = tf.image.random_brightness(image,max_delta=0.2)
    # image = tf.image.random_contrast(image, 0.5, 1.5)
    # image = tf.clip_by_value(image,-1.,1.)
    # image = tf.image.random_saturation(image, 0, 0.3)
    return image
def GetAnnotBoxLoc(AnotPath):
    """Parse a Pascal-VOC annotation XML file.

    Args:
        AnotPath: path to the VOC-style annotation (.xml) file.
    Returns:
        dict mapping each object class name to a list of bounding boxes,
        each box given as [xmin, ymin, xmax, ymax] in pixel coordinates.
    """
    root = ET.ElementTree(file=AnotPath).getroot()
    boxes_by_class = {}
    for obj in root.findall('object'):
        class_name = obj.find('name').text
        bndbox = obj.find('bndbox')
        box = [int(bndbox.find(tag).text) for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
        # Group boxes of the same class together.
        boxes_by_class.setdefault(class_name, []).append(box)
    return boxes_by_class
def save_images(images, size, image_path):
    """Save a batch of [-1, 1] images as one tiled grid image at *image_path*.

    *size* is (rows, cols) of the grid; see merge() for the tiling layout.
    """
    return imsave(inverse_transform(images), size, image_path)
def inverse_transform(images):
    """Map image values from the generator range [-1, 1] back to [0, 1]."""
    return (images + 1.) * 0.5
def imsave(images, size, path):
    """Tile *images* into a (rows, cols) grid and write it to *path*.

    NOTE(review): scipy.misc.imsave was removed from modern SciPy (>= 1.2);
    this needs an old SciPy with Pillow — consider imageio.imwrite instead.
    """
    return misc.imsave(path, merge(images, size))
def merge(images, size):
    """Tile a batch of images into a single (rows*h, cols*w, 3) grid.

    Args:
        images: array of shape (n, h, w, 3); images are placed row-major.
        size: (rows, cols) of the output grid.
    Returns:
        np.ndarray of shape (rows*h, cols*w, 3).
    """
    rows, cols = size
    h, w = images.shape[1], images.shape[2]
    canvas = np.zeros((h * rows, w * cols, 3))
    for idx, image in enumerate(images):
        row, col = divmod(idx, cols)
        canvas[h * row:h * (row + 1), w * col:w * (col + 1), :] = image
    return canvas
def show_all_variables():
    """Print a summary (name/shape/size) of all trainable TF variables."""
    model_vars = tf.trainable_variables()
    slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def check_folder(log_dir):
    """Ensure *log_dir* exists on disk (creating parents as needed) and return it."""
    missing = not os.path.exists(log_dir)
    if missing:
        os.makedirs(log_dir)
    return log_dir
def str2bool(x):
    """Parse a CLI-style boolean string: only 'true' (any case) is True.

    BUGFIX: the original used ``x.lower() in ('true')`` — ``('true')`` is a
    plain string, not a tuple, so ``in`` performed a substring test and
    'rue', 't', and '' all evaluated as True.
    """
    return x.lower() == 'true'
def summary(tensor_collection, summary_type=['mean', 'stddev', 'max', 'min', 'sparsity', 'histogram']):
    """
    Attach TensorBoard summaries to one or more tensors.

    usage:
    1. summary(tensor)
    2. summary([tensor_a, tensor_b])
    3. summary({tensor_a: 'a', tensor_b: 'b})
    """
    # NOTE(review): mutable default argument (list) — harmless here because it
    # is never mutated, but a tuple would be safer.

    def _summary(tensor, name, summary_type=['mean', 'stddev', 'max', 'min', 'sparsity', 'histogram']):
        """ Attach a lot of summaries to a Tensor. """
        if name is None:
            # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
            # session. This helps the clarity of presentation on tensorboard.
            # NOTE(review): `re` does not appear in this module's visible imports,
            # so this branch would raise NameError — verify before relying on
            # name=None.
            name = re.sub('%s_[0-9]*/' % 'tower', '', tensor.name)
            name = re.sub(':', '-', name)

        with tf.name_scope('summary_' + name):
            summaries = []
            if len(tensor.shape) == 0:
                # Scalar tensor: a single scalar summary is all that makes sense.
                summaries.append(tf.summary.scalar(name, tensor))
            else:
                if 'mean' in summary_type:
                    mean = tf.reduce_mean(tensor)
                    summaries.append(tf.summary.scalar(name + '/mean', mean))
                if 'stddev' in summary_type:
                    mean = tf.reduce_mean(tensor)
                    stddev = tf.sqrt(tf.reduce_mean(tf.square(tensor - mean)))
                    summaries.append(tf.summary.scalar(name + '/stddev', stddev))
                if 'max' in summary_type:
                    summaries.append(tf.summary.scalar(name + '/max', tf.reduce_max(tensor)))
                if 'min' in summary_type:
                    summaries.append(tf.summary.scalar(name + '/min', tf.reduce_min(tensor)))
                if 'sparsity' in summary_type:
                    summaries.append(tf.summary.scalar(name + '/sparsity', tf.nn.zero_fraction(tensor)))
                if 'histogram' in summary_type:
                    summaries.append(tf.summary.histogram(name, tensor))
            return tf.summary.merge(summaries)

    # Normalise the input so it is always iterable.
    if not isinstance(tensor_collection, (list, tuple, dict)):
        tensor_collection = [tensor_collection]

    with tf.name_scope('summaries'):
        summaries = []
        if isinstance(tensor_collection, (list, tuple)):
            # Names are derived from each tensor's own name.
            for tensor in tensor_collection:
                summaries.append(_summary(tensor, None, summary_type))
        else:
            # Dict form: caller supplies explicit display names.
            for tensor, name in tensor_collection.items():
                summaries.append(_summary(tensor, name, summary_type))
        return tf.summary.merge(summaries)
| 38.653061 | 139 | 0.619324 | 2,317 | 0.29344 | 0 | 0 | 0 | 0 | 0 | 0 | 1,367 | 0.173126 |
21fbf33def7e2a7ceecd0cf2a88408cb26ed3962 | 1,812 | py | Python | release/stubs.min/System/Security/AccessControl_parts/PrivilegeNotHeldException.py | tranconbv/ironpython-stubs | a601759e6c6819beff8e6b639d18a24b7e351851 | [
"MIT"
] | null | null | null | release/stubs.min/System/Security/AccessControl_parts/PrivilegeNotHeldException.py | tranconbv/ironpython-stubs | a601759e6c6819beff8e6b639d18a24b7e351851 | [
"MIT"
] | null | null | null | release/stubs.min/System/Security/AccessControl_parts/PrivilegeNotHeldException.py | tranconbv/ironpython-stubs | a601759e6c6819beff8e6b639d18a24b7e351851 | [
"MIT"
] | null | null | null | class PrivilegeNotHeldException(UnauthorizedAccessException):
"""
The exception that is thrown when a method in the System.Security.AccessControl namespace attempts to enable a privilege that it does not have.
PrivilegeNotHeldException()
PrivilegeNotHeldException(privilege: str)
PrivilegeNotHeldException(privilege: str,inner: Exception)
"""
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return PrivilegeNotHeldException()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def GetObjectData(self,info,context):
"""
GetObjectData(self: PrivilegeNotHeldException,info: SerializationInfo,context: StreamingContext)
Sets the info parameter with information about the exception.
info: The System.Runtime.Serialization.SerializationInfo that holds the serialized object data about the exception being thrown.
context: The System.Runtime.Serialization.StreamingContext that contains contextual information about the source or destination.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,privilege=None,inner=None):
"""
__new__(cls: type)
__new__(cls: type,privilege: str)
__new__(cls: type,privilege: str,inner: Exception)
"""
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
PrivilegeName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the name of the privilege that is not enabled.
Get: PrivilegeName(self: PrivilegeNotHeldException) -> str
"""
SerializeObjectState=None
| 37.75 | 215 | 0.742826 | 1,808 | 0.997792 | 0 | 0 | 197 | 0.10872 | 0 | 0 | 1,301 | 0.717991 |
21fc6b78c90650153fe0a66ddcecb6e108d72054 | 367 | py | Python | apc/apc/apc_config.py | jmsung/APC | 9f0e065aa748a4d041b783b07cd8078715d39625 | [
"MIT"
] | null | null | null | apc/apc/apc_config.py | jmsung/APC | 9f0e065aa748a4d041b783b07cd8078715d39625 | [
"MIT"
] | null | null | null | apc/apc/apc_config.py | jmsung/APC | 9f0e065aa748a4d041b783b07cd8078715d39625 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 11:30:05 2019
@author: Jongmin Sung
"""
# config.py
import os
from pathlib import Path
from inspect import currentframe, getframeinfo
fname = getframeinfo(currentframe()).filename # current file name
current_dir = Path(fname).resolve().parent
data_dir = Path(fname).resolve().parent.parent.parent/'data'
| 16.681818 | 65 | 0.716621 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.346049 |
21fdcfcb45fd6e4c2b13357b9059ca59108a10c0 | 968 | py | Python | recipes/happly/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 562 | 2019-09-04T12:23:43.000Z | 2022-03-29T16:41:43.000Z | recipes/happly/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 9,799 | 2019-09-04T12:02:11.000Z | 2022-03-31T23:55:45.000Z | recipes/happly/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 1,126 | 2019-09-04T11:57:46.000Z | 2022-03-31T16:43:38.000Z | from conans import ConanFile, tools
class HapplyConan(ConanFile):
    # Conan recipe for happly, a header-only C++ PLY parser.
    name = "happly"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/nmwsharp/happly"
    topics = ("conan", "happly", "ply", "3D")
    license = "MIT"
    description = "A C++ header-only parser for the PLY file format. Parse .ply happily!"
    # compiler is only needed for the cppstd validation below; the package
    # itself is header-only (see package_id).
    settings = "compiler"
    no_copy_source = True

    @property
    def _source_subfolder(self):
        # Folder the upstream sources are unpacked into.
        return "source_subfolder"

    def validate(self):
        # happly requires at least C++11 when the consumer pins a cppstd.
        if self.settings.compiler.cppstd:
            tools.check_min_cppstd(self, 11)

    def source(self):
        tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)

    def package(self):
        # Header-only: ship only the license and the single header.
        self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
        self.copy("happly.h", src=self._source_subfolder, dst="include")

    def package_id(self):
        # Header-only package: the binary id does not depend on settings.
        self.info.header_only()
21fdeb6692cc16bcf769e03462d13ce0331dfbad | 48 | py | Python | Weather-Data-Collector/API_key.py | Sachinsingh14/Python-Projects | 1edba3574b618bc59c68a7647217a7957c604878 | [
"Apache-2.0"
] | 1 | 2021-10-18T14:52:42.000Z | 2021-10-18T14:52:42.000Z | Weather-Data-Collector/API_key.py | Sachinsingh14/Python-Projects | 1edba3574b618bc59c68a7647217a7957c604878 | [
"Apache-2.0"
] | 1 | 2021-10-18T15:30:50.000Z | 2021-10-18T15:34:24.000Z | Weather-Data-Collector/API_key.py | Sachinsingh14/Python-Projects | 1edba3574b618bc59c68a7647217a7957c604878 | [
"Apache-2.0"
] | 1 | 2021-10-18T15:20:48.000Z | 2021-10-18T15:20:48.000Z |
api_key = "1be3b57d0592f2096543bc46ebf302f0"
| 16 | 45 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.708333 |
21fe43d98daef7dd3ebf40de9036c681e2844778 | 8,798 | py | Python | balsam/management/commands/balsam_service.py | hep-cce/hpc-edge-service | 57f2b9252d21d478eabe06cbdced5b623f08c75f | [
"BSD-3-Clause"
] | null | null | null | balsam/management/commands/balsam_service.py | hep-cce/hpc-edge-service | 57f2b9252d21d478eabe06cbdced5b623f08c75f | [
"BSD-3-Clause"
] | null | null | null | balsam/management/commands/balsam_service.py | hep-cce/hpc-edge-service | 57f2b9252d21d478eabe06cbdced5b623f08c75f | [
"BSD-3-Clause"
] | null | null | null | import os,sys,logging,multiprocessing,Queue,traceback
logger = logging.getLogger(__name__)
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from balsam import models,BalsamJobReceiver,QueueMessage
from common import DirCleaner,log_uncaught_exceptions,TransitionJob
from balsam import scheduler
from balsam.schedulers import exceptions,jobstates
# assign this function to the system exception hook
sys.excepthook = log_uncaught_exceptions.log_uncaught_exceptions
class Command(BaseCommand):
help = 'Start Balsam Service, which monitors the message queue for new jobs and submits them to the local batch system.'
logger.info('''
>>>>> Starting Balsam Service <<<<<
>>>>> pid: ''' + str(os.getpid()) + ''' <<<<<
''')
def handle(self, *args, **options):
try:
logger.debug('starting BalsamJobReceiver')
subprocesses = {}
# start the balsam job receiver in separate thread
try:
p = BalsamJobReceiver.BalsamJobReceiver()
p.start()
subprocesses['BalsamJobReceiver'] = p
except Exception,e:
logger.exception(' Received Exception while trying to start job receiver: ' + str(e))
raise
# setup timer for cleaning the work folder of old files
logger.debug('creating DirCleaner')
workDirCleaner = DirCleaner.DirCleaner(settings.BALSAM_WORK_DIRECTORY,
settings.BALSAM_DELETE_OLD_WORK_PERIOD,
settings.BALSAM_DELETE_OLD_WORK_AGE,
)
# create the balsam service queue which subprocesses use to commicate
# back to the the service. It is also used to wake up the while-loop
logger.debug('creating balsam_service_queue')
balsam_service_queue = multiprocessing.Queue()
jobs_in_transition_by_id = {}
# this is the loop that never ends, yes it goes on and on my friends...
while True:
logger.debug('begin service loop ')
# loop over queued jobs and check their status
# also look for jobs that have been submitted but are not in the queued or running state, which
# may mean they have finished or exited.
logger.debug( ' checking for active jobs ')
active_jobs = models.BalsamJob.objects.filter(state__in = models.CHECK_STATUS_STATES)
if len(active_jobs) > 0:
logger.info( 'monitoring ' + str(len(active_jobs)) + ' active jobs')
else:
logger.debug(' no active jobs')
for job in active_jobs:
# update job's status
try:
jobstate = scheduler.get_job_status(job)
if jobstate == jobstates.JOB_RUNNING and job.state != models.RUNNING.name:
job.state = models.RUNNING.name
elif jobstate == jobstates.JOB_QUEUED and job.state != models.QUEUED.name:
job.state = models.QUEUED.name
elif jobstate == jobstates.JOB_FINISHED and job.state != models.EXECUTION_FINISHED.name:
job.state = models.EXECUTION_FINISHED.name
#scheduler.postprocess(job) <<< check on this...
else:
logger.debug('job pk=' + str(job.pk) + ' remains in state ' + str(jobstate))
continue # jump to next job, skip remaining actions
job.save(update_fields=['state'])
models.send_status_message(job,'Job entered ' + job.state + ' state')
except exceptions.JobStatusFailed,e:
message = 'get_job_status failed for pk='+str(job.pk)+': ' + str(e)
logger.error(message)
# TODO: Should I fail the job?
models.send_status_message(job,message)
except Exception,e:
message = 'failed to get status for pk='+str(job.pk)+', exception: ' + str(e)
logger.error(message)
# TODO: Should I fail the job?
models.send_status_message(job,message)
# first loop over jobs in transition and remove entries that are complete
for pk in jobs_in_transition_by_id.keys():
proc = jobs_in_transition_by_id[pk]
if not proc.is_alive():
# did subprocess exit cleanly with exitcode == 0
if proc.exitcode != 0:
logger.error('transition subprocess for pk=' + str(pk)
+ ' returned exit code ' + str(proc.exitcode))
# probably want to do other things to recover from error?
del jobs_in_transition_by_id[pk]
# see if any jobs are ready to transition, but exclude jobs already in transition
transitionable_jobs = models.BalsamJob.objects.filter(state__in=models.TRANSITIONABLE_STATES).exclude(pk__in=jobs_in_transition_by_id.keys())
logger.debug( ' found ' + str(len(transitionable_jobs)) + ' in states that need to be transitioned ')
# loop over jobs and transition
for job in transitionable_jobs:
# place a limit on the number of concurrent threads to avoid overloading CPU
if len(jobs_in_transition_by_id) < settings.BALSAM_MAX_CONCURRENT_TRANSITIONS:
logger.debug(' creating job transition ')
proc = TransitionJob.TransitionJob(
job.pk,
balsam_service_queue,
models.BalsamJob,
models.STATES_BY_NAME[job.state].transition_function
)
logger.debug(' start ')
proc.start()
jobs_in_transition_by_id[job.pk] = proc
else:
logger.debug(' too many jobs currently transitioning '
+ str(len(jobs_in_transition_by_id)) + ' and max is '
+ str(settings.BALSAM_MAX_CONCURRENT_TRANSITIONS))
# clean work directory periodically
if settings.BALSAM_DELETE_OLD_WORK:
workDirCleaner.clean()
# loop over running process and check status
for name,proc in subprocesses.iteritems():
if not proc.is_alive():
logger.info(' subprocess ' + name + ' has stopped with returncode ' + str(proc.exitcode) )
# block on getting message from the queue where subprocesses will send messages
try:
logger.debug('getting message from queue, blocking for '
+ str(settings.BALSAM_SERVICE_PERIOD) + ' seconds')
qmsg = balsam_service_queue.get(block=True,timeout=settings.BALSAM_SERVICE_PERIOD)
# act on messages
logger.debug('Received queue message code: ' + QueueMessage.msg_codes[qmsg.code])
logger.debug('Received queue message: ' + qmsg.message)
if qmsg.code == QueueMessage.TransitionComplete:
logger.debug('Transition Succeeded')
elif qmsg.code == QueueMessage.TransitionDbConnectionFailed:
logger.error('Transition DB connection failed: ' + qmsg.message)
job = models.BalsamJob.objects.get(pk=qmsg.pk)
job.state = models.STATE_BY_NAME[job.state].failed_state
job.save(update_fields=['state'])
elif qmsg.code == QueueMessage.TransitionDbRetrieveFailed:
logger.error('Transition failed to retrieve job from DB: ' + qmsg.message)
job = models.BalsamJob.objects.get(pk=qmsg.pk)
job.state = models.STATE_BY_NAME[job.state].failed_state
job.save(update_fields=['state'])
elif qmsg.code == QueueMessage.TransitionFunctionException:
logger.error('Exception received while running transition function: ' + qmsg.message)
job = models.BalsamJob.objects.get(pk=qmsg.pk)
job.state = models.STATE_BY_NAME[job.state].failed_state
job.save(update_fields=['state'])
else:
logger.error('No recognized QueueMessage code')
except Queue.Empty,e:
logger.debug('no messages on queue')
logger.info(' Balsam Service Exiting ')
except KeyboardInterrupt,e:
logger.info('Balsam Service Exiting')
return
| 50.855491 | 153 | 0.587406 | 8,280 | 0.941123 | 0 | 0 | 0 | 0 | 0 | 0 | 2,507 | 0.284951 |
21ff67c59d30d6f1412fcfe74d55aa15df4f7837 | 857 | py | Python | FlightPlan_DS.py | dsimmons123/tello-flight-2021 | fbf86e1bfcb518395c29516938ee816f0e603984 | [
"MIT"
] | null | null | null | FlightPlan_DS.py | dsimmons123/tello-flight-2021 | fbf86e1bfcb518395c29516938ee816f0e603984 | [
"MIT"
] | null | null | null | FlightPlan_DS.py | dsimmons123/tello-flight-2021 | fbf86e1bfcb518395c29516938ee816f0e603984 | [
"MIT"
] | 1 | 2021-11-16T22:33:18.000Z | 2021-11-16T22:33:18.000Z | from djitellopy import Tello
from time import sleep
# Initialize and Connect
tello = Tello()
tello.connect()
# Takeoff and move up to 6 feet (183cm)
tello.takeoff()
tello.move_up(101)
# Move forward (east) 5 feet (152cm)
tello.move_forward(152)
sleep(.5)
# rotate 90 degrees CCW
tello.rotate_counter_clockwise(90)
# move forward (north) 6 feet (183cm)
tello.move_forward(183)
sleep(.5)
# rotate CW 90 degrees
tello.rotate_clockwise(90)
# move down to 3 feet (91cm)
tello.move_down(91)
# move forward (east)
tello.move_forward(91)
sleep(.5)
# rotate 90 degrees CW
tello.rotate_clockwise(90)
# move up 1 foot (to 4 feet, 121cm)
tello.move_up(30)
# move forward 3 feet (91cm)
tello.move_forward(91)
sleep(.5)
# rotate CCW 90 degrees
tello.rotate_counter_clockwise(90)
# move forward 6 feet (183cm)
tello.move_forward(183)
sleep(.5)
tello.land()
| 16.480769 | 39 | 0.740957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 367 | 0.428238 |
21ff6ed5659b080f8d47c705b13562c2b1de1ccc | 882 | py | Python | dd.py | GPrathap/rrt-algorithms | b97af0b57306cdbc0e148f5c086345571d34e823 | [
"MIT"
] | null | null | null | dd.py | GPrathap/rrt-algorithms | b97af0b57306cdbc0e148f5c086345571d34e823 | [
"MIT"
] | null | null | null | dd.py | GPrathap/rrt-algorithms | b97af0b57306cdbc0e148f5c086345571d34e823 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import math
# img = cv2.imread('/home/geesara/Pictures/bp8OO.jpg', 0)
# img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)[1] # ensure binary
# ret, labels = cv2.connectedComponents(img)
#
# print("Number of labels" , len(labels))
#
# def imshow_components(labels):
# # Map component labels to hue val
# label_hue = np.uint8(179*labels/np.max(labels))
# blank_ch = 255*np.ones_like(label_hue)
# labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
#
# # cvt to BGR for display
# labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)
#
# # set bg label to black
# labeled_img[label_hue==0] = 0
#
# cv2.imshow('labeled.png', labeled_img)
# cv2.waitKey()
# imshow_components(labels)
def sigmoid(x):
return 1 / (1 + math.exp(-x))
d = sigmoid(2)
ddf = 45
f = 0.869
P = 0.645
R = (f*P)/(2*P-f) | 21.512195 | 75 | 0.655329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 700 | 0.793651 |
1d0099bd08d14b67316e62ac1133b0502a7f11ee | 369 | py | Python | jts/backend/review/migrations/0003_auto_20191011_1025.py | goupaz/babylon | 4e638d02705469061e563fec349676d8faa9f648 | [
"MIT"
] | 1 | 2019-08-08T09:03:17.000Z | 2019-08-08T09:03:17.000Z | backend/review/migrations/0003_auto_20191011_1025.py | goupaz/website | ce1bc8b6c52ee0815a7b98842ec3bde0c20e0add | [
"Apache-2.0"
] | 2 | 2020-10-09T19:16:09.000Z | 2020-10-10T20:40:41.000Z | jts/backend/review/migrations/0003_auto_20191011_1025.py | goupaz/babylon-hackathon | 4e638d02705469061e563fec349676d8faa9f648 | [
"MIT"
] | 1 | 2019-07-21T01:42:21.000Z | 2019-07-21T01:42:21.000Z | # Generated by Django 2.2 on 2019-10-11 17:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('review', '0002_auto_20191009_1119'),
]
operations = [
migrations.RenameField(
model_name='review',
old_name='is_deleted',
new_name='is_rejected',
),
]
| 19.421053 | 46 | 0.593496 | 286 | 0.775068 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.300813 |
1d0414064a76791a07370cb0449ad44c12e6bd53 | 556 | py | Python | src/148.py | cloudzfy/euler | b82efad753ee98375fd40ec4e3989be57828e82c | [
"MIT"
] | 12 | 2016-10-19T09:03:20.000Z | 2021-01-10T10:53:23.000Z | src/148.py | cloudzfy/euler | b82efad753ee98375fd40ec4e3989be57828e82c | [
"MIT"
] | null | null | null | src/148.py | cloudzfy/euler | b82efad753ee98375fd40ec4e3989be57828e82c | [
"MIT"
] | 6 | 2018-09-12T03:13:58.000Z | 2021-07-07T00:29:43.000Z | # We can easily verify that none of the entries in the first seven
# rows of Pascal's triangle are divisible by 7:
# 1
# 1 1
# 1 2 1
# 1 3 3 1
# 1 4 6 4 1
# 1 5 10 10 5 1
# 1 6 15 20 15 6 1
# However, if we check the first one hundred rows, we will find
# that only 2361 of the 5050 entries are not divisible by 7.
# Find the number of entries which are not divisible by 7 in the
# first one billion (10^9) rows of Pascal's triangle. | 34.75 | 66 | 0.546763 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 541 | 0.973022 |
1d058aadd2ea53eaf7bc8c3792809f3c95cbd8ca | 2,056 | py | Python | algorithms_in_python/_12_sorting_and_selection/examples/quick_select.py | junteudjio/algorithms_in_python | 90ceced09828aedf845605e5236f48ea92a4419e | [
"MIT"
] | null | null | null | algorithms_in_python/_12_sorting_and_selection/examples/quick_select.py | junteudjio/algorithms_in_python | 90ceced09828aedf845605e5236f48ea92a4419e | [
"MIT"
] | null | null | null | algorithms_in_python/_12_sorting_and_selection/examples/quick_select.py | junteudjio/algorithms_in_python | 90ceced09828aedf845605e5236f48ea92a4419e | [
"MIT"
] | 1 | 2018-10-15T06:28:45.000Z | 2018-10-15T06:28:45.000Z | from random import shuffle
__author__ = 'Junior Teudjio'
def quick_select(l, k, find_largest=True):
"""
return the k_th largest/smallest element of list l
Parameters
----------
l : list
k : int
find_largest : Boolean
True if return the k_th largest element False if return the k-th smallest element
Returns
-------
element of list l
"""
def _partition(l, pivot, left, right):
i,j = left + 1, right-1
while i <= j:
while i <= j:
if l[i] <= pivot:
i += 1
else:
break
while i <= j:
if pivot < l[j]:
j -= 1
else:
break
# if the two pointers have not cross we need to swap positions and update pointers
if i <= j:
l[j], l[i] = l[i], l[j]
i += 1
j -= 1
return j
def _quick_select(l, k, left, right):
pivot = l[left]
pivot_new_position = _partition(l, pivot, left, right)
if pivot_new_position == k-1:
return pivot_new_position
elif k-1 < pivot_new_position:
return _quick_select(l, k, left, pivot_new_position)
else:
return _quick_select(l, k, pivot_new_position+1, right)
if len(l) == 0 or k == 0 or len(l) < k:
return None
# shuffle the list to make sure the partition steps doesn't take 0(n) time if the list is 'almost' sorted
shuffle(l)
k_smallest_idx = _quick_select(l, k, left=0, right=len(l))
if find_largest:
return len(l)-1 - k_smallest_idx
else:
return k_smallest_idx
def find_median(l):
return quick_select(l, len(l)//2)
if __name__ == '__main__':
l = range(100)
print quick_select(l, 1, find_largest=False)
print quick_select(l, 5, find_largest=True)
print quick_select(l, 54, find_largest=False)
print quick_select(l, 1, find_largest=True)
print find_median(l) | 28.164384 | 109 | 0.548638 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 497 | 0.241732 |
1d067213814691f84d993aa83db62abacd69db0f | 6,895 | py | Python | value_based_1.py | frankfangy/gridlab | 6251da042c67d3b939e25a9c800c6d61492a5efb | [
"Apache-2.0"
] | null | null | null | value_based_1.py | frankfangy/gridlab | 6251da042c67d3b939e25a9c800c6d61492a5efb | [
"Apache-2.0"
] | null | null | null | value_based_1.py | frankfangy/gridlab | 6251da042c67d3b939e25a9c800c6d61492a5efb | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
这个来源于 udacity 的讲座
验证基础的概念和算法
'''
from grid_world import *
import sys
#from PyQt5.QtCore import QPoint, QRect, QSize, Qt
#from PyQt5.QtGui import (QBrush, QPainter, QColor, QPen )
#from PyQt5.QtWidgets import (QApplication, QPushButton, QCheckBox, QGridLayout,QLabel, QWidget, QInputDialog)
from PyQt5.QtWidgets import ( QLabel , QTextEdit , QPlainTextEdit , QLineEdit )
import numpy as np
from random import randint
import time
from math import fabs
class value_based(grid_world):
'''
用来实验各种算法,并图形展示,基于方块地图
'''
def __init__(self):
super(value_based, self).__init__()
def config_map(self):
# 地图大小 , 初始map , 供子类再定义
self.map_mask = '''
1113
1214
1111
''' # map mask must be plus i8 (0 ~ 127 )
def config_gui(self):
super(value_based, self).config_gui()
self.add_ctrl( QLabel , '移动奖励' )
self.edit_moving_reward = self.add_ctrl( QLineEdit , '-0.04' , 15 )
self.add_ctrl( QLabel , '绿位奖励' )
self.edit_green_reward = self.add_ctrl( QLineEdit , '+1' , 15 )
self.add_ctrl( QLabel , '红位奖励' )
self.edit_red_reward = self.add_ctrl( QLineEdit , '-1' , 15 )
self.add_ctrl( QLabel , '运动稳定度' )
self.edit_moving_stability = self.add_ctrl( QLineEdit , '0.8' , 15 )
self.block_size = 60 # using a bigger block
def reset(self):
'''
初始化地图,以及计算条件,可能会反复运算
'''
self.moving_reward = float( self.edit_moving_reward.text() )
self.green_reward = float( self.edit_green_reward.text() )
self.red_reward = float( self.edit_red_reward.text() )
self.moving_stability = float( self.edit_moving_stability.text() )
print('get config :',self.moving_reward , self.green_reward , self.red_reward , self.moving_stability )
# init value
self.value_init = -100
self.value = np.zeros( ( self.map_width , self.map_height ) , float )
for x in range(self.map_width):
for y in range(self.map_height):
mpv = self.map[x][y]
self.value[x][y] = {1:self.value_init , 2:self.value_init , 3: self.green_reward , 4:self.red_reward }.get(mpv,0)
self.show_direction = False
self.update()
def avaliable_for_value(self,x,y):
if x < 0 or x >= self.map_width or y < 0 or y >= self.map_height:
return False
return self.map[x][y] != 2
def calc_direction(self):
self.direction = np.ndarray( ( self.map_width , self.map_height ) , dtype=np.int8 )
for x in range(self.map_width):
for y in range(self.map_height):
maxv = self.value_init
ii = 4 # stay there
if self.avaliable_for_value(x,y):
v0 = self.value[x][y]
for i in range(5):
d = ((0,-1),(1,0),(0,1),(-1,0),(0,0))[i]
nx = x + d[0]
ny = y + d[1]
if self.avaliable_for_value(nx,ny):
if maxv < self.value[nx][ny] :
ii = i
maxv = self.value[nx][ny]
self.direction[x][y] = ii
self.show_direction = True
def run_proc(self):
''' 此处是一个示范代码,子类应沿袭相同结构 '''
# setup flag
self.running = True
self.set_log('运算中')
while self.running:
updated = 0
self.value_old = self.value.copy()
for x in range(self.map_width):
for y in range(self.map_height):
if self.map[x][y] == 1:
v0 = self.value_old[x][y]
maxv = self.value_init
for i in range(4):
d =((0,-1),(1,0),(0,1),(-1,0))[i]
d1 =((-1,0),(0,-1),(1,0),(0,1))[i]
d2 =((1,0),(0,1),(-1,0),(0,-1))[i]
nx = x + d[0]
ny = y + d[1]
nx1 = x + d1[0]
ny1 = y + d1[1]
nx2 = x + d2[0]
ny2 = y + d2[1]
if self.avaliable_for_value(nx,ny): # this nx,ny is avaliable for value
# 计算,如果向 nx ny 点移动, 本地的value值可能是多少
v = self.value_old[nx][ny] * self.moving_stability
if self.avaliable_for_value(nx1,ny1):
v += (1.0-self.moving_stability )*0.5 * self.value_old[nx1][ny1]
else: # 留在原地
v += (1-self.moving_stability )*0.5 * v0
if self.avaliable_for_value(nx2,ny2):
v += (1-self.moving_stability )*0.5 * self.value_old[nx2][ny2]
else: # 留在原地
v += (1-self.moving_stability )*0.5 * v0
if v > maxv:
maxv = v
# here we got the maxv
if v0 - self.moving_reward < maxv:
updated += fabs( v0 - maxv - self.moving_reward )
self.value[x][y] = maxv + self.moving_reward
if updated < 1e-5 :
break
self.update()
time.sleep(0.3)
self.set_log('价值网络计算完成')
self.calc_direction()
self.running = False
self.update()
def draw_block(self, painter,x,y , block_map_value = None ):
painter.save()
if block_map_value is None:
block_map_value = self.map[x][y]
block_value = self.value[x][y]
bkcolor = {0:(55,55,55) , 1:(222,255,222) , 2 :(111,111,111) , 3:(111,255,111) , 4:(255,0,0) }.get( block_map_value , (0,0,0) )
self.draw_background( painter, x,y , bkcolor )
if block_map_value == 1: # path block
self.draw_text( painter , x,y , '%g'%(block_value) , (0,0,0) , 'd' )
if self.show_direction:
self.draw_arrow(painter,x,y, 'urdlo'[ self.direction[x][y] ] , (255,0,0) , 'u' )
elif block_map_value != 2: # other
self.draw_text( painter , x,y , str(block_value) , (0,0,0) , 'd' )
painter.restore()
if __name__ == '__main__':
run_gui( value_based )
| 39.4 | 138 | 0.4657 | 6,564 | 0.91357 | 0 | 0 | 0 | 0 | 0 | 0 | 1,112 | 0.154767 |
1d07ee21511b5314cfa719c3b45d940106ac6bb8 | 1,711 | py | Python | src/simmate/calculators/vasp/error_handlers/test/test_large_sigma.py | laurenmm/simmate-1 | c06b94c46919b01cda50f78221ad14f75c100a14 | [
"BSD-3-Clause"
] | 9 | 2021-12-21T02:58:21.000Z | 2022-01-25T14:00:06.000Z | src/simmate/calculators/vasp/error_handlers/test/test_large_sigma.py | laurenmm/simmate-1 | c06b94c46919b01cda50f78221ad14f75c100a14 | [
"BSD-3-Clause"
] | 51 | 2022-01-01T15:59:58.000Z | 2022-03-26T21:25:42.000Z | src/simmate/calculators/vasp/error_handlers/test/test_large_sigma.py | laurenmm/simmate-1 | c06b94c46919b01cda50f78221ad14f75c100a14 | [
"BSD-3-Clause"
] | 7 | 2022-01-01T03:44:32.000Z | 2022-03-29T19:59:27.000Z | # -*- coding: utf-8 -*-
import os
import pytest
from simmate.conftest import copy_test_files
from simmate.calculators.vasp.inputs import Incar
from simmate.calculators.vasp.error_handlers import LargeSigma
def test_large_sigma(tmpdir):
copy_test_files(
tmpdir,
test_directory=__file__,
test_folder="large_sigma",
)
# we reference the files several spots below so we grab its path up front
incar_filename = os.path.join(tmpdir, "INCAR")
outcar_filename = os.path.join(tmpdir, "OUTCAR")
# init class with default settings
error_handler = LargeSigma()
# Confirm an error IS found
assert error_handler.check(tmpdir) == True
# Make first attempt at fixing the error
fix = error_handler.correct(tmpdir)
assert fix == "reduced SIGMA from 0.1 to 0.04000000000000001"
assert Incar.from_file(incar_filename)["SIGMA"] == 0.04000000000000001
# Make 2nd attempt at fixing the error
fix = error_handler.correct(tmpdir)
assert fix == "switched KSPACING from 0.5 to 0.4"
assert Incar.from_file(incar_filename)["KSPACING"] == 0.4
# Make final attempt at fixing the error, which raises an error
incar = Incar.from_file(incar_filename)
incar["KSPACING"] = 0.2
incar.to_file(incar_filename)
with pytest.raises(Exception):
fix = error_handler.correct(tmpdir)
# Confirm an error IS NOT found when no outcar exists
os.remove(outcar_filename)
assert error_handler.check(tmpdir) == False
# Confirm an error IS NOT found when ISMEAR > 0
incar = Incar.from_file(incar_filename)
incar["ISMEAR"] = -1
incar.to_file(incar_filename)
assert error_handler.check(tmpdir) == False
| 31.109091 | 77 | 0.71128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 543 | 0.317358 |
1d08ffcc3348bfa440b3477262067290d3893191 | 3,043 | py | Python | wyggles/wyggle/dna.py | kfields/wyggles-arcade | f9cfafa15247e09f21aa06c2fe2f2ca1347b672a | [
"MIT"
] | 1 | 2020-03-15T23:10:12.000Z | 2020-03-15T23:10:12.000Z | wyggles/wyggle/dna.py | kfields/wyggles-arcade | f9cfafa15247e09f21aa06c2fe2f2ca1347b672a | [
"MIT"
] | null | null | null | wyggles/wyggle/dna.py | kfields/wyggles-arcade | f9cfafa15247e09f21aa06c2fe2f2ca1347b672a | [
"MIT"
] | null | null | null | import math
import random
from PIL import Image
import cairo
from wyggles import Dna
PI = math.pi
RADIUS = 32
WIDTH = RADIUS
HEIGHT = RADIUS
class WyggleDna(Dna):
def __init__(self, klass):
super().__init__(klass)
name = self.name
r = random.uniform(0, .75)
r1 = r + .20
r2 = r + .25
g = random.uniform(0, .75)
g1 = g + .20
g2 = g + .25
b = random.uniform(0, .75)
b1 = b + .20
b2 = b + .25
self.color1 = r,g,b,1
self.color2 = r1,g1,b1,1
self.color3 = r2,g2,b2,1
#
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, WIDTH, HEIGHT)
ctx = cairo.Context(surface)
#ctx.scale(1, 1) # Normalizing the canvas
imgsize = (RADIUS, RADIUS) #The size of the image
self.draw_segment(ctx)
self.tail_texture = self.create_texture(surface, name + 'tail', imgsize)
#Eating
self.draw_munchy_face(ctx)
self.munchy_face_texture = self.create_texture(surface, name + 'munchy_face', imgsize)
#Sad
self.draw_happy_face(ctx, -1)
self.sadFaceImage = self.create_texture(surface, name + 'sadFace', imgsize)
#Neutral
self.draw_happy_face(ctx, 0)
self.neutralFaceImage = self.create_texture(surface, name + 'neutralFace', imgsize)
#Happy
self.draw_happy_face(ctx, 1)
self.happy_face_texture = self.create_texture(surface, name + 'happy_face', imgsize)
#
self.face_texture = self.happy_face_texture
def draw_segment(self, ctx):
r1, g1, b1, a1 = self.color1
r2, g2, b2, a2 = self.color2
r3, g3, b3, a3 = self.color3
pat = cairo.RadialGradient(16,16,16, 8,8,4)
pat.add_color_stop_rgba(1, r3, g3, b3, a3)
pat.add_color_stop_rgba(0.9, r2, g2, b2, a2)
pat.add_color_stop_rgba(0, r1, g1, b1, a1)
ctx.arc(16, 16, 12, 0, PI*2)
ctx.close_path()
ctx.set_source(pat)
ctx.fill()
def draw_happy_face(self, ctx, valence):
self.draw_face(ctx)
#Mouth
x0 = 8
y0 = 20 - (4 * valence)
x1 = 16
y1 = 26 + (4 * valence)
x2 = 24
y2 = y0
#
#ctx.move_to(x0, y0)
ctx.curve_to(x0, y0, x1, y1, x2, y2)
ctx.set_line_width(2)
ctx.set_source_rgb(255, 0, 0) #red
ctx.stroke()
def draw_munchy_face(self, ctx):
self.draw_face(ctx)
#Mouth
ctx.arc(16, 16, 8, PI, 2 * PI)
ctx.close_path()
ctx.set_source_rgb(255, 0, 0) #red
ctx.fill()
def draw_face(self, ctx):
self.draw_segment(ctx)
#Eyes - Whites
ctx.arc(8, 8, 4, 0, PI*2)
ctx.arc(24, 8, 4, 0, PI*2)
ctx.close_path()
ctx.set_source_rgb(255, 255, 255)
ctx.fill()
#Eyes - Darks
ctx.arc(8, 8, 2, 0, PI*2)
ctx.arc(24, 8, 2, 0, PI*2)
ctx.close_path()
ctx.set_source_rgb(0,0,0)
ctx.fill()
| 27.917431 | 94 | 0.550772 | 2,896 | 0.951692 | 0 | 0 | 0 | 0 | 0 | 0 | 212 | 0.069668 |
1d09f4af7ac6dd139ab8ee8934a37f14f97144a4 | 17,847 | py | Python | scripts/trajectories.py | Miedema/MCNetwork | daab1fe5880c47695c6e21124f99aa6b2589aba1 | [
"Apache-2.0"
] | null | null | null | scripts/trajectories.py | Miedema/MCNetwork | daab1fe5880c47695c6e21124f99aa6b2589aba1 | [
"Apache-2.0"
] | null | null | null | scripts/trajectories.py | Miedema/MCNetwork | daab1fe5880c47695c6e21124f99aa6b2589aba1 | [
"Apache-2.0"
] | 1 | 2021-10-05T14:34:30.000Z | 2021-10-05T14:34:30.000Z | #!/usr/bin/python3
from tools import *
from sys import argv
from os.path import join
import h5py
import matplotlib.pylab as plt
from matplotlib.patches import Wedge
import numpy as np
# Simulation-data directory: first command-line argument if given, otherwise
# the repository-relative default.
pathToSimFolder = argv[1] if len(argv) > 1 else "../data/"
# Load the run's simulation parameters and the electrode definition list
# (readParameters comes from the project-local `tools` module).
parameters, electrodes = readParameters(pathToSimFolder)
electrodeNumber = len(electrodes)

# Dopant coordinate arrays, one (x, y) row per site; filled from "device.txt"
# below.
acceptorPos = np.zeros((int(parameters["acceptorNumber"]), 2))
try:
    donorPos = np.zeros((int(parameters["donorNumber"]), 2))
except KeyError:
    # Parameter files without an explicit "donorNumber" entry: derive the
    # donor count from the compensation factor instead.
    donorPos = np.zeros(
        (int(parameters["acceptorNumber"] * parameters["compensationFactor"]), 2)
    )

# "device.txt" layout as parsed here: two header lines, one space-separated
# coordinate line per acceptor, two more header lines, then one line per
# donor. The parse relies on this exact line order.
with open(join(pathToSimFolder, "device.txt")) as deviceFile:
    line = next(deviceFile)  # skip header line
    line = next(deviceFile)  # skip header line
    for i in range(acceptorPos.shape[0]):
        # numpy converts the split string fields to float on assignment
        acceptorPos[i] = next(deviceFile).split(" ")
    line = next(deviceFile)  # skip header line
    line = next(deviceFile)  # skip header line
    for i in range(donorPos.shape[0]):
        donorPos[i] = next(deviceFile).split(" ")
# Geometric position of every electrode in the device plane.  For "rect"
# geometry, electrodes[i] holds (fractional position along the edge, edge id);
# for "circle" geometry, electrodes[i][0] is an angle in degrees.
electrodePositions = np.empty((len(electrodes), 2))
for i, electrode in enumerate(electrodes):
    if parameters["geometry"] == "rect":
        frac, edge = electrode[0], electrode[1]
        if edge == 0:  # x = 0 boundary
            electrodePositions[i] = [0, frac * parameters["lenY"]]
        elif edge == 1:  # x = lenX boundary
            electrodePositions[i] = [parameters["lenX"], frac * parameters["lenY"]]
        elif edge == 2:  # y = 0 boundary
            electrodePositions[i] = [frac * parameters["lenX"], 0]
        elif edge == 3:  # y = lenY boundary
            electrodePositions[i] = [frac * parameters["lenX"], parameters["lenY"]]
    elif parameters["geometry"] == "circle":
        angleRad = electrode[0] / 360 * 2 * np.pi
        electrodePositions[i] = [
            parameters["radius"] * np.cos(angleRad),
            parameters["radius"] * np.sin(angleRad),
        ]
# print(electrodePositions)
def colorMaker(x):
    """Map ``x`` in [0, 1] to an RGBA array blending darkred -> darkgreen.

    Channel-wise linear interpolation between the CSS colors ``darkred``
    (#8B0000) and ``darkgreen`` (#006400) -- the same anchor colors (and the
    same linear interpolation) as the previous matplotlib/scipy based
    implementation, but without rebuilding three ``interp1d`` objects and
    importing matplotlib on every call.

    Note: ``np.interp`` clamps ``x`` outside [0, 1] to the end colors,
    whereas ``scipy.interpolate.interp1d`` raised ValueError there.
    """
    # RGB anchor colors; add rows here to introduce more gradient stops.
    cols = np.array(
        [
            [139 / 255, 0.0, 0.0],  # darkred  (#8B0000)
            [0.0, 100 / 255, 0.0],  # darkgreen (#006400)
        ]
    )
    stops = np.linspace(0, 1, len(cols))
    r = np.interp(x, stops, cols[:, 0])
    g = np.interp(x, stops, cols[:, 1])
    b = np.interp(x, stops, cols[:, 2])
    return np.array([r, g, b, 1])
# Labels for the four swap-track files: presumably the four combinations of
# the two logic-input states (in1_in2) -- TODO confirm against the simulation
# that wrote the files.
inp = ["0_0", "0_1", "1_0", "1_1"]

for fileNumber in [1, 2, 3, 4]:
    print(inp[fileNumber - 1])
    # for fileNumber in [1]:
    # Each row of the swap file is one hop: (origin site, destination site).
    data = np.genfromtxt(
        join(pathToSimFolder, f"swapTrackFile{fileNumber}.txt"),
        delimiter=";",
        dtype=int,
    )

    # trajectoriesSortedByStartEnd[start][end] collects all trajectories that
    # enter the sample at electrode `start` and leave at electrode `end`.
    trajectoriesSortedByStartEnd = [
        [[] for j in range(len(electrodes))] for i in range(len(electrodes))
    ]
    trajectories = []
    hops = 20000  # number of hops read from the file
    # IDs maps an acceptor site currently holding a tracked carrier to the
    # index of that carrier's trajectory in `trajectories`.
    IDs = {}
    hitID = 0  # next unused trajectory index
    for i in range(hops):
        hoppingSite1 = data[i, 0]
        hoppingSite2 = data[i, 1]
        # print("hoppingSite1",hoppingSite1,"hoppingSite2",hoppingSite2)
        if hoppingSite1 in IDs:
            # Origin holds a tracked carrier: continue its trajectory and
            # free the site.
            ID = IDs[hoppingSite1]
            del IDs[hoppingSite1]
            # print("found ID",ID)
        else:
            # Unknown origin (e.g. an electrode injecting a carrier): open a
            # new trajectory.
            ID = hitID
            hitID += 1
            trajectories.append([])
            # print("new ID", ID)
        # Site indices >= acceptorNumber are electrodes; only remember the
        # carrier's position while it sits on an acceptor.
        if hoppingSite2 < parameters["acceptorNumber"]:
            IDs[hoppingSite2] = ID
        trajectories[ID].append([hoppingSite1, hoppingSite2])

    # sort trajectories
    # Keep only trajectories that both start and end on an electrode and file
    # them under (start electrode, end electrode).
    for i in range(len(trajectories)):
        if trajectories[i][0][0] >= parameters["acceptorNumber"]:
            if trajectories[i][-1][1] >= parameters["acceptorNumber"]:
                trajectoriesSortedByStartEnd[
                    trajectories[i][0][0] - int(parameters["acceptorNumber"])
                ][trajectories[i][-1][1] - int(parameters["acceptorNumber"])].append(
                    trajectories[i]
                )
                # print(trajectories[i][0][0], trajectories[i][-1][1])
    # One figure per electrode k: draw the device layout and overlay every
    # reconstructed trajectory that starts at electrode k, colored by the
    # electrode where the trajectory ends.
    for k in range(len(electrodes)):

        fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))

        # Width of the electrode markers in device coordinate units.
        # NOTE(review): "electode" is a typo for "electrode"; name kept as-is.
        electodePlotWidth = 8
        # Electrode colors: blue = output electrode; red = an input electrode
        # (presumably when its logic input is high for this file's input
        # combination -- TODO confirm); rosybrown = the other input;
        # green = the remaining (control) electrodes.
        for i in range(len(electrodes)):
            if i == parameters["outputElectrode"]:
                col = "blue"
            elif i == parameters["inputElectrode1"]:
                if fileNumber in [3, 4]:
                    col = "red"
                else:
                    col = "rosybrown"
            elif i == parameters["inputElectrode2"]:
                if fileNumber in [2, 4]:
                    col = "red"
                else:
                    col = "rosybrown"
            else:
                col = "green"

            if parameters["geometry"] == "rect":
                # Rectangle anchor point and rotation for each of the four
                # device edges (edge id in electrodes[i][1]).
                if electrodes[i][1] == 0:
                    angle = 0
                    xy = (
                        0 - electodePlotWidth / 2,
                        electrodes[i][0] * parameters["lenY"]
                        - parameters["electrodeWidth"] / 2,
                    )
                elif electrodes[i][1] == 1:
                    angle = 0
                    xy = (
                        parameters["lenX"] - electodePlotWidth / 2,
                        electrodes[i][0] * parameters["lenY"]
                        - parameters["electrodeWidth"] / 2,
                    )
                elif electrodes[i][1] == 2:
                    angle = 90
                    xy = (
                        electrodes[i][0] * parameters["lenX"]
                        + parameters["electrodeWidth"] / 2,
                        0 - electodePlotWidth / 2,
                    )
                elif electrodes[i][1] == 3:
                    angle = 90
                    xy = (
                        electrodes[i][0] * parameters["lenX"]
                        + parameters["electrodeWidth"] / 2,
                        parameters["lenY"] - electodePlotWidth / 2,
                    )

                ax.add_artist(
                    plt.Rectangle(
                        xy,
                        electodePlotWidth,
                        parameters["electrodeWidth"],
                        angle=angle,
                        fc=col,
                        ec=col,
                        zorder=-1,
                    )
                )
            elif parameters["geometry"] == "circle":
                # Convert the electrode's arc length to an opening angle on
                # the device circle.
                electrodeWidth = (
                    parameters["electrodeWidth"]
                    / (parameters["radius"] * 2 * np.pi)
                    * 360
                )  # in degrees
                ax.add_artist(
                    Wedge(
                        (0, 0),
                        parameters["radius"] + electodePlotWidth / 2,
                        electrodes[i][0] - electrodeWidth / 2,
                        electrodes[i][0] + electrodeWidth / 2,
                        width=electodePlotWidth,
                        fc=col,
                        ec=col,
                        zorder=-1,
                    )
                )

        # Dopant positions: acceptors as dots, donors as crosses.
        ax.scatter(acceptorPos[:, 0], acceptorPos[:, 1], c="k", marker=".", s=20)
        ax.scatter(donorPos[:, 0], donorPos[:, 1], c="k", marker="x", s=20)

        # Overlay every hop of every trajectory running from electrode k to
        # electrode l; line color encodes the destination electrode l (the
        # color() helper comes from tools). Low alpha lets dense paths add up.
        for l in range(len(electrodes)):
            trajectories = trajectoriesSortedByStartEnd[k][l]
            for i in range(len(trajectories)):
                for j in range(len(trajectories[i])):
                    hoppingSite1 = trajectories[i][j][0]
                    hoppingSite2 = trajectories[i][j][1]

                    # Resolve each site index to coordinates: indices >=
                    # acceptorNumber address electrodes, smaller ones
                    # acceptors.
                    if hoppingSite1 >= parameters["acceptorNumber"]:
                        x1, y1 = (
                            electrodePositions[
                                hoppingSite1 - int(parameters["acceptorNumber"])
                            ][0],
                            electrodePositions[
                                hoppingSite1 - int(parameters["acceptorNumber"])
                            ][1],
                        )
                    else:
                        x1, y1 = (
                            acceptorPos[hoppingSite1, 0],
                            acceptorPos[hoppingSite1, 1],
                        )
                    if hoppingSite2 >= parameters["acceptorNumber"]:
                        x2, y2 = (
                            electrodePositions[
                                hoppingSite2 - int(parameters["acceptorNumber"])
                            ][0],
                            electrodePositions[
                                hoppingSite2 - int(parameters["acceptorNumber"])
                            ][1],
                        )
                    else:
                        x2, y2 = (
                            acceptorPos[hoppingSite2, 0],
                            acceptorPos[hoppingSite2, 1],
                        )

                    # ax.plot([x1,x2],[y1,y2],"-",alpha=0.05,color="k",linewidth=2)
                    ax.plot(
                        [x1, x2],
                        [y1, y2],
                        "-",
                        alpha=0.05,
                        color=color(l, len(electrodes)),
                        linewidth=2,
                    )

                    # if currentRatio>0.5:
                    # ax.arrow((x2+x1)/2,(y2+y1)/2,(x2-x1)*0.001,(y2-y1)*0.001,color=colorMaker(abs(currentRatio-0.5)*2),ec=None,alpha=absBins[i,j],linewidth=0,head_width=(currentRatio-0.5)*20)

        ax.axis("off")

        # Device outline, drawn behind everything else.
        if parameters["geometry"] == "circle":
            ax.add_artist(
                plt.Circle((0, 0), parameters["radius"], fc="none", ec="k", zorder=-2)
            )
        elif parameters["geometry"] == "rect":
            ax.add_artist(
                plt.Rectangle(
                    (0, 0),
                    parameters["lenX"],
                    parameters["lenY"],
                    fc="none",
                    ec="k",
                    zorder=-2,
                )
            )

        # Axis limits leave room for the electrode markers outside the sample.
        if parameters["geometry"] == "rect":
            ax.set_xlim(
                -electodePlotWidth / 2, parameters["lenX"] + electodePlotWidth / 2
            )
            ax.set_ylim(
                -electodePlotWidth / 2, parameters["lenY"] + electodePlotWidth / 2
            )
        elif parameters["geometry"] == "circle":
            ax.set_xlim(
                -parameters["radius"] - electodePlotWidth,
                parameters["radius"] + electodePlotWidth,
            )
            ax.set_ylim(
                -parameters["radius"] - electodePlotWidth,
                parameters["radius"] + electodePlotWidth,
            )
        ax.set_aspect("equal")

        plt.savefig(
            join(pathToSimFolder, f"trajectory_fromEl_{k}_{inp[fileNumber-1]}.png"),
            bbox_inches="tight",
            dpi=300,
        )
        # plt.show()
        plt.close(fig)
# Second trajectory figure set: one figure per *destination* electrode k,
# drawing every trajectory that ends at k, coloured by its start electrode l.
# (The loop above produces the mirrored "fromEl" figures using [k][l] indexing.)
for k in range(len(electrodes)):
    fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
    electodePlotWidth = 8  # drawn thickness of the electrode glyphs (plot units)
    # Draw every electrode, colour-coding its role for this input file.
    for i in range(len(electrodes)):
        if i == parameters["outputElectrode"]:
            col = "blue"
        elif i == parameters["inputElectrode1"]:
            # NOTE(review): red presumably marks an input that is "active" for
            # this fileNumber, rosybrown an inactive one — confirm upstream.
            if fileNumber in [3, 4]:
                col = "red"
            else:
                col = "rosybrown"
        elif i == parameters["inputElectrode2"]:
            if fileNumber in [2, 4]:
                col = "red"
            else:
                col = "rosybrown"
        else:
            col = "green"
        if parameters["geometry"] == "rect":
            # electrodes[i] = (relative position along the side, side index 0..3);
            # each branch computes the lower-left corner of the electrode rectangle.
            if electrodes[i][1] == 0:
                angle = 0
                xy = (
                    0 - electodePlotWidth / 2,
                    electrodes[i][0] * parameters["lenY"]
                    - parameters["electrodeWidth"] / 2,
                )
            elif electrodes[i][1] == 1:
                angle = 0
                xy = (
                    parameters["lenX"] - electodePlotWidth / 2,
                    electrodes[i][0] * parameters["lenY"]
                    - parameters["electrodeWidth"] / 2,
                )
            elif electrodes[i][1] == 2:
                angle = 90
                xy = (
                    electrodes[i][0] * parameters["lenX"]
                    + parameters["electrodeWidth"] / 2,
                    0 - electodePlotWidth / 2,
                )
            elif electrodes[i][1] == 3:
                angle = 90
                xy = (
                    electrodes[i][0] * parameters["lenX"]
                    + parameters["electrodeWidth"] / 2,
                    parameters["lenY"] - electodePlotWidth / 2,
                )
            ax.add_artist(
                plt.Rectangle(
                    xy,
                    electodePlotWidth,
                    parameters["electrodeWidth"],
                    angle=angle,
                    fc=col,
                    ec=col,
                    zorder=-1,
                )
            )
        elif parameters["geometry"] == "circle":
            # Convert the linear electrode width into an angular width.
            electrodeWidth = (
                parameters["electrodeWidth"]
                / (parameters["radius"] * 2 * np.pi)
                * 360
            ) # in degrees
            ax.add_artist(
                Wedge(
                    (0, 0),
                    parameters["radius"] + electodePlotWidth / 2,
                    electrodes[i][0] - electrodeWidth / 2,
                    electrodes[i][0] + electrodeWidth / 2,
                    width=electodePlotWidth,
                    fc=col,
                    ec=col,
                    zorder=-1,
                )
            )
    # Acceptor sites as dots, donor sites as crosses.
    ax.scatter(acceptorPos[:, 0], acceptorPos[:, 1], c="k", marker=".", s=20)
    ax.scatter(donorPos[:, 0], donorPos[:, 1], c="k", marker="x", s=20)
    # Overlay all trajectories that END at electrode k, grouped (and coloured)
    # by their start electrode l — note the [l][k] index order here, versus
    # [k][l] in the "fromEl" figures above.
    for l in range(len(electrodes)):
        trajectories = trajectoriesSortedByStartEnd[l][k]
        for i in range(len(trajectories)):
            for j in range(len(trajectories[i])):
                # Each trajectory step is a (from, to) pair of hopping sites.
                hoppingSite1 = trajectories[i][j][0]
                hoppingSite2 = trajectories[i][j][1]
                # Site indices >= acceptorNumber address electrodePositions;
                # smaller indices address acceptorPos.
                if hoppingSite1 >= parameters["acceptorNumber"]:
                    x1, y1 = (
                        electrodePositions[
                            hoppingSite1 - int(parameters["acceptorNumber"])
                        ][0],
                        electrodePositions[
                            hoppingSite1 - int(parameters["acceptorNumber"])
                        ][1],
                    )
                else:
                    x1, y1 = (
                        acceptorPos[hoppingSite1, 0],
                        acceptorPos[hoppingSite1, 1],
                    )
                if hoppingSite2 >= parameters["acceptorNumber"]:
                    x2, y2 = (
                        electrodePositions[
                            hoppingSite2 - int(parameters["acceptorNumber"])
                        ][0],
                        electrodePositions[
                            hoppingSite2 - int(parameters["acceptorNumber"])
                        ][1],
                    )
                else:
                    x2, y2 = (
                        acceptorPos[hoppingSite2, 0],
                        acceptorPos[hoppingSite2, 1],
                    )
                # ax.plot([x1,x2],[y1,y2],"-",alpha=0.05,color="k",linewidth=2)
                # Low alpha so dense bundles of hops accumulate visually.
                ax.plot(
                    [x1, x2],
                    [y1, y2],
                    "-",
                    alpha=0.05,
                    color=color(l, len(electrodes)),
                    linewidth=2,
                )
                # if currentRatio>0.5:
                # ax.arrow((x2+x1)/2,(y2+y1)/2,(x2-x1)*0.001,(y2-y1)*0.001,color=colorMaker(abs(currentRatio-0.5)*2),ec=None,alpha=absBins[i,j],linewidth=0,head_width=(currentRatio-0.5)*20)
    ax.axis("off")
    # Device outline.
    if parameters["geometry"] == "circle":
        ax.add_artist(
            plt.Circle((0, 0), parameters["radius"], fc="none", ec="k", zorder=-2)
        )
    elif parameters["geometry"] == "rect":
        ax.add_artist(
            plt.Rectangle(
                (0, 0),
                parameters["lenX"],
                parameters["lenY"],
                fc="none",
                ec="k",
                zorder=-2,
            )
        )
    # Pad the axis limits so the electrode glyphs are not clipped.
    if parameters["geometry"] == "rect":
        ax.set_xlim(
            -electodePlotWidth / 2, parameters["lenX"] + electodePlotWidth / 2
        )
        ax.set_ylim(
            -electodePlotWidth / 2, parameters["lenY"] + electodePlotWidth / 2
        )
    elif parameters["geometry"] == "circle":
        ax.set_xlim(
            -parameters["radius"] - electodePlotWidth,
            parameters["radius"] + electodePlotWidth,
        )
        ax.set_ylim(
            -parameters["radius"] - electodePlotWidth,
            parameters["radius"] + electodePlotWidth,
        )
    ax.set_aspect("equal")
    plt.savefig(
        join(pathToSimFolder, f"trajectory_toEl_{k}_{inp[fileNumber-1]}.png"),
        bbox_inches="tight",
        dpi=300,
    )
    # plt.show()
    plt.close(fig)
| 36.646817 | 197 | 0.428083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,402 | 0.134588 |
1d0a47a3797d1d7c500c16f2bd41153122397e81 | 1,187 | py | Python | rest_framework_sav/views.py | JamesRitchie/django-rest-framework-session-endpoint | a968129be88a1981d9904c3679e5fdd9490e890d | [
"BSD-2-Clause"
] | 21 | 2015-03-04T09:25:47.000Z | 2019-11-08T14:19:24.000Z | rest_framework_sav/views.py | JamesRitchie/django-rest-framework-session-endpoint | a968129be88a1981d9904c3679e5fdd9490e890d | [
"BSD-2-Clause"
] | 1 | 2015-03-04T09:26:17.000Z | 2015-03-11T13:10:20.000Z | rest_framework_sav/views.py | JamesRitchie/django-rest-framework-session-endpoint | a968129be88a1981d9904c3679e5fdd9490e890d | [
"BSD-2-Clause"
] | 1 | 2020-05-17T04:16:27.000Z | 2020-05-17T04:16:27.000Z | """Views for Django Rest Framework Session Endpoint extension."""
from django.contrib.auth import login, logout
from rest_framework import parsers, renderers
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.response import Response
from rest_framework.views import APIView
class SessionAuthView(APIView):
    """REST-style endpoint for creating and destroying sessions.

    POST logs a user in from posted username/password; DELETE logs the
    current session out. All responses are JSON.
    """

    # Throttling/permissions are deliberately empty: anonymous clients must
    # be able to reach this endpoint in order to authenticate at all.
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser)
    renderer_classes = (renderers.JSONRenderer,)

    def post(self, request):
        """Validate the posted credentials and start a session."""
        credentials = AuthTokenSerializer(data=request.data)
        credentials.is_valid(raise_exception=True)
        login(request, credentials.validated_data['user'])
        return Response({'detail': 'Session login successful.'})

    def delete(self, request):
        """End the session associated with this request."""
        logout(request)
        return Response({'detail': 'Session logout successful.'})
# Pre-built view callable for direct use in a URLconf.
session_auth_view = SessionAuthView.as_view()
| 30.435897 | 68 | 0.711879 | 821 | 0.69166 | 0 | 0 | 0 | 0 | 0 | 0 | 282 | 0.237574 |
1d0ef63ab672909bfeb78546b4b689de11692b51 | 4,348 | py | Python | app.py | thesadru/genshinstats-api | 0b0d3b62ee29e4617b74c2fa033c3488cf77fec0 | [
"MIT"
] | 7 | 2021-02-09T11:34:38.000Z | 2021-07-27T10:39:30.000Z | app.py | thesadru/genshinstats-flask-api | 0b0d3b62ee29e4617b74c2fa033c3488cf77fec0 | [
"MIT"
] | 4 | 2021-09-19T04:10:48.000Z | 2021-12-04T22:13:22.000Z | app.py | thesadru/genshinstats-flask-api | 0b0d3b62ee29e4617b74c2fa033c3488cf77fec0 | [
"MIT"
] | 2 | 2021-02-21T04:39:30.000Z | 2021-08-16T04:22:43.000Z | import os
import time
from concurrent.futures import ThreadPoolExecutor
from enum import Enum
from hashlib import sha256
from typing import List, Type
import genshinstats as gs
from cachetools import TTLCache
from fastapi import Depends, FastAPI, HTTPException, Path, Query, Request
from fastapi.responses import JSONResponse, RedirectResponse
from fastapi.security import HTTPBasic, HTTPBasicCredentials
# FastAPI application; this metadata shows up in the generated docs.
app = FastAPI(
    title="genshinstats API",
    description="Api for genshin stats",
    contact={"name": "sadru", "url": "https://github.com/thesadru"},
)
security = HTTPBasic()
# genshinstats auth cookie comes from the environment; this raises KeyError
# at import time if GS_LTUID / GS_LTOKEN are not set.
gs.set_cookie(ltuid=os.environ["GS_LTUID"], ltoken=os.environ["GS_LTOKEN"])
# Cache genshinstats responses for one hour, up to 1024 entries.
cache = TTLCache(1024, 3600)
gs.install_cache(cache)
# SHA-256 hex digests of the passwords allowed to POST /gacha.
user_passwords = {
    "sadru": "c0e21a8ff85153deac82fe7f09c0da1b3bd90ac0ae204e78d7148753b4363c03",
    "desertfox": "c0e21a8ff85153deac82fe7f09c0da1b3bd90ac0ae204e78d7148753b4363c03",
}
# Enum of supported language codes, built dynamically from genshinstats.
langs = gs.get_langs()
Lang: Type[str] = Enum("Lang", dict(zip(langs, langs)), type=str)
@app.on_event("startup")
async def startup_event():
    """Record the server start time; /debug reports uptime relative to it."""
    global startup_time
    startup_time = time.time()
@app.get("/", include_in_schema=False)
def index():
    """Redirect the bare root URL to the interactive API docs."""
    return RedirectResponse(app.docs_url)
@app.get("/user/{uid}", tags=["stats"], summary="User Stats")
def user(uid: int = Path(..., description="user's game uid", example=710785423)):
    """User's stats, characters, exploration percentage and teapot info"""
    # Thin proxy; GenshinStatsException is mapped to a 400 by the handler below.
    return gs.get_user_stats(uid)
@app.get("/abyss/{uid}", tags=["stats"], summary="Spiral Abyss")
def abyss(
    uid: int = Path(..., description="user's game uid", example=710785423),
    previous: bool = Query(False, description="Return the previous season's spiral abyss"),
):
    """User's Spiral Abyss runs during an entire season"""
    # `previous` is forwarded straight to genshinstats to pick the season.
    return gs.get_spiral_abyss(uid, previous)
@app.get("/characters/{uid}", tags=["stats"], summary="Characters")
def characters(
    uid: int = Path(..., description="user's game uid", example=710785423),
    name: str = Query(None, description="Character's name"),
    lang: Lang = Query("en-us", description="Language of the response"),
):
    """List of user's characters"""
    roster = gs.get_characters(uid, lang=lang)
    if name is None:
        return roster
    # Case-insensitive (title-cased) lookup of a single character by name.
    wanted = name.title()
    match = next((char for char in roster if char["name"].title() == wanted), None)
    if match is None:
        raise HTTPException(status_code=418, detail=f"User doesn't have a character named {name!r}")
    return match
@app.get("/gacha", tags=["gacha"], summary="Current banners")
def gacha():
    """List of the information of all current gacha banners."""
    # Banner ids are maintained via POST /gacha, stored one per line.
    with open("gacha_banners.txt") as file:
        banners = file.read().splitlines()
    # Fetch all banner details concurrently; executor.map preserves file order.
    with ThreadPoolExecutor() as executor:
        return list(executor.map(gs.get_banner_details, banners))
@app.post("/gacha", tags=["gacha"])
def update_gacha(ids: List[str], credentials: HTTPBasicCredentials = Depends(security)):
    """Update the banner ids, requires the user to be authorized.

    The HTTP Basic password is hashed with SHA-256 and must match the digest
    stored for the username in ``user_passwords``. On success the posted ids
    overwrite ``gacha_banners.txt`` (one id per line).
    """
    # Local import, matching the file's style (see debug() below).
    from hmac import compare_digest

    # Fall back to "" for unknown users so the constant-time comparison always
    # runs; `!=` on hex digests would be vulnerable to timing attacks.
    expected = user_passwords.get(credentials.username) or ""
    supplied = sha256(credentials.password.encode()).hexdigest()
    if not compare_digest(supplied, expected):
        raise HTTPException(403, "You are not authorized")
    with open("gacha_banners.txt", "w") as file:
        file.write("\n".join(ids))
    return {"detail": "success"}
@app.get("/gacha/items", tags=["gacha"], summary="Gacha Items")
def gacha_items():
    """A list of all items that you can pull from the gacha excluding limited items."""
    # Straight passthrough; served through the TTL cache installed at import time.
    return gs.get_gacha_items()
@app.get("/debug", include_in_schema=False)
def debug(request: Request, include_cache: bool = Query(False, alias="cache")):
    """Unlisted diagnostics endpoint: server info, uptime and (optionally) the cache."""
    # Local import so importlib.metadata is only loaded when /debug is hit.
    from importlib.metadata import version

    return {
        "type": request.scope["type"],
        "host": request.scope["server"][0],
        "port": request.scope["server"][1],
        "asgi": request.scope["asgi"],
        "uptime": time.time() - startup_time,
        "genshinstats_version": version('genshinstats'),
        # Flatten the (iterable) cache keys into comma-joined strings so the
        # dict is JSON-serializable; omitted entirely unless ?cache=true.
        "cache": {", ".join(map(str, k)): v for k, v in cache.items()} if include_cache else None,
    }
@app.exception_handler(gs.GenshinStatsException)
def handle_genshinstats(request: Request, exc: gs.GenshinStatsException):
    """Map genshinstats errors to a structured 400 JSON response."""
    return JSONResponse(
        {"error": type(exc).__name__, "retcode": exc.retcode, "message": exc.msg, "orig_msg": exc.orig_msg},
        status_code=400,
    )
| 33.96875 | 108 | 0.691122 | 0 | 0 | 0 | 0 | 3,322 | 0.764029 | 81 | 0.018629 | 1,264 | 0.290708 |
1d0f4c5eb9d4c29e42a28ad69535879d4ee1dd48 | 940 | py | Python | tests/integration/serialization_test.py | markowanga/stweet | 7f103b5c88fcef1d993d8cdc99cec358e55293f7 | [
"MIT"
] | 101 | 2020-11-22T16:44:25.000Z | 2022-03-30T08:42:07.000Z | tests/integration/serialization_test.py | markowanga/stweet | 7f103b5c88fcef1d993d8cdc99cec358e55293f7 | [
"MIT"
] | 53 | 2020-11-21T19:40:36.000Z | 2022-03-02T10:09:52.000Z | tests/integration/serialization_test.py | markowanga/stweet | 7f103b5c88fcef1d993d8cdc99cec358e55293f7 | [
"MIT"
] | 16 | 2020-12-12T23:02:51.000Z | 2022-03-01T12:10:32.000Z | import pytest
import stweet as st
from tests.test_util import get_temp_test_file_name, get_tweets_to_tweet_output_test, \
two_lists_assert_equal
def test_csv_serialization():
    """Round-trip tweets through a CSV file and compare with the in-memory copy."""
    file_path = get_temp_test_file_name('csv')
    collector = st.CollectorTweetOutput()
    outputs = [st.CsvTweetOutput(file_path), collector]
    get_tweets_to_tweet_output_test(outputs)
    reloaded = st.read_tweets_from_csv_file(file_path)
    two_lists_assert_equal(reloaded, collector.get_raw_list())
def test_file_json_lines_serialization():
    """Round-trip tweets through a JSON-lines file and compare with the in-memory copy."""
    file_path = get_temp_test_file_name('jl')
    collector = st.CollectorTweetOutput()
    outputs = [st.JsonLineFileTweetOutput(file_path), collector]
    get_tweets_to_tweet_output_test(outputs)
    reloaded = st.read_tweets_from_json_lines_file(file_path)
    two_lists_assert_equal(reloaded, collector.get_raw_list())
| 33.571429 | 87 | 0.787234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.009574 |
0dfccc824d2f975fcc92e6cabd32a3783f6c5d15 | 391 | py | Python | scripts/python/calculus/ch2.py | jeremiahmarks/dangerzone | fe2946b8463ed018d2136ca0eb178161ad370565 | [
"MIT"
] | 1 | 2015-08-15T05:25:35.000Z | 2015-08-15T05:25:35.000Z | scripts/python/calculus/ch2.py | jeremiahmarks/dangerzone | fe2946b8463ed018d2136ca0eb178161ad370565 | [
"MIT"
] | null | null | null | scripts/python/calculus/ch2.py | jeremiahmarks/dangerzone | fe2946b8463ed018d2136ca0eb178161ad370565 | [
"MIT"
] | null | null | null | #
from mypy.physics import constants
def averageVelocity(positionEquation, startTime, endTime):
    """
    Return the average velocity over [startTime, endTime].

    positionEquation is a one-variable callable giving position at a time;
    average velocity = (change in position) / (time elapsed).
    """
    t_start, t_end = float(startTime), float(endTime)
    displacement = positionEquation(t_start) - positionEquation(t_end)
    return displacement / (t_start - t_end)
| 24.4375 | 82 | 0.762148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.350384 |
0dfde004acc4ea83cf0c183251d4f37479f7e957 | 690 | py | Python | Maths_And_Stats/Number_Theory/Segmented_Sieve/segmented_sieve.py | arslantalib3/algo_ds_101 | a1293f407e00b8346f93e8770727f769e7add00e | [
"MIT"
] | 182 | 2020-10-01T17:16:42.000Z | 2021-10-04T17:52:49.000Z | Maths_And_Stats/Number_Theory/Segmented_Sieve/segmented_sieve.py | arslantalib3/algo_ds_101 | a1293f407e00b8346f93e8770727f769e7add00e | [
"MIT"
] | 759 | 2020-10-01T00:12:21.000Z | 2021-10-04T19:35:11.000Z | Maths_And_Stats/Number_Theory/Segmented_Sieve/segmented_sieve.py | arslantalib3/algo_ds_101 | a1293f407e00b8346f93e8770727f769e7add00e | [
"MIT"
] | 1,176 | 2020-10-01T16:02:13.000Z | 2021-10-04T19:20:19.000Z | def segmented_sieve(n):
# Create an boolean array with all values True
primes = [True]*n
for p in range(2,n):
#If prime[p] is True,it is a prime and its multiples are not prime
if primes[p]:
for i in range(2*p,n,p):
# Mark every multiple of a prime as not prime
primes[i]=False
#If value is true it is prime and print value
for l in range(2,n):
if primes[l]:
print(f"{l} ")
# Interactive driver: prompt until the user enters a valid integer, then sieve.
while True:
    try:
        input_value = int(input("Please a number: "))
        segmented_sieve(input_value)
        break
    except ValueError:
        # Non-integer input: report and re-prompt (EOFError is NOT handled).
        print("No valid integer! Please try again ...")
df02884c6d0111c6be162d57811e3b0d85ecd572 | 2,003 | py | Python | setup.py | UCL/scikit-surgerytf | b81ae994c44da790b101b868d12dd20b6d862df3 | [
"Apache-2.0"
] | null | null | null | setup.py | UCL/scikit-surgerytf | b81ae994c44da790b101b868d12dd20b6d862df3 | [
"Apache-2.0"
] | 41 | 2020-06-15T12:13:06.000Z | 2022-03-22T19:04:48.000Z | setup.py | UCL/scikit-surgerytf | b81ae994c44da790b101b868d12dd20b6d862df3 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
"""
Setup for scikit-surgerytf
"""
from setuptools import setup, find_packages
import versioneer
# Get the long description
# The README doubles as the PyPI long description.
with open('README.rst') as f:
    long_description = f.read()
setup(
    name='scikit-surgerytf',
    # Version and build commands are managed by versioneer (git-tag driven).
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description='scikit-surgerytf is a Python package for Tensor Flow examples and utilities',
    long_description=long_description,
    long_description_content_type='text/x-rst',
    url='https://github.com/UCL/scikit-surgerytf',
    author='Matt Clarkson',
    author_email='m.clarkson@ucl.ac.uk',
    license='Apache Software License 2.0',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Healthcare Industry',
        'Intended Audience :: Information Technology',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Scientific/Engineering :: Medical Science Apps.',
    ],
    keywords='medical imaging',
    packages=find_packages(
        exclude=[
            'docs',
            'tests',
        ]
    ),
    # Note the hard pins: this release targets tensorflow 2.0.0 exactly.
    install_requires=[
        'pyyaml',
        'h5py',
        'ipykernel',
        'nbsphinx',
        'Pillow',
        'scipy',
        'opencv-contrib-python==4.1.1.26',
        'tensorflow==2.0.0',
        'tensorflow-datasets==1.3.0',
        'matplotlib==3.1.1'
    ],
    # Console scripts exposed on install.
    entry_points={
        'console_scripts': [
            'sksurgeryfashion=sksurgerytf.ui.sksurgery_fashion_command_line:main',
            'sksurgeryrgbunet=sksurgerytf.ui.sksurgery_rgbunet_command_line:main',
            'sksurgerysegstats=sksurgerytf.ui.sksurgery_segstats_command_line:main'
        ],
    },
)
| 27.819444 | 94 | 0.62656 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,170 | 0.584124 |
df04608972e31495e8f964d15c9dd90a93f28d89 | 2,155 | py | Python | Codigos Python/String_to_int.py | BrunoHarlis/Solucoes_LeetCode | cca9b1331cbfe7d8dc8d844a810ac651a92d8c97 | [
"MIT"
] | null | null | null | Codigos Python/String_to_int.py | BrunoHarlis/Solucoes_LeetCode | cca9b1331cbfe7d8dc8d844a810ac651a92d8c97 | [
"MIT"
] | null | null | null | Codigos Python/String_to_int.py | BrunoHarlis/Solucoes_LeetCode | cca9b1331cbfe7d8dc8d844a810ac651a92d8c97 | [
"MIT"
] | null | null | null | # Fonte: https://leetcode.com/problems/string-to-integer-atoi/
# Autor: Bruno Harlis
# Data: 03/08/2021
"""
Implement the function myAtoi(string s), which converts a string to a
32-bit signed integer (similar to the C/C++ atoi function).

The algorithm for myAtoi(string s) is as follows:

Read and ignore any leading whitespace.
Check whether the next character (if not already at the end of the string)
is '-' or '+'. Read this character if present. It determines whether the
final result is negative or positive, respectively. Assume the result is
positive if neither is present.
Next, read characters until the next non-digit character or the end of the
input is reached. The rest of the string is ignored.
Convert those digits into an integer (i.e. "123" -> 123, "0032" -> 32).
If no digits were read, the integer is 0. Adjust the sign as needed
(from step 2).
If the integer is outside the 32-bit signed range [-2**31, 2**31 - 1],
clamp it so that it stays within the range. Specifically, integers smaller
than -2**31 become -2**31 and integers larger than 2**31 - 1 become 2**31 - 1.
Return the integer as the final result.

Note:
Only the space character ' ' is considered a whitespace character.
Do not ignore any characters other than the leading whitespace, nor the rest
of the string after the digits.

Runtime: 36 ms, faster than 60.05% of submissions.
Memory usage: 14.5 MB, less than 26.10% of submissions.
"""
def myAtoi(s):
    """Convert a string to a 32-bit signed integer, C-atoi style.

    Leading spaces are skipped, an optional single '+'/'-' sign is read,
    then consecutive decimal digits are accumulated; everything after the
    first non-digit is ignored. The result is clamped to
    [-2**31, 2**31 - 1]. Returns 0 when no digits are found.
    """
    INT_MIN, INT_MAX = -2 ** 31, 2 ** 31 - 1
    # The spec treats only ' ' as whitespace, so strip spaces explicitly;
    # str.strip() would also drop tabs/newlines.
    s = s.lstrip(' ')
    if not s:
        return 0
    i = 0
    negativo = s[0] == '-'
    if s[0] in '+-':
        i = 1
    r = 0
    # Restrict to ASCII digits: str.isnumeric() also accepts characters such
    # as '²', which int() cannot parse and which atoi must reject.
    while i < len(s) and '0' <= s[i] <= '9':
        r = r * 10 + (ord(s[i]) - ord('0'))
        # Clamp as soon as the accumulated value leaves the 32-bit range.
        if negativo:
            if -r < INT_MIN:
                return INT_MIN
        elif r > INT_MAX:
            return INT_MAX
        i += 1
    return -r if negativo else r
| 30.352113 | 98 | 0.644084 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,620 | 0.738377 |
df0469beb5c62575548fc779498cc9b3a9cc1490 | 1,547 | py | Python | tests/bench_bgra2rgb.py | RedFantom/python-mss | 7e26de184b1e6a0800231b01451f794087a76f73 | [
"MIT"
] | null | null | null | tests/bench_bgra2rgb.py | RedFantom/python-mss | 7e26de184b1e6a0800231b01451f794087a76f73 | [
"MIT"
] | null | null | null | tests/bench_bgra2rgb.py | RedFantom/python-mss | 7e26de184b1e6a0800231b01451f794087a76f73 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
2018-03-19.
Maximum screenshots in 1 second by computing BGRA raw values to RGB.
GNU/Linux
pil_frombytes 139
mss_rgb 119
pil_frombytes_rgb 51
numpy_flip 31
numpy_slice 29
macOS
pil_frombytes 209
mss_rgb 174
pil_frombytes_rgb 113
numpy_flip 39
numpy_slice 36
Windows
pil_frombytes 81
mss_rgb 66
pil_frombytes_rgb 42
numpy_flip 25
numpy_slice 22
"""
from __future__ import print_function
import time
import numpy
from PIL import Image
import mss
def mss_rgb(im):
    # Baseline: use mss ScreenShot's own .rgb conversion property.
    return im.rgb
def numpy_flip(im):
    """Return raw RGB bytes from a BGRA image: per pixel, take the first
    three channels in reversed order (alpha is dropped)."""
    pixels = numpy.array(im, dtype=numpy.uint8)
    rgb = pixels[:, :, 2::-1]
    return rgb.tobytes()
def numpy_slice(im):
    """Return raw RGB bytes from a BGRA image by fancy-indexing the channel
    axis in B<->R swapped order (alpha is dropped)."""
    pixels = numpy.array(im, dtype=numpy.uint8)
    rgb = pixels[..., [2, 1, 0]]
    return rgb.tobytes()
def pil_frombytes_rgb(im):
    # Starts from im.rgb (already-converted RGB bytes) and round-trips via Pillow.
    return Image.frombytes('RGB', im.size, im.rgb).tobytes()
def pil_frombytes(im):
    # Lets Pillow perform the BGRA->RGB conversion itself via the 'BGRX' raw mode.
    return Image.frombytes('RGB', im.size, im.bgra, 'raw', 'BGRX').tobytes()
def benchmark():
    """Count how many BGRA->RGB conversions each strategy manages in one second."""
    with mss.mss() as sct:
        im = sct.grab(sct.monitors[0])
        for func in (pil_frombytes,
                     mss_rgb,
                     pil_frombytes_rgb,
                     numpy_flip,
                     numpy_slice):
            count = 0
            start = time.time()
            while (time.time() - start) <= 1:
                func(im)
                # Clear ScreenShot's private rgb cache (name-mangled attribute)
                # so every iteration really re-does the conversion.
                im._ScreenShot__rgb = None
                count += 1
            print(func.__name__.ljust(17), count)
# Runs immediately on import - the module doubles as a script (no __main__ guard).
benchmark()
| 19.582278 | 76 | 0.580478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 503 | 0.325145 |
df06fb8878eee413e40a010aae2c0a5236a2dce0 | 431 | py | Python | app1/migrations/0002_auto_20210925_0546.py | rianaansari/My_library | 95b817ac2760d9dcb43b7cfff053056cfe3eb12d | [
"Apache-2.0"
] | null | null | null | app1/migrations/0002_auto_20210925_0546.py | rianaansari/My_library | 95b817ac2760d9dcb43b7cfff053056cfe3eb12d | [
"Apache-2.0"
] | null | null | null | app1/migrations/0002_auto_20210925_0546.py | rianaansari/My_library | 95b817ac2760d9dcb43b7cfff053056cfe3eb12d | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.7 on 2021-09-25 05:46
from django.db import migrations
class Migration(migrations.Migration):
    # Must be applied after the app's initial migration.
    dependencies = [
        ('app1', '0001_initial'),
    ]
    # Drops the two date fields from the 'author' model.
    operations = [
        migrations.RemoveField(
            model_name='author',
            name='date_of_birth',
        ),
        migrations.RemoveField(
            model_name='author',
            name='date_of_death',
        ),
    ]
| 19.590909 | 47 | 0.559165 | 346 | 0.802784 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.262181 |
df0719c519efa5ba53c3190dcb91a4426cd19f1a | 672 | py | Python | third_party/blink/renderer/bindings/scripts/blink_idl_parser_test.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | third_party/blink/renderer/bindings/scripts/blink_idl_parser_test.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | third_party/blink/renderer/bindings/scripts/blink_idl_parser_test.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=no-member,relative-import
"""Unit tests for blink_idl_parser.py."""
import unittest
from blink_idl_parser import BlinkIDLParser
class BlinkIDLParserTest(unittest.TestCase):
    """Tests for BlinkIDLParser's error reporting."""
    def test_missing_semicolon_between_definitions(self):
        """The parser should record at least one error rather than accept the input."""
        # No semicolon after enum definition.
        text = '''enum TestEnum { "value" } dictionary TestDictionary {};'''
        parser = BlinkIDLParser()
        parser.ParseText(filename='', data=text)
        self.assertGreater(parser.GetErrors(), 0)
| 33.6 | 76 | 0.730655 | 358 | 0.532738 | 0 | 0 | 0 | 0 | 0 | 0 | 343 | 0.510417 |
df0a77a4b3e3c8e88b291a3d6e1f829363ad81e7 | 1,705 | py | Python | encode.py | Kyobito/Pencil-Unrefined | 352a0281a00129a4e5d8d793997d09c0a29a1c7c | [
"MIT"
] | null | null | null | encode.py | Kyobito/Pencil-Unrefined | 352a0281a00129a4e5d8d793997d09c0a29a1c7c | [
"MIT"
] | null | null | null | encode.py | Kyobito/Pencil-Unrefined | 352a0281a00129a4e5d8d793997d09c0a29a1c7c | [
"MIT"
] | null | null | null | import calc
def caesar_cipher(word, base):
    """Shift each ASCII letter in ``word`` by ``base`` positions, wrapping
    around the alphabet. Case is preserved; digits, punctuation and any
    non-ASCII characters pass through unchanged.
    """
    up_alpha = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    low_alpha = 'abcdefghijklmnopqrstuvwxyz'
    new_word = ''
    # Oversized shifts are normalised by the project helper, as before.
    base = base if abs(base) <= 26 else calc.base_finder(base)
    for letter in word:
        # Membership tests (rather than isalpha/isupper) keep non-ASCII
        # letters such as 'é' intact instead of crashing on .index(),
        # and modulo replaces the old IndexError/negative-index wrap hack
        # (which also leaked debug print() output).
        if letter in up_alpha:
            new_word += up_alpha[(up_alpha.index(letter) + base) % 26]
        elif letter in low_alpha:
            new_word += low_alpha[(low_alpha.index(letter) + base) % 26]
        else:
            new_word += letter
    return new_word
def substitution(word, substitute, specify):
    """Replace every character of ``word`` listed in ``substitute`` with the
    character at the same position in ``specify``; all other characters are
    kept unchanged.

    Returns an error message string (not an exception) when the two mapping
    arguments differ in length, preserving the original interface.
    """
    if len(substitute) != len(specify):
        return "Second and third arguments were not entered correctly"
    # Build the mapping once. setdefault keeps the FIRST occurrence when a
    # character is listed twice (matching the old .index() lookup) and fixes
    # the old bug where duplicates emitted one replacement per occurrence.
    mapping = {}
    for src, dst in zip(substitute, specify):
        mapping.setdefault(src, dst)
    return ''.join(mapping.get(ch, ch) for ch in word)
def binary(number):
    """Return ``number`` in binary without the '0b' prefix.

    format() keeps the sign in front for negative inputs ('-101'), where the
    old ``bin(number)[2:]`` slicing produced garbage like 'b101'.
    """
    return format(number, "b")
def hexadecimal(number):
    """Return ``number`` in lowercase hexadecimal without the '0x' prefix.

    Accepts anything int() can convert (e.g. numeric strings). format()
    keeps the sign correct for negative values, where the old
    ``hex(...)[2:]`` slicing produced garbage like 'xff'.
    """
    return format(int(number), "x")
df0aff4197ef0ad00e311fbd3784ea4b9b5eea37 | 2,142 | py | Python | snuba/utils/types.py | fpacifici/snuba | cf732b71383c948f9387fbe64e9404ca71f8e9c5 | [
"Apache-2.0"
] | null | null | null | snuba/utils/types.py | fpacifici/snuba | cf732b71383c948f9387fbe64e9404ca71f8e9c5 | [
"Apache-2.0"
] | null | null | null | snuba/utils/types.py | fpacifici/snuba | cf732b71383c948f9387fbe64e9404ca71f8e9c5 | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from typing import Any, Generic, TypeVar
from typing_extensions import Protocol
# Type var for the "other" operand of the comparison methods; contravariant
# because Comparable only consumes values of this type.
TComparable = TypeVar("TComparable", contravariant=True)
class Comparable(Protocol[TComparable]):
    """
    Defines the protocol for comparable objects. Objects that satisfy this
    protocol are assumed to have an ordering via the "rich comparison"
    methods defined here.

    "An ordering" does not necessarily imply a *total* ordering, or even that
    the ordering itself is deterministic. The Python documentation provides a
    more detailed explanation about the guarantees (or lack thereof) provided
    by these methods: https://docs.python.org/3/reference/datamodel.html#object.__lt__

    This class exists primarily to satisfy the type checker when dealing with
    generics that will be directly compared, and secondarily to provide
    documentation via type annotations.

    In reality, this class provides little to no practical benefit, since all
    of these methods defined in the protocol are part of the ``object`` class
    definition (which is shared by all classes by default) but returns
    ``NotImplemented`` rather than returning a valid result. (This protocol
    is not defined as runtime checkable for that reason.)
    """

    def __lt__(self, other: TComparable) -> bool:
        """Return True if ``self`` orders strictly before ``other``."""
        raise NotImplementedError

    def __le__(self, other: TComparable) -> bool:
        """Return True if ``self`` orders before or equal to ``other``."""
        raise NotImplementedError

    def __gt__(self, other: TComparable) -> bool:
        """Return True if ``self`` orders strictly after ``other``."""
        raise NotImplementedError

    def __ge__(self, other: TComparable) -> bool:
        """Return True if ``self`` orders after or equal to ``other``."""
        raise NotImplementedError

    def __eq__(self, other: object) -> bool:
        """Return True if ``self`` compares equal to ``other``."""
        raise NotImplementedError

    def __ne__(self, other: object) -> bool:
        """Return True if ``self`` compares unequal to ``other``."""
        raise NotImplementedError
# Concrete types used with Interval must support the rich comparisons above.
T = TypeVar("T", bound=Comparable[Any])
@dataclass(frozen=True)
class InvalidRangeError(ValueError, Generic[T]):
    """Raised by Interval when ``upper`` is not >= ``lower``."""

    lower: T
    upper: T
@dataclass(frozen=True)
class Interval(Generic[T]):
    """An immutable range [lower, upper]; construction validates upper >= lower."""

    lower: T
    upper: T

    def __post_init__(self) -> None:
        # ``not (upper >= lower)`` (rather than ``upper < lower``) also rejects
        # un-ordered values such as NaN, where both comparisons are False.
        if not self.upper >= self.lower:
            raise InvalidRangeError(self.lower, self.upper)
| 31.5 | 86 | 0.723156 | 1,869 | 0.872549 | 0 | 0 | 314 | 0.146592 | 0 | 0 | 1,074 | 0.501401 |
df0b8deb7b8139e6384b8790377b34bff7533519 | 2,725 | py | Python | ACME/render/renderer.py | mauriziokovacic/ACME | 2615b66dd4addfd5c03d9d91a24c7da414294308 | [
"MIT"
] | 3 | 2019-10-23T23:10:55.000Z | 2021-09-01T07:30:14.000Z | ACME/render/renderer.py | mauriziokovacic/ACME-Python | 2615b66dd4addfd5c03d9d91a24c7da414294308 | [
"MIT"
] | null | null | null | ACME/render/renderer.py | mauriziokovacic/ACME-Python | 2615b66dd4addfd5c03d9d91a24c7da414294308 | [
"MIT"
] | 1 | 2020-07-11T11:35:43.000Z | 2020-07-11T11:35:43.000Z | import neural_renderer as nr
from ..math.unitvec import *
class Renderer(nr.Renderer):
    """Thin wrapper around the Neural Renderer with convenience toggles.

    Attributes
    ----------
    device : str or torch.device (optional)
        device the renderer tensors are stored on (default is 'cuda:0')
    culling : str or None (optional)
        active face culling mode, 'front', 'back' or None (default is None)

    Methods
    -------
    toggle_lighting(status)
        switch the directional lighting on or off depending on status
    enable_lighting()
        turn the directional lighting on
    disable_lighting()
        turn the directional lighting off
    disable_culling()
        turn face culling off
    enable_back_culling()
        cull back faces
    enable_front_culling()
        cull front faces
    """

    def __init__(self, device='cuda:0', culling=None, lighting=False, **kwargs):
        """
        Parameters
        ----------
        device : str or torch.device (optional)
            device the tensors are stored on (default is 'cuda:0')
        culling : str (optional)
            face culling mode, either 'front' or 'back'; None disables
            culling (default is None)
        lighting : bool (optional)
            True activates the directional lighting, False deactivates it
        **kwargs : ...
            forwarded to the Neural Renderer constructor
        """
        super().__init__(camera_mode='look_at', **kwargs)
        self.eye = unitvec(3, 0, device=device)
        # Directional light points opposite the eye vector.
        self.light_direction = -self.eye
        self.device = device
        self.culling = culling
        self.toggle_lighting(lighting)

    def toggle_lighting(self, status):
        """Switch directional lighting on (status=True) or off (status=False).

        Directional and ambient intensities always sum to one, so toggling
        merely redistributes the light between the two components.
        """
        self.light_intensity_directional = 0.5 if status else 0
        self.light_intensity_ambient = 1 - self.light_intensity_directional

    def enable_lighting(self):
        """Turn the directional lighting on."""
        self.toggle_lighting(True)

    def disable_lighting(self):
        """Turn the directional lighting off."""
        self.toggle_lighting(False)

    def disable_culling(self):
        """Disable face culling."""
        self.culling = None

    def enable_back_culling(self):
        """Cull back faces."""
        self.culling = 'back'

    def enable_front_culling(self):
        """Cull front faces."""
        self.culling = 'front'
| 27.806122 | 80 | 0.601468 | 2,664 | 0.977615 | 0 | 0 | 0 | 0 | 0 | 0 | 1,643 | 0.602936 |
df0c2bb253afbf6cb0b5b41d83b69d48eccf038f | 6,241 | py | Python | src/codeGameSimulation/Multirun.py | LukasWallisch/game-ur-analysis | 760a892cc20924cf1864b9602c5d3239a8995323 | [
"MIT"
] | null | null | null | src/codeGameSimulation/Multirun.py | LukasWallisch/game-ur-analysis | 760a892cc20924cf1864b9602c5d3239a8995323 | [
"MIT"
] | null | null | null | src/codeGameSimulation/Multirun.py | LukasWallisch/game-ur-analysis | 760a892cc20924cf1864b9602c5d3239a8995323 | [
"MIT"
] | null | null | null | import copy
from datetime import datetime
from typing import List, Tuple
from .store2db import createTabels, store_data_2_db
from .GameUr import GameUr
from .GameSettings import GameSettings
import multiprocessing as mp
import tqdm
def getThreadCount() -> int:
    """Return the number of CPU cores available for worker processes."""
    return mp.cpu_count()
def runGameNTimes(n: int, gs: GameSettings, chunkId: int):
    """Play ``n`` games with the same settings and collect their histories.

    Parameters
    ----------
    n : int
        Number of games to play.
    gs : GameSettings
        Settings shared by all games; the game object is reset between runs.
    chunkId : int
        Identifier of this chunk (only used by the commented-out progress output).

    Returns
    -------
    list
        One stone-history entry per game played.
    """
    delta0 = datetime.now()
    # Bug fix: this module imports the class via ``from .GameUr import GameUr``,
    # so the constructor is ``GameUr(gs)`` (as in runGame/runGameDB), not
    # ``GameUr.GameUr(gs)`` which would raise AttributeError.
    g = GameUr(gs)
    h = []
    # print("chunk {}: start {} games".format(chunkId,n))
    for _ in range(n):
        g.run(1000)  # cap each game at 1000 turns
        h.append(g.getStonesHistory())
        g.reset()
    delta1 = datetime.now()
    # print("chunk {}: finished {} games in {} ".format(chunkId,n, delta1-delta0))
    return h
def runGame(settings: Tuple[GameSettings, bool]):
    """Play a single game (up to 1000 turns) and return its stone history.

    Arguments are packed into one tuple so the function can be mapped
    directly with ``multiprocessing.Pool.imap_unordered``.
    """
    game_settings, for_json = settings
    game = GameUr(game_settings)
    game.run(1000)
    return game.getStonesHistory(for_json)
def runGameDB(gs: GameSettings):
    """Play one game (up to 1000 turns) and return its history in DB format."""
    game = GameUr(gs)
    game.run(1000)
    return game.getStonesHistory4db()
def runGameDBNoStoneHistory(gs: GameSettings):
    """Play one game without per-stone history recording; return DB rows."""
    game = GameUr(gs, True)
    game.run(1000)
    return game.getStonesHistory4db()
def runGameDBfastest(settings: Tuple[GameSettings, int]):
    """Play one game capped at ``fastest`` turns.

    Returns a ``(db_history, game_length)`` tuple so the caller can track
    the shortest game seen so far.
    """
    game_settings, turn_cap = settings
    game = GameUr(game_settings)
    game.run(turn_cap)
    return game.getStonesHistory4db(), game.getGamelength()
def multirun(n: int, gamesPerChunk:int, gs: List[GameSettings],forJson:bool):
    """Play ``n`` games per settings entry across all CPU cores.

    Returns a list of ``{"gs": settings, "history": [...]}`` dicts, one per
    entry in ``gs``. ``gamesPerChunk`` is passed to ``imap_unordered`` as the
    chunksize, i.e. how many games each worker receives per dispatch.
    """
    PROCESSES = mp.cpu_count()
    CHUNKS = n//gamesPerChunk
    # print("processes:",PROCESSES)
    # print("total Games:", gamesPerChunk*CHUNKS)
    print("chunks:", CHUNKS)
    print("gamePerChunk:", gamesPerChunk)
    print("gamesettings:", len(gs))
    gamesDone = 0  # NOTE(review): never updated/used — appears vestigial
    with mp.Pool(PROCESSES) as pool:
        results = []
        for sub_gs in gs:
            # print("start pool")
            # results = pool.imap_unordered(runGame,
            #                 [copy.deepcopy(gs) for i in range(CHUNKS)],gamesPerChunk)
            # Each worker gets its own deep copy of the settings so games
            # cannot mutate shared state.
            sub_results =[]
            for x in tqdm.tqdm(pool.imap_unordered(runGame, [(copy.deepcopy(sub_gs), forJson) for i in range(n)], gamesPerChunk), total=n, unit="games"):
                sub_results.append(x)
            # print("finish pool")
            results.append({"gs":sub_gs,"history":sub_results})
    # h = []
    # for h_sub in results:
    #     h.extend(h_sub)
    return results
def multirunDB(n: int, processes: int, gamesPerChunk: int, gs: List[GameSettings], db_dir: str, db_filename: str):
    """Play ``n`` games per settings entry in parallel and store results in SQLite.

    ``processes == -1`` (or any value >= the core count) means "use all cores".
    Results are written to the database after each settings entry completes.
    Returns the number of settings entries processed.
    """
    if processes == -1 or processes >= mp.cpu_count():
        PROCESSES = mp.cpu_count()
    else:
        PROCESSES = processes
    CHUNKS = n//gamesPerChunk
    # print("processes:",PROCESSES)
    # print("total Games:", gamesPerChunk*CHUNKS)
    print("chunks:", CHUNKS)
    print("gamePerChunk:", gamesPerChunk)
    print("gamesettings:", len(gs))
    # Make sure the target tables exist before any worker produces rows.
    createTabels(db_dir, db_filename)
    with mp.Pool(PROCESSES) as pool:
        chunksFinished = 0
        for i,sub_gs in enumerate(gs):
            print("for gs {}/{}".format(i+1, len(gs)))
            sub_results =[]
            for x in tqdm.tqdm(pool.imap_unordered(runGameDB, [sub_gs for _ in range(n)], gamesPerChunk), total=n, unit="games"):
                sub_results.append(x)
            chunksFinished += 1
            store_data_2_db(
                {"gs": sub_gs, "history": sub_results}, db_dir, db_filename)
        # h = []
        # for h_sub in results:
        #     h.extend(h_sub)
    return chunksFinished
def multirunDBNoStoneHistory(n: int,processes:int, gamesPerChunk:int, gs: List[GameSettings],db_dir:str,db_filename:str):
    """Like :func:`multirunDB` but skips per-stone history recording.

    Games run via :func:`runGameDBNoStoneHistory`, which constructs the game
    with history tracking disabled. Returns the number of settings entries
    processed.
    """
    if processes == -1 or processes >= mp.cpu_count():
        PROCESSES = mp.cpu_count()
    else:
        PROCESSES = processes
    CHUNKS = n//gamesPerChunk
    # print("processes:",PROCESSES)
    # print("total Games:", gamesPerChunk*CHUNKS)
    print("chunks:", CHUNKS)
    print("gamePerChunk:", gamesPerChunk)
    print("gamesettings:", len(gs))
    createTabels(db_dir, db_filename)
    with mp.Pool(PROCESSES) as pool:
        chunksFinished = 0
        for i,sub_gs in enumerate(gs):
            # print("for gs {}/{}".format(i+1, len(gs)))
            sub_results =[]
            for x in tqdm.tqdm(pool.imap_unordered(runGameDBNoStoneHistory, [sub_gs for _ in range(n)], gamesPerChunk), total=n, unit="games", desc="run for gs {}/{}".format(i+1, len(gs))):
                sub_results.append(x)
            chunksFinished += 1
            store_data_2_db(
                {"gs": sub_gs, "history": sub_results}, db_dir, db_filename)
        # h = []
        # for h_sub in results:
        #     h.extend(h_sub)
    return chunksFinished
def multirunDBSearchforFastest(n: int, processes: int, gamesPerChunk: int, gs: List[Tuple[GameSettings, int]], db_dir: str, db_filename: str, updateFastesNtimes: int = 0):
    """Search for the shortest possible game per settings entry.

    Each ``gs`` item is a ``(settings, start_fastest)`` pair. The run is split
    into ``updateFastesNtimes`` rounds; after each round the current shortest
    game length is used as the turn cap for the next round, progressively
    pruning longer games. Results are stored to SQLite after every round.
    Returns the number of rounds completed across all settings entries.
    """
    if processes == -1 or processes >= mp.cpu_count():
        PROCESSES = mp.cpu_count()
    else:
        PROCESSES = processes
    CHUNKS = n//gamesPerChunk
    # print("processes:",PROCESSES)
    # print("total Games:", gamesPerChunk*CHUNKS)
    print("processes:", PROCESSES)
    print("updateFastesNtimes:", updateFastesNtimes)
    print("chunks:", CHUNKS)
    print("gamePerChunk:", gamesPerChunk)
    print("gamesettings:", len(gs))
    createTabels(db_dir, db_filename)
    with mp.Pool(PROCESSES) as pool:
        chunksFinished = 0
        for i,gs_ in enumerate(gs):
            sub_gs,start_fastest = gs_
            sub_results =[]
            current_fastest = start_fastest
            for j in range(updateFastesNtimes):
                print("update {}/{} for gs {}/{}".format(j+1,updateFastesNtimes, i+1, len(gs)))
                print("current_fastest: {}".format(current_fastest))
                # NOTE(review): the comprehension variable ``i`` shadows the
                # enumerate index ``i`` of the outer loop — harmless here but
                # confusing; worth renaming.
                for x,new_fastest in tqdm.tqdm(pool.imap_unordered(runGameDBfastest, [(sub_gs,current_fastest) for i in range(n//updateFastesNtimes+1)], gamesPerChunk), total=n//updateFastesNtimes+1, unit="games"):
                    sub_results.append(x)
                    if new_fastest < current_fastest:
                        current_fastest = new_fastest
                chunksFinished += 1
                store_data_2_db(
                    {"gs": sub_gs, "history": sub_results}, db_dir, db_filename)
        # h = []
        # for h_sub in results:
        #     h.extend(h_sub)
    return chunksFinished
| 34.291209 | 214 | 0.620253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,183 | 0.189553 |
df0c4609dc3d7030697a254872dda6cfb67438db | 641 | py | Python | setup.py | Wirtos/aioregex | b426fb99769d54d627e189bd058c4b724559fb80 | [
"MIT"
] | null | null | null | setup.py | Wirtos/aioregex | b426fb99769d54d627e189bd058c4b724559fb80 | [
"MIT"
] | null | null | null | setup.py | Wirtos/aioregex | b426fb99769d54d627e189bd058c4b724559fb80 | [
"MIT"
] | null | null | null | import setuptools
# Package metadata for the aioregex distribution.
setuptools.setup(
    name="aioregex",
    version="0.1",
    author="Wirtos_new",
    author_email="Wirtos.new@gmail.com",
    description="regex to allow both sync and async callables in the sub as repl",
    url="https://wirtos.github.io/aioregex/",
    packages=setuptools.find_packages(),
    project_urls={
        "Source Code": "https://github.com/Wirtos/aioregex",
    },
    install_requires=[],  # pure stdlib — no runtime dependencies
    keywords="regex re asyncio aioregex",
    classifiers=[
        # NOTE(review): ">=3.5" is not a valid trove classifier — PyPI only
        # accepts exact entries such as "Programming Language :: Python :: 3.5".
        # Verify against the official classifier list before uploading.
        "Programming Language :: Python :: >=3.5",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| 29.136364 | 82 | 0.639626 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 343 | 0.535101 |
df0edbdfa3781bf2e4a9e3ef7d2addd8064bef5a | 830 | py | Python | scripts/pyqtgraph-develop/pyqtgraph/pixmaps/__init__.py | kuldeepaman/tf-pose | 8050912c52a7b4f3c8a2656f267d47ba21d093f6 | [
"Apache-2.0"
] | null | null | null | scripts/pyqtgraph-develop/pyqtgraph/pixmaps/__init__.py | kuldeepaman/tf-pose | 8050912c52a7b4f3c8a2656f267d47ba21d093f6 | [
"Apache-2.0"
] | null | null | null | scripts/pyqtgraph-develop/pyqtgraph/pixmaps/__init__.py | kuldeepaman/tf-pose | 8050912c52a7b4f3c8a2656f267d47ba21d093f6 | [
"Apache-2.0"
] | null | null | null | """
Allows easy loading of pixmaps used in UI elements.
Provides support for frozen environments as well.
"""
import os, sys, pickle
from ..functions import makeQImage
from ..Qt import QtGui
from ..python2_3 import basestring
if sys.version_info[0] == 2:
from . import pixmapData_2 as pixmapData
else:
from . import pixmapData_3 as pixmapData
def getPixmap(name):
    """
    Return a QPixmap corresponding to the image file with the given name.
    (eg. getPixmap('auto') loads pyqtgraph/pixmaps/auto.png)
    """
    key = name + '.png'
    # Entries start life as pickled bytes; unpickle lazily on first access
    # and cache the decoded array back into the table for later calls.
    # (The pickled data ships with the package, so it is trusted input.)
    entry = pixmapData.pixmapData[key]
    if isinstance(entry, basestring) or isinstance(entry, bytes):
        pixmapData.pixmapData[key] = pickle.loads(entry)
    arr = pixmapData.pixmapData[key]
    return QtGui.QPixmap(makeQImage(arr, alpha=True))
| 29.642857 | 74 | 0.687952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 268 | 0.322892 |
df0f2509e6da533aefb1f279bb6e3f848aa2f366 | 7,920 | py | Python | defx/metrics/f1_measure.py | DFKI-NLP/defx | 0c8b7a5ff4904a54e28f187b938198744e4a450a | [
"MIT"
] | 5 | 2020-04-29T07:41:58.000Z | 2021-12-09T10:07:08.000Z | defx/metrics/f1_measure.py | DFKI-NLP/defx | 0c8b7a5ff4904a54e28f187b938198744e4a450a | [
"MIT"
] | 1 | 2020-10-17T16:51:06.000Z | 2020-10-27T13:44:35.000Z | defx/metrics/f1_measure.py | DFKI-NLP/defx | 0c8b7a5ff4904a54e28f187b938198744e4a450a | [
"MIT"
] | null | null | null | from statistics import mean
from typing import Dict, List, Optional, Set
from collections import defaultdict
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.nn.util import get_lengths_from_binary_sequence_mask
from allennlp.data.vocabulary import Vocabulary
from allennlp.training.metrics.metric import Metric
from defx.util.index_to_relation_and_type_mapping import map_index_to_relation_head_and_type
@Metric.register("f1-measure")
class F1Measure(Metric):
    """
    Computes macro-averaged F1 score for relation classification on a token level.

    Predictions and gold labels encode a (head, relation-type) pair per token;
    a prediction counts as a true positive only if token index, head and type
    all match a gold tuple.
    """
    def __init__(self,
                 vocabulary: Vocabulary,
                 negative_label: str,
                 label_namespace: str = "labels",
                 evaluated_labels: List[str] = None) -> None:
        self._label_vocabulary = vocabulary.get_index_to_token_vocabulary(label_namespace)
        # By default evaluate every label except the negative (no-relation) one.
        if evaluated_labels is None:
            self.evaluated_labels = list(self._label_vocabulary.values())
            self.evaluated_labels.remove(negative_label)
        else:
            self.evaluated_labels = evaluated_labels
        # Per-label counters, keyed by relation type.
        self._true_positives: Dict[str, int] = defaultdict(int)
        self._false_positives: Dict[str, int] = defaultdict(int)
        self._false_negatives: Dict[str, int] = defaultdict(int)
        self._support: Dict[str, int] = defaultdict(int)
        assert self._label_vocabulary[0] == negative_label, 'Negative label should have index 0'
        self._negative_label_idx = 0

    def __call__(self,
                 predictions: torch.Tensor,
                 gold_labels: torch.Tensor,
                 mask: Optional[torch.Tensor] = None):
        """
        Parameters
        ----------
        predictions : ``torch.Tensor``, required.
            A tensor of predictions of shape (batch_size, sequence_length, num_classes).
        gold_labels : ``torch.Tensor``, required.
            A tensor of integer class label of shape (batch_size, sequence_length).
            It must be the same shape as the ``predictions`` tensor without the
            ``num_classes`` dimension.
        mask: ``torch.Tensor``, optional (default = None).
            A masking tensor the same size as ``gold_labels``.
        """
        if mask is None:
            mask = torch.ones_like(gold_labels)
        predictions, gold_labels, mask = self.unwrap_to_tensors(predictions,
                                                                gold_labels,
                                                                mask)
        num_classes = predictions.size(-1)
        if (gold_labels >= num_classes).any():
            raise ConfigurationError("A gold label passed to SpanBasedF1Measure contains an "
                                     "id >= {}, the number of classes.".format(num_classes))
        sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
        argmax_predictions = predictions.max(-1)[1]
        # Iterate over timesteps in batch.
        batch_size = gold_labels.size(0)
        for i in range(batch_size):
            sequence_prediction = argmax_predictions[i, :]
            sequence_gold_label = gold_labels[i, :]
            length = sequence_lengths[i]
            if length == 0:
                # It is possible to call this metric with sequences which are
                # completely padded. These contribute nothing, so we skip these rows.
                continue
            # Decode each token's combined index into a (head, type) pair and
            # keep only tuples whose type is under evaluation.
            predicted_tuples = []
            gold_tuples = []
            for token_idx in range(length):
                pred_head_and_type_idx = sequence_prediction[token_idx].item()
                pred_head_and_type = map_index_to_relation_head_and_type(
                    label_vocab=self._label_vocabulary,
                    head_and_type_idx=pred_head_and_type_idx
                )
                pred_head, pred_label = pred_head_and_type
                if pred_label in self.evaluated_labels:
                    predicted_tuples.append(
                        (token_idx, pred_head, pred_label)
                    )
                gold_head_and_type_idx = sequence_gold_label[token_idx].item()
                gold_head_and_type = map_index_to_relation_head_and_type(
                    self._label_vocabulary,
                    gold_head_and_type_idx
                )
                gold_head, gold_label = gold_head_and_type
                if gold_label in self.evaluated_labels:
                    gold_tuples.append(
                        (token_idx, gold_head, gold_label)
                    )
                    self._support[gold_label] += 1
            # Match predictions against gold; each gold tuple can be consumed
            # at most once (hence the remove()).
            for idx_head_and_type in predicted_tuples:
                relation_type = idx_head_and_type[2]
                if idx_head_and_type in gold_tuples:
                    self._true_positives[relation_type] += 1
                    gold_tuples.remove(idx_head_and_type)
                else:
                    self._false_positives[relation_type] += 1
            # These tokens weren't predicted.
            for idx_head_and_type in gold_tuples:
                self._false_negatives[idx_head_and_type[2]] += 1

    def get_metric(self, reset: bool = False):
        """
        Returns
        -------
        A Dict per label containing following the span based metrics:
        precision : float
        recall : float
        f1-measure : float
        Additionally, an ``overall`` key is included, which provides the
        macro-averaged precision, recall and f1-measure over the evaluated labels.
        """
        all_tags: Set[str] = set()
        all_tags.update(self._true_positives.keys())
        all_tags.update(self._false_positives.keys())
        all_tags.update(self._false_negatives.keys())
        all_metrics = {}
        overall_precision_values = []
        overall_recall_values = []
        overall_f1_values = []
        for tag in all_tags:
            precision, recall, f1_measure = self._compute_metrics(self._true_positives[tag],
                                                                  self._false_positives[tag],
                                                                  self._false_negatives[tag])
            precision_key = "precision" + "-" + tag
            recall_key = "recall" + "-" + tag
            f1_key = "f1-measure" + "-" + tag
            support_key = "support" + "-" + tag
            all_metrics[precision_key] = precision
            all_metrics[recall_key] = recall
            all_metrics[f1_key] = f1_measure
            all_metrics[support_key] = self._support[tag]
            if tag in self.evaluated_labels:
                overall_precision_values.append(precision)
                overall_recall_values.append(recall)
                overall_f1_values.append(f1_measure)
        # If no samples are given, simply return 0
        if len(overall_precision_values) < 1:
            all_metrics["precision-overall"] = 0
            all_metrics["recall-overall"] = 0
            all_metrics["f1-measure-overall"] = 0
        else:
            all_metrics["precision-overall"] = mean(overall_precision_values)
            all_metrics["recall-overall"] = mean(overall_recall_values)
            all_metrics["f1-measure-overall"] = mean(overall_f1_values)
        if reset:
            self.reset()
        return all_metrics

    @staticmethod
    def _compute_metrics(true_positives: int, false_positives: int, false_negatives: int):
        # The 1e-13 epsilon avoids division by zero for labels with no counts.
        precision = float(true_positives) / float(true_positives + false_positives + 1e-13)
        recall = float(true_positives) / float(true_positives + false_negatives + 1e-13)
        f1_measure = 2. * ((precision * recall) / (precision + recall + 1e-13))
        return precision, recall, f1_measure

    def reset(self):
        """Clear all accumulated counts."""
        self._true_positives = defaultdict(int)
        self._false_positives = defaultdict(int)
        self._false_negatives = defaultdict(int)
        # Bug fix: support was previously not cleared, so "support-<tag>"
        # values kept accumulating across epochs after reset=True.
        self._support = defaultdict(int)
| 42.580645 | 96 | 0.597348 | 7,447 | 0.940278 | 0 | 0 | 7,478 | 0.944192 | 0 | 0 | 1,531 | 0.193308 |
df102fd4bc161dbff752d14a5d6d5415a2686808 | 78 | py | Python | test/test.py | hcamacho4200/dev_opts_training | 6ce91cbeb30af7eae29c084f6180d53f64f5e9b0 | [
"Apache-2.0"
] | 1 | 2021-10-03T22:23:06.000Z | 2021-10-03T22:23:06.000Z | test/test.py | hcamacho4200/dev_opts_training | 6ce91cbeb30af7eae29c084f6180d53f64f5e9b0 | [
"Apache-2.0"
] | null | null | null | test/test.py | hcamacho4200/dev_opts_training | 6ce91cbeb30af7eae29c084f6180d53f64f5e9b0 | [
"Apache-2.0"
] | 1 | 2021-12-11T19:24:59.000Z | 2021-12-11T19:24:59.000Z | def test_test():
"""A generic test
:return:
"""
assert True
| 9.75 | 21 | 0.512821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.5 |
df10a23870978fcf9c8629fcbb01b511ca18a8b5 | 833 | py | Python | create.py | felipesantoos/mython | ef4e91a1177ecdd3a6ecea5cd6a6605a084ea5a6 | [
"MIT"
] | null | null | null | create.py | felipesantoos/mython | ef4e91a1177ecdd3a6ecea5cd6a6605a084ea5a6 | [
"MIT"
] | null | null | null | create.py | felipesantoos/mython | ef4e91a1177ecdd3a6ecea5cd6a6605a084ea5a6 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
# Importação das bibliotecas necessárias.
import mysql.connector
import datetime
# Connection configuration.
connection = mysql.connector.connect(
    host="localhost",
    user="root",
    password="",
    database="teste"
)
# Cursor that will execute the operations.
cursor = connection.cursor()
# Parameterized statement and its data (placeholders prevent SQL injection).
sql = "INSERT INTO users (name, email, created) VALUES (%s, %s, %s)"
data = (
    "Primeiro usuário",
    "primeirousuario@teste.com",
    datetime.datetime.today()
)
# Execute and commit the statement.
cursor.execute(sql, data)
connection.commit()
# Fetch the auto-generated ID of the user that was just inserted.
userid = cursor.lastrowid
# Close the cursor and the connection.
cursor.close()
connection.close()
# Display the last ID inserted into the database.
print("Foi cadastrado o nome usuário de ID: ", userid)
| 21.358974 | 68 | 0.711885 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 499 | 0.582264 |
df114fd76f5bb86b26cb7eeb35dd8decd3116f90 | 10,814 | py | Python | pydm/tests/widgets/test_enum_combo_box.py | KurtJacobson/pydm | 5a5cbc6cdb218b77335a3c4ad57a4a49e060e20d | [
"BSD-3-Clause-LBNL"
] | 89 | 2016-05-25T22:01:48.000Z | 2022-03-02T16:21:03.000Z | pydm/tests/widgets/test_enum_combo_box.py | KurtJacobson/pydm | 5a5cbc6cdb218b77335a3c4ad57a4a49e060e20d | [
"BSD-3-Clause-LBNL"
] | 653 | 2016-10-15T01:45:02.000Z | 2022-03-31T23:36:39.000Z | pydm/tests/widgets/test_enum_combo_box.py | KurtJacobson/pydm | 5a5cbc6cdb218b77335a3c4ad57a4a49e060e20d | [
"BSD-3-Clause-LBNL"
] | 56 | 2016-10-10T14:02:06.000Z | 2022-03-24T14:35:24.000Z | # Unit Tests for the Enum Combo Box
import pytest
from logging import ERROR
from qtpy.QtCore import Slot, Qt
from ...widgets.enum_combo_box import PyDMEnumComboBox
from ... import data_plugins
# --------------------
# POSITIVE TEST CASES
# --------------------
def test_construct(qtbot):
    """
    Verify that a freshly constructed widget has the expected defaults.

    Expectations:
    All the default values are properly set.

    Parameters
    ----------
    qtbot : fixture
        pytest-qt window for widget test
    """
    widget = PyDMEnumComboBox()
    qtbot.addWidget(widget)

    assert widget._has_enums is False
    assert widget.contextMenuPolicy() == Qt.DefaultContextMenu
    assert widget.contextMenuEvent == widget.open_context_menu
@pytest.mark.parametrize("enums", [
    ("spam", "eggs", "ham"),
    ("spam",),
    ("",),
])
def test_set_items(qtbot, enums):
    """
    Verify that set_items() populates every enum string as a combo box choice
    and raises the _has_enums flag exactly when the enum list is non-empty.

    Parameters
    ----------
    qtbot : fixture
        pytest-qt window for widget test
    enums : tuple
        A list of enum strings to be populated as choices to the widget.
    """
    widget = PyDMEnumComboBox()
    qtbot.addWidget(widget)
    assert widget.count() == 0

    widget.set_items(enums)

    assert widget.count() == len(enums)
    assert all(widget.itemText(i) == text for i, text in enumerate(enums))
    if len(enums):
        assert widget._has_enums is True
    else:
        assert widget._has_enums is False
@pytest.mark.parametrize("connected, write_access, has_enum, is_app_read_only", [
    (True, True, True, True),
    (True, True, True, False),
    (True, True, False, True),
    (True, True, False, False),
    (True, False, False, True),
    (True, False, False, False),
    (True, False, True, True),
    (True, False, True, False),
    (False, True, True, True),
    (False, True, True, False),
    (False, False, True, True),
    (False, False, True, False),
    (False, True, False, True),
    (False, True, False, False),
    (False, False, False, True),
    (False, False, False, False),
])
def test_check_enable_state(qtbot, signals, connected, write_access, has_enum, is_app_read_only):
    """
    Test the tooltip generated depending on the channel connection, write access, whether the widget has enum strings,
    and whether the app is read-only.
    Expectations:
    1. If the data channel is disconnected, the widget's tooltip will display "PV is disconnected"
    2. If the data channel is connected, but it has no write access:
        a. If the app is read-only, the tooltip will read "Running PyDM on Read-Only mode."
        b. If the app is not read-only, the tooltip will read "Access denied by Channel Access Security."
    3. If the widget does not have any enum strings, the tooltip will display "Enums not available".
    Parameters
    ----------
    qtbot : fixture
        Window for widget testing
    signals : fixture
        The signals fixture, which provides access signals to be bound to the appropriate slots
    connected : bool
        True if the channel is connected; False otherwise
    write_access : bool
        True if the widget has write access to the channel; False otherwise
    has_enum: bool
        True if the widget has enum strings populated; False if the widget contains no enum strings (empty of choices)
    is_app_read_only : bool
        True if the PyDM app is read-only; False otherwise
    """
    pydm_enumcombobox = PyDMEnumComboBox()
    qtbot.addWidget(pydm_enumcombobox)

    # Drive the widget state through its slots, mirroring real channel events.
    signals.write_access_signal[bool].connect(pydm_enumcombobox.writeAccessChanged)
    signals.write_access_signal[bool].emit(write_access)

    signals.connection_state_signal[bool].connect(pydm_enumcombobox.connectionStateChanged)
    signals.connection_state_signal[bool].emit(connected)

    if has_enum:
        signals.enum_strings_signal[tuple].connect(pydm_enumcombobox.enumStringsChanged)
        signals.enum_strings_signal[tuple].emit(("START", "STOP", "PAUSE"))
        assert pydm_enumcombobox._has_enums

    data_plugins.set_read_only(is_app_read_only)

    # The tooltip text appended by check_enable_state() is what is asserted on.
    original_tooltip = "Original Tooltip"
    pydm_enumcombobox.setToolTip(original_tooltip)
    pydm_enumcombobox.check_enable_state()

    actual_tooltip = pydm_enumcombobox.toolTip()
    if not pydm_enumcombobox._connected:
        assert "PV is disconnected." in actual_tooltip
    elif not write_access:
        if data_plugins.is_read_only():
            assert "Running PyDM on Read-Only mode." in actual_tooltip
        else:
            assert "Access denied by Channel Access Security." in actual_tooltip
    elif not pydm_enumcombobox._has_enums:
        assert "Enums not available" in actual_tooltip
@pytest.mark.parametrize("values, selected_index, expected", [
    (("RUN", "STOP"), 0, "RUN"),
    (("RUN", "STOP"), 1, "STOP"),
    (("RUN", "STOP"), "RUN", "RUN"),
    (("RUN", "STOP"), "STOP", "STOP"),
])
def test_enum_strings_changed(qtbot, signals, values, selected_index, expected):
    """
    Test the widget's handling of enum strings, which are choices presented to the user, and the widget's ability to
    update the selected enum string when the user provides a choice index.
    This test will also cover value_changed() testing.
    Expectations:
    The widget displays the correct enum string whose index from the enum string tuple is selected by the user.
    Parameters
    ----------
    qtbot : fixture
        pytest-qt window for widget testing
    signals : fixture
        The signals fixture, which provides access signals to be bound to the appropriate slots
    values : tuple
        A set of enum strings for the user to choose from
    selected_index : int or str
        The index (or enum string) from the enum string tuple chosen by the user
    expected : str
        The expected enum string displayed by the widget after receiving the user's choice index
    """
    pydm_enumcombobox = PyDMEnumComboBox()
    qtbot.addWidget(pydm_enumcombobox)

    signals.enum_strings_signal.connect(pydm_enumcombobox.enumStringsChanged)
    signals.enum_strings_signal.emit(values)

    # The new-value slot is parameterized by the value's type, so int and str
    # selections exercise different signal overloads.
    signals.new_value_signal[type(selected_index)].connect(pydm_enumcombobox.channelValueChanged)
    signals.new_value_signal[type(selected_index)].emit(selected_index)

    assert pydm_enumcombobox.value == selected_index
    assert pydm_enumcombobox.currentText() == expected
@pytest.mark.parametrize("index", [
    0,
    1,
    -1,
])
def test_internal_combo_box_activated_int(qtbot, signals, index):
    """
    Verify that activating the combo box sends the chosen enum string index
    out through send_value_signal.

    Parameters
    ----------
    qtbot : fixture
        pytest-qt window for widget test
    signals : fixture
        The signals fixture, which provides access signals to be bound to the appropriate slots
    index : int
        The new enum string index value
    """
    widget = PyDMEnumComboBox()
    qtbot.addWidget(widget)

    # Route send_value_signal into the conftest's receiveValue slot so the
    # emitted index can be intercepted and inspected.
    widget.send_value_signal[int].connect(signals.receiveValue)
    widget.activated[int].emit(index)

    assert signals.value == index
# --------------------
# NEGATIVE TEST CASES
# --------------------
@pytest.mark.parametrize("enums, expected_error_message", [
    (None, "Invalid enum value '{0}'. The value is expected to be a valid list of string values.".format(None)),
    ((None, "abc"), "Invalid enum type '{0}'. The expected type is 'string'.".format(type(None))),
    ((None, 123.456), "Invalid enum type '{0}'. The expected type is 'string'".format(type(None))),
    ((None, None, None), "Invalid enum type '{0}'. The expected type is 'string'".format(type(None))),
    ((123,), "Invalid enum type '{0}'. The expected type is 'string'".format(type(123))),
    ((123.45,), "Invalid enum type '{0}'. The expected type is 'string'".format(type(123.45))),
    ((123, 456), "Invalid enum type '{0}'. The expected type is 'string'".format(type(123))),
    ((123.456, None), "Invalid enum type '{0}'. The expected type is 'string'".format(type(123.456))),
    (("spam", 123, "eggs", "ham"), "Invalid enum type '{0}'. The expected type is 'string'".format(type(123))),
])
def test_set_items_neg(qtbot, caplog, enums, expected_error_message):
    """
    Test sending setting the widget with an undefined list of enum strings.
    Expectations:
    The correct error message is logged.
    Parameters
    ----------
    qtbot : fixture
        pytest-qt window for widget test
    caplog : fixture
        To capture the log messages
    enums : tuple
        A list of strings as enum strings (choices) to populate to the widget.
    expected_error_message : str
        The error text set_items() is expected to log for this input.
    """
    pydm_enumcombobox = PyDMEnumComboBox()
    qtbot.addWidget(pydm_enumcombobox)

    pydm_enumcombobox.set_items(enums)

    # Invalid input must be rejected via ERROR-level log records, not raised.
    for record in caplog.records:
        assert record.levelno == ERROR
    assert expected_error_message in caplog.text
@pytest.mark.parametrize("values, selected_index, expected", [
    (("ON", "OFF"), 3, ""),
    (("ON", "OFF"), -1, ""),
])
def test_enum_strings_changed_incorrect_index(qtbot, signals, values, selected_index, expected):
    """
    Test the widget's handling of incorrectly provided enum string index.
    Expectations:
    The widget will display an empty string.
    Parameters
    ----------
    qtbot : fixture
        pytest-qt window for widget testing
    signals : fixture
        The signals fixture, which provides access signals to be bound to the appropriate slots
    values : tuple
        A set of enum strings for the user to choose from
    selected_index : int
        The incorrect (out-of-bound) index from the enum string tuple chosen by the user
    expected : str
        The expected text displayed by the widget to notify the user of the incorrect choice index
    """
    pydm_enumcombobox = PyDMEnumComboBox()
    qtbot.addWidget(pydm_enumcombobox)

    signals.enum_strings_signal.connect(pydm_enumcombobox.enumStringsChanged)
    signals.enum_strings_signal.emit(values)

    # Out-of-range indices are stored as the value but map to no choice text.
    signals.new_value_signal[type(selected_index)].connect(pydm_enumcombobox.channelValueChanged)
    signals.new_value_signal[type(selected_index)].emit(selected_index)

    assert pydm_enumcombobox.value == selected_index
    assert pydm_enumcombobox.currentText() == expected
| 35.92691 | 118 | 0.69447 | 0 | 0 | 0 | 0 | 9,929 | 0.918162 | 0 | 0 | 5,623 | 0.519974 |
df122415da47ec1ae7bcf4c6c1f60a16cd13570e | 7,421 | py | Python | byol_train.py | jhvics1/pytorch-byol | e66a5f72a7d124578109fcee43062eb88b0fabf1 | [
"MIT"
] | null | null | null | byol_train.py | jhvics1/pytorch-byol | e66a5f72a7d124578109fcee43062eb88b0fabf1 | [
"MIT"
] | null | null | null | byol_train.py | jhvics1/pytorch-byol | e66a5f72a7d124578109fcee43062eb88b0fabf1 | [
"MIT"
] | null | null | null | import os
import random
import argparse
import multiprocessing
import numpy as np
import torch
from torchvision import models, transforms
from torch.utils.data import DataLoader, Dataset
from pathlib import Path
from PIL import Image
from utils import Bar, config, mkdir_p, AverageMeter
from datetime import datetime
from tensorboardX import SummaryWriter
from byol_pytorch import BYOL
# arguments
parser = argparse.ArgumentParser(description='byol-lightning-test')
# Architecture & hyper-parameter
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet',
                    help='model architecture: | [resnet, ...] (default: resnet18)')
parser.add_argument('--depth', type=int, default=18, help='Model depth.')
parser.add_argument('-c', '--checkpoint', default='../checkpoints', type=str, metavar='PATH',
                    help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--epoch', type=int, default=100, help='Epoch')
# NOTE(review): the help text below says 'Epoch' — copy/paste error; it is
# the mini-batch size.
parser.add_argument('--batch-size', type=int, default=32, help='Epoch')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate')
# Device options
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('--gpu-id', default='0', type=str,
                    help='id(s) for CUDA_VISIBLE_DEVICES')
# Paths
parser.add_argument('-d', '--dataset', default='neu', type=str)
parser.add_argument('--image_folder', type=str, required=True,
                    help='path to your folder of images for self-supervised learning')
parser.add_argument('--board-path', '--bp', default='../board', type=str,
                    help='tensorboardx path')
parser.add_argument('--board-tag', '--tg', default='byol', type=str,
                    help='tensorboardx writer tag')
args = parser.parse_args()

# Use CUDA — must be set before any CUDA context is created.
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
use_cuda = torch.cuda.is_available()
# Torch Seed
# Random seed
if args.manualSeed is None:
    args.manualSeed = random.randint(1, 10000)
# Random Lib Seed
random.seed(args.manualSeed)
# Numpy Seed
np.random.seed(args.manualSeed)
if use_cuda:
    torch.cuda.manual_seed_all(args.manualSeed)
# NOTE(review): torch.manual_seed() (CPU RNG) is never seeded here, so CPU-side
# torch randomness is not reproducible — confirm whether that is intended.

# constants
args.image_size = 256
NUM_GPUS = 1
IMAGE_EXTS = ['.jpg', '.png', '.jpeg', '.bmp']  # extensions ImagesDataset accepts
NUM_WORKERS = multiprocessing.cpu_count()
# task_time = datetime.now().isoformat()
# args.checkpoint = os.path.join(args.checkpoint, args.dataset, "{}-{}{:d}-bs{:d}-lr{:.5f}-{}".format(args.arch,
# args.depth,
# args.batch_size,
# args.lr,
# args.board_tag),
# task_time)
# if not os.path.isdir(args.checkpoint):
# mkdir_p(args.checkpoint)
# config.save_config(args, os.path.join(args.checkpoint, "config.txt"))
#
# writer_train = SummaryWriter(
# log_dir=os.path.join(args.board_path, args.dataset, "{}-{}{:d}-bs{:d}-lr{:.5f}-{}".format(args.arch,
# args.depth,
# args.batch_size,
# args.lr,
# args.board_tag),
# task_time, "train"))
# Build a unique run directory name from the hyper-parameters plus a timestamp,
# used both for checkpoints and the TensorBoard log dir.
args.task_time = datetime.now().isoformat()
output_name = "{}{:d}-bs{:d}-lr{:.5f}-{}".format(args.arch,
                                                 args.depth,
                                                 args.batch_size,
                                                 args.lr,
                                                 args.board_tag)
args.checkpoint = os.path.join(args.checkpoint, args.dataset, output_name, args.task_time)
if not os.path.isdir(args.checkpoint):
    mkdir_p(args.checkpoint)
# Persist the full configuration next to the checkpoints for reproducibility.
config.save_config(args, os.path.join(args.checkpoint, "config.txt"))

writer_train = SummaryWriter(
    log_dir=os.path.join(args.board_path, args.dataset, output_name, args.task_time))

# ImageNet normalization constants; currently unused in the dataset transform
# (the `# normalize` line there is commented out).
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
def expand_greyscale(t):
    """Broadcast a single-channel image tensor to three identical channels."""
    return t.expand(3, -1, -1)
class ImagesDataset(Dataset):
    """Recursive folder dataset for self-supervised training.

    Collects every file under ``folder`` whose extension is in ``IMAGE_EXTS``
    and returns augmented RGB tensors.

    NOTE(review): the ``image_size`` parameter is accepted but unused — the
    transform reads the module-level ``args.image_size`` instead; confirm
    whether the parameter should be honored.
    """
    def __init__(self, folder, image_size):
        super().__init__()
        self.folder = folder
        self.paths = []

        for path in Path(f'{folder}').glob('**/*'):
            _, ext = os.path.splitext(path)
            if ext.lower() in IMAGE_EXTS:
                self.paths.append(path)

        print(f'{len(self.paths)} images found')

        self.transform = transforms.Compose([
            transforms.Resize(args.image_size),
            # RandomSizedCrop is a long-deprecated alias; RandomResizedCrop is
            # the drop-in replacement with identical behavior.
            transforms.RandomResizedCrop((args.image_size, args.image_size)),
            transforms.ColorJitter(0.4, 0.4, 0.4),
            transforms.ToTensor(),
            # normalize
        ])

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, index):
        # Force RGB so greyscale/paletted images still yield 3-channel tensors.
        path = self.paths[index]
        img = Image.open(path)
        img = img.convert('RGB')
        return self.transform(img)
# Instantiate the backbone from the CLI arguments.
# Bug fix: ``args.arch is "resnet"`` compared identity, not equality — string
# identity with a literal is an implementation detail and unreliable.
if args.arch == "resnet":
    if args.depth == 18:
        model = models.resnet18(pretrained=False).cuda()
    elif args.depth == 34:
        model = models.resnet34(pretrained=False).cuda()
    elif args.depth == 50:
        model = models.resnet50(pretrained=False).cuda()
    elif args.depth == 101:
        model = models.resnet101(pretrained=False).cuda()
    else:
        # Bug fix: ``assert ("Not supported Depth")`` could never fail — a
        # non-empty string is always truthy. Fail loudly instead.
        raise ValueError("Not supported depth: {}".format(args.depth))
# BYOL wrapper around the backbone; taps the 'avgpool' layer as the embedding.
# use_momentum=False turns off the EMA target encoder (SimSiam-style variant),
# so no update_moving_average() call is needed in the training loop.
learner = BYOL(
    model,
    image_size=args.image_size,
    hidden_layer='avgpool',
    projection_size=256,
    projection_hidden_size=4096,
    moving_average_decay=0.99,
    use_momentum=False  # turn off momentum in the target encoder
)
opt = torch.optim.Adam(learner.parameters(), lr=args.lr)

ds = ImagesDataset(args.image_folder, args.image_size)
trainloader = DataLoader(ds, batch_size=args.batch_size, num_workers=NUM_WORKERS, shuffle=True)
losses = AverageMeter()
# Main self-supervised training loop: the BYOL wrapper returns the loss
# directly from a batch of images (no labels involved).
# NOTE(review): ``losses`` is never reset between epochs, so 'Avg.loss' is a
# running average over ALL batches seen so far, not a per-epoch average —
# confirm this is intended.
for epoch in range(args.epoch):
    bar = Bar('Processing', max=len(trainloader))
    for batch_idx, inputs in enumerate(trainloader):
        loss = learner(inputs.cuda())
        losses.update(loss.data.item(), inputs.size(0))
        opt.zero_grad()
        loss.backward()
        opt.step()

        # plot progress
        bar.suffix = 'Epoch {epoch} - ({batch}/{size}) | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} '.format(
            epoch=epoch,
            batch=batch_idx + 1,
            size=len(trainloader),
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=loss.item(),
        )
        # Global step index for TensorBoard so curves are continuous across epochs.
        n_iter = epoch * len(trainloader) + batch_idx + 1
        writer_train.add_scalar('Train/loss', loss.data.item(), n_iter)
        bar.next()
    writer_train.add_scalar('Avg.loss', losses.avg, epoch)
    bar.finish()

# save your improved network
torch.save(model.state_dict(), os.path.join(args.checkpoint, 'byol.pt'))
| 38.450777 | 118 | 0.566905 | 896 | 0.120738 | 0 | 0 | 0 | 0 | 0 | 0 | 2,527 | 0.34052 |
df144e0efba2d31bbcba42a1f4cb228efa1dd83b | 406 | py | Python | electrum/networks/auxpow_mixin.py | ZenyattaAbosom/AbosomElectrum | 02748b0b14e37385d6e77591d122e592740222bf | [
"MIT"
] | 4 | 2020-06-27T22:43:34.000Z | 2021-04-12T02:29:30.000Z | electrum/networks/auxpow_mixin.py | ZenyattaAbosom/AbosomElectrum | 02748b0b14e37385d6e77591d122e592740222bf | [
"MIT"
] | 21 | 2020-06-20T15:02:50.000Z | 2021-04-07T10:14:59.000Z | electrum/networks/auxpow_mixin.py | ZenyattaAbosom/AbosomElectrum | 02748b0b14e37385d6e77591d122e592740222bf | [
"MIT"
] | 13 | 2020-06-28T08:13:28.000Z | 2021-12-28T00:11:56.000Z | class AuxPowMixin(object):
AUXPOW_START_HEIGHT = 0
AUXPOW_CHAIN_ID = 0x0001
BLOCK_VERSION_AUXPOW_BIT = 0
@classmethod
def is_auxpow_active(cls, header) -> bool:
height_allows_auxpow = header['block_height'] >= cls.AUXPOW_START_HEIGHT
version_allows_auxpow = header['version'] & cls.BLOCK_VERSION_AUXPOW_BIT
return height_allows_auxpow and version_allows_auxpow | 40.6 | 80 | 0.743842 | 405 | 0.997537 | 0 | 0 | 283 | 0.697044 | 0 | 0 | 23 | 0.05665 |
df1524fa7728c1adb3ef0c319a786a66845f1e94 | 159 | py | Python | Problems/dicecup.py | rikgj/Kattis | 2e34dee307aef5acea5837732bf9f27f8c548e9c | [
"MIT"
] | null | null | null | Problems/dicecup.py | rikgj/Kattis | 2e34dee307aef5acea5837732bf9f27f8c548e9c | [
"MIT"
] | null | null | null | Problems/dicecup.py | rikgj/Kattis | 2e34dee307aef5acea5837732bf9f27f8c548e9c | [
"MIT"
] | null | null | null | from sys import stdin
# Kattis "dicecup": given the side counts of two dice, print every total
# that occurs with the highest probability, i.e. min(a, b) + 1 up to
# max(a, b) + 1 inclusive, one per line.
a, b = (int(x) for x in stdin.readline().split(' '))
low = min(a, b) + 1
high = max(a, b) + 1
for total in range(low, high + 1):
    print(total)
| 15.9 | 51 | 0.591195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.018868 |
df15ead6214feb74fc04cc18939e4d0ebbd4fa53 | 2,338 | py | Python | Castessoft_Python/Castessoft_Dahianna.py | JuanDiegoCastellanos/All-in-one-Python-Full | ba28a3d129a060d5a83c1d99ff97cf5c9d3d2afa | [
"MIT"
] | 1 | 2021-11-23T02:46:50.000Z | 2021-11-23T02:46:50.000Z | Castessoft_Python/Castessoft_Dahianna.py | JuanDiegoCastellanos/All-in-one-Python-Full | ba28a3d129a060d5a83c1d99ff97cf5c9d3d2afa | [
"MIT"
] | null | null | null | Castessoft_Python/Castessoft_Dahianna.py | JuanDiegoCastellanos/All-in-one-Python-Full | ba28a3d129a060d5a83c1d99ff97cf5c9d3d2afa | [
"MIT"
] | null | null | null | import os
class Cajero:
    """Console ATM simulation (prompts and messages are in Spanish).

    Constructing the object immediately asks for the PIN and runs the
    deposit/withdraw/balance menu until the user chooses to exit.
    """
    def __init__(self):
        # account starts unlocked with a fixed balance; menu() blocks here
        self.continuar = True
        self.monto = 5000
        self.menu()
    def contraseña(self):
        """Ask for the PIN (5467) up to three times; after the third
        failure lock all operations by clearing self.continuar."""
        contador = 1
        while contador <= 3:
            x = int(input("ingrese su contraseña:" ))
            if x == 5467:
                print("Contraseña Correcta")
                break
            else:
                print(f"Contraseña Incorrecta, le quedan {3 - contador} intentos")
            if contador == 3:
                # third wrong attempt: lock the session
                print("No puede realizar operaciones.")
                self.continuar = False
            contador+=1
    def menu(self):
        """Main loop: show the menu until option "4" (exit) is chosen.
        When the account is locked, options 1-3 are refused."""
        os.system("cls") # this only works on Windows
        self.contraseña()
        opcion = 0
        while opcion != "4":
            os.system("cls")
            print(""" Bienvenido al cajero automatico
            ***Menú***
            1- Depositar
            2- Retirar
            3- Ver saldo
            4- Salir """)
            opcion = input("Su opción es: ")
            if self.continuar:
                if opcion == "1" :
                    self.depositar()
                elif opcion == "2" :
                    self.retiro()
                elif opcion == "3":
                    self.ver()
                elif opcion == "4":
                    print("Programa finalizado")
                else:
                    print("NO existe esa opción")
            else:
                # locked account: only exiting is allowed
                if opcion in "123":
                    print("Imposible realizar esa operación")
                elif opcion == "4":
                    print("Programa finalizado")
                else:
                    print("No existe esa opción")
    def depositar(self):
        """Read an amount and add it to the balance."""
        dep = int(input("Ingrese su monto a depositar:"))
        print("Usted a depositado:",dep)
        print(f"Su nuevo saldo es {self.monto + dep}")
        self.monto+=dep
    def retiro(self):
        """Read an amount and subtract it when the balance covers it."""
        retirar=int(input("¿Cuánto desea retirar? : "))
        print("Su monto actual es", self.monto)
        if self.monto >= retirar :
            print(f"Usted a retirado: {retirar} , su nuevo monto es {self.monto - retirar}")
            self.monto-=retirar
        else:
            print("Imposible realizar el retiro, su monto es menor")
    def ver(self):
        """Print the current balance."""
        print("Su saldo es: " , self.monto)
app = Cajero() | 31.173333 | 92 | 0.47006 | 2,321 | 0.98766 | 0 | 0 | 0 | 0 | 0 | 0 | 773 | 0.328936 |
df16a9b6f456a271a02043344bd7fec5b2e043ce | 77,732 | py | Python | modules.py | faber6/kings-raid-daily | 07b5a6fc9a827dd7e536ff9643217393fb86c097 | [
"MIT"
] | null | null | null | modules.py | faber6/kings-raid-daily | 07b5a6fc9a827dd7e536ff9643217393fb86c097 | [
"MIT"
] | null | null | null | modules.py | faber6/kings-raid-daily | 07b5a6fc9a827dd7e536ff9643217393fb86c097 | [
"MIT"
] | null | null | null | from threading import Thread, enumerate
from random import choice
from time import sleep as slp
from time import time as tiime
from os import mkdir, getcwd, path as pth
from subprocess import run as run_
from math import ceil
from traceback import format_exc
from sys import exit
import logging, json, ctypes
from ppadb.client import Client
from PIL import Image, UnidentifiedImageError, ImageFile
from numpy import array
from imagehash import average_hash
from pytesseract import pytesseract
from pytesseract import image_to_string
from langdetect import detect, DetectorFactory
from fuzzywuzzy.process import extractOne
from difflib import SequenceMatcher
from cv2 import bilateralFilter
from requests import get
# make sure the local cache directory (screenshots + log file) exists;
# `not x` instead of the unidiomatic `x == False` comparison
if not pth.exists('./.cache'):
    mkdir('./.cache')
# log everything to ./.cache/log.log (appending, UTF-8)
logging.basicConfig(
    handlers=[logging.FileHandler("./.cache/log.log", "a", "utf-8")],
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    datefmt="%m/%d/%Y %I:%M:%S %p",
)
# tolerate partially-written screencap files instead of raising
ImageFile.LOAD_TRUNCATED_IMAGES = True
# bundled Tesseract OCR binary (relative to the working directory)
pytesseract.tesseract_cmd = ('./tesseract/tesseract.exe')
# Reference screenshots ("templates"), loaded once at import time.  Live
# screencaps are compared against these by average-hash similarity to
# recognize which popup/screen the game is currently showing.
update_notice_ = Image.open('./base/login/update_notice.png')
introduction_ = Image.open('./base/login/introduction.png')
tap_to_play_ = Image.open('./base/login/tap_to_play.png')
tap_to_play_2_ = Image.open('./base/login/tap_to_play_2.png')
community_ = Image.open('./base/login/community.png')
sale_ = Image.open('./base/login/sale.png')
attendance_ = Image.open('./base/login/attendance.png')
event_ = Image.open('./base/login/event.png')
guild_attendance_ = Image.open('./base/login/guild_attendance.png')
accumualated_ = Image.open('./base/login/accumualated.png')
sale_2_ = Image.open('./base/login/sale_2.png')
special_shop_ = Image.open('./base/login/special_shop.png')
home_screen_ = Image.open('./base/login/home_screen.png')
mb_ = Image.open('./base/login/mission_button.png')
loh_new_ = Image.open('./base/loh/loh_new.png')
kr_discord_ = Image.open('./base/login/kr_discord.png')
def crop(img, dimesions):
    """Cut margins off *img* and return the resulting image.

    *dimesions* is a 4-sequence (left, top, right-margin, bottom-margin);
    the right/bottom values are distances from the far edges, not absolute
    coordinates.  NOTE(review): the parameter keeps its original (typo'd)
    spelling so any keyword callers stay valid.
    """
    width, height = img.size
    left, top = dimesions[0], dimesions[1]
    box = (left, top, width - dimesions[2], height - dimesions[3])
    return img.crop(box)
def check_similar(img1, img2, cutoff, bonus):
    """Compare two PIL images by average-hash distance.

    Returns the string "similar" when the hash distance is strictly below
    cutoff + bonus, otherwise "not".
    """
    distance = average_hash(img1) - average_hash(img2)
    return "similar" if distance < cutoff + bonus else "not"
def filter(pil_image):
    """Denoise a PIL image for OCR: convert to an OpenCV-style BGR array
    and run an edge-preserving bilateral filter over it.

    NOTE(review): shadows the builtin ``filter``; kept for existing callers.
    """
    bgr = array(pil_image.convert('RGB'))[:, :, ::-1].copy()
    return bilateralFilter(bgr, 9, 75, 75)
class Missions:
def __init__(self):
self.dragon_ = False
self.friendship_ = False
self.inn_ = False
self.lov_ = False
self.shop_ = False
self.stockage_ = False
self.tower_ = False
self.wb_ = False
self.lil_ = False
self.launched = None
self.cache_2 = None
self.game_count = 0
self.game_home_screen_count = 0
self.error_count = 0
self.gb_cf = None
def update_cache(self, device, check_crash=True):
count = 0
while True:
try:
device.shell('screencap -p /sdcard/screencap.png')
device.pull('/sdcard/screencap.png', './.cache/screencap-'+str(device.serial)+'.png')
im = Image.open('./.cache/screencap-'+str(device.serial)+'.png')
if check_crash == True:
if self.cache_2 is not None:
try:
if check_similar(self.cache_2, im, 5, self.gb_cf['bonus_cutoff']) == 'similar':
self.game_count+=1
if self.game_count >= 50: # game freeze
device.shell('am force-stop com.vespainteractive.KingsRaid')
slp(3)
device.shell('monkey -p com.vespainteractive.KingsRaid 1')
slp(3)
self.game_count = 0
self.run_execute(device, self.launched)
exit()
else:
self.cache_2 = im
self.game_count = 0
self.emulator_count = 0
except OSError:
self.error_count += 1
if self.error_count >= 50:
self.error_count = 0
break
self.cache_2 = im
slp(5)
continue
else:
self.cache_2 = im
break
except RuntimeError:
self.error_count += 1
if self.error_count >= 50:
self.error_count = 0
break
if count == 50:
im = "device offline"
break
count += 1
slp(5)
except PermissionError or UnidentifiedImageError or ConnectionResetError:
self.error_count += 1
if self.error_count >= 50:
self.error_count = 0
break
slp(5)
return im, device
    def make_sure_loaded(self, original_img, device, dimensions=None, shell_=None, loop=None, sleep_duration=None, \
        shell_first=False, cutoff=6, second_img=None, third_img=None, oposite=False, second_shell=None, ck=True, ck_special_shop=True):
        """Poll the device screen until it matches a reference screenshot.

        original_img (and optionally second_img / third_img) are paths to
        template images; the live screencap (optionally cropped to
        *dimensions*) is hash-compared against them with threshold
        cutoff + gb_cf['bonus_cutoff'].  shell_/second_shell are ADB taps
        sent each round, either before (shell_first=True) or after the
        comparison.  With oposite=True the loop waits until the screen
        STOPS matching.  Returns 'loop' when *loop* rounds pass without a
        match, otherwise None once loaded.  ck=True re-runs the login/popup
        handler every round; a full crash check runs every ~100 rounds.
        """
        count = 0
        count_ = 0
        while True:
            if ck == True:
                self.check_login(device, ck_special_shop)
            # do adb shell first if passed
            if shell_ is not None:
                if shell_first is True:
                    device.shell(shell_)
                if second_shell is not None:
                    if shell_first is True:
                        slp(3)
                        device.shell(second_shell)
            # update cache (full crash check only every 100th round: cheaper)
            if count_ >= 100:
                im, device = self.update_cache(device)
            else:
                im, device = self.update_cache(device, False)
            if dimensions is not None:
                cache = crop(im, dimensions)
            else:
                cache = im
            # get data for comparing image
            original = average_hash(Image.open(original_img))
            cache = average_hash(cache)
            # compare
            bonus = self.gb_cf['bonus_cutoff']
            if original - cache < cutoff+bonus:
                if oposite == True:
                    pass
                else:
                    break
            else:
                if second_img is not None:
                    second = average_hash(Image.open(second_img))
                    if second - cache < cutoff+bonus:
                        break
                    else:
                        if third_img is not None:
                            third = average_hash(Image.open(third_img))
                            if third - cache < cutoff+bonus:
                                break
                if oposite == True:
                    break
                pass
            # adb shell if passed
            if shell_ is not None:
                if shell_first is False:
                    device.shell(shell_)
                if second_shell is not None:
                    if shell_first is False:
                        slp(3)
                        device.shell(second_shell)
            # break loop if given arg
            if loop is not None:
                if count == loop:
                    return 'loop'
            count+=1
            if sleep_duration is not None:
                slp(sleep_duration)
            count_+=1
    def check_login(self, device, ck_special_shop):
        """Detect and dismiss every popup/screen that can appear around login.

        Takes one screenshot, then compares per-popup crops against the
        module-level template images (coordinates/taps come from sets.json,
        keyed by the device resolution).  Each recognized popup is logged
        and tapped away; attendance/accumulated rewards are claimed.  When
        the game home screen is seen ~100 times in a row, the daily routine
        is restarted via run_execute().  ck_special_shop=False skips the
        special-shop popup handling.
        """
        # get device resolution
        im, device = self.update_cache(device, check_crash=False)
        size_ = f"{im.size[0]}x{im.size[1]}"
        with open('./sets.json', encoding='utf-8') as j:
            data = json.load(j)[size_]
        count = 0
        community_count = 0
        bonus = self.gb_cf['bonus_cutoff']
        im, device = self.update_cache(device)
        # android home screen
        im1 = home_screen_
        im2 = crop(im, data['login']['home_screen']['dms'])
        home_screen = check_similar(im1, im2, 15, bonus)
        if home_screen == 'similar':
            logging.info(device.serial+': android home screen detected')
            device.shell('monkey -p com.vespainteractive.KingsRaid 1')
            slp(3)
        # login
        # update notice
        im1 = update_notice_
        im2 = crop(im, data['update_notice']['dms'])
        update_notice = check_similar(im1, im2, 10, bonus)
        if update_notice == 'similar':
            logging.info(device.serial+': update notice detected')
            device.shell(data['update_notice']['shell'])
            slp(3)
        # introduction
        im1 = introduction_
        im2 = crop(im, data['introduction']['dms'])
        introduction = check_similar(im1, im2, 10, bonus)
        if introduction == 'similar':
            logging.info(device.serial+': introduction detected')
            device.shell(data['introduction']['shell'])
            slp(3)
        # tap to play
        im1 = tap_to_play_
        im2 = crop(im, data['tap_to_play']['dms'])
        tap_to_play = check_similar(im1, im2, 10, bonus)
        if tap_to_play == 'similar':
            logging.info(device.serial+': tap to play detected')
            device.shell(data['tap_to_play']['shell'])
            slp(3)
        # tap to play 2
        im1 = tap_to_play_2_
        im2 = crop(im, data['tap_to_play']['dms'])
        tap_to_play_2 = check_similar(im1, im2, 10, bonus)
        if tap_to_play_2 == 'similar':
            logging.info(device.serial+': tap to play 2 detected')
            device.shell(data['tap_to_play']['shell'])
            slp(3)
        # pass community page
        im1 = community_
        im2 = crop(im, data['login']['community']['dms'])
        community = check_similar(im1, im2, 10, bonus)
        if community == 'similar':
            logging.info(device.serial+': community page detected')
            device.shell(data['login']['community']['shell'])
            slp(3)
        # pass sale page
        im1 = sale_
        im2 = crop(im, data['login']['sale']['dms'])
        sale = check_similar(im1, im2, 10, bonus)
        if sale == 'similar':
            logging.info(device.serial+': sale page detected')
            device.shell(data['login']['sale']['shell'])
            slp(3)
        # claim login attendance
        im1 = attendance_
        im2 = crop(im, data['login']['attendance']['dms'])
        attendance = check_similar(im1, im2, 10, bonus)
        if attendance == 'similar':
            logging.info(device.serial+': login attendance detected')
            device.shell(data['login']['attendance']['shell'])
            slp(1)
            device.shell(data['login']['attendance']['second_shell'])
            slp(3)
        # pass event page
        im1 = event_
        im2 = crop(im, data['login']['event']['dms'])
        event = check_similar(im1, im2, 10, bonus)
        if event == 'similar':
            logging.info(device.serial+': event page detected')
            device.shell(data['login']['event']['shell'])
            slp(3)
        # claim guild attendance
        im1 = guild_attendance_
        im2 = crop(im, data['login']['guild_attendance']['dms'])
        guild_attendance = check_similar(im1, im2, 10, bonus)
        if guild_attendance == 'similar':
            logging.info(device.serial+': guild attendance detected')
            for day in data['login']['guild_attendance']['days']:
                device.shell(day)
                slp(1)
            device.shell(data['login']['guild_attendance']['row_reward'])
            slp(1)
            device.shell(data['login']['guild_attendance']['exit'])
            slp(3)
        # claim login accumualated
        im1 = accumualated_
        im2 = crop(im, data['login']['accumualated']['dms'])
        accumualated = check_similar(im1, im2, 10, bonus)
        if accumualated == 'similar':
            logging.info(device.serial+': login accumualated detected')
            device.shell(data['login']['accumualated']['shell'])
            slp(1)
            device.shell(data['login']['accumualated']['second_shell'])
            slp(3)
        # check loh new season
        im1 = loh_new_
        im2 = crop(im, data['loh']['0']['dms'])
        loh_new = check_similar(im1, im2, 10, bonus)
        if loh_new == 'similar':
            logging.info(device.serial+': new loh season detected')
            device.shell(data['loh']['0']['shell'])
            slp(3)
        # sale 2
        im1 = sale_2_
        im2 = crop(im, data['login']['sale_2']['dms'])
        sale_2 = check_similar(im1, im2, 10, bonus)
        if sale_2 == 'similar':
            logging.info(device.serial+': sale 2 page detected')
            device.shell(data['login']['sale_2']['shell'])
            slp(1)
            device.shell(data['login']['sale_2']['second_shell'])
            slp(3)
        # special shop
        if ck_special_shop != False:
            im1 = special_shop_
            im2 = crop(im, data['login']['special_shop']['dms'])
            special_shop = check_similar(im1, im2, 10, bonus)
            if special_shop == 'similar':
                logging.info(device.serial+': special shop detected')
                device.shell(data['login']['special_shop']['shell'])
                slp(3)
        # return to main page
        im1 = mb_
        im2 = crop(im, data['login']['mission_button'])
        mb = check_similar(im1, im2, 20, bonus)
        if mb == 'similar':
            self.game_home_screen_count += 1
            if self.game_home_screen_count >= 100:
                logging.info(device.serial+': game home screen detected')
                device.shell(data['daily']['shell'])
                self.game_home_screen_count = 0
                self.run_execute(device, launched=self.launched)
                exit()
def run_execute(self, device, launched=None):
with open('./config.json') as j:
self.gb_cf = json.load(j)
try:
self.execute(device, launched)
except SystemExit:
pass
except ConnectionResetError:
if launched is not None:
text = device.serial+': connection to remote host was forcibly closed, closing emulator'
print(text)
logging.warn(text)
path = self.gb_cf['ldconsole'].replace('|', '"')
run_(path+f' quit --index {str(launched)}')
except:
var = format_exc()
logging.warn(device.serial+': Exception | '+var)
return
    def execute(self, device, launched=None):
        """Run the whole daily routine on one device.

        Steps: detect resolution (sets.json key), start the game, open the
        daily mission board and claim rewards (plus optional EXP/gold
        buffs), OCR the first mission title to detect the game language,
        then repeatedly OCR the four visible missions and dispatch each to
        do_mission() until no new mission can be completed; finally runs
        weekly/mails/LoH extras and exits.  *launched* is the ldconsole
        emulator index when started from config (closed when done).
        """
        if launched is not None:
            self.launched = launched
        # get device resolution
        im, device = self.update_cache(device, check_crash=False)
        if im == 'device offline':
            if str(device.serial).startswith('127'):
                return
            text = 'device '+device.serial+' is offline, script ended'
            logging.info(text)
            print(text)
            return
        size_ = f"{im.size[0]}x{im.size[1]}"
        logging.info(device.serial+': size '+size_+' detected')
        with open('./sets.json', encoding='utf-8') as j:
            data = json.load(j)[size_]
        device.shell('monkey -p com.vespainteractive.KingsRaid 1')
        path = self.gb_cf['ldconsole'].replace('|', '"')
        def claim():
            # claim rewards (tap the two claim positions 9 times)
            count = 0
            while True:
                if count == 9:
                    break
                device.shell(data['claim'][0])
                device.shell(data['claim'][1])
                count+=1
        # open daily mission board
        self.make_sure_loaded('./base/other/daily.png', device, data['daily']['dms'], data['daily']['shell'], cutoff=8)
        if self.gb_cf['buff'] == True:
            # claim exp and gold buff in etc
            self.make_sure_loaded('./base/other/etc.png', device, data['buff']['1']['dms'], data['buff']['1']['shell'], second_img='./base/other/etc_2.png', third_img='./base/other/etc_3.png', cutoff=8)
            # claim exp buff
            self.make_sure_loaded('./base/other/use_exp.png', device, data['buff']['2']['dms'], data['buff']['2']['shell'], cutoff=15, sleep_duration=1, loop=5)
            self.make_sure_loaded('./base/other/etc.png', device, data['buff']['1']['dms'], data['buff']['2']['second_shell'], second_img='./base/other/etc_2.png', third_img='./base/other/etc_3.png', cutoff=8)
            slp(5)
            # claim gold buff
            self.make_sure_loaded('./base/other/use_gold.png', device, data['buff']['3']['dms'], data['buff']['3']['shell'], cutoff=15, sleep_duration=1, loop=5)
            self.make_sure_loaded('./base/other/etc.png', device, data['buff']['1']['dms'], data['buff']['3']['second_shell'], second_img='./base/other/etc_2.png', third_img='./base/other/etc_3.png', cutoff=8)
            # click back to mission board
        # open daily mission board
        self.make_sure_loaded('./base/other/daily.png', device, data['daily']['dms'], data['daily']['second_shell'], cutoff=8, shell_first=True, sleep_duration=0.5)
        claim()
        text = device.serial+': opened and claimed rewards (and exp/gold buff) on daily mission board for the first time'
        logging.info(text)
        print(text)
        # get game language (OCR the first mission title, then langdetect)
        im, device = self.update_cache(device)
        first_misison = crop(im, data['first mission'])
        image = filter(first_misison)
        text_lang = image_to_string(image).splitlines()[0].lower().replace('♀', '')
        while True:
            try:
                lang = detect(text_lang)
                break
            except:
                device.shell(data['daily']['second_shell'])
                slp(1)
                claim()
                slp(5)
                continue
        if lang == 'en' or lang == 'da' or lang == 'fr':
            lang = 'eng'
        elif lang == 'ja':
            lang = 'jpn'
        elif lang == 'vi':
            lang = 'vie'
        else:
            # langdetect was inconclusive: fuzzy-match the OCR text against
            # every known mission name per supported tesseract language
            with open('./languages.json', encoding='utf-8') as j:
                langs = json.load(j)
            lang = None
            missions_ = []
            langs_ = []
            _langs_ = {}
            for lang__ in langs:
                langs_.append(lang__)
                for _lang_ in langs[lang__]:
                    missions_.append(_lang_)
                    _langs_[_lang_] = lang__
            for lang__ in langs_:
                text_lang = image_to_string(image, lang__).splitlines()[0].lower().replace('♀', '')
                if lang__ == 'jpn':
                    text_lang = text_lang.replace(' ', '')
                lang_ = extractOne(text_lang, missions_)
                print(lang_[1])
                if lang_[1] > 85:
                    lang = _langs_[lang_[0]]
            if lang is None:
                text = device.serial+': language not supported or cannot recognized (supported languages: english, japanese, vietnamese)'
                logging.info(text)
                print(text)
                if self.launched is not None:
                    text = device.serial+': because launched from config so closing after done'
                    logging.info(text)
                    print(text)
                    run_(path+f' quit --index {str(self.launched)}')
                exit()
        # check for undone missions
        not_done = []
        not_done_ = []
        count = 0
        while True:
            im, device = self.update_cache(device)
            # get 4 visible missions on mission board
            visible_missions = [crop(im, data['first mission']), crop(im, data['second mission']), \
                crop(im, data['third mission']), crop(im, data['fourth mission'])]
            if not_done_ == not_done:
                # no progress since last pass; after 20 stagnant passes run
                # the weekly/mails/LoH extras and finish
                if count >= 20:
                    self.weekly(device, data)
                    if self.gb_cf['mails'] == True:
                        self.mails(device, data)
                    if self.gb_cf['loh'] == True:
                        re = self.loh(device, data, lang)
                        if re != 'success':
                            text = device.serial+': loh not enough currency or unavailable'
                            logging.info(text)
                            print(text)
                    text = device.serial+': all avalible missions has been completed, script ended'
                    logging.info(text)
                    print(text)
                    if self.launched is not None:
                        text = device.serial+': because launched from config so closing after done'
                        logging.info(text)
                        print(text)
                        run_(path+f' quit --index {str(self.launched)}')
                    exit()
                count+=1
            not_done_ = not_done
            count_ = 0
            for mission in visible_missions:
                pil_image = mission
                text = image_to_string(pil_image, lang).splitlines()[0].lower().replace('♀', '')
                if text == ' ':
                    img = filter(pil_image)
                    text = image_to_string(img, lang).splitlines()[0].lower().replace('♀', '')
                re = self.do_mission(text, device, data['shortcut'][str(count_)], data, size_, lang)
                if re == 'not':
                    if text not in not_done:
                        not_done.append(text)
                else:
                    self.make_sure_loaded('./base/other/daily.png', device, data['daily']['dms'], data['daily']['shell'], cutoff=8)
                    claim()
                    logging.info(device.serial+': opened and claimed rewards on daily mission board')
                    break
                count_+=1
def do_mission(self, mission, device, pos, data, res, lang):
with open('./languages.json', encoding='utf-8') as j:
lang_data = json.load(j)[lang]
lst = []
for name in lang_data:
lst.append(name)
ext = extractOne(mission, lst)
re = lang_data[ext[0]]
if re == 'dragon':
if self.gb_cf['dragon'] == False:
return 'not'
if self.dragon_ == True:
return 'not'
return self.dragon(device, pos, data, lang)
elif re == 'friendship':
if self.gb_cf['friendship'] == False:
return 'not'
if self.friendship_ == True:
return 'not'
return self.friendship(device, pos, data)
elif re == 'inn':
if self.gb_cf['inn'] == False:
return 'not'
if self.inn_ == True:
return 'not'
return self.inn(device, pos, data)
elif re == 'lov':
if self.gb_cf['lov'] == False:
return 'not'
if self.lov_ == True:
return 'not'
return self.lov(device, pos, data)
elif re == 'shop':
if self.gb_cf['shop'] == False:
return 'not'
if self.shop_ == True:
return 'not'
return self.shop(device, pos, data)
elif re == 'stockage':
if self.gb_cf['stockage'] == False:
return 'not'
if self.stockage_ == True:
return 'not'
return self.stockage(device, pos, data)
elif re == 'tower':
if self.gb_cf['tower'] == False:
return 'not'
if self.tower_ == True:
return 'not'
return self.tower(device, pos, data, lang)
elif re == 'wb':
if self.gb_cf['wb'] == False:
return 'not'
if self.wb_ == True:
return 'not'
return self.wb(device, pos, data)
elif re == 'lil':
if self.gb_cf['lil'] == False:
return 'not'
if self.lil_ == True:
return 'not'
return self.lil(device, pos, data, res)
elif re == 'dungeons':
return 'not'
elif re == 'stamina':
return 'not'
elif re == 'login':
return 'not'
    def dragon(self, device, position, data, lang):
        """Daily dragon-raid mission: create a solo T6 stage-1 red dragon
        raid, fill the party from all six saved slots, battle and exit.
        Returns 'success', or 'not' when the mission shortcut never loads
        (the mission is then marked done so it is not retried)."""
        print(device.serial+': hunting dragon...')
        logging.info(device.serial+': hunting dragon')
        # click mission shortcut
        shortcut = self.make_sure_loaded('./base/dragon/raid_list.png', device, data['dragon']['1']['dms'], data['dragon']['1']['shell']+position, cutoff=20, loop=20, sleep_duration=10)
        if shortcut == 'loop':
            self.dragon_ = True
            return 'not'
        logging.info(device.serial+': loaded from mission shortcut')
        # click create red dragon raid
        self.make_sure_loaded('./base/dragon/red_dra.png', device, data['dragon']['2']['dms'], data['dragon']['2']['shell'])
        logging.info(device.serial+': clicked create dragon raid')
        with open('./languages.json', encoding='utf-8') as j:
            dragon_text = json.load(j)[lang]['dragon']
        # change hard level to t6 stage 1 (tap "lower" until OCR matches)
        while True:
            im, device = self.update_cache(device)
            pil_image = crop(im, data['dragon']['3']['dms'])
            img = filter(pil_image)
            text = image_to_string(img, lang).replace('♀', '')
            if lang == 'jpn':
                text = text.replace(' ', '')
            text_ = text.splitlines()[0].lower().replace(' ', '')
            if SequenceMatcher(None, dragon_text, text_).ratio() > 0.9:
                device.shell(data['dragon']['3']['shell'])
                break
            else:
                device.shell(data['dragon']['4']['shell'])
        logging.info(device.serial+': changed to dragon t6 stage 1')
        # click single raid
        self.make_sure_loaded('./base/dragon/single_raid.png', device, data['dragon']['5']['dms'], data['dragon']['5']['shell'], shell_first=True)
        logging.info(device.serial+': clicked single raid')
        # click enter raid
        self.make_sure_loaded('./base/dragon/party.png', device, data['dragon']['6']['dms'], data['dragon']['6']['shell'], sleep_duration=0.5, cutoff=20)
        logging.info(device.serial+': clicked enter raid')
        # check avalible party
        # slot 1
        self.make_sure_loaded('./base/dragon/party_4.png', device, data['dragon']['7']['dms'], data['dragon']['7']['shell'], oposite=True, sleep_duration=1)
        # slot 2
        self.make_sure_loaded('./base/dragon/party_3.png', device, data['dragon']['8']['dms'], data['dragon']['8']['shell'], oposite=True, sleep_duration=1)
        # slot 3
        self.make_sure_loaded('./base/dragon/party_2.png', device, data['dragon']['9']['dms'], data['dragon']['9']['shell'], oposite=True, sleep_duration=1)
        # slot 4
        self.make_sure_loaded('./base/dragon/party_1.png', device, data['dragon']['10']['dms'], data['dragon']['10']['shell'], oposite=True, sleep_duration=1)
        # slot 5
        self.make_sure_loaded('./base/dragon/party_6.png', device, data['dragon']['11']['dms'], data['dragon']['11']['shell'], oposite=True, sleep_duration=1)
        # slot 6
        self.make_sure_loaded('./base/dragon/party_5.png', device, data['dragon']['12']['dms'], data['dragon']['12']['shell'], oposite=True, sleep_duration=1)
        logging.info(device.serial+': checked all avalible slots')
        # click start battle
        self.make_sure_loaded('./base/dragon/battle.png', device, data['dragon']['13']['dms'], data['dragon']['13']['shell'], cutoff=30)
        logging.info(device.serial+': clicked start battle')
        # wait until finish
        self.make_sure_loaded('./base/dragon/end.png', device, data['dragon']['14']['dms'], sleep_duration=15, cutoff=10, ck=False, loop=4)
        logging.info(device.serial+': battle completed')
        # click exit battle
        self.make_sure_loaded('./base/dragon/party.png', device, data['dragon']['15']['dms'], data['dragon']['15']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': exited battle')
        # click exit
        self.make_sure_loaded('./base/dragon/my_info.png', device, data['dragon']['16']['dms'], data['dragon']['16']['shell'], sleep_duration=0.5)
        device.shell(data['dragon']['17']['shell'])
        logging.info(device.serial+': successfully did dragon mission')
        self.dragon_ = True
        return 'success'
    def friendship(self, device, position, data):
        """Daily friendship mission: open the friends page and exchange
        friendship points.  Returns 'success', or 'not' when the mission
        shortcut never loads (mission is then marked done)."""
        print(device.serial+': exchanging friendship points...')
        logging.info(device.serial+': exchanging friendship points')
        # click mission shortcut
        shortcut = self.make_sure_loaded('./base/friendship/friends.png', device, data['friendship']['1']['dms'], data['friendship']['1']['shell']+position, loop=20, cutoff=20, sleep_duration=10)
        if shortcut == 'loop':
            self.friendship_ = True
            return 'not'
        logging.info(device.serial+': loaded from mission shortcut')
        # click exchange friendship points
        self.make_sure_loaded('./base/friendship/exchange.png', device, data['friendship']['2']['dms'], data['friendship']['2']['shell'], cutoff=10, shell_first=True, loop=30)
        logging.info(device.serial+': clicked exchange friendship points')
        # click exit
        self.make_sure_loaded('./base/friendship/my_info.png', device, data['friendship']['3']['dms'], data['friendship']['3']['shell'], sleep_duration=0.5)
        device.shell(data['friendship']['4']['shell'])
        logging.info(device.serial+': successfully did friendship mission')
        self.friendship_ = True
        return 'success'
    def inn(self, device, position, data):
        """Daily inn mission: greet/talk/gift six heroes and play six rounds
        of the minigame.  Returns 'success', or 'not' when the mission
        shortcut never loads (mission is then marked done)."""
        print(device.serial+': doing stuffs in inn...')
        logging.info(device.serial+': doing stuffs in inn')
        # click mission shortcut
        shortcut = self.make_sure_loaded('./base/inn/visit_inn.png', device, data['inn']['1']['dms'], data['inn']['1']['shell']+position, cutoff=20, loop=20, sleep_duration=10)
        if shortcut == 'loop':
            self.inn_ = True
            return 'not'
        logging.info(device.serial+': loaded from mission shortcut')
        # open inn
        self.make_sure_loaded('./base/inn/inn.png', device, data['inn']['2']['dms'], data['inn']['2']['shell'], second_img='./base/inn/inn_.png', cutoff=15)
        logging.info(device.serial+': opened inn')
        # give gifts: greet -> start conversation -> send gift
        def gift():
            slp(2)
            self.make_sure_loaded('./base/inn/greet.png', device, data['inn']['3']['dms'], data['inn']['3']['shell'], second_shell=data['inn']['2']['shell'], cutoff=10, \
                second_img='./base/inn/greet_.png', third_img='./base/inn/greet__.png', loop=5, shell_first=True)
            self.make_sure_loaded('./base/inn/start_conversation.png', device, data['inn']['4']['dms'], data['inn']['4']['shell'], second_shell=data['inn']['2']['shell'], cutoff=10, \
                second_img='./base/inn/start_conversation_.png', third_img='./base/inn/start_conversation__.png', loop=5, shell_first=True)
            self.make_sure_loaded('./base/inn/send_gift.png', device, data['inn']['5']['dms'], data['inn']['5']['shell'], second_shell=data['inn']['2']['shell'], cutoff=10, \
                second_img='./base/inn/send_gift_.png', third_img='./base/inn/send_gift__.png', loop=5, shell_first=True)
        # choose hero in inn (tap the given screen coordinates)
        def choose_hero(tap1, tap2):
            self.make_sure_loaded('./base/inn/inn.png', device, data['inn']['6']['dms'], data['inn']['6']['shell']+str(tap1)+' '+str(tap2),
                shell_first=True, second_img='./base/inn/inn_.png', cutoff=25, second_shell=data['inn']['2']['shell'])
        # give gifts to first hero
        gift()
        logging.info(device.serial+': gave gifts to first hero')
        # give gifts to second hero
        choose_hero(data['inn']['7']['shell'][0], data['inn']['7']['shell'][1])
        gift()
        logging.info(device.serial+': gave gifts to second hero')
        # give gifts to third hero
        choose_hero(data['inn']['8']['shell'][0], data['inn']['8']['shell'][1])
        gift()
        logging.info(device.serial+': gave gifts to third hero')
        # give gifts to fourth hero
        choose_hero(data['inn']['9']['shell'][0], data['inn']['9']['shell'][1])
        gift()
        logging.info(device.serial+': gave gifts to fourth hero')
        # give gifts to fifth hero
        choose_hero(data['inn']['10']['shell'][0], data['inn']['10']['shell'][1])
        gift()
        logging.info(device.serial+': gave gifts to fifth hero')
        # give gifts to sixth hero
        choose_hero(data['inn']['11']['shell'][0], data['inn']['11']['shell'][1])
        gift()
        logging.info(device.serial+': gave gifts to sixth hero')
        # click 'Mini Game'
        count = 0
        while True:
            if count == 6:
                break
            self.make_sure_loaded('./base/inn/mini_game.png', device, data['inn']['12']['dms'], data['inn']['12']['shell'])
            slp(0.5)
            device.shell(data['inn']['13']['shell'])
            slp(0.5)
            self.make_sure_loaded('./base/inn/inn.png', device, data['inn']['14']['dms'], data['inn']['14']['shell'], cutoff=20, second_img='./base/inn/inn_.png')
            slp(1)
            count+=1
        logging.info(device.serial+': played minigames')
        # click exit
        self.make_sure_loaded('./base/inn/visit_inn.png', device, data['inn']['15']['dms'], data['inn']['15']['shell'], cutoff=20, sleep_duration=3)
        self.make_sure_loaded('./base/inn/my_info.png', device, data['inn']['16']['dms'], data['inn']['16']['shell'], sleep_duration=0.5)
        device.shell(data['inn']['17']['shell'])
        logging.info(device.serial+': successfully did some stuffs in inn mission')
        self.inn_ = True
        return 'success'
    def lov(self, device, position, data):
        """Daily arena (League of Victory) mission: register one match with
        the saved team and immediately exit ("suicide" run just to tick the
        mission).  Returns 'success', or 'not' when the mission shortcut
        never loads (mission is then marked done)."""
        print(device.serial+': suiciding in lov...')
        logging.info(device.serial+': suiciding in lov')
        # click mission shortcut
        shortcut = self.make_sure_loaded('./base/lov/arena.png', device, data['lov']['1']['dms'], data['lov']['1']['shell']+position, loop=20, cutoff=20, sleep_duration=10)
        if shortcut == 'loop':
            self.lov_ = True
            return 'not'
        logging.info(device.serial+': loaded from mission shortcut')
        # click select arena
        self.make_sure_loaded('./base/lov/arenas.png', device, data['lov']['2']['dms'], data['lov']['2']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked select arena')
        # click enter lov
        self.make_sure_loaded('./base/lov/lov.png', device, data['lov']['3']['dms'], data['lov']['3']['shell'], sleep_duration=1)
        logging.info(device.serial+': clicked enter lov')
        # click ready to dual
        self.make_sure_loaded('./base/lov/party.png', device, data['lov']['4']['dms'], data['lov']['4']['shell'], sleep_duration=0.5, cutoff=20)
        logging.info(device.serial+': clicked ready to dual')
        # check avalible team
        self.make_sure_loaded('./base/lov/party_.png', device, data['lov']['5']['dms'], data['lov']['5']['shell'], sleep_duration=1, oposite=True, cutoff=20)
        logging.info(device.serial+': checked avalible team')
        # click register match
        self.make_sure_loaded('./base/lov/end.png', device, data['lov']['6']['dms'], data['lov']['6']['shell'], sleep_duration=0.5, cutoff=25, second_shell=data['lov']['6']['second_shell'])
        logging.info(device.serial+': clicked and exited battle')
        # click exit match
        self.make_sure_loaded('./base/lov/lov.png', device, data['lov']['7']['dms'], data['lov']['7']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': exited match')
        # click exit
        self.make_sure_loaded('./base/lov/my_info.png', device, data['lov']['8']['dms'], data['lov']['8']['shell'], sleep_duration=0.5)
        device.shell(data['lov']['9']['shell'])
        logging.info(device.serial+': successfully did lov mission')
        self.lov_ = True
        return 'success'
def shop(self, device, position, data):
        """Buy one randomly chosen item from the shop (daily mission).

        Args:
            device: adb device handle.
            position: string appended to the shortcut tap command.
            data: per-step ``dms``/``shell`` entries under ``data['shop']``;
                step '3-0' holds a list of (x, y)-like coordinate pairs for
                purchasable items, from which one is picked at random.

        Returns:
            'not' if the shortcut never loaded, else 'success'.
            Sets ``self.shop_ = True`` either way.
        """
        print(device.serial+': buying stuffs in shop...')
        logging.info(device.serial+': buying stuffs in shop')
        # click mission shortcut
        shortcut = self.make_sure_loaded('./base/shop/use_shop.png', device, data['shop']['1']['dms'], data['shop']['1']['shell']+position, loop=20, cutoff=20, sleep_duration=10)
        if shortcut == 'loop':
            self.shop_ = True
            return 'not'
        logging.info(device.serial+': loaded from mission shortcut')
        # open shop
        self.make_sure_loaded('./base/shop/shop.png', device, data['shop']['2']['dms'], data['shop']['2']['shell'])
        logging.info(device.serial+': opened shop')
        # click a random item in shop
        lst = data['shop']['3-0']['shell']
        r = choice(lst)
        device.shell(data['shop']['3-1']['shell']+str(r[0])+' '+str(r[1]))
        logging.info(device.serial+': clicked a random stuff')
        self.make_sure_loaded('./base/shop/buy.png', device, data['shop']['3-2']['dms'], data['shop']['3-2']['shell']+str(r[0])+' '+str(r[1]), cutoff=1)
        logging.info(device.serial+': clicked a random stuff second time')
        # click buy item
        self.make_sure_loaded('./base/shop/bought.png', device, data['shop']['4']['dms'], data['shop']['4']['shell'], shell_first=True, cutoff=3)
        logging.info(device.serial+': bought stuff')
        # click exit
        self.make_sure_loaded('./base/shop/my_info.png', device, data['shop']['5']['dms'], data['shop']['5']['shell'], sleep_duration=0.5)
        device.shell(data['shop']['6']['shell'])
        logging.info(device.serial+': successfully bought stuffs in shop in inn mission')
        self.shop_ = True
        return 'success'
def stockage(self, device, position, data):
        """Clear all four daily stockage dungeons in one pass.

        Order: fragment dungeon (random reward), skill book dungeon
        (random book type + random book), exp dungeon, gold dungeon.
        Each run sets up the party, starts an auto-repeat battle, waits
        for the end screen, then backs out.  Sets ``self.stockage_ = True``
        on both exit paths.

        Args:
            device: adb device handle.
            position: string appended to the shortcut tap command.
            data: per-step ``dms``/``shell`` entries under
                ``data['stockage']`` (party-slot steps reuse
                ``data['tower']`` templates).

        Returns:
            'not' if the shortcut never loaded, else 'success'.
        """
        print(device.serial+': farming stuffs in stockage...')
        logging.info(device.serial+': farming stuffs in stockage')
        # click mission shortcut
        shortcut = self.make_sure_loaded('./base/stockage/enter_dungeons.png', device, data['stockage']['1']['dms'], data['stockage']['1']['shell']+position, loop=20, cutoff=20, sleep_duration=10)
        if shortcut == 'loop':
            self.stockage_ = True
            return 'not'
        logging.info(device.serial+': loaded from mission shortcut')
        # open stockage
        self.make_sure_loaded('./base/stockage/stockage.png', device, data['stockage']['2']['dms'], data['stockage']['2']['shell'], cutoff=9, sleep_duration=0.5)
        logging.info(device.serial+': opened stockage')
        # Fill the four party slots; reuses the tower screen templates and
        # coordinates (oposite=True: keep tapping until the slot image appears).
        def check_team(device, data):
            # slot 1
            self.make_sure_loaded('./base/tower/party_4.png', device, data['tower']['7']['dms'], data['tower']['7']['shell'], sleep_duration=1, cutoff=20, oposite=True)
            # slot 2
            self.make_sure_loaded('./base/tower/party_3.png', device, data['tower']['8']['dms'], data['tower']['8']['shell'], sleep_duration=1, cutoff=20, oposite=True)
            # slot 3
            self.make_sure_loaded('./base/tower/party_2.png', device, data['tower']['9']['dms'], data['tower']['9']['shell'], sleep_duration=1, cutoff=20, oposite=True)
            # slot 4
            self.make_sure_loaded('./base/tower/party_1.png', device, data['tower']['10']['dms'], data['tower']['10']['shell'], sleep_duration=1, cutoff=20, oposite=True)
        # fragment dungeons
        self.make_sure_loaded('./base/stockage/fragment_d.png', device, data['stockage']['3']['dms'], data['stockage']['3']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked fragment dungeons')
        # party
        self.make_sure_loaded('./base/stockage/party.png', device, data['stockage']['4']['dms'], data['stockage']['4']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked to party setup')
        # check avalible team
        check_team(device, data)
        # start battle
        self.make_sure_loaded('./base/stockage/select_battle.png', device, data['stockage']['5']['dms'], data['stockage']['5']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked start battle')
        # auto repeat
        self.make_sure_loaded('./base/stockage/notice.png', device, data['stockage']['6']['dms'], data['stockage']['6']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked auto repeat')
        # select reward
        self.make_sure_loaded('./base/stockage/fragment_select_reward.png', device, data['stockage']['7']['dms'], data['stockage']['7']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked to select reward')
        # click random reward
        lst = data['stockage']['8-0']['shell']
        r = choice(lst)
        # self.make_sure_loaded('./base/stockage/ok.png', device, data['stockage']['8-1']['dms'], data['stockage']['8-1']['shell']+str(r[0])+' '+str(r[1]), shell_first=True, sleep_duration=0.5)
        # logging.info(device.serial+': selected random reward')
        # click ok
        # NOTE(review): the reward tap is folded into second_shell of the
        # 'ok' step below instead of the commented-out separate step above.
        self.make_sure_loaded('./base/stockage/loading_r.png', device, data['stockage']['9']['dms'], data['stockage']['9']['shell'], cutoff=10, loop=15, sleep_duration=0.5, second_shell=data['stockage']['8-1']['shell']+str(r[0])+' '+str(r[1]))
        logging.info(device.serial+': selected random reward and clicked ok to enter battle')
        slp(5)
        # wait until finish
        self.make_sure_loaded('./base/stockage/end.png', device, data['stockage']['10']['dms'], data['stockage']['10']['shell'], sleep_duration=15)
        logging.info(device.serial+': battle completed')
        # click exit
        self.make_sure_loaded('./base/stockage/loading.png', device, data['stockage']['11']['dms'], data['stockage']['11']['shell'])
        logging.info(device.serial+': exited from fragment dungeons')
        # skill book dungeon
        self.make_sure_loaded('./base/stockage/skill_book_d.png', device, data['stockage']['12']['dms'], data['stockage']['12']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked skill book dungeons')
        # party
        self.make_sure_loaded('./base/stockage/party.png', device, data['stockage']['13']['dms'], data['stockage']['13']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked to party setup')
        # check avalible team
        check_team(device, data)
        # start battle
        self.make_sure_loaded('./base/stockage/select_battle.png', device, data['stockage']['14']['dms'], data['stockage']['14']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked start battle')
        # auto repeat
        self.make_sure_loaded('./base/stockage/notice.png', device, data['stockage']['15']['dms'], data['stockage']['15']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked auto repeat')
        # select reward
        self.make_sure_loaded('./base/stockage/exp_select_reward.png', device, data['stockage']['16']['dms'], data['stockage']['16']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked to select reward')
        # click random type of book
        lst = data['stockage']['17-0']['shell']
        r = choice(lst)
        device.shell(data['stockage']['17-1']['shell']+str(r[0])+' '+str(r[1]))
        logging.info(device.serial+': selected random book type')
        # click random book
        lst = data['stockage']['18-0']['shell']
        r = choice(lst)
        # self.make_sure_loaded('./base/stockage/ok_.png', device, data['stockage']['18-1']['dms'], data['stockage']['18-1']['shell']+str(r[0])+' '+str(r[1]), shell_first=True, sleep_duration=0.5)
        # logging.info(device.serial+': selected random book reward')
        # click ok
        self.make_sure_loaded('./base/stockage/loading_r.png', device, data['stockage']['19']['dms'], data['stockage']['19']['shell'], cutoff=10, loop=15, sleep_duration=0.5, second_shell=data['stockage']['18-1']['shell']+str(r[0])+' '+str(r[1]))
        logging.info(device.serial+': selected random book reward and clicked ok to enter battle')
        slp(5)
        # wait until finish
        self.make_sure_loaded('./base/stockage/end.png', device, data['stockage']['20']['dms'], data['stockage']['20']['shell'], sleep_duration=15)
        logging.info(device.serial+': battle completed')
        # click exit
        self.make_sure_loaded('./base/stockage/loading.png', device, data['stockage']['21']['dms'], data['stockage']['21']['shell'])
        logging.info(device.serial+': exited from skill book dungeons')
        # exp dungeon
        self.make_sure_loaded('./base/stockage/exp_d.png', device, data['stockage']['22']['dms'], data['stockage']['22']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked exp dungeons')
        # party
        self.make_sure_loaded('./base/stockage/party.png', device, data['stockage']['23']['dms'], data['stockage']['23']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked to party setup')
        # check avalible team
        check_team(device, data)
        # start battle
        self.make_sure_loaded('./base/stockage/select_battle.png', device, data['stockage']['24']['dms'], data['stockage']['24']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked start battle')
        # auto repeat
        self.make_sure_loaded('./base/stockage/notice.png', device, data['stockage']['25']['dms'], data['stockage']['25']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked auto repeat')
        # click ok
        self.make_sure_loaded('./base/stockage/loading_r.png', device, data['stockage']['26']['dms'], data['stockage']['26']['shell'], loop=10, sleep_duration=0.5)
        logging.info(device.serial+': clicked ok to enter battle')
        slp(5)
        # wait until finish
        self.make_sure_loaded('./base/stockage/end.png', device, data['stockage']['27']['dms'], data['stockage']['27']['shell'], sleep_duration=15)
        logging.info(device.serial+': battle completed')
        # click exit
        self.make_sure_loaded('./base/stockage/loading.png', device, data['stockage']['28']['dms'], data['stockage']['28']['shell'])
        logging.info(device.serial+': exited from exp dungeons')
        # gold dungeon
        self.make_sure_loaded('./base/stockage/gold_d.png', device, data['stockage']['29']['dms'], data['stockage']['29']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked exp dungeons')
        # party
        self.make_sure_loaded('./base/stockage/party.png', device, data['stockage']['30']['dms'], data['stockage']['30']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked to party setup')
        # check avalible team
        check_team(device, data)
        # start battle
        self.make_sure_loaded('./base/stockage/select_battle.png', device, data['stockage']['31']['dms'], data['stockage']['31']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked start battle')
        # auto repeat
        self.make_sure_loaded('./base/stockage/notice.png', device, data['stockage']['32']['dms'], data['stockage']['32']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked auto repeat')
        # click ok
        self.make_sure_loaded('./base/stockage/loading_r.png', device, data['stockage']['33']['dms'], data['stockage']['33']['shell'], loop=10, sleep_duration=0.5)
        logging.info(device.serial+': clicked ok to enter battle')
        slp(5)
        # wait until finish
        self.make_sure_loaded('./base/stockage/end.png', device, data['stockage']['34']['dms'], data['stockage']['34']['shell'], sleep_duration=15)
        logging.info(device.serial+': battle completed')
        # click exit
        self.make_sure_loaded('./base/stockage/loading.png', device, data['stockage']['35']['dms'], data['stockage']['35']['shell'])
        logging.info(device.serial+': exited from gold dungeons')
        # click exit
        self.make_sure_loaded('./base/stockage/my_info.png', device, data['stockage']['36']['dms'], data['stockage']['36']['shell'], sleep_duration=0.5)
        device.shell(data['stockage']['37']['shell'])
        logging.info(device.serial+': successfully did stockage mission')
        self.stockage_ = True
        return 'success'
def tower(self, device, position, data, lang):
        """Clear Tower of Challenge floor 1 once (daily mission).

        Uses OCR on the floor label to page down until the localized
        "floor 1" string (from languages.json) is matched, then sets up
        the party and runs the battle.

        Args:
            device: adb device handle.
            position: string appended to the shortcut tap command.
            data: per-step ``dms``/``shell`` entries under ``data['tower']``.
            lang: tesseract language code (also a key into languages.json).

        Returns:
            'not' if the shortcut never loaded, else 'success'.
            Sets ``self.tower_ = True`` either way.
        """
        print(device.serial+': battling in tower...')
        logging.info(device.serial+': battling in tower')
        # click mission shortcut
        shortcut = self.make_sure_loaded('./base/tower/tower.png', device, data['tower']['1']['dms'], data['tower']['1']['shell']+position, loop=20, cutoff=20, sleep_duration=10)
        if shortcut == 'loop':
            self.tower_ = True
            return 'not'
        logging.info(device.serial+': loaded from mission shortcut')
        # click tower of challenge
        self.make_sure_loaded('./base/tower/toc.png', device, data['tower']['2']['dms'], data['tower']['2']['shell'], sleep_duration=1)
        logging.info(device.serial+': clicked toc')
        # change to floor 1
        with open('./languages.json', encoding='utf-8') as j:
            floor = json.load(j)[lang]['tower']
        # OCR loop: crop the floor label, preprocess it ('filter' here is a
        # project image helper that shadows the builtin), compare against the
        # localized floor-1 string; tap "page down" (step 4) until >0.9 match,
        # then confirm (step 5).
        while True:
            im, device = self.update_cache(device)
            pil_image = crop(im, data['tower']['3']['dms'])
            img = filter(pil_image)
            text = image_to_string(img, lang).replace('♀', '')
            if lang == 'jpn':
                text = text.replace(' ', '')
            text = text.splitlines()[0].lower().replace(' ','')
            if SequenceMatcher(None, text, floor).ratio() > 0.9:
                device.shell(data['tower']['5']['shell'])
                break
            else:
                device.shell(data['tower']['4']['shell'])
                slp(1)
        logging.info(device.serial+': changed floor level to 1')
        # click ready for battle
        self.make_sure_loaded('./base/tower/party.png', device, data['tower']['6']['dms'], data['tower']['6']['shell'])
        logging.info(device.serial+': clicked ready for battle')
        # check avalible team
        # slot 1
        self.make_sure_loaded('./base/tower/party_4.png', device, data['tower']['7']['dms'], data['tower']['7']['shell'], sleep_duration=1, cutoff=20, oposite=True)
        # slot 2
        self.make_sure_loaded('./base/tower/party_3.png', device, data['tower']['8']['dms'], data['tower']['8']['shell'], sleep_duration=1, cutoff=20, oposite=True)
        # slot 3
        self.make_sure_loaded('./base/tower/party_2.png', device, data['tower']['9']['dms'], data['tower']['9']['shell'], sleep_duration=1, cutoff=20, oposite=True)
        # slot 4
        self.make_sure_loaded('./base/tower/party_1.png', device, data['tower']['10']['dms'], data['tower']['10']['shell'], sleep_duration=1, cutoff=20, oposite=True)
        logging.info(device.serial+': checked all avalible slots')
        # click start battle to open select battle board
        self.make_sure_loaded('./base/tower/select_battle.png', device, data['tower']['11']['dms'], data['tower']['11']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked start battle and opened select battle board')
        # click start battle
        self.make_sure_loaded('./base/tower/end.png', device, data['tower']['12']['dms'], data['tower']['12']['shell'], sleep_duration=0.5, cutoff=10)
        logging.info(device.serial+': clicked start battle')
        # click exit battle
        self.make_sure_loaded('./base/tower/toc.png', device, data['tower']['2']['dms'], data['tower']['13']['shell'])
        logging.info(device.serial+': exited battle')
        # click exit
        self.make_sure_loaded('./base/tower/my_info.png', device, data['tower']['14']['dms'], data['tower']['14']['shell'], sleep_duration=0.5)
        device.shell(data['tower']['15']['shell'])
        logging.info(device.serial+': successfully did toc mission')
        self.tower_ = True
        return 'success'
def wb(self, device, position, data):
        """Run one world-boss battle (daily mission).

        If the "get ready" screen never appears (step 2 returns 'loop'),
        the boss is treated as closed: the method still exits cleanly and
        returns 'success' so the mission is not retried.

        Args:
            device: adb device handle.
            position: string appended to the shortcut tap command.
            data: per-step ``dms``/``shell`` entries under ``data['wb']``.

        Returns:
            'not' if the shortcut never loaded, else 'success'.
            Sets ``self.wb_ = True`` either way.
        """
        print(device.serial+': battling world boss...')
        logging.info(device.serial+': battling world boss')
        # click mission shortcut
        shortcut = self.make_sure_loaded('./base/wb/wb.png', device, data['wb']['1']['dms'], data['wb']['1']['shell']+position, loop=20, cutoff=20, sleep_duration=10)
        if shortcut == 'loop':
            self.wb_ = True
            return 'not'
        logging.info(device.serial+': loaded from mission shortcut')
        # click get ready for battle
        close = self.make_sure_loaded('./base/wb/party.png', device, data['wb']['2']['dms'], data['wb']['2']['shell'], sleep_duration=2, cutoff=20, loop=20)
        # wb close
        if close == 'loop':
            self.wb_ = True
            # click exit
            self.make_sure_loaded('./base/wb/my_info.png', device, data['wb']['8']['dms'], data['wb']['8']['shell'], sleep_duration=0.5)
            device.shell(data['wb']['9']['shell'])
            return 'success'
        logging.info(device.serial+': loaded from get ready for battle')
        # check avalible team
        self.make_sure_loaded('./base/wb/a_party.png', device, data['wb']['3']['dms'], data['wb']['3']['shell'], cutoff=20, oposite=True, sleep_duration=0.5)
        logging.info(device.serial+': checked avalible party')
        # click set sub team
        self.make_sure_loaded('./base/wb/sub_party.png', device, data['wb']['4']['dms'], data['wb']['4']['shell'], sleep_duration=0.5, cutoff=2)
        logging.info(device.serial+': clicked set up sub team')
        # click start battle
        self.make_sure_loaded('./base/wb/loading.png', device, data['wb']['5']['dms'], data['wb']['5']['shell'], cutoff=10, \
            sleep_duration=0.5, second_shell=data['wb']['5']['second_shell'], loop=10)
        logging.info(device.serial+': clicked start battle')
        # wait until finish (polls every 15 s for the end-of-battle screen)
        self.make_sure_loaded('./base/wb/end.png', device, data['wb']['6']['dms'], sleep_duration=15, cutoff=20)
        logging.info(device.serial+': battle completed')
        # click exit battle
        self.make_sure_loaded('./base/wb/wb.png', device, data['wb']['7']['dms'], data['wb']['7']['shell'])
        logging.info(device.serial+': exited battle')
        # click exit
        self.make_sure_loaded('./base/wb/my_info.png', device, data['wb']['8']['dms'], data['wb']['8']['shell'], sleep_duration=0.5)
        device.shell(data['wb']['9']['shell'])
        logging.info(device.serial+': successfully did world boss mission')
        self.wb_ = True
        return 'success'
def lil(self, device, position, data, res):
        """Feed the first lil raider once (daily mission).

        Args:
            device: adb device handle.
            position: string appended to the shortcut tap command.
            data: per-step ``dms``/``shell`` entries under ``data['lil']``.
            res: unused in this method — presumably kept for call-site
                uniformity with the other missions; TODO confirm.

        Returns:
            'not' if the shortcut never loaded, else 'success'.
            Sets ``self.lil_ = True`` either way.
        """
        print(device.serial+': feeding lil raider...')
        logging.info(device.serial+': feeding lil raider')
        # click mission shortcut
        shortcut = self.make_sure_loaded('./base/lil/lil.png', device, data['lil']['1']['dms'], data['lil']['1']['shell']+position, cutoff=20, loop=20, sleep_duration=10)
        if shortcut == 'loop':
            self.lil_ = True
            return 'not'
        logging.info(device.serial+': loaded from mission shortcut')
        # click treats
        self.make_sure_loaded('./base/lil/treats.png', device, data['lil']['2']['dms'], data['lil']['2']['shell'], cutoff=10, sleep_duration=0.5)
        logging.info(device.serial+': clicked treats')
        # click feed first lil raider
        self.make_sure_loaded('./base/lil/feeded.png', device, data['lil']['3']['dms'], data['lil']['3']['shell'], second_shell=data['lil']['4']['shell'], shell_first=True, cutoff=20, sleep_duration=0.5)
        logging.info(device.serial+': clicked feed')
        # click exit feeding
        self.make_sure_loaded('./base/lil/lil.png', device, data['lil']['5']['dms'], data['lil']['5']['shell'], cutoff=10, sleep_duration=0.5)
        logging.info(device.serial+': exit treats')
        # click exit
        self.make_sure_loaded('./base/lil/my_info.png', device, data['lil']['6']['dms'], data['lil']['6']['shell'], sleep_duration=0.5)
        device.shell(data['lil']['7']['shell'])
        logging.info(device.serial+': successfully did lil raider mission')
        self.lil_ = True
        return 'success'
def weekly(self, device, data):
        """Claim weekly mission rewards on *device*.

        Switches the mission board to the weekly tab, then fires the two
        claim shell commands nine times (one pair per reward slot,
        presumably — TODO confirm the slot count).

        Args:
            device: adb device handle.
            data: dict providing ``data['claim']`` (pair of shell commands)
                and ``data['daily']`` (``dms``/``third_shell`` for the tab
                switch).
        """
        logging.info(device.serial+': claiming weekly rewards')
        def claim():
            # Tap claim + confirmation nine times (was a manual
            # while/counter loop; behavior unchanged).
            for _ in range(9):
                device.shell(data['claim'][0])
                device.shell(data['claim'][1])
        # change to weekly mission board
        self.make_sure_loaded('./base/other/daily.png', device, data['daily']['dms'], data['daily']['third_shell'], shell_first=True, sleep_duration=0.5, cutoff=8, loop=20)
        claim()
def mails(self, device, data):
        """Claim all mailbox rewards on *device*.

        Leaves the mission board, opens the mailbox, taps "claim all" and
        returns to the main screen.  No return value.

        Args:
            device: adb device handle.
            data: per-step ``dms``/``shell`` entries under ``data['mails']``.
        """
        print(device.serial+': mails is enabled, claiming all mails...')
        logging.info(device.serial+': claiming mails')
        # exit from mission board
        self.make_sure_loaded('./base/mails/my_info.png', device, data['mails']['1']['dms'], data['mails']['1']['shell'], sleep_duration=0.5)
        device.shell(data['mails']['2']['shell'])
        logging.info(device.serial+': exit to main screen (1)')
        # click mailbox
        self.make_sure_loaded('./base/mails/mailbox.png', device, data['mails']['3']['dms'], data['mails']['3']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked mailbox')
        # click claim all
        self.make_sure_loaded('./base/mails/claim_all.png', device, data['mails']['4']['dms'], data['mails']['4']['shell'], sleep_duration=0.5)
        logging.info(device.serial+': clicked claim all')
        # exit to main screen (steps 1/2 are reused for both exits)
        self.make_sure_loaded('./base/mails/my_info.png', device, data['mails']['1']['dms'], data['mails']['1']['shell'], sleep_duration=0.5)
        device.shell(data['mails']['2']['shell'])
        logging.info(device.serial+': exit to main screen (2)')
def loh(self, device, data, lang):
        """Repeatedly "suicide" in LOH for a fixed time budget (~4000 s).

        Navigates portal -> arena -> LOH, OCR-checks that entry keys are
        available, pushes a tap script to the device and re-runs it in a
        timed loop, re-checking keys every 50 iterations.

        Args:
            device: adb device handle (``shell``/``push``/``serial``).
            data: per-step ``dms``/``shell`` entries under ``data['loh']``,
                plus ``data['loh']['scripts']`` with the pushed script path
                and helper tap commands.
            lang: tesseract language code / key into languages.json.

        Returns:
            'not enough currency' when the localized "out of keys" message
            is OCR-matched (ratio > 0.9), else 'success' after the time
            budget is spent.
        """
        print(device.serial+': loh is enabled, suiciding in loh...')
        logging.info(device.serial+': suiciding in loh')
        # exit from mission board
        self.make_sure_loaded('./base/loh/my_info.png', device, data['loh']['1']['dms'], data['loh']['1']['shell'], sleep_duration=0.5)
        device.shell(data['loh']['2']['shell'])
        logging.info(device.serial+': exit to main screen')
        # click portal
        self.make_sure_loaded('./base/loh/portal.png', device, data['loh']['3']['dms'], data['loh']['3']['shell'], sleep_duration=1, ck_special_shop=False)
        logging.info(device.serial+': clicked portal')
        # click arena in portal
        self.make_sure_loaded('./base/loh/arena.png', device, data['loh']['4']['dms'], data['loh']['4']['shell'], cutoff=15, sleep_duration=0.5, ck_special_shop=False)
        logging.info(device.serial+': clicked arenas')
        # click loh in arena
        self.make_sure_loaded('./base/loh/notice.png', device, data['loh']['5']['dms'], data['loh']['5']['shell'], cutoff=20, sleep_duration=0.5, ck_special_shop=False)
        logging.info(device.serial+': clicked loh')
        # click ok in notice
        self.make_sure_loaded('./base/loh/loh.png', device, data['loh']['6']['dms'], data['loh']['6']['shell'],
            second_img='./base/loh/previous_result.png', third_img='./base/loh/rewards.png', sleep_duration=10, loop=10, ck_special_shop=False)
        logging.info(device.serial+': clicked ok in notice')
        # OCR the key counter; retries forever on transient screenshot/OCR errors.
        def check_keys(device):
            while True:
                try:
                    device.shell(data['loh']['7']['second_shell'])
                    slp(3)
                    device.shell(data['loh']['7']['shell'])
                    slp(5)
                    im, device = self.update_cache(device)
                    im = crop(im, data['loh']['7']['dms'])
                    text = image_to_string(im, lang).lower().replace('♀', '')
                    detect(text)
                    if lang == 'jpn':
                        text = text.replace(' ', '')
                    with open('./languages.json', encoding='utf-8') as j:
                        re = json.load(j)
                    if SequenceMatcher(None, re[lang]['loh'], text).ratio() > 0.9:
                        return 'not enough currency'
                    return 'continue'
                except Exception:
                    # Was a bare 'except:' — narrowed so KeyboardInterrupt /
                    # SystemExit can still break out of this retry loop.
                    continue
        re = check_keys(device)
        if re == 'not enough currency':
            return 'not enough currency'
        # push script and continuously execute
        device.shell('rm /sdcard/loh_script.sh')
        slp(5)
        device.push(data['loh']['scripts']['sh'], '/sdcard/loh_script.sh')
        start_time = tiime()
        seconds = 4000
        count = 0
        slp(5)
        device.shell(data['loh']['scripts']['confirm'])
        while True:
            current_time = tiime()
            elapsed_time = current_time - start_time
            if elapsed_time > seconds:
                break
            if count == 50:
                # Periodic key re-check; bail out if currency ran out mid-run.
                re = check_keys(device)
                if re == 'not enough currency':
                    return 'not enough currency'
                count = 0
                device.shell(data['loh']['scripts']['get_ready'])
                device.shell(data['loh']['scripts']['confirm'])
            device.shell('sh /sdcard/loh_script.sh')
            count+=1
        logging.info(device.serial+': successfully suiciding in loh')
        return 'success'
def extra_dailies(self, device):
        # Placeholder: no extra daily tasks implemented yet.
        # TODO: implement or remove once the feature set is decided.
        pass
def load_devices():
    """(Re)start the local adb server and enumerate attached devices.

    Kills any running adb server, then queries the ppadb client; if the
    first query fails (server not yet up), starts it via the bundled
    ``adb`` binary and retries once.

    Returns:
        tuple: (devices, adb_dir, adb) — device list from ppadb, the
        quoted path prefix of the bundled adb binary (Windows-style,
        used for raw ``run_`` invocations), and the ppadb client.
    """
    working_dir = getcwd()
    adb_dir = '"'+working_dir+'\\adb" '
    run_(adb_dir+'kill-server')
    adb = Client(host="127.0.0.1", port=5037)
    try:
        devices = adb.devices()
    except Exception:
        # Was a bare 'except:' — narrowed so KeyboardInterrupt/SystemExit
        # are not swallowed.  'adb devices' starts the server as a side
        # effect; wait, retry, then query again.
        run_(adb_dir+'devices')
        slp(5)
        run_(adb_dir+'devices')
        devices = adb.devices()
    return devices, adb_dir, adb
def run():
    """Top-level driver: read config, check for a newer release, boot
    LDPlayer instances and dispatch one ``Missions().run_execute`` thread
    per device.

    Reads ./config.json ('ldconsole' path with '|' standing in for '"',
    'quit_all', 'devices', 'max_devices') and ./sets.json (version/remind
    state).  The update-reminder path uses a Win32 message box, so that
    part is Windows-only.  Gives up after 50 empty adb scans.
    """
    with open('./config.json') as j:
        re = json.load(j)
    # NOTE(review): local 're' is the parsed config dict, not the regex module.
    path = re['ldconsole'].replace('|', '"')
    quit_all = False
    if re['quit_all'] == True:
        quit_all = True
        try:
            run_(path+' quitall')
        except FileNotFoundError:
            text = "path to LDPlayer is wrong, please config and try again"
            logging.info(text)
            print(text)
            input('press any key to exit...')
            return
    # Update check against the GitHub releases API.
    latest = get("https://api.github.com/repos/faber6/kings-raid-daily/releases/latest")
    with open('./sets.json') as t:
        this = json.load(t)
    if latest.json()["tag_name"] != this['version']:
        text = (f'\nThere is a new version ({latest.json()["tag_name"]}) of script on https://github.com/faber6/kings-raid-daily/releases'+
            '\nIf this version not working as expected, please update to a newer version\n')
        logging.info(text)
        print(text)
        # Ask the user (Win32 message box on a helper thread) whether to
        # keep reminding; persists the answer back to ./sets.json.
        def msg_box(this):
            answ = ctypes.windll.user32.MessageBoxW(0,
                text[1:].replace('\n','\n\n')+'do you want to remind this later?',
                f"kings-raid-daily new version ({latest.json()['tag_name']})", 4)
            if answ == 6: # yes
                this['remind'] = True
            elif answ == 7: # no
                this['remind'] = False
            with open('./sets.json', 'w') as w:
                json.dump(this, w, indent=4)
            return
        msg_box_thread = Thread(target=msg_box, name='msg_box', args=(this,))
        if this['remind'] == True:
            msg_box_thread.start()
        else:
            if latest.json()["tag_name"] != this['latest']:
                this['latest'] = latest.json()["tag_name"]
                with open('./sets.json', 'w') as w:
                    json.dump(this, w, indent=4)
                msg_box_thread.start()
    devices, adb_dir, adb = load_devices()
    count = 0
    # Main retry loop: rescan adb until devices appear or retries run out.
    while True:
        if count == 49:
            text = 'no device was found after 50 retries, script ended'
            logging.info(text)
            print(text)
            break
        if devices == []:
            if re['devices'] != []:
                if count == 4 or quit_all == True:
                    if quit_all == True:
                        text = 'quit all emulators, launching from config...'
                        logging.info(text)
                        print(text)
                    else:
                        text = 'no device was found after 5 retries, launching from config and retrying...'
                        logging.info(text)
                        print(text)
                    break_ = False
                    devices_dexist = 0
                    if re['max_devices'] == 1:
                        # Sequential mode: launch and process one configured
                        # emulator index at a time.
                        for device_ in re['devices']:
                            try:
                                re_ = run_(path+' launch --index '+str(device_), capture_output=True).stdout
                                if str(re_)+'/' == """b"player don't exist!"/""":
                                    devices_dexist += 1
                                    text = 'device with index '+str(device_)+" doesn't exist"
                                    logging.info(text)
                                    print(text)
                                else:
                                    text = 'launched device with index '+str(device_)
                                    logging.info(text)
                                    print(text)
                                    print('waiting 30 secs for fully boot up')
                                    slp(30)
                                    while True:
                                        devices, adb_dir, adb = load_devices()
                                        if devices != []:
                                            if len(devices) == 1:
                                                for device in devices:
                                                    if str(device.serial).startswith('127'):
                                                        continue
                                                    thread = Thread(target=Missions().run_execute, args=(device, device_,))
                                                    text = 'executing on device '+device.serial
                                                    logging.info(text)
                                                    print(text)
                                                    thread.start()
                                                    start_time = tiime()
                                                    # Busy-wait up to 3 h (10800 s) for the worker.
                                                    seconds = 10800
                                                    while True:
                                                        current_time = tiime()
                                                        elapsed_time = current_time - start_time
                                                        if elapsed_time > seconds:
                                                            break
                                                        if thread.is_alive() == False:
                                                            break
                                            else:
                                                text = "'max_devices' set to 1 but 'adb devices' returns "+str(len(devices))+' devices, retrying...'
                                                logging.info(text)
                                                print(text)
                                                continue
                                            break
                                        slp(5)
                                    break_ = True
                            except FileNotFoundError:
                                break_ = True
                                text = "path to LDPlayer is wrong, please config and try again"
                                logging.info(text)
                                print(text)
                                input('press any key to exit...')
                                break
                        if devices_dexist == len(re['devices']):
                            text = "all configured devices don't exit"
                            logging.info(text)
                            print(text)
                            input('press any key to exit...')
                            break_ = True
                        break
                    else:
                        # Batched mode: keep up to 'max_devices' emulators
                        # running concurrently until every config index is done.
                        running = 0
                        _devices_ = {}
                        for device_ in re['devices']:
                            _devices_[device_] = False
                        if len(_devices_) % 2 == 0:
                            last_run = 0
                        else:
                            run_times = ceil(len(_devices_) / 2)
                            last_run = len(_devices_) - run_times
                        threads = []
                        launched = []
                        launched_ = []
                        done = []
                        devices_ = []
                        _break_ = False
                        i = 0
                        while True:
                            if len(done) == len(re['devices']):
                                break_ = True
                                _break_ = True
                                break
                            if running != 0:
                                if running == re['max_devices'] or running == last_run:
                                    slp(10)
                                    for thread_ in threads:
                                        if int(thread_.name) not in done:
                                            start_time = tiime()
                                            seconds = 10800
                                            while True:
                                                current_time = tiime()
                                                elapsed_time = current_time - start_time
                                                if elapsed_time > seconds:
                                                    break
                                                if thread_.is_alive() == False:
                                                    break
                                            done.append(int(thread_.name))
                                    # for thread_ in threads:
                                    #     if int(thread_.name) not in done:
                                    #         thread_.join(9000)
                                    #         done.append(int(thread_.name))
                                    running = running - len(done)
                            else:
                                for device_ in _devices_:
                                    if running == re['max_devices']:
                                        break
                                    elif _devices_[device_] == False:
                                        try:
                                            path = re['ldconsole'].replace('|', '"')
                                            re_ = run_(path+' launch --index '+str(device_), capture_output=True).stdout
                                            if str(re_)+'/' == """b"player don't exist!"/""":
                                                devices_dexist += 1
                                                text = 'device with index '+str(device_)+" doesn't exist"
                                                logging.info(text)
                                                print(text)
                                            else:
                                                text = 'launched device with index '+str(device_)
                                                logging.info(text)
                                                print(text)
                                                launched.append(int(device_))
                                                running += 1
                                                _devices_[device_] = True
                                        except FileNotFoundError:
                                            break_ = True
                                            _break_ = True
                                            text = "path to LDPlayer is wrong, please config and try again"
                                            logging.info(text)
                                            print(text)
                                            input('press any key to exit...')
                                            break
                                if devices_dexist == len(re['devices']):
                                    text = "all configured device(s) don't exit"
                                    logging.info(text)
                                    print(text)
                                    input('press any key to exit...')
                                    break_ = True
                                    _break_ = True
                                    break
                                print('waiting 30 secs for fully boot up')
                                slp(30)
                                while True:
                                    devices, adb_dir, adb = load_devices()
                                    if devices != []:
                                        if len(devices) == running:
                                            pass
                                        else:
                                            text = "'max_devices' set to "+str(re['max_devices'])+" but 'adb devices' returns "+str(len(devices))+' devices, retrying...'
                                            logging.info(text)
                                            print(text)
                                            continue
                                        for device in devices:
                                            if str(device.serial).startswith('127'):
                                                continue
                                            if device not in devices_:
                                                devices_.append(device)
                                        while True:
                                            try:
                                                device = devices_[i]
                                                device_ = launched[i]
                                            # NOTE(review): bare except here swallows everything,
                                            # including KeyboardInterrupt — consider IndexError.
                                            except:
                                                break
                                            if str(device.serial).startswith('127'):
                                                continue
                                            if int(device_) not in launched_:
                                                thread = Thread(target=Missions().run_execute, name=str(device_), args=(device,device_,))
                                                threads.append(thread)
                                                launched_.append(int(device_))
                                                text = 'executing on device '+device.serial
                                                logging.info(text)
                                                print(text)
                                                thread.start()
                                            i+=1
                                        break
                                    slp(5)
                            if _break_ == True:
                                break
                # NOTE(review): break_ is only assigned inside the
                # 'count == 4 or quit_all' branch above — on earlier
                # iterations this raises NameError; TODO confirm intent.
                if break_ == True:
                    break
            print('no device was found, retrying...')
            run_(adb_dir+'devices')
            devices = adb.devices()
        elif str(devices[0].serial).startswith('127'):
            print('no device was found, retrying...')
            devices, adb_dir, adb = load_devices()
        else:
            slp(10)
            run_(adb_dir+'devices')
            devices = adb.devices()
            print('device(s) detected')
            for device in devices:
                thread = Thread(target=Missions().run_execute, args=(device,))
                text = 'executing on device '+device.serial
                logging.info(text)
                print(text)
                thread.start()
            break
        slp(5)
        count+=1
df193d81eacd388f5ab18d96aacd2c5b1b7b5976 | 548 | py | Python | sarikasama/0012/0012.py | saurabh896/python-1 | f8d3aedf4c0fe6e24dfa3269ea7e642c9f7dd9b7 | [
"MIT"
] | 3,976 | 2015-01-01T15:49:39.000Z | 2022-03-31T03:47:56.000Z | sarikasama/0012/0012.py | oyesam7/python-1 | 220734af09fa09a6f615d4f1b4612a0ab75d91d1 | [
"MIT"
] | 97 | 2015-01-11T02:59:46.000Z | 2022-03-16T14:01:56.000Z | sarikasama/0012/0012.py | oyesam7/python-1 | 220734af09fa09a6f615d4f1b4612a0ab75d91d1 | [
"MIT"
] | 3,533 | 2015-01-01T06:19:30.000Z | 2022-03-28T13:14:54.000Z | #!/usr/bin/env python3
#filter sensitive words in user's input
def replace_sensitive_words(input_word):
    """Print *input_word* with every sensitive word masked as '**'.

    Sensitive words are read, one per line, from the 'filtered_words'
    file in the current working directory on every call.  Blank lines are
    skipped — the original readline loop collected them as '' entries,
    and ``str.replace('', '**')`` would then inject '**' between every
    character of the input.
    """
    with open('filtered_words', 'r') as f:
        s_words = [line.strip() for line in f if line.strip()]
    for word in s_words:
        if word in input_word:
            input_word = input_word.replace(word, "**")
    print(input_word)
if __name__ == '__main__':
    # Interactive driver: read lines forever and echo each with sensitive
    # words masked.  Terminates only via Ctrl-C / EOF (EOFError from input).
    while True:
        input_word = input('--> ')
        replace_sensitive_words(input_word)
| 27.4 | 55 | 0.600365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.186131 |
df1b5b5a58e2e52b261ca94c9d20882cbf00caf2 | 1,172 | py | Python | moceansdk/modules/command/mc_object/tg_request_contact.py | d3no/mocean-sdk-python | cbc215a0eb8aa26c04afb940eab6482f23150c75 | [
"MIT"
] | 2 | 2019-10-31T02:37:43.000Z | 2021-07-25T02:45:27.000Z | moceansdk/modules/command/mc_object/tg_request_contact.py | d3no/mocean-sdk-python | cbc215a0eb8aa26c04afb940eab6482f23150c75 | [
"MIT"
] | 18 | 2019-05-30T01:09:34.000Z | 2022-01-04T07:31:47.000Z | moceansdk/modules/command/mc_object/tg_request_contact.py | d3no/mocean-sdk-python | cbc215a0eb8aa26c04afb940eab6482f23150c75 | [
"MIT"
] | 4 | 2019-04-19T08:34:47.000Z | 2021-07-21T02:02:07.000Z | from builtins import super
from moceansdk.modules.command.mc_object import AbstractMc
class TgRequestContact(AbstractMc):
    """Builder for a Telegram "request contact" command payload.

    Fluent API: every setter stores into ``self._params`` (provided by
    ``AbstractMc``) and returns ``self`` so calls can be chained.  The
    keyboard button label defaults to 'Share button'.
    """

    def __init__(self, param=None):
        super().__init__(param)
        # Pre-populate the contact-request keyboard with its default label.
        self.set_button_text('Share button')

    def action(self):
        """API action name submitted with this command."""
        return 'send-telegram'

    def required_key(self):
        """Keys that must be set before the payload is valid."""
        return ('to', 'from')

    def set_to(self, _to, _contact_type='chat_id'):
        """Set the message recipient (a chat id by default)."""
        self._params['to'] = {'type': _contact_type, 'id': _to}
        return self

    def set_from(self, _from, _contact_type='bot_username'):
        """Set the sending identity (a bot username by default)."""
        self._params['from'] = {'type': _contact_type, 'id': _from}
        return self

    def set_content(self, _text):
        """Set the plain-text message body."""
        self._params['content'] = {'type': 'text', 'text': _text}
        return self

    def set_button_text(self, _text):
        """Set the label of the contact-request keyboard button."""
        self._params['tg_keyboard'] = {'button_request': 'contact', 'button_text': _text}
        return self
| 29.3 | 65 | 0.610068 | 1,083 | 0.924061 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.199659 |
df1b80f7d7fc1e3bfcdf6202b6edd9c8fbec5f5f | 5,418 | py | Python | tests/engine/backend/test_multiprocess_backend.py | sanchitcop19/web-api-async | a3fff70c3b62678f3c8ef8cc07f7c9b4fe155c69 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/engine/backend/test_multiprocess_backend.py | sanchitcop19/web-api-async | a3fff70c3b62678f3c8ef8cc07f7c9b4fe155c69 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/engine/backend/test_multiprocess_backend.py | sanchitcop19/web-api-async | a3fff70c3b62678f3c8ef8cc07f7c9b4fe155c69 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """Test the execute method of the synchronous backend."""
import os
import shutil
import time
import unittest
from vizier.datastore.fs.factory import FileSystemDatastoreFactory
from vizier.engine.backend.multiprocess import MultiProcessBackend
from vizier.engine.controller import WorkflowController
from vizier.engine.packages.pycell.base import PACKAGE_PYTHON, PYTHON_CODE
from vizier.engine.packages.pycell.processor import PyCellTaskProcessor
from vizier.engine.packages.vizual.api.fs import DefaultVizualApi
from vizier.engine.packages.vizual.base import PACKAGE_VIZUAL, VIZUAL_LOAD, VIZUAL_UPD_CELL
from vizier.engine.packages.vizual.processor import VizualTaskProcessor
from vizier.engine.project.base import ProjectHandle
from vizier.engine.project.cache.common import CommonProjectCache
from vizier.engine.task.base import TaskHandle
from vizier.engine.task.processor import TaskProcessor
from vizier.filestore.fs.factory import FileSystemFilestoreFactory
from vizier.viztrail.objectstore.repository import OSViztrailRepository
from vizier.viztrail.command import ModuleCommand
import vizier.engine.packages.vizual.command as vizual
import vizier.engine.packages.pycell.command as pycell
import vizier.engine.packages.base as pckg
SERVER_DIR = './.tmp'
DATASTORES_DIR = SERVER_DIR + '/ds'
FILESTORES_DIR = SERVER_DIR + '/fs'
VIZTRAILS_DIR = SERVER_DIR + '/vt'
CSV_FILE = './.files/dataset.csv'
PROJECT_ID = '111'
class FakeTaskProcessor(TaskProcessor):
    """Stub processor that implements none of the TaskProcessor behaviour."""

    def __init__(self):
        """Deliberately skip TaskProcessor initialisation."""
class FakeWorkflowController(WorkflowController):
    """Controller double that records the last task outcome it is told about."""

    def __init__(self):
        self.state = None
        self.outputs = None
        self.task_id = None

    def set_error(self, task_id, finished_at=None, outputs=None):
        """Capture an ERROR outcome for *task_id*."""
        self.task_id, self.outputs, self.state = task_id, outputs, 'ERROR'

    def set_success(self, task_id, finished_at=None, datasets=None, outputs=None, provenance=None):
        """Capture a SUCCESS outcome for *task_id*."""
        self.task_id, self.outputs, self.state = task_id, outputs, 'SUCCESS'
class TestMultiprocessBackend(unittest.TestCase):
    """Integration tests for MultiProcessBackend asynchronous task execution."""
    def setUp(self):
        """Create an instance of the default vizier processor for an empty server
        directory.
        """
        # Drop directory if it exists
        if os.path.isdir(SERVER_DIR):
            shutil.rmtree(SERVER_DIR)
        os.makedirs(SERVER_DIR)
        projects = CommonProjectCache(
            datastores=FileSystemDatastoreFactory(DATASTORES_DIR),
            filestores=FileSystemFilestoreFactory(FILESTORES_DIR),
            viztrails=OSViztrailRepository(base_path=VIZTRAILS_DIR)
        )
        self.PROJECT_ID = projects.create_project().identifier
        # The 'error' package maps to a stub processor so a test can force an
        # exception inside the backend instead of a normal result.
        self.backend = MultiProcessBackend(
            processors={
                PACKAGE_PYTHON: PyCellTaskProcessor(),
                PACKAGE_VIZUAL: VizualTaskProcessor(api=DefaultVizualApi()),
                'error': FakeTaskProcessor()
            },
            projects=projects
        )
    def tearDown(self):
        """Clean-up by dropping the server directory.
        """
        if os.path.isdir(SERVER_DIR):
            shutil.rmtree(SERVER_DIR)
    def test_cancel(self):
        """Test that cancelling a running task prevents any result from being
        reported back to the controller."""
        context = dict()
        # Python cell that sleeps long enough for the test to cancel it first.
        cmd = pycell.python_cell(
            source='import time\ntime.sleep(5)',
            validate=True
        )
        controller = FakeWorkflowController()
        self.backend.execute_async(
            task=TaskHandle(
                task_id='000',
                project_id=self.PROJECT_ID,
                controller=controller
            ),
            command=cmd,
            context=context
        )
        time.sleep(1)
        self.backend.cancel_task('000')
        time.sleep(5)
        # A cancelled task must never call back into the controller.
        self.assertIsNone(controller.task_id)
        self.assertIsNone(controller.state)
    def test_error(self):
        """Test executing a command with processor that raises an exception
        instead of returning an execution result.
        """
        context = dict()
        cmd = ModuleCommand(package_id='error', command_id='error')
        controller = FakeWorkflowController()
        self.backend.execute_async(
            task=TaskHandle(
                task_id='000',
                project_id=self.PROJECT_ID,
                controller=controller
            ),
            command=cmd,
            context=context
        )
        time.sleep(2)
        self.assertEqual(controller.task_id, '000')
        self.assertEqual(controller.state, 'ERROR')
        # Processor failures surface on stderr only; stdout stays empty.
        self.assertEqual(len(controller.outputs.stdout), 0)
        self.assertNotEqual(len(controller.outputs.stderr), 0)
    def test_execute(self):
        """Test executing a successful Python cell and receiving its output."""
        context = dict()
        # NOTE(review): 'print 2+2' is Python 2 syntax -- presumably the pycell
        # processor executes cells with a Python 2 interpreter; confirm.
        cmd = pycell.python_cell(
            source='print 2+2',
            validate=True
        )
        controller = FakeWorkflowController()
        self.backend.execute_async(
            task=TaskHandle(
                task_id='000',
                project_id=self.PROJECT_ID,
                controller=controller
            ),
            command=cmd,
            context=context
        )
        time.sleep(3)
        self.assertEqual(controller.task_id, '000')
        self.assertEqual(controller.state, 'SUCCESS')
        self.assertEqual(controller.outputs.stdout[0].value, '4')
if __name__ == '__main__':
unittest.main()
| 33.444444 | 99 | 0.658546 | 3,931 | 0.725544 | 0 | 0 | 0 | 0 | 0 | 0 | 669 | 0.123477 |
df1b9a8dd49a7cba3147ab1d0bd9b08eace69dbb | 1,496 | py | Python | main.py | Stormjotne/oslomet-disease-model | 6292d5b6f30fd385fccb3815994700cadd75c9fb | [
"MIT"
] | 2 | 2021-09-06T11:43:39.000Z | 2022-02-18T11:49:21.000Z | main.py | Stormjotne/oslomet-disease-model | 6292d5b6f30fd385fccb3815994700cadd75c9fb | [
"MIT"
] | 3 | 2021-09-15T12:10:42.000Z | 2021-11-08T19:32:21.000Z | main.py | Stormjotne/oslomet-disease-model | 6292d5b6f30fd385fccb3815994700cadd75c9fb | [
"MIT"
] | null | null | null | from time import sleep
from random import random, uniform
from OMDM import Evolution
"""
Run the model optimization program we're creating from this script.
"""
# Evolution hyper-parameters, kept at module level so other modules can
# import the default configuration.
hyper_parameters = {
    "number_of_generations": 10,
    "genome_length": 6,
    "mutation_probability": 0.2,
    "do_crossover": True,
    "soft_mutation": True,
    "population_size": 10,
    "surviving_individuals": 2,
    "number_of_parents": 2,
    "desired_agent_population": 500,
    "desired_agent_population_weight": 1,
    "relative_spread_weight": 1
}

# The block below only runs when this file is executed as a script.
if __name__ == "__main__":
    name_of_experiment = "Soft_Mutation_Test"
    Evo = Evolution.Evolution(hyper_parameters, printout=True, name=name_of_experiment)
    # Run evolution.
    result = Evo.evolve()
    print("The evolution is complete.\nThe best individual ended up with the following properties:\n")
    best = result["best_individual"]
    # Report each property with a one-second pause between lines.
    for report_line in (
        "Fitness: {}".format(best.fitness),
        "Fitness Trend: {}".format(result["fitness_trend"]),
        "Parameter Trend: {}".format(result["parameter_trend"]),
        "Genotype: {}".format(best.genome.genome),
        "Phenotype: {}".format(best.phenotype),
    ):
        sleep(1)
        print(report_line)
| 36.487805 | 102 | 0.635695 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 709 | 0.47393 |
df1f060044c4759d5a15d50dfadd88be86823530 | 688 | py | Python | pyoat/__init__.py | berkanlafci/pyoat | b0cee99adde3c14c5d94f26f6e893a0b2e1fcae2 | [
"MIT"
] | 5 | 2022-03-07T16:30:58.000Z | 2022-03-28T13:42:01.000Z | pyoat/__init__.py | berkanlafci/pyoat | b0cee99adde3c14c5d94f26f6e893a0b2e1fcae2 | [
"MIT"
] | 1 | 2022-03-28T13:41:42.000Z | 2022-03-28T13:41:42.000Z | pyoat/__init__.py | berkanlafci/pyoat | b0cee99adde3c14c5d94f26f6e893a0b2e1fcae2 | [
"MIT"
] | null | null | null | #-----
# Description : Import classes/functions from subfolders
# Date : February 2021
# Author : Berkan Lafci
# E-mail : lafciberkan@gmail.com
#-----
# package version
__version__ = "1.0.0"
# oa recon codes
from pyoat.reconstruction import cpuBP, cpuMB, modelOA
# data readers
from pyoat.readers import oaReader
# preprocessing tools
from pyoat.preprocessing import sigMatFilter
from pyoat.preprocessing import sigMatNormalize
# simulation
from pyoat.simulation import forward
# utils
from pyoat.utils import saveImagePng, saveImageMat, saveSignalPng, saveSignalMat, saveImageH5
from pyoat.utils import calculateDelay
from pyoat.utils import averageSignals | 25.481481 | 93 | 0.767442 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 264 | 0.383721 |
df1f23556aeeff57ddaf501491d64b2becebf8b3 | 680 | py | Python | game/cactus.py | gmunumel/trex404 | 3821e8695a0d9e85d70127a1c8b6bdb2a13f1e38 | [
"MIT"
] | null | null | null | game/cactus.py | gmunumel/trex404 | 3821e8695a0d9e85d70127a1c8b6bdb2a13f1e38 | [
"MIT"
] | null | null | null | game/cactus.py | gmunumel/trex404 | 3821e8695a0d9e85d70127a1c8b6bdb2a13f1e38 | [
"MIT"
] | null | null | null | from game.globals import *
from game.lib import load_sprite_sheet
import pygame, random
class Cactus(pygame.sprite.Sprite):
    """Cactus obstacle that scrolls in from the right edge of the window."""

    def __init__(self, speed=5, sizeX=-1, sizeY=-1):
        pygame.sprite.Sprite.__init__(self, self.containers)
        self.images, self.rect = load_sprite_sheet('cacti-small.png', 3, 1, sizeX, sizeY)
        # Place the sprite just off the right edge, near the bottom.
        self.rect.bottom = int(0.97 * WIN_HEIGHT)
        self.rect.left = WIN_WIDTH + self.rect.width
        # Constant leftward scroll, no vertical motion.
        self.movement = [-speed, 0]
        # Pick one of the three cactus frames at random.
        self.image = self.images[random.randrange(0, 3)]

    def draw(self):
        """Blit the cactus onto the global screen surface."""
        screen.blit(self.image, self.rect)

    def update(self):
        """Scroll left; remove the sprite once it is fully off-screen."""
        self.rect = self.rect.move(self.movement)
        if self.rect.right < 0:
            self.kill()
df1f50b2cee471c44a5439e1ae4ca45519f52060 | 15,624 | py | Python | src/api/auth/views/token_views.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 84 | 2021-06-30T06:20:23.000Z | 2022-03-22T03:05:49.000Z | src/api/auth/views/token_views.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 7 | 2021-06-30T06:21:16.000Z | 2022-03-29T07:36:13.000Z | src/api/auth/views/token_views.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 40 | 2021-06-30T06:21:26.000Z | 2022-03-29T12:42:26.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from auth.audit.audit_stats import AUDIT_TYPE, Stats
from auth.core.permission import TokenPermission, UserPermission
from auth.exceptions import TokenNotExistErr
from auth.handlers.object_classes import oFactory
from auth.models.auth_models import AuthDataToken, DataTokenQueueUser
from auth.models.base_models import ObjectConfig
from auth.permissions import TokenPermission as TokenViewSetPermission
from auth.services.token import TokenGenerator
from auth.utils.filtersets import get_filterset
from auth.utils.serializer import DataPageNumberPagination
from auth.views.auth_serializers import (
AuthDataTokenSerializer,
DataTokenCheckSerializer,
DataTokenCreateSerializer,
DataTokenRetrieveSerializer,
DataTokenUpdateSerializer,
)
from common.decorators import detail_route, list_route, params_valid
from common.views import APIModelViewSet
from django.core.cache import cache
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
from rest_framework.response import Response
from common import local
CACHE_TIME_600 = 600
CACHE_TIME_60 = 60
class TokenPermViewSet(APIModelViewSet):
    """CRUD and permission-check endpoints for DataToken authorization codes."""
    queryset = AuthDataToken.objects.all()
    serializer_class = AuthDataTokenSerializer
    pagination_class = DataPageNumberPagination
    permission_classes = (TokenViewSetPermission,)
    filter_backends = (filters.SearchFilter, DjangoFilterBackend, filters.OrderingFilter)
    filter_class = get_filterset(AuthDataToken)
    @list_route(methods=["POST"])
    @params_valid(DataTokenCheckSerializer, add_params=False)
    def check(self, request):
        """
        @api {post} /auth/tokens/check/ 校验 Token 权限
        @apiName check_perm
        @apiGroup TokenPerm
        @apiParam {string} check_app_code 应用ID
        @apiParam {string} check_data_token 授权码,一般为 30 位
        @apiParam {string} action_id 操作类型,例如 result_table.query_data
        @apiParam {string} object_id 对象ID
        @apiParamExample {json} 校验授权码对结果表是否有查询数据权限
        {
            "check_app_code": "data",
            "check_data_token": "xxxxxxxxxx",
            "action_id": "result_table.query_data",
            "object_id": "100107_cpu_parse"
        }
        @apiParamExample {json} 校验授权码对资源组是否有使用权限
        {
            "check_app_code": "data",
            "check_data_token": "xxxxxxxxxx",
            "action_id": "resource_group.use",
            "object_id": "gem"
        }
        @apiSuccessExample {json} Succees-Response.data
            True | False
        """
        app_code = request.cleaned_params["check_app_code"]
        data_token = request.cleaned_params["check_data_token"]
        action_id = request.cleaned_params["action_id"]
        object_id = request.cleaned_params.get("object_id", None)
        cache_key = "token_perm_check:{app_code}_{data_token}_{action_id}_{object_id}".format(
            app_code=app_code, data_token=data_token, action_id=action_id, object_id=object_id
        )
        cache_value = cache.get(cache_key)
        if cache_value is not None:
            ret = cache_value
        else:
            ret = TokenPermission(token=data_token).check(
                action_id, object_id, bk_app_code=app_code, raise_exception=False
            )
            if ret is True:
                # Positive check results are cached for 10 minutes; negative
                # results only for 1 minute so newly-granted access shows up fast.
                cache_time = CACHE_TIME_600
            else:
                cache_time = CACHE_TIME_60
            cache.set(cache_key, ret, cache_time)
        return Response(ret)
    # NOTE: the misspelling "retrive" is part of the public route name and
    # cannot be fixed without breaking existing API clients.
    @list_route(methods=["get"])
    def retrive_by_data_token(self, request):
        """
        @api {get} /auth/tokens/retrive_by_data_token/ 通过 data_token 查询详情
        @apiName retrive_by_data_token
        @apiGroup TokenPerm
        @apiSuccessExample {json} 返回样例
            {
                id: 70,
                data_token: 'xxxxxxxx',
                data_token_bk_app_code: "dataweb",
                status: "enabled",
                created_by: "xxx",
                created_at: "2019-02-20 17:29:23",
                expired_at: "2019-02-27 17:29:24"
            }
        """
        data_token = request.query_params["search_data_token"]
        o_token = AuthDataToken.objects.get(data_token=data_token)
        return Response(
            {
                "id": o_token.pk,
                "data_token": o_token.data_token,
                "data_token_bk_app_code": o_token.data_token_bk_app_code,
                "status": o_token.status,
                "created_by": o_token.created_by,
                "created_at": o_token.created_at,
                "expired_at": o_token.expired_at,
            }
        )
    @params_valid(DataTokenCreateSerializer, add_params=False)
    def create(self, request, *args, **kwargs):
        """
        @api {post} /auth/token_perm/ 创建token
        @apiName create_token
        @apiGroup TokenPerm
        @apiParam {string} bk_app_code 应用ID
        @apiParam {dict} data_scope 数据范围
        @apiParam {string} expire 过期时间
        @apiParamExample {json} 校验授权码对结果表是否有查询数据权限
            {
                "data_token_bk_app_code":"2018secretary_h5",
                "data_scope":{
                    "is_all":false,
                    "permissions":[
                        {
                            "action_id":"result_table.query",
                            "object_class":"result_table",
                            "scope_id_key":"result_table_id",
                            "scope_name_key":"result_table_name",
                            "scope_object_class":"result_table",
                            "scope":{
                                "result_table_id":"591_result_table",
                                "result_table_name":"结果表1"
                            }
                        }
                    ]
                },
                "reason":"dsff",
                "expire":7
            }
        @apiSuccessExample {json} Succees-Response.data
            True | False
        """
        bk_username = local.get_request_username()
        bk_app_code = request.cleaned_params["data_token_bk_app_code"]
        data_scope = request.cleaned_params["data_scope"]
        expire = request.cleaned_params["expire"]
        token_generator = TokenGenerator(bk_username, bk_app_code, data_scope, expire)
        # Respond with the plaintext token string of the newly created token.
        return Response(token_generator.create_token(request.cleaned_params["reason"]).data_token)
    @params_valid(DataTokenUpdateSerializer, add_params=False)
    def update(self, request, *args, **kwargs):
        """
        @api {put} /auth/token_perm/:id/ 校验 Token 权限 参数同创建
        @apiName update_token
        @apiGroup TokenPerm
        """
        bk_username = local.get_request_username()
        data_scope = request.cleaned_params["data_scope"]
        expire = request.cleaned_params["expire"]
        o_token = AuthDataToken.objects.get(pk=kwargs.get("pk"))
        TokenGenerator(bk_username, o_token.data_token_bk_app_code, data_scope, expire).update_token(
            o_token, request.cleaned_params["reason"]
        )
        return Response()
    @params_valid(DataTokenRetrieveSerializer, add_params=False)
    def retrieve(self, request, *args, **kwargs):
        """
        @api {get} /auth/tokens/:id/ 查询授权码详情
        @apiName retrieve_token
        @apiParam {String} permission_status 按照权限状态过滤,多个使用逗号隔开,目前有 active,applying,inactive,默认 active
        @apiParam {String} show_display 展示权限对象属性,默认 True,如果想快速获取关键数据,建议设置为 False
        @apiParam {String} show_scope_structure 显示授权码组织结构,默认 True,如果想快速获取关键数据,建议设置为 False
        @apiGroup TokenPerm
        @apiSuccessExample {json} 简单返回样例
            {
                status: "enabled",
                created_at: "2019-02-20 17:29:23",
                updated_by: "",
                permissions: [
                    "status": "active",
                    "scope_id_key": "result_table_id",
                    "updated_by": "user01",
                    "created_at": "2019-09-25 21:47:50",
                    "updated_at": "2019-09-25 21:48:37",
                    "created_by": "user01",
                    "scope": {
                        "result_table_id": "591_abcddeeffgg_2"
                    },
                    "action_id": "result_table.query_queue",
                    "object_class": "result_table",
                    "id": 145,
                    "scope_object_class": "result_table",
                    "description": null,
                ],
                data_token_bk_app_code: "dataweb",
                description: null,
                data_token: "ul60********z36z",
                updated_at: null,
                created_by: "xxx",
                _status: "enabled",
                status_display: "已启用",
                id: 70,
                expired_at: "2019-02-27 17:29:24"
            }
        """
        permission_status_arr = request.cleaned_params["permission_status"].split(",")
        show_display = request.cleaned_params["show_display"]
        show_scope_structure = request.cleaned_params["show_scope_structure"]
        o_token = AuthDataToken.objects.get(pk=kwargs.get("pk"))
        data = super().retrieve(request, *args, **kwargs).data
        data["permissions"] = o_token.list_permissions(permission_status_arr)
        if show_display:
            data["permissions"] = oFactory.wrap_display_all(data["permissions"], key_path=["scope"])
        if show_scope_structure:
            data["scopes"] = o_token.scopes
        scope_key_map = ObjectConfig.get_object_scope_key_map()
        # Adjust the data structure to match what the frontend expects.
        for perm in data["permissions"]:
            scope_name_key = scope_key_map[perm["scope_object_class"]]["scope_name_key"]
            perm["scope_name_key"] = scope_name_key
            perm["scope_display"] = {scope_name_key: perm["scope"].get(scope_name_key)}
        return Response(data)
    def list(self, request, *args, **kwargs):
        """
        @api {post} /v3/auth/tokens/ 返回用户有权限的 DataToken
        @apiName list_data_token
        @apiGroup TokenPerm
        @apiSuccessExample {json} 返回样例
            [
                {
                    updated_at: "2019-07-04 18:03:35"
                    id: 428
                    data_token_bk_app_code: "northernlights"
                    data_token: "uKkR********cNgh"
                    updated_by: ""
                    _status: "enabled"
                    created_by: "user01"
                    description: "测试"
                    status_display: "已启用"
                    status: "enabled"
                    expired_nearly: True,
                    created_at: "2019-07-04 18:03:35"
                    expired_at: "2020-07-03 18:03:35"
                }
            ]
        """
        scopes = UserPermission(local.get_request_username()).get_scopes("data_token.manage")
        data_token_ids = [scope["data_token_id"] for scope in scopes if "data_token_id" in scope]
        _queryset = AuthDataToken.get_status_queryset(request.GET.getlist("status__in"))
        # Restrict the queryset to tokens the current user may manage.
        self.queryset = _queryset.filter(id__in=data_token_ids)
        return super().list(request, *args, **kwargs)
    @detail_route(methods=["GET"])
    def plaintext(self, request, *args, **kwargs):
        # Return the unmasked token string (list/retrieve show it masked).
        o_token = AuthDataToken.objects.get(pk=kwargs.get("pk"))
        return Response(o_token.data_token)
    @detail_route(methods=["POST"])
    def renewal(self, request, pk):
        """
        @api {post} /v3/auth/tokens/:id/renewal/ 对 DataToken 进行续期
        @apiName renewal_data_token
        @apiGroup TokenPerm
        @apiParam {string} expire 过期时间,可选的有 7, 30, 90, 365
        @apiSuccessExample {json} 返回样例
            'ok'
        """
        expire = request.data["expire"]
        o_token = AuthDataToken.objects.get(pk=pk)
        if o_token.renewal(expire):
            # Record the renewal in the audit trail only when it succeeded.
            Stats.add_to_audit_action(
                Stats.gene_audit_id(),
                o_token.id,
                AUDIT_TYPE.TOKEN_RENEWAL,
                1,
                [o_token],
                operator=local.get_request_username(),
            )
        return Response("ok")
    @list_route(methods=["POST"])
    def exchange_queue_user(self, request):
        """
        @api {post} /auth/tokens/exchange_queue_user/ 置换队列服务账号
        @apiName exchange_queue_user
        @apiGroup TokenPerm
        @apiParam {string} data_token
        @apiSuccessExample {json} 成功返回
            {
                "data_token": "fadfakfdsakfak",
                "queue_user": "test_app&xxxxx",
                "queue_password": "fdafdasfdas"
            }
        """
        data_token = request.data["data_token"]
        try:
            AuthDataToken.objects.get(data_token=data_token)
        except AuthDataToken.DoesNotExist:
            raise TokenNotExistErr()
        queue_user = DataTokenQueueUser.get_or_init(data_token=data_token)
        return Response({"queue_user": queue_user.queue_user, "queue_password": queue_user.queue_password})
    @list_route(methods=["POST"], url_path="exchange_default_data_token")
    def exchange_default_data_token(self, request):
        """
        @api {POST} /auth/tokens/exchange_default_data_token/ 置换默认DataToken
        @apiName exchange_default_data_token
        @apiGroup TokenPerm
        @apiParam {string} data_token_bk_app_code
        @apiSuccessExample {json} 成功返回
            {
                id: 70,
                data_token: 'xxxxxxxx',
                data_token_bk_app_code: "dataweb",
                status: "enabled",
                created_by: "xxx",
                created_at: "2019-02-20 17:29:23",
                expired_at: "2019-02-27 17:29:24"
            }
        @apiErrorExample {json} 成功返回
            None
        """
        data_token_bk_app_code = request.data["data_token_bk_app_code"]
        # The "default" token is the oldest one created for the application.
        o_tokens = AuthDataToken.objects.filter(data_token_bk_app_code=data_token_bk_app_code).order_by("pk")
        if o_tokens.count() == 0:
            return Response(None)
        o_token = o_tokens[0]
        return Response(
            {
                "id": o_token.pk,
                "data_token": o_token.data_token,
                "data_token_bk_app_code": o_token.data_token_bk_app_code,
                "status": o_token.status,
                "created_by": o_token.created_by,
                "created_at": o_token.created_at,
                "expired_at": o_token.expired_at,
            }
        )
| 40.268041 | 111 | 0.609319 | 13,740 | 0.845122 | 0 | 0 | 11,971 | 0.736314 | 0 | 0 | 9,148 | 0.562677 |
df1fe7022c513f9daa84d71c86c0ba7d7afec1ce | 1,396 | py | Python | fdrtd/builtins/util/kvstorage.py | chart21/fdrtd | c5f8ba84f8e367580572f2759683dfd3789b0b80 | [
"MIT"
] | null | null | null | fdrtd/builtins/util/kvstorage.py | chart21/fdrtd | c5f8ba84f8e367580572f2759683dfd3789b0b80 | [
"MIT"
] | 10 | 2021-08-29T00:03:36.000Z | 2022-02-18T09:19:38.000Z | fdrtd/builtins/util/kvstorage.py | chart21/fdrtd | c5f8ba84f8e367580572f2759683dfd3789b0b80 | [
"MIT"
] | 2 | 2021-08-28T21:44:10.000Z | 2021-12-16T23:23:58.000Z | """
contains microservice KeyValueStorage
"""
import uuid as _uuid
from fdrtd.server.microservice import Microservice
class KeyValueStorage(Microservice):
    """Stores and retrieves arbitrary values by UUID key.

    Values live in named in-memory storages (plain dicts); a callback dict
    ``{'uuid': ..., 'storage': ...}`` identifies one stored value.
    """

    def __init__(self, bus, endpoint):
        super().__init__(bus, endpoint)
        # Map of storage name -> {uuid: value}; 'default' always exists.
        self.storages = {'default': {}}

    def create(self, value, storage='default'):
        """Create a storage on demand; store value, return its callback handle."""
        if storage not in self.storages:
            self.storages[storage] = {}
        uuid = str(_uuid.uuid4())
        callback = {'uuid': uuid, 'storage': storage}
        self.store(value, callback)
        return self.callback(callback)

    def store(self, value, callback):
        """Store value under the key described by callback.

        Raises KeyError if the named storage does not exist.
        """
        kvstorage = self.storages[callback['storage']]
        kvstorage[callback['uuid']] = value

    def retrieve(self, callback):
        """Retrieve the value for callback; raises KeyError if absent."""
        kvstorage = self.storages[callback['storage']]
        return kvstorage[callback['uuid']]

    def exists(self, callback):
        """Return true if key exists in storage.

        Fixed: an unknown storage name now yields False instead of
        raising KeyError, matching the predicate semantics of 'exists'.
        """
        kvstorage = self.storages.get(callback['storage'], {})
        return callback['uuid'] in kvstorage

    def delete(self, callback):
        """Delete key from storage; raises KeyError if absent."""
        kvstorage = self.storages[callback['storage']]
        del kvstorage[callback['uuid']]
| 30.347826 | 55 | 0.626791 | 1,273 | 0.911891 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.256447 |
df22c7df4259c94d4e73cee307d40b33d12c2f03 | 739 | py | Python | tests/unit/handlers/test_HTTPSClientAuthHandler.py | sivel/requisitor | c6b10d400111998591ea9e21148c9cd7e905a453 | [
"Apache-2.0"
] | 7 | 2020-04-17T04:05:19.000Z | 2020-05-15T14:35:21.000Z | tests/unit/handlers/test_HTTPSClientAuthHandler.py | sivel/requisitor | c6b10d400111998591ea9e21148c9cd7e905a453 | [
"Apache-2.0"
] | null | null | null | tests/unit/handlers/test_HTTPSClientAuthHandler.py | sivel/requisitor | c6b10d400111998591ea9e21148c9cd7e905a453 | [
"Apache-2.0"
] | null | null | null | import pytest
from requisitor.session import Session
def test_client_cert_auth(mocker):
    """The (cert, key) tuple must reach the HTTPS connection as keyword args."""
    conn = mocker.patch('http.client.HTTPSConnection',
                        side_effect=RuntimeError)
    session = Session()
    with pytest.raises(RuntimeError):
        session.request('GET', 'https://foo.bar', cert=('cert', 'key'))
    _, kwargs = conn.call_args
    assert kwargs['cert_file'] == 'cert'
    assert kwargs['key_file'] == 'key'
def test_unix_socket(mocker):
    """A unix_socket argument must route through UnixHTTPSConnection."""
    conn = mocker.patch('requisitor.handlers.UnixHTTPSConnection',
                        side_effect=RuntimeError)
    session = Session()
    with pytest.raises(RuntimeError):
        session.request('GET', 'https://foo.bar', unix_socket='/foo/bar')
    conn.assert_called_once()
df2344b9d30d0dc68153136f3c88de8e8dd0db2a | 370 | py | Python | Dataset/Leetcode/valid/35/31.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/valid/35/31.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/valid/35/31.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution:
def XXX(self, nums: List[int], target: int) -> int:
l, r = 0, len(nums)-1
# 找到第一个大于或等于target的位置
while l<r:
m = (l+r) // 2
if nums[m] >= target:
r = m
else:
l = m + 1
if nums[r] >= target:
return r
else:
return r+1
| 20.555556 | 55 | 0.381081 | 393 | 0.992424 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.118687 |
df2437ca766dc3a07e7da531239e5dd32c32401c | 1,773 | py | Python | gracc-oneoffs/remove-odd-procs/remove-odd-procs.py | opensciencegrid/gracc-tools | 86297d0a8b6b208b6c661473647fdc69f438afec | [
"Apache-2.0"
] | null | null | null | gracc-oneoffs/remove-odd-procs/remove-odd-procs.py | opensciencegrid/gracc-tools | 86297d0a8b6b208b6c661473647fdc69f438afec | [
"Apache-2.0"
] | 1 | 2017-06-17T19:25:51.000Z | 2017-06-19T15:33:22.000Z | gracc-oneoffs/remove-odd-procs/remove-odd-procs.py | opensciencegrid/gracc-tools | 86297d0a8b6b208b6c661473647fdc69f438afec | [
"Apache-2.0"
] | 2 | 2017-06-16T20:07:38.000Z | 2017-06-17T15:03:18.000Z | #!/usr/bin/python
import elasticsearch
from elasticsearch_dsl import Search, A, Q
#import logging
import sys
import os
#logging.basicConfig(level=logging.WARN)
#es = elasticsearch.Elasticsearch(
# ['https://gracc.opensciencegrid.org/q'],
# timeout=300, use_ssl=True, verify_certs=False)
# One-off fixer: records from this probe were reported with Processors=0;
# rewrite them in-place to Processors=1 and recompute CoreHours from
# WallDuration (cores = 1 -> CoreHours = WallDuration / 3600).
es = elasticsearch.Elasticsearch(
    ['localhost:9200'],
    timeout=300)
osg_raw_index = 'gracc.osg.raw-*'
s = Search(using=es, index=osg_raw_index)
# Match the records by ProbeName and processors = 0.
s = s.query("match", ProbeName="htcondor-ce:hosted-ce18.grid.uchicago.edu")
s = s.query("match", Processors=0)
# Only touch records whose EndTime falls within the last 12 months.
s = s.filter('range', EndTime={'from': 'now-12M', 'to': 'now'})
response = s.execute()
print "Query took %i milliseconds" % response.took
print "Query got %i hits" % response.hits.total
#update_id = "8c5816978fee6fc17718bcf81350d1f4"
#print "About to update record with id: %s" % update_id
#es.update(index="gracc.osg.raw3-2017.07", doc_type='JobUsageRecord', id=update_id, body={'doc': {'VOName': 'UserSchool2017'}})
update_buffer = []
for hit in s.scan():
    # Calculate the new CoreHours (cores = 1):
    core_hours = hit.WallDuration / 3600.0
    # Partial-update action for the bulk helper; only two fields change.
    updated_doc = {
        "doc": {
            "CoreHours": core_hours,
            "Processors": 1
        },
        "_index": hit.meta.index,
        "_id": hit.meta.id,
        "_type": hit.meta.doc_type,
        "_op_type": "update"
    }
    update_buffer.append(updated_doc)
    print "Update %s" % updated_doc
    # Flush in batches of ~200 actions to keep bulk requests small.
    if len(update_buffer) > 200:
        elasticsearch.helpers.bulk(es, update_buffer)
        update_buffer = []
# Flush whatever remains after the scan.
# NOTE(review): only 'import elasticsearch' appears above -- confirm the
# installed client version exposes 'elasticsearch.helpers' without an
# explicit 'import elasticsearch.helpers'.
elasticsearch.helpers.bulk(es, update_buffer)
#es.update(index=hit.meta.index, doc_type=hit.meta.doc_type, id=hit.meta.id, body={'doc': updated_doc})
| 30.568966 | 128 | 0.667795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 882 | 0.497462 |
df2507bcc55adf702b1c6993e0b76f7f643ef8d6 | 849 | py | Python | Hasoc/create_folds.py | tezike/Hasoc | b29c5ec877a1751b04f86227a6ad264be8c06d81 | [
"Apache-2.0"
] | 1 | 2020-11-24T07:48:55.000Z | 2020-11-24T07:48:55.000Z | Hasoc/create_folds.py | tezike/Hasoc | b29c5ec877a1751b04f86227a6ad264be8c06d81 | [
"Apache-2.0"
] | null | null | null | Hasoc/create_folds.py | tezike/Hasoc | b29c5ec877a1751b04f86227a6ad264be8c06d81 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/create_folds.ipynb (unless otherwise specified).
# nbdev-generated export list; the duplicate names mirror the repeated
# top-level assignments below.
__all__ = ['df', 'df', 'y', 'kf', 'df', 'y']

# Cell
import os
import pandas as pd
from sklearn.model_selection import StratifiedKFold

# Cell
df = pd.read_csv(os.path.join('../data', 'en_task_a', 'hasoc_2020_en_train_new.csv'), sep='\t')

# Cell
# Sentinel -1 marks rows not yet assigned to a fold.
df['kfold_task1'] = -1
df['kfold_task2'] = -1
# NOTE(review): SEED is never defined in this exported module (it presumably
# exists only in the source notebook) -- running this file as-is raises
# NameError on the next line.
df = df.sample(frac=1.,random_state=SEED).reset_index(drop=True)
y = df['task1'].values
kf = StratifiedKFold(n_splits=5)
for fold,(t_,v_) in enumerate(kf.split(X=df,y=y)):
    df.loc[v_,'kfold_task1'] = fold
# Re-shuffle before stratifying folds on the second task's labels.
df = df.sample(frac=1.,random_state=SEED).reset_index(drop=True)
y = df['task2'].values
for fold,(t_,v_) in enumerate(kf.split(X=df,y=y)):
    df.loc[v_,'kfold_task2'] = fold

# Cell
df.to_csv(os.path.join('..', 'data', 'fold_df.csv'), index=False)
df25b7352e71e86d55c9c932e24f93a40dcbb932 | 6,976 | py | Python | heat/tests/test_components.py | citrix-openstack-build/heat | fa31873529481472e037e3ce157b87f8057fe622 | [
"Apache-2.0"
] | null | null | null | heat/tests/test_components.py | citrix-openstack-build/heat | fa31873529481472e037e3ce157b87f8057fe622 | [
"Apache-2.0"
] | null | null | null | heat/tests/test_components.py | citrix-openstack-build/heat | fa31873529481472e037e3ce157b87f8057fe622 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.engine.components import Component
from heat.engine.components import Components
from heat.tests.common import HeatTestCase
class ComponentTest(HeatTestCase):
    """Unit tests for the Component wrapper."""

    def test_init(self):
        """A bare Component exposes empty defaults."""
        comp = Component()
        self.assertEqual(comp.type, 'OS::Heat::SoftwareConfig')
        self.assertEqual(comp.properties, {})
        self.assertEqual(comp.scripts, {})
        self.assertEqual(comp.relations, [])
        self.assertEqual(comp.hosted_on(), None)
        self.assertEqual(comp.depends(), [])

    def test_hosted_on(self):
        """hosted_on() returns the target of the hosted_on relationship."""
        comp = Component({'relationships': [{'hosted_on': 'wordpress'}]})
        self.assertEqual(comp.hosted_on(), 'wordpress')

    def test_depends(self):
        """depends() collects every depends_on relationship, in order."""
        comp = Component({'relationships': [{'depends_on': 'config_mysql'}]})
        self.assertEqual(comp.depends(), ['config_mysql'])
        comp['relationships'].append({'depends_on': 'config_wordpress'})
        self.assertEqual(comp.depends(), ['config_mysql', 'config_wordpress'])
class ComponentsTest(HeatTestCase):
    """Unit tests for the Components collection."""
    def test_init(self):
        """An empty schema yields an empty collection; entries wrap as Component."""
        schema = {}
        comps = Components(schema)
        self.assertEqual(0, len(comps))
        schema['config_mysql'] = {}
        comps = Components(schema)
        self.assertEqual(1, len(comps))
        comp = comps['config_mysql']
        self.assertIsInstance(comp, Component)
    def test_depends(self):
        """depends() returns every component some other component depends on."""
        schema = {
            'install_mysql': {
            },
            'config_mysql': {
                'relationships': [
                    {'depends_on': 'install_mysql'}
                ]
            },
            'start_mysql': {
                'relationships': [
                    {'depends_on': 'config_mysql'}
                ]
            }
        }
        comps = Components(schema)
        self.assertEqual(3, len(comps))
        deps = comps.depends()
        # 'start_mysql' is a leaf: nothing depends on it, so it is excluded.
        self.assertEqual(2, len(deps))
        self.assertIn('install_mysql', deps)
        self.assertIn('config_mysql', deps)
    def test_multi_depends(self):
        """Dependencies are collected across two intertwined component chains."""
        schema = {
            'install_mysql': {
            },
            'config_mysql': {
                'relationships': [
                    {'depends_on': 'install_mysql'}
                ]
            },
            'start_mysql': {
                'relationships': [
                    {'depends_on': 'config_mysql'}
                ]
            },
            'install_wordpress': {},
            'config_wordpress': {
                'relationships': [
                    {'depends_on': 'install_wordpress'}
                ]
            },
            'start_wordpress': {
                'relationships': [
                    {'depends_on': 'config_wordpress'},
                    {'depends_on': 'start_mysql'}
                ]
            }
        }
        comps = Components(schema)
        deps = comps.depends()
        # Only 'start_wordpress' has no dependents.
        self.assertEqual(5, len(deps))
        self.assertNotIn('start_wordpress', deps)
        self.assertIn('install_wordpress', deps)
        self.assertIn('config_wordpress', deps)
        self.assertIn('start_mysql', deps)
        self.assertIn('config_mysql', deps)
        self.assertIn('install_mysql', deps)
    def test_filter(self):
        """filter() selects the components hosted on a given host."""
        schema = {
            'install_mysql': {
                'relationships': [
                    {'hosted_on': 'mysql'}
                ]
            },
            'config_mysql': {
                'relationships': [
                    {'hosted_on': 'mysql'},
                    {'depends_on': 'install_mysql'}
                ]
            },
            'start_mysql': {
                'relationships': [
                    {'hosted_on': 'mysql'},
                    {'depends_on': 'config_mysql'}
                ]
            },
            'install_wordpress': {
                'relationships': [
                    {'hosted_on': 'wordpress'}
                ]
            },
            'config_wordpress': {
                'relationships': [
                    {'hosted_on': 'wordpress'},
                    {'depends_on': 'install_wordpress'}
                ]
            },
            'start_wordpress': {
                'relationships': [
                    {'hosted_on': 'wordpress'},
                    {'depends_on': 'config_wordpress'},
                    {'depends_on': 'start_mysql'}
                ]
            }
        }
        comps = Components(schema)
        names = comps.filter('mysql')
        self.assertEqual(3, len(names))
        self.assertIn('config_mysql', names)
        self.assertIn('install_mysql', names)
        self.assertIn('start_mysql', names)
        names = comps.filter('wordpress')
        self.assertEqual(3, len(names))
        self.assertIn('config_wordpress', names)
        self.assertIn('install_wordpress', names)
        self.assertIn('start_wordpress', names)
    def test_validate(self):
        """validate() rejects self-dependency, unknown and duplicated targets."""
        schema = {'install_mysql': {}}
        comps = Components(schema)
        self.assertTrue(comps.validate())
        # A component may not depend on itself.
        schema = {
            'config_mysql': {
                'relationships': [
                    {'depends_on': 'config_mysql'}
                ]
            }
        }
        comps = Components(schema)
        err = self.assertRaises(ValueError, comps.validate)
        self.assertIn('component config_mysql depends on itself.', str(err))
        # Every dependency target must itself be defined.
        schema = {
            'config_mysql': {
                'relationships': [
                    {'depends_on': 'install_mysql'}
                ]
            }
        }
        comps = Components(schema)
        err = self.assertRaises(ValueError, comps.validate)
        self.assertIn('component install_mysql is not defined.', str(err))
        # Duplicate depends_on entries are rejected.
        schema = {
            'install_mysql': {
            },
            'config_mysql': {
                'relationships': [
                    {'depends_on': 'install_mysql'},
                    {'depends_on': 'install_mysql'}
                ]
            }
        }
        comps = Components(schema)
        err = self.assertRaises(ValueError, comps.validate)
        self.assertIn('duplicated install_mysql in config_mysql depends on.',
                      str(err))
| 31.853881 | 78 | 0.502437 | 6,218 | 0.891342 | 0 | 0 | 0 | 0 | 0 | 0 | 2,304 | 0.330275 |
df2669595a23ceb9c2346db6112762dd8108f2e5 | 55,075 | py | Python | pycstruct/pycstruct.py | midstar/pycstruct | 3430507e24a84ceb3ad409ddb43ed7f0934d48d2 | [
"MIT"
] | 14 | 2020-07-05T08:58:48.000Z | 2022-01-25T04:43:41.000Z | pycstruct/pycstruct.py | zbx911/pycstruct | 3430507e24a84ceb3ad409ddb43ed7f0934d48d2 | [
"MIT"
] | 15 | 2020-11-15T10:52:46.000Z | 2021-05-21T08:54:05.000Z | pycstruct/pycstruct.py | zbx911/pycstruct | 3430507e24a84ceb3ad409ddb43ed7f0934d48d2 | [
"MIT"
] | 2 | 2021-02-13T13:34:40.000Z | 2022-01-25T04:40:14.000Z | """pycstruct definitions
Copyright 2021 by Joel Midstjärna.
All rights reserved.
This file is part of the pycstruct python library and is
released under the "MIT License Agreement". Please see the LICENSE
file that should have been included as part of this package.
"""
# pylint: disable=too-many-lines, protected-access
import collections
import math
import struct
import sys
###############################################################################
# Global constants
# Basic Types
# Per basic type name: the struct module format character ("format"),
# the size in bytes ("bytes") and, where numpy supports the type, the
# numpy typestring ("dtype"). bool16/bool32/bool64 have no "dtype"
# entry; numpy only provides the 1-byte boolean "b1".
_TYPE = {
    "int8": {"format": "b", "bytes": 1, "dtype": "i1"},
    "uint8": {"format": "B", "bytes": 1, "dtype": "u1"},
    "bool8": {"format": "B", "bytes": 1, "dtype": "b1"},
    "int16": {"format": "h", "bytes": 2, "dtype": "i2"},
    "uint16": {"format": "H", "bytes": 2, "dtype": "u2"},
    "bool16": {"format": "H", "bytes": 2},
    "float16": {"format": "e", "bytes": 2, "dtype": "f2"},
    "int32": {"format": "i", "bytes": 4, "dtype": "i4"},
    "uint32": {"format": "I", "bytes": 4, "dtype": "u4"},
    "bool32": {"format": "I", "bytes": 4},
    "float32": {"format": "f", "bytes": 4, "dtype": "f4"},
    "int64": {"format": "q", "bytes": 8, "dtype": "i8"},
    "uint64": {"format": "Q", "bytes": 8, "dtype": "u8"},
    "bool64": {"format": "Q", "bytes": 8},
    "float64": {"format": "d", "bytes": 8, "dtype": "f8"},
}

# Per byte order name: the struct module byte-order prefix character.
_BYTEORDER = {
    "native": {"format": "="},
    "little": {"format": "<"},
    "big": {"format": ">"},
}
###############################################################################
# Internal functions
def _get_padding(alignment, current_size, next_element_size):
"""Calculate number of padding bytes required to get next element in
the correct alignment
"""
if alignment == 1:
return 0 # Always aligned
elem_size = min(alignment, next_element_size)
remainder = current_size % elem_size
if remainder == 0:
return 0
return elem_size - remainder
def _round_pow_2(value):
"""Round value to next power of 2 value - max 16"""
if value > 8:
return 16
if value > 4:
return 8
if value > 2:
return 4
return value
###############################################################################
# _BaseDef Class
class _BaseDef:
"""This is an abstract base class for definitions"""
def size(self):
"""Returns size in bytes"""
raise NotImplementedError
def serialize(self, data, buffer=None, offset=0):
"""Serialize a python object into a binary buffer.
If a `buffer` is specified, it will be updated using an optional
`offset` instead of creating and returning a new `bytearray`.
:param buffer: If not None, the serialization will feed this
buffer instead of creating and returning a new `bytearray`.
:param offset: If a `buffer` is specified the offset can be set
to specify the location of the serialization inside the buffer.
:returns: A new bytearray if `buffer` is None, else returns `buffer`
"""
raise NotImplementedError
def deserialize(self, buffer, offset=0):
"""Deserialize a `buffer` at an optional `offset` into a python object
:param buffer: buffer containing the data to deserialize.
:param offset: Specify the place of the buffer to deserialize.
:returns: A python object
"""
raise NotImplementedError
def _largest_member(self):
raise NotImplementedError
def _type_name(self):
raise NotImplementedError
def __getitem__(self, length):
"""Create an array type from this base type.
This make array type easy to create:
.. code-block:: python
basetype = pycstruct.pycstruct.BaseTypeDef("uint16")
arraytype = basetype[10]
Be careful that multi dimentional arrays will be defined in the revert
from a C declaration:
.. code-block:: python
basetype = pycstruct.pycstruct.BaseTypeDef("uint16")
arraytype = basetype[10][5][2]
# The fast axis is the first one (of size 10)
"""
if not isinstance(length, int):
raise TypeError("An integer is expected for a length of array")
return ArrayDef(self, length)
def dtype(self):
"""Returns the numpy dtype of this definition"""
raise Exception("dtype not implemented for %s" % type(self))
###############################################################################
# BasicTypeDef Class
class BasicTypeDef(_BaseDef):
    """Definition of a primitive type: integer, float or boolean."""

    def __init__(self, datatype, byteorder):
        # Attribute names are part of the public interface and are kept.
        self.type = datatype
        self.byteorder = byteorder
        basic = _TYPE[datatype]
        self.size_bytes = basic["bytes"]
        self.format = basic["format"]

    def serialize(self, data, buffer=None, offset=0):
        """Pack an integer, float or boolean value into the buffer."""
        if buffer is None:
            assert offset == 0, "When buffer is None, offset have to be unset"
            buffer = bytearray(self.size())
        else:
            assert len(buffer) >= offset + self.size(), "Specified buffer too small"
        struct.pack_into(
            _BYTEORDER[self.byteorder]["format"] + self.format,
            buffer,
            offset,
            data,
        )
        return buffer

    def deserialize(self, buffer, offset=0):
        """Unpack an integer, float or boolean value from the buffer."""
        fmt = _BYTEORDER[self.byteorder]["format"] + self.format
        value = struct.unpack_from(fmt, buffer, offset)[0]
        if self.type.startswith("bool"):
            # Booleans are stored as unsigned ints; any non-zero is True.
            value = value != 0
        return value

    def size(self):
        """Size of the type in bytes."""
        return self.size_bytes

    def _largest_member(self):
        # A basic type is its own largest member.
        return self.size_bytes

    def _type_name(self):
        return self.type

    def dtype(self):
        """Numpy typestring with byte order prefix, e.g. "<u4"."""
        dtype = _TYPE[self.type].get("dtype")
        if dtype is None:
            raise NotImplementedError(
                'Basic type "%s" is not implemented as dtype' % self.type
            )
        return _BYTEORDER[self.byteorder]["format"] + dtype
###############################################################################
# StringDef Class
class StringDef(_BaseDef):
    """Definition of a fixed size UTF-8 encoded string."""

    def __init__(self, length):
        # Maximum number of encoded bytes (not characters).
        self.length = length

    def serialize(self, data, buffer=None, offset=0):
        """Encode ``data`` (a str) as UTF-8 into the buffer."""
        if buffer is None:
            assert offset == 0, "When buffer is None, offset have to be unset"
            buffer = bytearray(self.size())
        else:
            assert len(buffer) >= offset + self.size(), "Specified buffer too small"
        if not isinstance(data, str):
            raise Exception("Not a valid string: {0}".format(data))
        encoded = data.encode("utf-8")
        if len(encoded) > self.length:
            raise Exception(
                "String overflow. Produced size {0} but max is {1}".format(
                    len(encoded), self.length
                )
            )
        # Remaining bytes (if any) keep their previous content, which is
        # zero for a freshly created buffer.
        buffer[offset : offset + len(encoded)] = encoded
        return buffer

    def deserialize(self, buffer, offset=0):
        """Decode a UTF-8 string, stopping at the first null byte."""
        end = offset + self.size()
        null_index = buffer.find(0, offset, end)
        if null_index >= 0:
            end = null_index
        return buffer[offset:end].decode("utf-8")

    def size(self):
        # One byte per element.
        return self.length

    def _largest_member(self):
        # Strings align on 1-byte boundaries.
        return 1

    def _type_name(self):
        return "utf-8"

    def dtype(self):
        return ("S", self.length)
###############################################################################
# ArrayDef Class
class ArrayDef(_BaseDef):
    """This class represents a fixed size array of a type.

    :param element_type: Type definition of each element.
    :param length: Number of elements in the array.
    """

    def __init__(self, element_type, length):
        self.type = element_type
        self.length = length

    def serialize(self, data, buffer=None, offset=0):
        """Serialize a python list into a binary buffer following this
        array type.

        :param data: A list (any sized iterable) with at most ``length``
            elements. Missing trailing elements are left untouched in
            the buffer (zero bytes for a fresh buffer).
        :param buffer: Optional output bytearray; created when omitted.
        :param offset: Write position in ``buffer``.
        :returns: The buffer containing the serialized data.
        """
        if not isinstance(data, collections.abc.Iterable):
            raise Exception("Data shall be a list")
        if len(data) > self.length:
            # Bugfix: the format string previously used index {1} with a
            # single argument, which raised "IndexError: Replacement
            # index 1 out of range" instead of the intended message.
            raise Exception("List is larger than {0}".format(self.length))
        if buffer is None:
            assert offset == 0, "When buffer is None, offset have to be unset"
            buffer = bytearray(self.size())
        else:
            assert len(buffer) >= offset + self.size(), "Specified buffer too small"
        size = self.type.size()
        for item in data:
            self.type.serialize(item, buffer=buffer, offset=offset)
            offset += size
        return buffer

    def _serialize_element(self, index, value, buffer, buffer_offset=0):
        """Serialize one element into the buffer

        :param index: Index of the element
        :type data: int
        :param value: Value of element
        :type data: varies
        :param buffer: Buffer that contains the data to serialize data into. This is an output.
        :type buffer: bytearray
        :param buffer_offset: Start address in buffer
        :type buffer: int
        """
        size = self.type.size()
        offset = buffer_offset + size * index
        self.type.serialize(value, buffer=buffer, offset=offset)

    def deserialize(self, buffer, offset=0):
        """Deserialize a binary buffer into a python list following this
        array type.

        :raises ValueError: If the buffer is too small to hold the array.
        """
        size = self.type.size()
        if len(buffer) < offset + size * self.length:
            raise ValueError(
                "A buffer size of at least {} is expected".format(size * self.length)
            )
        result = []
        for _ in range(self.length):
            item = self.type.deserialize(buffer=buffer, offset=offset)
            result.append(item)
            offset += size
        return result

    def _deserialize_element(self, index, buffer, buffer_offset=0):
        """Deserialize one element from buffer

        :param index: Index of element
        :type data: int
        :param buffer: Buffer that contains the data to deserialize data from.
        :type buffer: bytearray
        :param buffer_offset: Start address in buffer
        :type buffer: int
        :return: The value of the element
        :rtype: varies
        """
        size = self.type.size()
        offset = buffer_offset + size * index
        value = self.type.deserialize(buffer=buffer, offset=offset)
        return value

    def instance(self, buffer=None, buffer_offset=0):
        """Create an instance of this array.

        This is an alternative of using dictionaries and the
        :meth:`ArrayDef.serialize`/:meth:`ArrayDef.deserialize` methods
        for representing the data.

        :param buffer: Byte buffer where data is stored. If no buffer is
            provided a new byte buffer will be created and the instance
            will be 'empty'.
        :type buffer: bytearray, optional
        :param buffer_offset: Start offset in the buffer. This means that
            you can have multiple Instances (or other data) that share
            the same buffer.
        :type buffer_offset: int, optional
        :return: A new Instance object
        :rtype: :meth:`Instance`
        """
        # Deliberate cyclic import: pycstruct.instance depends on classes
        # in this module, which is fully imported by the time this method
        # is called.
        # pylint: disable=cyclic-import, import-outside-toplevel
        from pycstruct.instance import _InstanceList

        return _InstanceList(self, buffer, buffer_offset)

    def size(self):
        """Total size in bytes: element size times element count."""
        return self.length * self.type.size()

    def _largest_member(self):
        # Arrays align like their element type.
        return self.type._largest_member()

    def _type_name(self):
        return "{}[{}]".format(self.type._type_name(), self.length)

    def dtype(self):
        return (self.type.dtype(), self.length)
###############################################################################
# StructDef Class
class StructDef(_BaseDef):
    """This class represents a struct or a union definition

    :param default_byteorder: Byte order of each element unless explicitly set
                              for the element. Valid values are 'native',
                              'little' and 'big'.
    :type default_byteorder: str, optional
    :param alignment: Alignment of elements in bytes. If set to a value > 1
                      padding will be added between elements when necessary.
                      Use 4 for 32 bit architectures, 8 for 64 bit
                      architectures unless packing is performed.
    :type alignment: str, optional
    :param union: If this is set the True, the instance will behave like
                  a union instead of a struct, i.e. all elements share the
                  same data (same start address). Default is False.
    :type union: boolean, optional
    """

    # pylint: disable=too-many-instance-attributes, too-many-arguments

    def __init__(self, default_byteorder="native", alignment=1, union=False):
        """Constructor method"""
        if default_byteorder not in _BYTEORDER:
            raise Exception("Invalid byteorder: {0}.".format(default_byteorder))
        self.__default_byteorder = default_byteorder
        self.__alignment = alignment
        self.__union = union
        # Counter used to generate unique "__pad_N" field names.
        self.__pad_count = 0
        # Ordered mapping of field name -> {"type", "same_level", "offset"}.
        self.__fields = collections.OrderedDict()
        # Maps bitfield sub-element name -> parent field name, for
        # bitfields added with same_level=True.
        self.__fields_same_level = collections.OrderedDict()
        # Cache for dtype(); invalidated whenever the layout changes.
        self.__dtype = None

        # Add end padding of 0 size
        # NOTE: __pad_end is a single shared ArrayDef whose length is
        # mutated as elements are added; see add() and size().
        self.__pad_byte = BasicTypeDef("uint8", default_byteorder)
        self.__pad_end = ArrayDef(self.__pad_byte, 0)

    @staticmethod
    def _normalize_shape(length, shape):
        """Sanity check and normalization for length and shape.

        The `length` is used to define a string size, and `shape` is used to
        define an array shape. Both can be used at the same time.

        Returns the final size of the array, as a tuple of int.
        """
        if shape is None:
            shape = tuple()
        elif isinstance(shape, int):
            shape = (shape,)
        elif isinstance(shape, collections.abc.Iterable):
            shape = tuple(shape)
        for dim in shape:
            if not isinstance(dim, int) or dim < 1:
                raise ValueError(
                    "Strict positive dimensions are expected: {0}.".format(shape)
                )
        if length == 1:
            # It's just the type without array
            pass
        elif isinstance(length, int):
            if length < 1:
                raise ValueError(
                    "Strict positive dimension is expected: {0}.".format(length)
                )
            # length becomes the fastest (last) axis of the shape.
            shape = shape + (length,)
        return shape

    def add(self, datatype, name, length=1, byteorder="", same_level=False, shape=None):
        """Add a new element in the struct/union definition. The element will be added
        directly after the previous element if a struct or in parallel with the
        previous element if union. Padding might be added depending on the alignment
        setting.

        - Supported data types:

           +------------+---------------+--------------------------------------+
           | Name       | Size in bytes | Comment                              |
           +============+===============+======================================+
           | int8       | 1             | Integer                              |
           +------------+---------------+--------------------------------------+
           | uint8      | 1             | Unsigned integer                     |
           +------------+---------------+--------------------------------------+
           | bool8      | 1             | True (<>0) or False (0)              |
           +------------+---------------+--------------------------------------+
           | int16      | 2             | Integer                              |
           +------------+---------------+--------------------------------------+
           | uint16     | 2             | Unsigned integer                     |
           +------------+---------------+--------------------------------------+
           | bool16     | 2             | True (<>0) or False (0)              |
           +------------+---------------+--------------------------------------+
           | float16    | 2             | Floating point number                |
           +------------+---------------+--------------------------------------+
           | int32      | 4             | Integer                              |
           +------------+---------------+--------------------------------------+
           | uint32     | 4             | Unsigned integer                     |
           +------------+---------------+--------------------------------------+
           | bool32     | 4             | True (<>0) or False (0)              |
           +------------+---------------+--------------------------------------+
           | float32    | 4             | Floating point number                |
           +------------+---------------+--------------------------------------+
           | int64      | 8             | Integer                              |
           +------------+---------------+--------------------------------------+
           | uint64     | 8             | Unsigned integer                     |
           +------------+---------------+--------------------------------------+
           | bool64     | 8             | True (<>0) or False (0)              |
           +------------+---------------+--------------------------------------+
           | float64    | 8             | Floating point number                |
           +------------+---------------+--------------------------------------+
           | utf-8      | 1             | UTF-8/ASCII string. Use length       |
           |            |               | parameter to set the length of the   |
           |            |               | string including null termination    |
           +------------+---------------+--------------------------------------+
           | struct     | struct size   | Embedded struct. The actual          |
           |            |               | StructDef object shall be set as     |
           |            |               | type and not 'struct' string.        |
           +------------+---------------+--------------------------------------+
           | bitfield   | bitfield size | Bitfield. The actual                 |
           |            |               | :meth:`BitfieldDef` object shall be  |
           |            |               | set as type and not 'bitfield'       |
           |            |               | string.                              |
           +------------+---------------+--------------------------------------+
           | enum       | enum size     | Enum. The actual :meth:`EnumDef`     |
           |            |               | object shall be set as type and not  |
           |            |               | 'enum' string.                       |
           +------------+---------------+--------------------------------------+

        :param datatype: Element data type. See above.
        :type datatype: str
        :param name: Name of element. Needs to be unique.
        :type name: str
        :param length: Number of elements. If > 1 this is an array/list of
                       elements with equal size. Default is 1. This should only
                       be specified for string size. Use `shape` for arrays.
        :type length: int, optional
        :param shape: If specified an array of this shape is defined. It
                      supported, int, and tuple of int for multi-dimentional
                      arrays (the last is the fast axis)
        :type shape: int, tuple, optional
        :param byteorder: Byteorder of this element. Valid values are 'native',
                          'little' and 'big'. If not specified the default
                          byteorder is used.
        :type byteorder: str, optional
        :param same_level: Relevant if adding embedded bitfield. If True, the
                           serialized or deserialized dictionary keys will be
                           on the same level as the parent. Default is False.
        :type same_level: bool, optional
        """
        # pylint: disable=too-many-branches

        # Sanity checks
        shape = self._normalize_shape(length, shape)
        if name in self.__fields:
            raise Exception("Field name already exist: {0}.".format(name))
        if byteorder == "":
            byteorder = self.__default_byteorder
        elif byteorder not in _BYTEORDER:
            raise Exception("Invalid byteorder: {0}.".format(byteorder))
        if same_level and len(shape) != 0:
            raise Exception("same_level not allowed in combination with arrays")
        if same_level and not isinstance(datatype, BitfieldDef):
            raise Exception("same_level only allowed in combination with BitfieldDef")

        # Invalidate the dtype cache
        self.__dtype = None

        # Create objects when necessary
        if datatype == "utf-8":
            if shape == tuple():
                shape = (1,)
            # The fastest axis is the string length in bytes.
            datatype = StringDef(shape[-1])
            # Remaining dimensions for arrays of string
            shape = shape[0:-1]
        elif datatype in _TYPE:
            datatype = BasicTypeDef(datatype, byteorder)
        elif not isinstance(datatype, _BaseDef):
            raise Exception("Invalid datatype: {0}.".format(datatype))

        if len(shape) > 0:
            # Wrap in ArrayDef once per dimension, innermost (fastest) last.
            for dim in reversed(shape):
                datatype = ArrayDef(datatype, dim)

        # Remove end padding if it exists
        self.__fields.pop("__pad_end", "")

        # Offset in buffer (for unions always 0)
        offset = 0

        # Check if padding between elements is required (only struct not union)
        if not self.__union:
            # size() reflects the current layout (end padding was just
            # removed above).
            offset = self.size()
            padding = _get_padding(
                self.__alignment, self.size(), datatype._largest_member()
            )
            if padding > 0:
                padtype = ArrayDef(self.__pad_byte, padding)
                self.__fields["__pad_{0}".format(self.__pad_count)] = {
                    "type": padtype,
                    "same_level": False,
                    "offset": offset,
                }
                offset += padding
                self.__pad_count += 1

        # Add the element
        self.__fields[name] = {
            "type": datatype,
            "same_level": same_level,
            "offset": offset,
        }

        # Check if end padding is required
        padding = _get_padding(self.__alignment, self.size(), self._largest_member())
        if padding > 0:
            offset += datatype.size()
            # The shared __pad_end ArrayDef is resized in place.
            self.__pad_end.length = padding
            self.__fields["__pad_end"] = {
                "type": self.__pad_end,
                "offset": offset,
                "same_level": False,
            }

        # If same_level, store the bitfield elements
        if same_level:
            for subname in datatype._element_names():
                self.__fields_same_level[subname] = name

    def size(self):
        """Get size of structure or union.

        :return: Number of bytes this structure represents alternatively largest
                 of the elements (including end padding) if this is a union.
        :rtype: int
        """
        all_elem_size = 0
        largest_size = 0
        for name, field in self.__fields.items():
            fieldtype = field["type"]
            elem_size = fieldtype.size()
            # Padding fields never count as the "largest" union member.
            if not name.startswith("__pad") and elem_size > largest_size:
                largest_size = elem_size
            all_elem_size += elem_size
        if self.__union:
            return largest_size + self.__pad_end.length  # Union
        return all_elem_size  # Struct

    def _largest_member(self):
        """Used for struct/union padding

        :return: Largest member
        :rtype: int
        """
        largest = 0
        for field in self.__fields.values():
            current_largest = field["type"]._largest_member()
            if current_largest > largest:
                largest = current_largest
        return largest

    def deserialize(self, buffer, offset=0):
        """Deserialize buffer into dictionary"""
        result = {}
        if len(buffer) < self.size() + offset:
            raise Exception(
                "Invalid buffer size: {0}. Expected: {1}".format(
                    len(buffer), self.size()
                )
            )

        # for name, field in self.__fields.items():
        for name in self._element_names():
            if name.startswith("__pad"):
                # Padding carries no data.
                continue
            data = self._deserialize_element(name, buffer, buffer_offset=offset)
            result[name] = data

        return result

    def _deserialize_element(self, name, buffer, buffer_offset=0):
        """Deserialize one element from buffer

        :param name: Name of element
        :type data: str
        :param buffer: Buffer that contains the data to deserialize data from.
        :type buffer: bytearray
        :param buffer_offset: Start address in buffer
        :type buffer: int
        :param index: If this is a list (array) which index to deserialize?
        :type buffer: int
        :return: The value of the element
        :rtype: varies
        """
        if name in self.__fields_same_level:
            # This is a bitfield on same level
            field = self.__fields[self.__fields_same_level[name]]
            bitfield = field["type"]
            return bitfield._deserialize_element(
                name, buffer, buffer_offset + field["offset"]
            )

        field = self.__fields[name]
        datatype = field["type"]
        offset = field["offset"]
        try:
            value = datatype.deserialize(buffer, buffer_offset + offset)
        except Exception as exception:
            # Re-raise with the element name for easier debugging.
            raise Exception(
                "Unable to deserialize {} {}. Reason:\n{}".format(
                    datatype._type_name(), name, exception.args[0]
                )
            ) from exception
        return value

    def serialize(self, data, buffer=None, offset=0):
        """Serialize dictionary into buffer

        NOTE! If this is a union the method will try to serialize all the
        elements into the buffer (at the same position in the buffer).
        It is quite possible that the elements in the dictionary have
        contradicting data and the buffer of the last serialized element
        will be ok while the others might be wrong. Thus you should only define
        the element that you want to serialize in the dictionary.

        :param data: A dictionary keyed with element names. Elements can be
                     omitted from the dictionary (defaults to value 0).
        :type data: dict
        :return: A buffer that contains data
        :rtype: bytearray
        """
        if buffer is None:
            assert offset == 0, "When buffer is None, offset have to be unset"
            buffer = bytearray(self.size())
        else:
            assert len(buffer) >= offset + self.size(), "Specified buffer too small"
        for name in self._element_names():
            # Missing keys are skipped, leaving zero bytes in the buffer.
            if name in data and not name.startswith("__pad"):
                self._serialize_element(name, data[name], buffer, buffer_offset=offset)

        return buffer

    def _serialize_element(self, name, value, buffer, buffer_offset=0):
        """Serialize one element into the buffer

        :param name: Name of element
        :type data: str
        :param value: Value of element
        :type data: varies
        :param buffer: Buffer that contains the data to serialize data into. This is an output.
        :type buffer: bytearray
        :param buffer_offset: Start address in buffer
        :type buffer: int
        :param index: If this is a list (array) which index to deserialize?
        :type buffer: int
        """
        if name in self.__fields_same_level:
            # This is a bitfield on same level
            field = self.__fields[self.__fields_same_level[name]]
            bitfield = field["type"]
            bitfield._serialize_element(
                name, value, buffer, buffer_offset + field["offset"]
            )
            return  # We are done

        field = self.__fields[name]
        datatype = field["type"]
        offset = field["offset"]
        next_offset = buffer_offset + offset
        try:
            datatype.serialize(value, buffer, next_offset)
        except Exception as exception:
            # Re-raise with the element name for easier debugging.
            raise Exception(
                "Unable to serialize {} {}. Reason:\n{}".format(
                    datatype._type_name(), name, exception.args[0]
                )
            ) from exception

    def instance(self, buffer=None, buffer_offset=0):
        """Create an instance of this struct / union.

        This is an alternative of using dictionaries and the :meth:`StructDef.serialize`/
        :meth:`StructDef.deserialize` methods for representing the data.

        :param buffer: Byte buffer where data is stored. If no buffer is provided a new byte
                       buffer will be created and the instance will be 'empty'.
        :type buffer: bytearray, optional
        :param buffer_offset: Start offset in the buffer. This means that you
                              can have multiple Instances (or other data) that
                              shares the same buffer.
        :type buffer_offset: int, optional
        :return: A new Instance object
        :rtype: :meth:`Instance`
        """
        # I know. This is cyclic import of Instance, since instance depends
        # on classes within this file. However, it should not be any problem
        # since this file will be full imported once this method is called.
        # pylint: disable=cyclic-import, import-outside-toplevel
        from pycstruct.instance import Instance

        return Instance(self, buffer, buffer_offset)

    def create_empty_data(self):
        """Create an empty dictionary with all keys

        :return: A dictionary keyed with the element names. Values are "empty" or 0.
        :rtype: dict
        """
        # Deserializing an all-zero buffer yields default values.
        buffer = bytearray(self.size())
        return self.deserialize(buffer)

    def __str__(self):
        """Create string representation

        :return: A string illustrating all members (not Bitfield fields with same_level = True)
        :rtype: string
        """
        result = []
        result.append(
            "{:<30}{:<15}{:<10}{:<10}{:<10}{:<10}".format(
                "Name", "Type", "Size", "Length", "Offset", "Largest type"
            )
        )
        for name, field in self.__fields.items():
            datatype = field["type"]
            if isinstance(datatype, ArrayDef):
                # Unwrap nested arrays to show all dimensions.
                length = []
                while isinstance(datatype, ArrayDef):
                    length.append(datatype.length)
                    datatype = datatype.type
                length = ",".join([str(l) for l in length])
            else:
                length = ""
            result.append(
                "{:<30}{:<15}{:<10}{:<10}{:<10}{:<10}".format(
                    name,
                    datatype._type_name(),
                    datatype.size(),
                    length,
                    field["offset"],
                    datatype._largest_member(),
                )
            )
        return "\n".join(result)

    def _type_name(self):
        if self.__union:
            return "union"
        return "struct"

    def remove_from(self, name):
        """Remove all elements from a specific element

        This function is useful to create a sub-set of a struct.

        :param name: Name of element to remove and all after this element
        :type name: str
        """
        self._remove_from_or_to(name, to_criteria=False)

    def remove_to(self, name):
        """Remove all elements from beginning to a specific element

        This function is useful to create a sub-set of a struct.

        :param name: Name of element to remove and all before element
        :type name: str
        """
        self._remove_from_or_to(name, to_criteria=True)

    def _remove_from_or_to(self, name, to_criteria=True):
        # Shared implementation of remove_from()/remove_to(); deletes
        # fields up to (or from) ``name`` and re-bases remaining offsets.
        if name not in self.__fields:
            raise Exception("Element {} does not exist".format(name))

        # Invalidate the dtype cache
        self.__dtype = None

        keys = list(self.__fields)
        if not to_criteria:
            # remove_from: delete from the end towards ``name``.
            keys.reverse()
        for key in keys:
            del self.__fields[key]
            if key == name:
                break  # Done
        if len(self.__fields) > 0:
            # Update offset of all elements
            keys = list(self.__fields)
            adjust_offset = self.__fields[keys[0]]["offset"]
            for _, field in self.__fields.items():
                field["offset"] -= adjust_offset

    def _element_names(self):
        """Get a list of all element names (in correct order)

        Note that this method also include elements of bitfields with same_level = True

        :return: A list of all elements
        :rtype: list
        """
        result = []
        for name, field in self.__fields.items():
            if field["same_level"]:
                # Expand a same-level bitfield into its sub-element names.
                for subname, parent_name in self.__fields_same_level.items():
                    if name == parent_name:
                        result.append(subname)
            else:
                result.append(name)
        return result

    def _element_type(self, name):
        """Returns the type of element.

        Note that elements of bitfields with same_level = True will be returned as None.

        :return: Type of element or None
        :rtype: pycstruct class
        """
        if name in self.__fields:
            return self.__fields[name]["type"]
        return None

    def _element_offset(self, name):
        """Returns the offset of the element.

        :return: Offset of element
        :rtype: int
        """
        if name in self.__fields:
            return self.__fields[name]["offset"]
        raise Exception("Invalid element {}".format(name))

    def get_field_type(self, name):
        """Returns the type of a field of this struct.

        :return: Type if the field
        :rtype: _BaseDef
        """
        return self._element_type(name)

    def dtype(self):
        """Returns the dtype of this structure as defined by numpy.

        This allows to use the pycstruct modelization together with numpy
        to read C structures from buffers.

        .. code-block:: python

            color_t = StructDef()
            color_t.add("uint8", "r")
            color_t.add("uint8", "g")
            color_t.add("uint8", "b")
            color_t.add("uint8", "a")

            raw = b"\x01\x02\x03\x00"
            color = numpy.frombuffer(raw, dtype=color_t.dtype())

        :return: a python dict representing a numpy dtype
        :rtype: dict
        """
        if self.__dtype is not None:
            # Return the cached result; add() invalidates this cache.
            return self.__dtype
        names = []
        formats = []
        offsets = []
        for name in self._element_names():
            if name.startswith("__pad"):
                # Padding is represented implicitly via offsets/itemsize.
                continue
            if name not in self.__fields:
                continue
            datatype = self.__fields[name]["type"]
            offset = self.__fields[name]["offset"]
            dtype = datatype.dtype()
            names.append(name)
            formats.append(dtype)
            offsets.append(offset)

        dtype_def = {
            "names": names,
            "formats": formats,
            "offsets": offsets,
            "itemsize": self.size(),
        }
        self.__dtype = dtype_def
        return dtype_def
###############################################################################
# BitfieldDef Class
class BitfieldDef(_BaseDef):
    """This class represents a bit field definition

    The size of the bit field is 1, 2, 3, .., 8 bytes depending on the number of
    elements added to the bit field. You can also force the bitfield size by
    setting the size argument. When forcing the size larger bitfields than
    8 bytes are allowed.

    :param byteorder: Byte order of the bitfield. Valid values are 'native',
        'little' and 'big'.
    :type byteorder: str, optional
    :param size: Force bitfield to be a certain size. By default it will expand
        when new elements are added.
    :type size: int, optional
    """

    def __init__(self, byteorder="native", size=-1):
        if byteorder not in _BYTEORDER:
            raise Exception("Invalid byteorder: {0}.".format(byteorder))
        if byteorder == "native":
            # Resolve 'native' to this machine's concrete byte order.
            byteorder = sys.byteorder
        self.__byteorder = byteorder
        # -1 means auto-size: the bitfield grows as elements are added,
        # capped at 64 bits. Any other value forces the byte size.
        self.__size = size
        # Maps element name -> {"nbr_of_bits", "signed", "offset"} where
        # offset is the element's starting bit. Insertion order matters.
        self.__fields = collections.OrderedDict()

    def add(self, name, nbr_of_bits=1, signed=False):
        """Add a new element in the bitfield definition. The element will be added
        directly after the previous element.

        The size of the bitfield will expand when required, but adding more than
        in total 64 bits (8 bytes) will generate an exception.

        :param name: Name of element. Needs to be unique.
        :type name: str
        :param nbr_of_bits: Number of bits this element represents. Default is 1.
        :type nbr_of_bits: int, optional
        :param signed: Should the bit field be signed or not. Default is False.
        :type signed: bool, optional"""
        # Check for same bitfield name
        if name in self.__fields:
            raise Exception("Field with name {0} already exists.".format(name))

        # Check that new size is not too large
        assigned_bits = self.assigned_bits()
        total_nbr_of_bits = assigned_bits + nbr_of_bits
        if total_nbr_of_bits > self._max_bits():
            raise Exception(
                "Maximum number of bits ({}) exceeded: {}.".format(
                    self._max_bits(), total_nbr_of_bits
                )
            )

        # The new element starts at the first bit after all existing ones.
        self.__fields[name] = {
            "nbr_of_bits": nbr_of_bits,
            "signed": signed,
            "offset": assigned_bits,
        }

    def deserialize(self, buffer, offset=0):
        """Deserialize buffer into dictionary

        :param buffer: Buffer that contains the data to deserialize (1 - 8 bytes)
        :type buffer: bytearray
        :param offset: Start address in buffer
        :type offset: int
        :return: A dictionary keyed with the element names
        :rtype: dict
        """
        result = {}
        if len(buffer) < self.size() + offset:
            raise Exception(
                "Invalid buffer size: {0}. Expected at least: {1}".format(
                    len(buffer), self.size()
                )
            )
        for name in self._element_names():
            result[name] = self._deserialize_element(name, buffer, buffer_offset=offset)
        return result

    def _deserialize_element(self, name, buffer, buffer_offset=0):
        """Deserialize one element from buffer

        :param name: Name of element
        :type name: str
        :param buffer: Buffer that contains the data to deserialize data from (1 - 8 bytes).
        :type buffer: bytearray
        :param buffer_offset: Start address in buffer
        :type buffer_offset: int
        :return: The value of the element
        :rtype: int
        """
        # Read the whole bitfield as one unsigned integer, then extract the
        # element's bit range from it.
        buffer = buffer[buffer_offset : buffer_offset + self.size()]
        full_value = int.from_bytes(buffer, self.__byteorder, signed=False)
        field = self.__fields[name]
        return self._get_subvalue(
            full_value, field["nbr_of_bits"], field["offset"], field["signed"]
        )

    def serialize(self, data, buffer=None, offset=0):
        """Serialize dictionary into buffer

        :param data: A dictionary keyed with element names. Elements can be
            omitted from the dictionary (defaults to value 0).
        :type data: dict
        :param buffer: Optional output buffer; a new one is created when None.
        :type buffer: bytearray, optional
        :param offset: Start address in buffer
        :type offset: int, optional
        :return: A buffer that contains data
        :rtype: bytearray
        """
        if buffer is None:
            assert offset == 0, "When buffer is None, offset have to be unset"
            buffer = bytearray(self.size())
        else:
            assert len(buffer) >= offset + self.size(), "Specified buffer too small"
        for name in self._element_names():
            # Omitted elements keep the buffer's existing bits (0 for a
            # freshly created buffer).
            if name in data:
                self._serialize_element(name, data[name], buffer, buffer_offset=offset)

        return buffer

    def _serialize_element(self, name, value, buffer, buffer_offset=0):
        """Serialize one element into the buffer

        :param name: Name of element
        :type name: str
        :param value: Value of element
        :type value: int
        :param buffer: Buffer that contains the data to serialize data into
            (1 - 8 bytes). This is an output.
        :type buffer: bytearray
        :param buffer_offset: Start address in buffer
        :type buffer_offset: int
        """
        # Read-modify-write: load the current bitfield as one integer, merge
        # in the element's bits, and write the whole bitfield back.
        full_value = int.from_bytes(
            buffer[buffer_offset : buffer_offset + self.size()],
            self.__byteorder,
            signed=False,
        )

        field = self.__fields[name]
        value = self._set_subvalue(
            full_value, value, field["nbr_of_bits"], field["offset"], field["signed"]
        )
        buffer[buffer_offset : buffer_offset + self.size()] = value.to_bytes(
            self.size(), self.__byteorder, signed=False
        )

    def instance(self, buffer=None, buffer_offset=0):
        """Create an instance of this bitfield.

        This is an alternative of using dictionaries and the :meth:`BitfieldDef.serialize`/
        :meth:`BitfieldDef.deserialize` methods for representing the data.

        :param buffer: Byte buffer where data is stored. If no buffer is provided a new byte
            buffer will be created and the instance will be 'empty'.
        :type buffer: bytearray, optional
        :param buffer_offset: Start offset in the buffer. This means that you
            can have multiple Instances (or other data) that
            shares the same buffer.
        :type buffer_offset: int, optional
        :return: A new Instance object
        :rtype: :meth:`Instance`
        """
        # I know. This is cyclic import of Instance, since instance depends
        # on classes within this file. However, it should not be any problem
        # since this file will be full imported once this method is called.
        # pylint: disable=cyclic-import, import-outside-toplevel
        from pycstruct.instance import Instance

        return Instance(self, buffer, buffer_offset)

    def assigned_bits(self):
        """Get size of bitfield in bits excluding padding bits

        :return: Number of bits this bitfield represents excluding padding bits
        :rtype: int
        """
        total_nbr_of_bits = 0
        for _, field in self.__fields.items():
            total_nbr_of_bits += field["nbr_of_bits"]

        return total_nbr_of_bits

    def size(self):
        """Get size of bitfield in bytes

        :return: Number of bytes this bitfield represents
        :rtype: int
        """
        if self.__size >= 0:
            return self.__size  # Force size
        # Auto size: smallest whole number of bytes that fits all bits.
        return int(math.ceil(self.assigned_bits() / 8.0))

    def _max_bits(self):
        # Auto-sized bitfields are capped at 64 bits; forced-size bitfields
        # may be arbitrarily large.
        if self.__size >= 0:
            return self.__size * 8  # Force size
        return 64

    def _largest_member(self):
        """Used for struct padding

        :return: Closest power of 2 value of size
        :rtype: int
        """
        return _round_pow_2(self.size())

    def _get_subvalue(self, value, nbr_of_bits, start_bit, signed):
        """Get subvalue of value

        Extracts the nbr_of_bits wide bit range starting at start_bit and,
        for signed fields, sign-extends via two's complement.

        :return: The subvalue
        :rtype: int
        """
        # pylint: disable=no-self-use
        shifted_value = value >> start_bit
        mask = 0xFFFFFFFFFFFFFFFF >> (64 - nbr_of_bits)
        non_signed_value = shifted_value & mask
        if not signed:
            return non_signed_value
        sign_bit = 0x1 << (nbr_of_bits - 1)
        if non_signed_value & sign_bit == 0:
            # Value is positive
            return non_signed_value
        # Convert to negative value using Two's complement
        signed_value = -1 * ((~non_signed_value & mask) + 1)
        return signed_value

    def _set_subvalue(self, value, subvalue, nbr_of_bits, start_bit, signed):
        """Set subvalue of value

        Validates that subvalue fits in nbr_of_bits (respecting signedness),
        then ORs its bit pattern into value at start_bit.

        :return: New value where subvalue is included
        :rtype: int
        """
        # pylint: disable=too-many-arguments,no-self-use

        # Validate size according to nbr_of_bits
        max_value = 2 ** nbr_of_bits - 1
        min_value = 0
        if signed:
            max_value = 2 ** (nbr_of_bits - 1) - 1
            min_value = -1 * (2 ** (nbr_of_bits - 1))

        signed_str = "Unsigned"
        if signed:
            signed_str = "Signed"

        if subvalue > max_value:
            raise Exception(
                "{0} value {1} is too large to fit in {2} bits. Max value is {3}.".format(
                    signed_str, subvalue, nbr_of_bits, max_value
                )
            )
        if subvalue < min_value:
            raise Exception(
                "{0} value {1} is too small to fit in {2} bits. Min value is {3}.".format(
                    signed_str, subvalue, nbr_of_bits, min_value
                )
            )

        if signed and subvalue < 0:
            # Convert from negative value using Two's complement
            sign_bit = 0x1 << (nbr_of_bits - 1)
            subvalue = sign_bit | ~(-1 * subvalue - 1)

        mask = 0xFFFFFFFFFFFFFFFF >> (64 - nbr_of_bits)
        shifted_subvalue = (subvalue & mask) << start_bit

        return value | shifted_subvalue

    def create_empty_data(self):
        """Create an empty dictionary with all keys

        :return: A dictionary keyed with the element names. Values are "empty" or 0.
        :rtype: dict
        """
        # Deserializing an all-zero buffer yields every element set to 0.
        buffer = bytearray(self.size())
        return self.deserialize(buffer)

    def _type_name(self):
        # Identifies this definition kind (e.g. in error messages).
        return "bitfield"

    def __str__(self):
        """Create string representation

        :return: A string illustrating all members
        :rtype: string
        """
        result = []
        result.append(
            "{:<30}{:<10}{:<10}{:<10}".format("Name", "Bits", "Offset", "Signed")
        )
        for name, field in self.__fields.items():
            signed = "-"
            if field["signed"]:
                signed = "x"
            result.append(
                "{:<30}{:<10}{:<10}{:<10}".format(
                    name, field["nbr_of_bits"], field["offset"], signed
                )
            )
        return "\n".join(result)

    def _element_names(self):
        """Get a list of all element names (in correct order)

        :return: A list of all elements
        :rtype: list
        """
        result = []
        for name in self.__fields.keys():
            result.append(name)
        return result
###############################################################################
# EnumDef Class
class EnumDef(_BaseDef):
    """This class represents an enum definition

    The size of the enum is 1, 2, 3, .., 8 bytes depending on the value of the
    largest enum constant. You can also force the enum size by setting
    the size argument.

    :param byteorder: Byte order of the enum. Valid values are 'native',
        'little' and 'big'.
    :type byteorder: str, optional
    :param size: Force enum to be a certain size. By default it will expand
        when new elements are added.
    :type size: int, optional
    :param signed: True if enum is signed (may contain negative values)
    :type signed: bool, optional
    """

    def __init__(self, byteorder="native", size=-1, signed=False):
        if byteorder not in _BYTEORDER:
            raise Exception("Invalid byteorder: {0}.".format(byteorder))
        if byteorder == "native":
            # Resolve 'native' to this machine's concrete byte order.
            byteorder = sys.byteorder
        self.__byteorder = byteorder
        # -1 means auto-size: the enum grows with its largest constant,
        # capped at 64 bits. Any other value forces the byte size.
        self.__size = size
        self.__signed = signed
        # Maps constant name -> integer value (insertion order preserved).
        self.__constants = collections.OrderedDict()

    def add(self, name, value=None):
        """Add a new constant in the enum definition. Multiple constant might
        be assigned to the same value.

        The size of the enum will expand when required, but adding a value
        requiring a size larger than 64 bits will generate an exception.

        :param name: Name of constant. Needs to be unique.
        :type name: str
        :param value: Value of the constant. Automatically assigned to next
            available value (0, 1, 2, ...) if not provided.
        :type value: int, optional"""
        # Check for same constant name
        if name in self.__constants:
            raise Exception("Constant with name {0} already exists.".format(name))

        # Automatically assign the lowest unused non-negative value. A set
        # lookup replaces the original exception-driven get_name() probing.
        if value is None:
            used_values = set(self.__constants.values())
            value = 0
            while value in used_values:
                value += 1

        # Secure that no negative number are added to an unsigned enum
        if not self.__signed and value < 0:
            raise Exception(
                "Negative value, {0}, not supported in unsigned enums.".format(value)
            )

        # Check that new size is not too large
        if self._bit_length(value) > self._max_bits():
            raise Exception(
                "Maximum number of bits ({}) exceeded: {}.".format(
                    self._max_bits(), self._bit_length(value)
                )
            )

        self.__constants[name] = value

    def deserialize(self, buffer, offset=0):
        """Deserialize buffer into a string (constant name)

        If no constant name is defined for the value following name will be returned::

            __VALUE__<value>

        Where <value> is the integer stored in the buffer.

        :param buffer: Buffer that contains the data to deserialize (1 - 8 bytes)
        :type buffer: bytearray
        :param offset: Start address in buffer
        :type offset: int, optional
        :return: The constant name (string)
        :rtype: str
        """
        if len(buffer) < self.size() + offset:
            raise Exception(
                "Invalid buffer size: {0}. Expected: {1}".format(
                    len(buffer), self.size()
                )
            )
        value = int.from_bytes(
            buffer[offset : offset + self.size()],
            self.__byteorder,
            signed=self.__signed,
        )
        try:
            name = self.get_name(value)
        except Exception:
            # No constant name exist, generate a new
            name = "__VALUE__{}".format(value)
        return name

    def serialize(self, data, buffer=None, offset=0):
        """Serialize string (constant name) into buffer

        :param data: A string representing the constant name.
        :type data: str
        :param buffer: Optional output buffer; a new one is created when None.
        :type buffer: bytearray, optional
        :param offset: Start address in buffer
        :type offset: int, optional
        :return: A buffer that contains data
        :rtype: bytearray
        """
        if buffer is None:
            assert offset == 0, "When buffer is None, offset have to be unset"
        value = self.get_value(data)
        result = value.to_bytes(self.size(), self.__byteorder, signed=self.__signed)
        if buffer is not None:
            buffer[offset : offset + len(result)] = result
            return buffer
        return result

    def size(self):
        """Get size of enum in bytes

        :return: Number of bytes this enum represents
        :rtype: int
        """
        if self.__size >= 0:
            return self.__size  # Force size
        max_length = 1  # To avoid 0 size
        for _, value in self.__constants.items():
            bit_length = self._bit_length(value)
            if bit_length > max_length:
                max_length = bit_length
        # Smallest whole number of bytes that fits the widest constant.
        return int(math.ceil(max_length / 8.0))

    def _max_bits(self):
        # Auto-sized enums are capped at 64 bits; forced-size enums may be
        # arbitrarily large.
        if self.__size >= 0:
            return self.__size * 8  # Force size
        return 64

    def _largest_member(self):
        """Used for struct padding

        :return: Closest power of 2 value of size
        :rtype: int
        """
        return _round_pow_2(self.size())

    def get_name(self, value):
        """Get the name representation of the value

        :return: The constant name
        :rtype: str
        :raises Exception: If no constant has the given value.
        """
        for constant, item_value in self.__constants.items():
            if value == item_value:
                return constant
        raise Exception("Value {0} is not a valid value for this enum.".format(value))

    def get_value(self, name):
        """Get the value representation of the name

        :return: The value
        :rtype: int
        :raises Exception: If the name is not defined in this enum.
        """
        if name not in self.__constants:
            raise Exception("{0} is not a valid name in this enum".format(name))
        return self.__constants[name]

    def _type_name(self):
        # Identifies this definition kind (e.g. in error messages).
        return "enum"

    def _bit_length(self, value):
        """Get number of bits a value represents.

        Works for negative values based on two's complement,
        which is not considered in the python built in
        bit_length method.

        If enum is signed the extra sign bit is included
        in the returned result.
        """
        if value < 0:
            value += 1  # Two's complement reverse
        bit_length = value.bit_length()
        if self.__signed:
            # Reserve one additional bit for the sign.
            bit_length += 1
        return bit_length

    def __str__(self):
        """Create string representation

        :return: A string illustrating all constants
        :rtype: string
        """
        result = []
        result.append("{:<30}{:<10}".format("Name", "Value"))
        for name, value in self.__constants.items():
            result.append("{:<30}{:<10}".format(name, value))
        return "\n".join(result)
| 36.692205 | 95 | 0.544458 | 52,213 | 0.948017 | 0 | 0 | 1,129 | 0.020499 | 0 | 0 | 29,171 | 0.52965 |
df271127c105441127867b0671133734b58e3154 | 1,103 | py | Python | cosymlib/simulation/__init__.py | GrupEstructuraElectronicaSimetria/cosymlib | 04ac536c7c75b638e84079bacd0ece70226fdc03 | [
"MIT"
] | null | null | null | cosymlib/simulation/__init__.py | GrupEstructuraElectronicaSimetria/cosymlib | 04ac536c7c75b638e84079bacd0ece70226fdc03 | [
"MIT"
] | 8 | 2020-12-11T00:58:46.000Z | 2021-07-16T11:27:46.000Z | cosymlib/simulation/__init__.py | GrupEstructuraElectronicaSimetria/cosymlib | 04ac536c7c75b638e84079bacd0ece70226fdc03 | [
"MIT"
] | null | null | null | import huckelpy
from huckelpy import file_io
class ExtendedHuckel:
    """Thin wrapper around :class:`huckelpy.ExtendedHuckel`.

    Runs an extended Huckel calculation for the given geometry and exposes
    the molecular-orbital results together with derived electron counts.

    :param geometry: molecule object providing ``get_positions()`` and
        ``get_symbols()``
    :param charge: total molecular charge, defaults to 0
    """

    def __init__(self, geometry, charge=0):
        self._EH = huckelpy.ExtendedHuckel(geometry.get_positions(), geometry.get_symbols(), charge=charge)
        self._alpha_electrons = None  # computed lazily by get_alpha_electrons()
        self._beta_electrons = None
        self._total_electrons = self._EH.get_number_of_electrons()

    def get_mo_coefficients(self):
        """Return the molecular-orbital coefficient matrix (eigenvectors)."""
        return self._EH.get_eigenvectors()

    def get_basis(self):
        """Return the molecular basis used by the calculation."""
        return self._EH.get_molecular_basis()

    def get_mo_energies(self):
        """Return the molecular-orbital energies."""
        return self._EH.get_mo_energies()

    def get_multiplicity(self):
        """Return the spin multiplicity of the molecule."""
        return self._EH.get_multiplicity()

    def get_alpha_electrons(self):
        """Return the number of alpha electrons (cached after first call)."""
        if self._alpha_electrons is None:
            self._alpha_electrons = self._total_electrons // 2 + self.get_multiplicity() - 1
        return self._alpha_electrons

    def get_beta_electrons(self):
        """Return the number of beta electrons.

        Goes through :meth:`get_alpha_electrons` so the alpha count is
        computed on demand. Previously this read ``self._alpha_electrons``
        directly and raised ``TypeError`` (``int - None``) when called before
        :meth:`get_alpha_electrons`.
        """
        return self._total_electrons - self.get_alpha_electrons()

    def build_fchk_file(self, name):
        """Write the calculation results to ``<name>.fchk``."""
        txt_fchk = file_io.build_fchk(self._EH)
        # Use a context manager so the file handle is closed deterministically
        # (the original relied on the garbage collector to close it).
        with open(name + '.fchk', 'w') as fchk_file:
            fchk_file.write(txt_fchk)
| 30.638889 | 107 | 0.696283 | 1,055 | 0.956482 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.009066 |
df2add4292563c4782461ccc1a9f4bd18f1826e0 | 293 | py | Python | ex012.py | sml07/Meus-Estudos-Python | 8f06ec8ad170674cd0cc5cf792b5647dbb894a1c | [
"MIT"
] | null | null | null | ex012.py | sml07/Meus-Estudos-Python | 8f06ec8ad170674cd0cc5cf792b5647dbb894a1c | [
"MIT"
] | null | null | null | ex012.py | sml07/Meus-Estudos-Python | 8f06ec8ad170674cd0cc5cf792b5647dbb894a1c | [
"MIT"
] | null | null | null | #Faça um algoritimo que leia o preço de um produto e mostre o seu novo preço com 5% de desconto.
price = float(input("Digite o preço do produto: "))
sale = price - (price * 0.05)
print("O valor bruto do produto é: {:.2f}R$.".format(price))
print("Com o desconto de 5%: {:.2f}R$".format(sale)) | 48.833333 | 96 | 0.682594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.674497 |
df2c143e3b261adc27b58c068491f0de1cd09b82 | 514 | py | Python | tests/r/test_bcdeter.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 199 | 2017-07-24T01:34:27.000Z | 2022-01-29T00:50:55.000Z | tests/r/test_bcdeter.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 46 | 2017-09-05T19:27:20.000Z | 2019-01-07T09:47:26.000Z | tests/r/test_bcdeter.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 45 | 2017-07-26T00:10:44.000Z | 2022-03-16T20:44:59.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.bcdeter import bcdeter
def test_bcdeter():
  """Test module bcdeter.py by downloading
  bcdeter.csv and testing shape of
  extracted data has 95 rows and 3 columns
  """
  test_path = tempfile.mkdtemp()
  try:
    x_train, metadata = bcdeter(test_path)
    # The dataset is expected to contain 95 observations of 3 variables.
    assert x_train.shape == (95, 3)
  finally:
    # Always remove the temporary download directory. The original cleaned up
    # only on failure (leaking the directory on success) and re-raised with
    # the broken expression `raise()`, which raises a TypeError instead of
    # propagating the assertion error.
    shutil.rmtree(test_path)
| 21.416667 | 43 | 0.752918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.245136 |
df2cb555b3dc2db771abca035af0535436996ced | 47 | py | Python | multi_parser/shared/__init__.py | ilya-mezentsev/multi-parser | 2d418f38a102fdad826912d4335242a269a26602 | [
"MIT"
] | 14 | 2020-08-09T06:12:06.000Z | 2022-03-10T13:16:57.000Z | multi_parser/shared/__init__.py | ilya-mezentsev/multi-parser | 2d418f38a102fdad826912d4335242a269a26602 | [
"MIT"
] | 14 | 2020-08-05T06:18:30.000Z | 2021-12-13T21:19:38.000Z | example/store/serializers/__init__.py | defineimpossible/django-rest-batteries | d83dc67b6e91ae1a9c7625606a66b59d83936947 | [
"MIT"
] | null | null | null | from .request import *
from .response import *
| 15.666667 | 23 | 0.744681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
df2d3c177128f55f32693c0a0f67186e9894a707 | 4,900 | py | Python | wmf_embed/core/lang_embedding.py | shilad/wmf-embeddings | 353556dfbb36e1f70eec7e618cfd989d7ae08de5 | [
"Apache-2.0"
] | null | null | null | wmf_embed/core/lang_embedding.py | shilad/wmf-embeddings | 353556dfbb36e1f70eec7e618cfd989d7ae08de5 | [
"Apache-2.0"
] | null | null | null | wmf_embed/core/lang_embedding.py | shilad/wmf-embeddings | 353556dfbb36e1f70eec7e618cfd989d7ae08de5 | [
"Apache-2.0"
] | null | null | null | import logging
import math
import os.path
import re
import annoy
import numpy as np
from gensim.models import KeyedVectors
from gensim.utils import to_unicode
from smart_open import smart_open
from .utils import NP_FLOAT
def from_mikolov(lang, inpath, outpath):
    """Convert a word2vec/fastText text-format embedding into the on-disk
    layout expected by :class:`LangEmbedding`.

    Writes ``ids.txt`` (one "<lang>.wikipedia:<word>" id per line), an empty
    ``titles.csv`` and ``vectors.npy`` into ``outpath``.

    :param lang: language code used to prefix each id (e.g. "en")
    :param inpath: path to the text-format embedding; the first line is a
        "<vocab size> <dimensions>" header
    :param outpath: directory to write the converted embedding into
    :return: a LangEmbedding loaded from outpath
    """
    if not os.path.isdir(outpath):
        os.makedirs(outpath)
    enc = 'utf8'
    words = []
    vectors = []
    with smart_open(inpath) as fin:
        fin.readline()  # skip the "<vocab size> <dimensions>" header line
        while True:
            line = fin.readline()
            if line == b'':
                break
            parts = to_unicode(line.rstrip(), encoding=enc, errors='ignore').split(" ")
            word, weights = parts[0], [np.float32(x) for x in parts[1:]]
            # Bug fix: record the parsed word and its vector. The original
            # appended the `words` list to itself and appended the vector to
            # `weights` instead of `vectors`, so `vectors` stayed empty.
            words.append(word)
            vectors.append(np.array(weights))

    with open(outpath + '/ids.txt', 'w', encoding='utf-8') as f:
        for id in words:
            f.write(lang + '.wikipedia:')
            f.write(id)
            f.write('\n')

    with open(outpath + '/titles.csv', 'w', encoding='utf-8') as f:
        # Word-level embeddings have no article titles; write an empty file
        # so downstream readers still find it.
        pass

    np.save(outpath + '/vectors.npy', np.array(vectors))

    return LangEmbedding(lang, outpath)
MIN_SIZE = 10000
class LangEmbedding(object):
    """A per-language embedding backed by files on disk.

    Expects ``dir`` to contain ``ids.txt`` (one id per line, aligned row-wise
    with the vector matrix) and a matrix file (``vectors.npy`` or
    ``vectors.aligned.npy``). Vectors are normalized to unit length on load.
    """

    def __init__(self, lang, dir, titles=None, aligned=False, model_name=None):
        # lang: language code (e.g. "en"); dir: directory with ids.txt and
        # the matrix; titles: optional id -> title mapping; aligned: load the
        # cross-lingually aligned matrix; model_name: explicit matrix file
        # name, overriding `aligned`.
        logging.info('initializing embedding in %s for %s', dir, lang)
        if model_name:
            pass  # use it
        elif aligned:
            model_name = 'vectors.aligned.npy'
        else:
            model_name = 'vectors.npy'
        self.lang = lang
        self.dir = dir
        self.ids = []
        self.titles = titles
        path_id = os.path.join(dir, 'ids.txt')
        with open(path_id, 'r', encoding='utf-8') as f:
            for line in f:
                self.ids.append(line.strip())
        self.ids_to_index = {id: i for i, id in enumerate(self.ids)}
        self.vector_path = os.path.join(dir, model_name)
        self.embedding = np.load(self.vector_path)
        self.embedding /= np.linalg.norm(self.embedding, axis=1)[:,None]  # Unit vectors
        N = self.embedding.shape[0]
        # Rank-based (Zipf-like) popularity prior, scaled up for vocabularies
        # larger than MIN_SIZE.
        pop_weight = max(1, math.log(N / MIN_SIZE))
        self.pop = 1.0 * pop_weight / (np.arange(N) + 1)
        self.index = None  # fast knn index

    def submatrix(self, ids):
        """Return (popularity weights, vector rows) for the given ids.

        Ids missing from this embedding get zero weight and a zero vector.
        """
        weights = np.zeros(len(ids), dtype=NP_FLOAT)
        sub = np.zeros((len(ids), self.dims()), dtype=NP_FLOAT)
        for i, id in enumerate(ids):
            if id in self.ids_to_index:
                index = self.ids_to_index[id]
                sub[i, :] = self.embedding[index, :]
                weights[i] = self.pop[index]
        return weights, sub

    def indexes(self, ids):
        """Map ids to row indexes (raises KeyError for unknown ids)."""
        return [self.ids_to_index[id] for id in ids ]

    def dims(self):
        """Number of dimensions per vector."""
        return self.embedding.shape[1]

    def nrows(self):
        """Number of vectors (rows) in the embedding."""
        return self.embedding.shape[0]

    def build_fast_knn(self):
        """Build, or load from cache, an approximate nearest-neighbor index."""
        index = annoy.AnnoyIndex(self.dims(), metric='angular')
        ann_path = self.vector_path + '.ann'
        # Reuse the cached index if it is at least as new as the vectors.
        if (os.path.isfile(ann_path)
                and os.path.getmtime(ann_path) >= os.path.getmtime(self.vector_path)):
            logging.info('loading accelerated knn tree from %s', ann_path)
            index.load(ann_path)
            self.index = index
            return

        logging.info('building accelerated knn tree')

        # Build the approximate-nearest-neighbor index
        for i in range(self.embedding.shape[0]):
            index.add_item(i, self.embedding[i,:])
        index.build(10)
        index.save(self.vector_path + '.ann')
        self.index = index

    def popularity(self):
        """Return the rank-based popularity weights for all rows."""
        return self.pop

    def map(self, basis):
        """Project the embedding through `basis` in place (matrix multiply)."""
        self.embedding.dot(basis, out=self.embedding)

    def neighbors(self, id, n=5, include_distances=False, use_indexes=True):
        """Return the n nearest neighbors of `id` via the annoy index.

        Requires :meth:`build_fast_knn` to have been called first. Unknown
        ids yield an empty list.
        """
        assert(self.index)
        if id not in self.ids_to_index: return []
        i = self.ids_to_index[id]
        indexes, dists = self.index.get_nns_by_item(i, n, include_distances=True)
        if use_indexes:
            result = indexes
        else:
            result = [self.ids[j] for j in indexes]
        if include_distances:
            return list(zip(result, dists))
        else:
            return result

    def dense_words(self):
        """Extract the word-level subset of the embedding.

        Ids of the form "<lang>.wikipedia:<word>" are treated as words.
        Returns (word -> dense row index, dense word matrix).
        """
        word_to_sparse_index = {}
        MATCH_WORD = re.compile(r'^.*?\.wikipedia:(.*)$').match
        for (id, i) in self.ids_to_index.items():
            m = MATCH_WORD(id)
            if m:
                word = m.group(1)
                # NOTE(review): str.replace returns a new string; the result
                # is discarded here, so underscores are kept in the key.
                # Confirm whether '_' -> ' ' normalization was intended.
                word.replace('_', ' ')
                word_to_sparse_index[word] = i
        sparse_indexes = np.sort(list(word_to_sparse_index.values()))
        sparse_to_dense = { s : d for d, s in enumerate(sparse_indexes) }
        word_matrix = self.embedding[sparse_indexes, :]
        word2id = { w : sparse_to_dense[word_to_sparse_index[w]] for w in word_to_sparse_index }
        return word2id, word_matrix
df2e07d7991cf110f3461188f42f2cc6d3136eb8 | 13,819 | py | Python | test/util/test_function_factory.py | ediphy-dwild/gpytorch | 559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a | [
"MIT"
] | null | null | null | test/util/test_function_factory.py | ediphy-dwild/gpytorch | 559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a | [
"MIT"
] | null | null | null | test/util/test_function_factory.py | ediphy-dwild/gpytorch | 559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a | [
"MIT"
] | null | null | null | import math
import torch
import unittest
import gpytorch
import numpy as np
from torch.autograd import Variable
from gpytorch.utils import approx_equal, function_factory
from gpytorch.lazy import NonLazyVariable
_exact_gp_mll_class = function_factory.exact_gp_mll_factory()
class TestFunctionFactory(unittest.TestCase):
    """Unit tests for the autograd Functions built by
    gpytorch.utils.function_factory: inv_matmul forward/backward,
    the exact GP marginal log likelihood, trace_logdet_quad_form
    (single and batch), and root (inverse) decompositions.

    Expected values are computed densely with torch.inverse /
    numpy.linalg.det; stochastic trace estimates use loose tolerances.
    """

    def test_forward_inv_mm(self):
        # inv_matmul(A, B) should match A^-1 B for several matrix widths.
        for n_cols in [2, 3, 4]:
            a = torch.Tensor([
                [5, -3, 0],
                [-3, 5, 0],
                [0, 0, 2],
            ])
            b = torch.randn(3, n_cols)
            actual = a.inverse().mm(b)

            a_var = Variable(a)
            b_var = Variable(b)
            out_var = gpytorch.inv_matmul(a_var, b_var)
            res = out_var.data

            self.assertLess(torch.norm(actual - res), 1e-4)

    def test_backward_inv_mm(self):
        # Gradients of sum(inv_matmul(A*b, C) * eye) * 2 w.r.t. A and C,
        # compared against the closed-form dense expressions.
        for n_cols in [2, 3, 4]:
            a = torch.Tensor([
                [5, -3, 0],
                [-3, 5, 0],
                [0, 0, 2],
            ])
            b = torch.ones(3, 3).fill_(2)
            c = torch.randn(3, n_cols)
            actual_a_grad = -torch.mm(
                a.inverse().mul_(0.5).mm(torch.eye(3, n_cols)),
                a.inverse().mul_(0.5).mm(c).t()
            ) * 2 * 2
            actual_c_grad = (a.inverse() / 2).t().mm(torch.eye(3, n_cols)) * 2

            a_var = Variable(a, requires_grad=True)
            c_var = Variable(c, requires_grad=True)
            out_var = a_var.mul(Variable(b))
            out_var = gpytorch.inv_matmul(out_var, c_var)
            out_var = out_var.mul(Variable(torch.eye(3, n_cols))).sum() * 2
            out_var.backward()
            a_res = a_var.grad.data
            c_res = c_var.grad.data

            self.assertLess(torch.norm(actual_a_grad - a_res), 1e-4)
            self.assertLess(torch.norm(actual_c_grad - c_res), 1e-4)

    def test_forward_inv_mv(self):
        # inv_matmul(A, b) should match A^-1 b for a vector b.
        a = torch.Tensor([
            [5, -3, 0],
            [-3, 5, 0],
            [0, 0, 2],
        ])
        b = torch.randn(3)
        actual = a.inverse().mv(b)

        a_var = Variable(a)
        b_var = Variable(b)
        out_var = gpytorch.inv_matmul(a_var, b_var)
        res = out_var.data

        self.assertLess(torch.norm(actual - res), 1e-4)

    def test_backward_inv_mv(self):
        # Vector variant of the backward test above.
        a = torch.Tensor([
            [5, -3, 0],
            [-3, 5, 0],
            [0, 0, 2],
        ])
        b = torch.ones(3, 3).fill_(2)
        c = torch.randn(3)
        actual_a_grad = -(
            torch.ger(
                a.inverse().mul_(0.5).mv(torch.ones(3)),
                a.inverse().mul_(0.5).mv(c)
            ) * 2 * 2
        )
        actual_c_grad = (a.inverse() / 2).t().mv(torch.ones(3)) * 2

        a_var = Variable(a, requires_grad=True)
        c_var = Variable(c, requires_grad=True)
        out_var = a_var.mul(Variable(b))
        out_var = gpytorch.inv_matmul(out_var, c_var)
        out_var = out_var.sum() * 2
        out_var.backward()
        a_res = a_var.grad.data
        c_res = c_var.grad.data

        self.assertLess(torch.norm(actual_a_grad - a_res), 1e-4)
        self.assertLess(torch.norm(actual_c_grad - c_res), 1e-4)

    def test_normal_gp_mll_forward(self):
        # MLL forward: -0.5 * (y' K^-1 y + log|K| + n log(2 pi)).
        covar = torch.Tensor([
            [3, -1, 0],
            [-1, 3, 0],
            [0, 0, 3],
        ])
        y = torch.randn(3)

        actual = y.dot(covar.inverse().mv(y))
        actual += math.log(np.linalg.det(covar.numpy()))
        actual += math.log(2 * math.pi) * len(y)
        actual *= -0.5

        covarvar = Variable(covar)
        yvar = Variable(y)

        res = _exact_gp_mll_class()(covarvar, yvar)
        # Stochastic trace estimation: allow 10% relative error.
        for d in torch.abs(actual - res.data).div(res.data):
            self.assertLess(d, 0.1)

    def test_normal_gp_mll_backward(self):
        # MLL gradients: dK = 0.5 (K^-1 y y' K^-1 - K^-1), dy = -K^-1 y.
        covar = torch.Tensor([
            [3, -1, 0],
            [-1, 3, 0],
            [0, 0, 3],
        ])
        y = torch.randn(3)

        covarvar = Variable(covar, requires_grad=True)
        yvar = Variable(y, requires_grad=True)
        actual_mat_grad = torch.ger(covar.inverse().mv(y), covar.inverse().mv(y))
        actual_mat_grad -= covar.inverse()
        actual_mat_grad *= 0.5
        actual_mat_grad *= 3  # For grad output

        actual_y_grad = -covar.inverse().mv(y)
        actual_y_grad *= 3  # For grad output

        covarvar = Variable(covar, requires_grad=True)
        yvar = Variable(y, requires_grad=True)
        with gpytorch.settings.num_trace_samples(1000):
            output = _exact_gp_mll_class()(covarvar, yvar) * 3
            output.backward()

        self.assertLess(torch.norm(actual_mat_grad - covarvar.grad.data), 1e-1)
        self.assertLess(torch.norm(actual_y_grad - yvar.grad.data), 1e-4)

        # Same check with deterministic (zero-sample) trace settings on the
        # outer context; the inner context re-enables sampling.
        with gpytorch.settings.num_trace_samples(0):
            covarvar = Variable(covar, requires_grad=True)
            yvar = Variable(y, requires_grad=True)
            with gpytorch.settings.num_trace_samples(1000):
                output = _exact_gp_mll_class()(covarvar, yvar) * 3
                output.backward()

            self.assertLess(torch.norm(actual_mat_grad - covarvar.grad.data), 1e-1)
            self.assertLess(torch.norm(actual_y_grad - yvar.grad.data), 1e-4)

    def test_normal_trace_log_det_quad_form_forward(self):
        # mu' K^-1 mu + log|K| + tr(K^-1 S) with S = chol' chol.
        covar = torch.Tensor([
            [3, -1, 0],
            [-1, 3, 0],
            [0, 0, 3],
        ])
        mu_diffs = torch.Tensor([0, -1, 1])
        chol_covar = torch.Tensor([
            [1, -2, 0],
            [0, 1, -2],
            [0, 0, 1],
        ])

        actual = mu_diffs.dot(covar.inverse().matmul(mu_diffs))
        actual += math.log(np.linalg.det(covar.numpy()))
        actual += (covar.inverse().matmul(chol_covar.t().matmul(chol_covar))).trace()

        covarvar = Variable(covar)
        chol_covarvar = Variable(chol_covar)
        mu_diffsvar = Variable(mu_diffs)

        res = gpytorch.trace_logdet_quad_form(mu_diffsvar, chol_covarvar, covarvar)
        self.assertTrue((torch.abs(actual - res.data).div(res.data) < 0.1).all())

    def test_normal_trace_log_det_quad_form_backward(self):
        # Gradients of the quad-form term, computed once with autograd on the
        # dense formula, then compared to the stochastic implementation.
        covar = Variable(torch.Tensor([
            [3, -1, 0],
            [-1, 3, 0],
            [0, 0, 3],
        ]), requires_grad=True)
        mu_diffs = Variable(torch.Tensor([0, -1, 1]), requires_grad=True)
        chol_covar = Variable(torch.Tensor([
            [1, -2, 0],
            [0, 1, -2],
            [0, 0, 1],
        ]), requires_grad=True)

        actual = mu_diffs.dot(covar.inverse().matmul(mu_diffs))
        actual += (covar.inverse().matmul(chol_covar.t().matmul(chol_covar))).trace()
        actual.backward()

        # The logdet term contributes K^-1 to the covariance gradient.
        actual_covar_grad = covar.grad.data.clone() + covar.data.inverse()
        actual_mu_diffs_grad = mu_diffs.grad.data.clone()
        actual_chol_covar_grad = chol_covar.grad.data.clone()

        covar = Variable(torch.Tensor([
            [3, -1, 0],
            [-1, 3, 0],
            [0, 0, 3],
        ]), requires_grad=True)
        mu_diffs = Variable(torch.Tensor([0, -1, 1]), requires_grad=True)
        chol_covar = Variable(torch.Tensor([
            [1, -2, 0],
            [0, 1, -2],
            [0, 0, 1],
        ]), requires_grad=True)

        with gpytorch.settings.num_trace_samples(1000):
            res = gpytorch.trace_logdet_quad_form(mu_diffs, chol_covar, covar)
            res.backward()

        res_covar_grad = covar.grad.data
        res_mu_diffs_grad = mu_diffs.grad.data
        res_chol_covar_grad = chol_covar.grad.data

        self.assertLess(
            torch.norm(actual_covar_grad - res_covar_grad),
            1e-1,
        )
        self.assertLess(
            torch.norm(actual_mu_diffs_grad - res_mu_diffs_grad),
            1e-1,
        )
        self.assertLess(
            torch.norm(actual_chol_covar_grad - res_chol_covar_grad),
            1e-1,
        )

    def test_batch_trace_log_det_quad_form_forward(self):
        # Batched version: the result sums the per-batch quad-form terms.
        covar = torch.Tensor([
            [
                [3, -1, 0],
                [-1, 3, 0],
                [0, 0, 3],
            ], [
                [10, -2, 1],
                [-2, 10, 0],
                [1, 0, 10],
            ]
        ])
        mu_diffs = torch.Tensor([
            [0, -1, 1],
            [1, 2, 3]
        ])
        chol_covar = torch.Tensor([
            [
                [1, -2, 0],
                [0, 1, -2],
                [0, 0, 1],
            ], [
                [2, -4, 0],
                [0, 2, -4],
                [0, 0, 2],
            ]
        ])

        actual = mu_diffs[0].dot(covar[0].inverse().matmul(mu_diffs[0]))
        actual += math.log(np.linalg.det(covar[0].numpy()))
        actual += (
            covar[0].inverse().matmul(chol_covar[0].t().matmul(chol_covar[0]))
        ).trace()
        actual += mu_diffs[1].dot(covar[1].inverse().matmul(mu_diffs[1]))
        actual += math.log(np.linalg.det(covar[1].numpy()))
        actual += (
            covar[1].inverse().matmul(chol_covar[1].t().matmul(chol_covar[1]))
        ).trace()

        covarvar = Variable(covar)
        chol_covarvar = Variable(chol_covar)
        mu_diffsvar = Variable(mu_diffs)

        res = gpytorch.trace_logdet_quad_form(mu_diffsvar, chol_covarvar, covarvar)
        self.assertTrue((torch.abs(actual - res.data).div(res.data) < 0.1).all())

    def test_batch_trace_log_det_quad_form_backward(self):
        # Batched gradient check, mirroring the single-matrix backward test.
        covar = Variable(torch.Tensor([
            [
                [3, -1, 0],
                [-1, 3, 0],
                [0, 0, 3],
            ], [
                [10, -2, 1],
                [-2, 10, 0],
                [1, 0, 10],
            ]
        ]), requires_grad=True)
        mu_diffs = Variable(torch.Tensor([
            [0, -1, 1],
            [1, 2, 3]
        ]), requires_grad=True)
        chol_covar = Variable(torch.Tensor([
            [
                [1, -2, 0],
                [0, 1, -2],
                [0, 0, 1],
            ], [
                [2, -4, 0],
                [0, 2, -4],
                [0, 0, 2],
            ]
        ]), requires_grad=True)

        actual = mu_diffs[0].dot(covar[0].inverse().matmul(mu_diffs[0]))
        actual += (
            covar[0].inverse().matmul(chol_covar[0].t().matmul(chol_covar[0]))
        ).trace()
        actual += mu_diffs[1].dot(covar[1].inverse().matmul(mu_diffs[1]))
        actual += (
            covar[1].inverse().matmul(chol_covar[1].t().matmul(chol_covar[1]))
        ).trace()
        actual.backward()

        # Add the per-batch K^-1 contributions from the logdet term.
        actual_covar_grad = (
            covar.grad.data.clone() +
            torch.cat([
                covar[0].data.inverse().unsqueeze(0),
                covar[1].data.inverse().unsqueeze(0)]
            )
        )
        actual_mu_diffs_grad = mu_diffs.grad.data.clone()
        actual_chol_covar_grad = chol_covar.grad.data.clone()

        # Reset gradients before re-running through the stochastic path.
        covar.grad.data.fill_(0)
        mu_diffs.grad.data.fill_(0)
        chol_covar.grad.data.fill_(0)

        with gpytorch.settings.num_trace_samples(1000):
            res = gpytorch.trace_logdet_quad_form(mu_diffs, chol_covar, covar)
            res.backward()

        res_covar_grad = covar.grad.data
        res_mu_diffs_grad = mu_diffs.grad.data
        res_chol_covar_grad = chol_covar.grad.data

        self.assertLess(torch.norm(actual_covar_grad - res_covar_grad), 1e-1)
        self.assertLess(torch.norm(actual_mu_diffs_grad - res_mu_diffs_grad), 1e-1)
        self.assertLess(torch.norm(actual_chol_covar_grad - res_chol_covar_grad), 1e-1)

    def test_root_decomposition_forward(self):
        # R R' should reconstruct the original positive-definite matrix.
        a = torch.randn(5, 5)
        a = torch.matmul(a, a.t())
        a_lv = NonLazyVariable(Variable(a, requires_grad=True))
        a_root = a_lv.root_decomposition()

        self.assertLess(
            torch.max(((a_root.matmul(a_root.transpose(-1, -2)).data - a)).abs()),
            1e-2,
        )

    def test_root_decomposition_backward(self):
        # trace(R R') == trace(A), so the gradients should agree.
        a = torch.Tensor([
            [5.0212, 0.5504, -0.1810, 1.5414, 2.9611],
            [0.5504, 2.8000, 1.9944, 0.6208, -0.8902],
            [-0.1810, 1.9944, 3.0505, 1.0790, -1.1774],
            [1.5414, 0.6208, 1.0790, 2.9430, 0.4170],
            [2.9611, -0.8902, -1.1774, 0.4170, 3.3208],
        ])
        a_var = Variable(a, requires_grad=True)
        a_lv = NonLazyVariable(a_var)
        a_root = a_lv.root_decomposition()
        res = a_root.matmul(a_root.transpose(-1, -2))
        res.trace().backward()

        a_var_copy = Variable(a, requires_grad=True)
        a_var_copy.trace().backward()

        self.assertTrue(approx_equal(a_var.grad.data, a_var_copy.grad.data))

    def test_root_decomposition_inv_forward(self):
        # R R' should reconstruct A^-1 (relative tolerance).
        a = torch.randn(5, 5)
        a = torch.matmul(a, a.t())
        a_lv = NonLazyVariable(Variable(a, requires_grad=True))
        a_root = a_lv.root_inv_decomposition()

        actual = a.inverse()
        diff = (a_root.matmul(a_root.transpose(-1, -2)).data - actual).abs()
        self.assertLess(torch.max(diff / actual), 1e-2)

    def test_root_decomposition_inv_backward(self):
        # trace(R R') == trace(A^-1), so the gradients should agree.
        a = torch.Tensor([
            [5.0212, 0.5504, -0.1810, 1.5414, 2.9611],
            [0.5504, 2.8000, 1.9944, 0.6208, -0.8902],
            [-0.1810, 1.9944, 3.0505, 1.0790, -1.1774],
            [1.5414, 0.6208, 1.0790, 2.9430, 0.4170],
            [2.9611, -0.8902, -1.1774, 0.4170, 3.3208],
        ])
        a_var = Variable(a, requires_grad=True)
        a_lv = NonLazyVariable(a_var)
        a_root = a_lv.root_inv_decomposition()
        res = a_root.matmul(a_root.transpose(-1, -2))
        res.trace().backward()

        a_var_copy = Variable(a, requires_grad=True)
        a_var_copy.inverse().trace().backward()

        self.assertTrue(approx_equal(a_var.grad.data, a_var_copy.grad.data))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 33.460048 | 87 | 0.530284 | 13,491 | 0.976265 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.003184 |
df2e0882b40078ee113337bd905da77e518c2210 | 5,016 | py | Python | tests/boardfarm_plugins/boardfarm_prplmesh/tests/ap_config_bss_tear_down.py | SWRT-dev/easymesh | 12d902edde77599e074c0535f7256499b08f7494 | [
"BSD-3-Clause",
"BSD-2-Clause-Patent",
"MIT"
] | null | null | null | tests/boardfarm_plugins/boardfarm_prplmesh/tests/ap_config_bss_tear_down.py | SWRT-dev/easymesh | 12d902edde77599e074c0535f7256499b08f7494 | [
"BSD-3-Clause",
"BSD-2-Clause-Patent",
"MIT"
] | null | null | null | tests/boardfarm_plugins/boardfarm_prplmesh/tests/ap_config_bss_tear_down.py | SWRT-dev/easymesh | 12d902edde77599e074c0535f7256499b08f7494 | [
"BSD-3-Clause",
"BSD-2-Clause-Patent",
"MIT"
] | null | null | null | # SPDX-License-Identifier: BSD-2-Clause-Patent
# SPDX-FileCopyrightText: 2020 the prplMesh contributors (see AUTHORS.md)
# This code is subject to the terms of the BSD+Patent license.
# See LICENSE file for more details.
from .prplmesh_base_test import PrplMeshBaseTest
from boardfarm.exceptions import SkipTest
from capi import tlv
class ApConfigBSSTeardown(PrplMeshBaseTest):
    """Check SSID is still available after being torn down
    Devices used in test setup:
    AP1 - Agent1 [DUT]
    GW - Controller
    """
    def runTest(self):
        # Locate test participants
        try:
            agent = self.dev.DUT.agent_entity
            controller = self.dev.lan.controller_entity
        except AttributeError as ae:
            raise SkipTest(ae)
        # Capture 1905 traffic for post-mortem analysis of this test run.
        self.dev.DUT.wired_sniffer.start(self.__class__.__name__ + "-" + self.dev.DUT.name)
        # Configure the controller and send renew
        self.device_reset_default()
        # Step 1: provision one 2.4G fronthaul BSS ("Boardfarm-Tests-24G-3") on the agent.
        controller.cmd_reply(
            "DEV_SET_CONFIG,bss_info1,"
            "{} 8x Boardfarm-Tests-24G-3 0x0020 0x0008 maprocks1 0 1".format(agent.mac))
        # Trigger AP autoconfiguration renewal so the agent applies the new config.
        controller.dev_send_1905(agent.mac,
                                 self.ieee1905['eMessageType']
                                 ['AP_AUTOCONFIGURATION_RENEW_MESSAGE'],
                                 tlv(self.ieee1905['eTlvType']['TLV_AL_MAC_ADDRESS'],
                                     "{" + controller.mac + "}"),
                                 tlv(self.ieee1905['eTlvType']['TLV_SUPPORTED_ROLE'],
                                     "{" + f"""0x{self.ieee1905['tlvSupportedRole']
                                     ['eValue']['REGISTRAR']:02x}""" + "}"),
                                 tlv(self.ieee1905['eTlvType']['TLV_SUPPORTED_FREQ_BAND'],
                                     "{" + f"""0x{self.ieee1905['tlvSupportedFreqBand']
                                     ['eValue']['BAND_2_4G']:02x}""" + "}"))
        # Wait until the connection map is updated:
        self.check_log(controller,
                       rf"Setting node '{agent.radios[0].mac}' as active", timeout=10)
        self.check_log(controller,
                       rf"Setting node '{agent.radios[1].mac}' as active", timeout=10)
        self.check_log(agent.radios[0],
                       r"Autoconfiguration for ssid: Boardfarm-Tests-24G-3 .*"
                       r"fronthaul: true backhaul: false")
        self.check_log(agent.radios[1], r".* tear down radio")
        # Verify the connection map: radio 0 carries the new SSID, radio 1 is torn down.
        conn_map = controller.get_conn_map()
        repeater1 = conn_map[agent.mac]
        repeater1_wlan0 = repeater1.radios[agent.radios[0].mac]
        for vap in repeater1_wlan0.vaps.values():
            if vap.ssid not in ('Boardfarm-Tests-24G-3', 'N/A'):
                self.fail('Wrong SSID: {vap.ssid} instead of Boardfarm-Tests-24G-3'.format(vap=vap))
        repeater1_wlan2 = repeater1.radios[agent.radios[1].mac]
        for vap in repeater1_wlan2.vaps.values():
            if vap.ssid != 'N/A':
                self.fail('Wrong SSID: {vap.ssid} instead torn down'.format(vap=vap))
        self.checkpoint()
        # SSIDs have been removed for the CTT Agent1's front radio
        # Step 2: push an empty BSS config, which should tear the SSID down everywhere.
        controller.cmd_reply(
            "DEV_SET_CONFIG,bss_info1,{} 8x".format(agent.mac))
        # Send renew message
        controller.dev_send_1905(agent.mac,
                                 self.ieee1905['eMessageType']
                                 ['AP_AUTOCONFIGURATION_RENEW_MESSAGE'],
                                 tlv(self.ieee1905['eTlvType']['TLV_AL_MAC_ADDRESS'],
                                     "{" + controller.mac + "}"),
                                 tlv(self.ieee1905['eTlvType']['TLV_SUPPORTED_ROLE'],
                                     "{" + f"""0x{self.ieee1905['tlvSupportedRole']
                                     ['eValue']['REGISTRAR']:02x}""" + "}"),
                                 tlv(self.ieee1905['eTlvType']['TLV_SUPPORTED_FREQ_BAND'],
                                     "{" + f"""0x{self.ieee1905['tlvSupportedFreqBand']
                                     ['eValue']['BAND_2_4G']:02x}""" + "}"))
        self.check_log(controller,
                       rf"Setting node '{agent.radios[0].mac}' as active", timeout=10)
        self.check_log(controller,
                       rf"Setting node '{agent.radios[1].mac}' as active", timeout=10)
        self.check_log(agent.radios[0], r".* tear down radio")
        # After the teardown renew, no radio may advertise a real SSID anymore.
        conn_map = controller.get_conn_map()
        repeater1 = conn_map[agent.mac]
        repeater1_wlan0 = repeater1.radios[agent.radios[0].mac]
        for vap in repeater1_wlan0.vaps.values():
            if vap.ssid != 'N/A':
                self.fail('Wrong SSID: {vap.ssid} instead torn down'.format(vap=vap))
        repeater1_wlan2 = repeater1.radios[agent.radios[1].mac]
        for vap in repeater1_wlan2.vaps.values():
            if vap.ssid != 'N/A':
                self.fail('Wrong SSID: {vap.ssid} instead torn down'.format(vap=vap))
| 50.16 | 100 | 0.547049 | 4,679 | 0.932815 | 0 | 0 | 0 | 0 | 0 | 0 | 2,014 | 0.401515 |
df2f4471a03807c5058e593384972868df67d33c | 709 | py | Python | taller_estructuras_de_control/codigo_python_ejercicios/ejercicio_9.py | JMosqueraM/algoritmos_y_programacion | 30dc179b976f1db24401b110496250fbcb98938e | [
"MIT"
] | null | null | null | taller_estructuras_de_control/codigo_python_ejercicios/ejercicio_9.py | JMosqueraM/algoritmos_y_programacion | 30dc179b976f1db24401b110496250fbcb98938e | [
"MIT"
] | null | null | null | taller_estructuras_de_control/codigo_python_ejercicios/ejercicio_9.py | JMosqueraM/algoritmos_y_programacion | 30dc179b976f1db24401b110496250fbcb98938e | [
"MIT"
] | null | null | null | #Calcular el salario neto de un tnrabajador en fucion del numero de horas trabajadas, el precio de la hora
# Compute a worker's net salary from hours worked, the hourly rate, and a
# fixed tax deduction (20%) applied to the base salary.
horas = float(input("Ingrese el numero de horas trabajadas: "))
precio_hora = float(input("Ingrese el precio por hora trabajada: "))
sueldo_base = float(input("Ingrese el valor del sueldo base: "))
pago_hora = horas * precio_hora
impuesto = 0.2  # tax rate deducted from the base salary
# Fix: derive the multiplier from `impuesto` instead of a hard-coded 0.8,
# so changing the declared tax rate actually changes the result.
salario_neto = pago_hora + (sueldo_base * (1 - impuesto))
print(f"Si el trabajador tiene un sueldo base de {sueldo_base}$ (al cual se le descuenta un 20% por impuestos), trabaja {horas} horas, y la hora se le paga a {precio_hora}$.")
print(f"El salario neto del trabajador es de {salario_neto}$")
df321ec897b131393422f40ec400fe5025edb45a | 3,189 | py | Python | cs15211/BattleshipsInABoard.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | 1 | 2021-07-05T01:53:30.000Z | 2021-07-05T01:53:30.000Z | cs15211/BattleshipsInABoard.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | null | null | null | cs15211/BattleshipsInABoard.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | 1 | 2018-01-08T07:14:08.000Z | 2018-01-08T07:14:08.000Z | __source__ = 'https://github.com/kamyu104/LeetCode/blob/master/Python/battleships-in-a-board.py'
# Time: O(m * n)
# Space: O(1)
#
#
# Description: 419. Battleships in a Board
#
# Given an 2D board, count how many different battleships are in it.
# The battleships are represented with 'X's, empty slots are represented with '.'s.
# You may assume the following rules:
#
# You receive a valid board, made of only battleships or empty slots.
# Battleships can only be placed horizontally or vertically. In other words,
# they can only be made of the shape 1xN (1 row, N columns) or Nx1 (N rows, 1 column),
# where N can be of any size.
# At least one horizontal or vertical cell separates between two battleships -
# there are no adjacent battleships.
#
# Example:
# X..X
# ...X
# ...X
# In the above board there are 2 battleships.
# Invalid Example:
# ...X
# XXXX
# ...X
# This is not a valid board - as battleships will always have a cell separating between them.
# Your algorithm should not modify the value of the board.
# Follow up:
# Could you do it in one-pass, using only O(1) extra memory and without modifying the value of the board?
#
# Hide Company Tags Microsoft
import unittest
# 32ms 94.99%
class Solution(object):
    def countBattleships(self, board):
        """Count the battleships on the board in one pass with O(1) extra memory.

        Only the top-left cell of each ship is counted: an 'X' with no 'X'
        directly above it and no 'X' directly to its left.

        :type board: List[List[str]]
        :rtype: int
        """
        if not board or not board[0]:
            return 0
        cnt = 0
        # Fix: `xrange` is Python-2 only; `range` works on both 2 and 3.
        for i in range(len(board)):
            for j in range(len(board[0])):
                cnt += int(board[i][j] == 'X' and
                           (i == 0 or board[i - 1][j] != 'X') and
                           (j == 0 or board[i][j - 1] != 'X'))
        return cnt
class TestMethods(unittest.TestCase):
    """Minimal fixture kept so this module stays runnable under unittest."""

    def test_Local(self):
        """Trivial sanity check."""
        self.assertEqual(1, 1)
# Run the unit tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
# Reference Java solutions kept as a plain string for documentation only;
# this text is never executed by the Python interpreter.
Java = '''
#Thought:
Going over all cells, we can count only those that are the "first" cell of the battleship.
First cell will be defined as the most top-left cell. We can check for first cells by only counting cells
that do not have an 'X' to the left and do not have an 'X' above them.
#2ms 100%
class Solution {
    public int countBattleships(char[][] board) {
        if (board == null || board.length == 0) return 0;
        int m = board.length, n = board[0].length;
        int count = 0;
        for (int i = 0; i < m ;i++) {
            for (int j = 0; j < n; j++) {
                if (board[i][j] == '.') continue;
                if (i > 0 && board[i-1][j] == 'X') continue;
                if (j > 0 && board[i][j-1] == 'X') continue;
                count++;
            }
        }
        return count;
    }
    public int countBattleships2(char[][] board) {
        if (board == null || board.length == 0 || board[0].length == 0) return 0;
        int R = board.length, C = board[0].length, cnt = 0;
        for (int i = 0; i < R; i++) {
            for (int j = 0; j < C; j++) {
                if (board[i][j] == 'X'
                    && (i == 0 || board[i - 1][j] == '.')
                    && (j == 0 || board[i][j - 1] == '.'))
                    cnt++;
            }
        }
        return cnt;
    }
}
'''
df32fca4523247442d3692d3063e54af001152c8 | 8,823 | py | Python | catch_video.py | ZXin0305/hri | b91d89158fc2d05ca4d3ea3ba4a7b9f69b0221a2 | [
"Apache-2.0"
] | null | null | null | catch_video.py | ZXin0305/hri | b91d89158fc2d05ca4d3ea3ba4a7b9f69b0221a2 | [
"Apache-2.0"
] | null | null | null | catch_video.py | ZXin0305/hri | b91d89158fc2d05ca4d3ea3ba4a7b9f69b0221a2 | [
"Apache-2.0"
] | null | null | null | import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import message_filters
import cv2
import torch
import torchvision.transforms as transforms
from exps.stage3_root2.config import cfg
# from demo import process_video
from model.main_model.smap import SMAP
from model.refine_model.refinenet import RefineNet
import argparse
import dapalib
import numpy as np
from lib.utils.tools import *
from exps.stage3_root2.test_util import *
from path import Path
class CameraTopic(object):
    """ROS image subscriber that runs SMAP multi-person 3D pose estimation
    on every frame received on `topic_name`.

    Each frame is letterboxed/normalized for the network, passed through the
    main model (and the optional refine model), annotated with the projected
    skeletons, appended to a video file and saved to ./results/img.jpg.
    """
    def __init__(self, topic_name, cfg, main_model, refine_model, device):
        self.main_model = main_model
        self.refine_model = refine_model  # may be None when no refine checkpoint exists
        self.cfg = cfg
        self.device = device
        self.image = None
        self.cv_bridge = CvBridge()
        self.topic = topic_name
        self.image_sub = rospy.Subscriber(self.topic, Image, self.callback)
        # Annotated frames are appended to this video file.
        self.video_path = '/home/xuchengjun/Videos/output6.mp4'
        self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        self.out = cv2.VideoWriter(self.video_path, self.fourcc, 20.0, (1920, 1080))
        # (width, height) expected by the network input.
        self.net_input_shape = (cfg.dataset.INPUT_SHAPE[1], cfg.dataset.INPUT_SHAPE[0])
        normalize = transforms.Normalize(mean=cfg.INPUT.MEANS, std=cfg.INPUT.STDS)
        self.transform = transforms.Compose([transforms.ToTensor(), normalize])
        # Camera intrinsics matrix K loaded from the calibration file.
        cam_data = read_json('/home/xuchengjun/ZXin/smap/cam_data/cam.json')
        self.K = np.array(cam_data['K'])
        # Inference only: switch the models to eval mode.
        self.main_model.eval()
        if self.refine_model is not None:
            self.refine_model.eval()
    def callback(self, msg):
        """Estimate 3D poses for one incoming frame and record the annotated image."""
        self.image = self.cv_bridge.imgmsg_to_cv2(msg, "bgr8")
        if self.image is None:
            raise StopIteration
        net_input_image, scales = self.aug_croppad(self.image)
        # Attach the camera intrinsics used by the 2D -> 3D lifting step.
        scales['K'] = self.K
        scales['f_x'] = self.K[0, 0]
        scales['f_y'] = self.K[1, 1]
        scales['cx'] = self.K[0, 2]
        scales['cy'] = self.K[1, 2]
        net_input_image = self.transform(net_input_image).unsqueeze(0).to(self.device)
        with torch.no_grad():
            outputs_2d, outputs_3d, outputs_rd = self.main_model(net_input_image)
            outputs_3d = outputs_3d.cpu()
            outputs_rd = outputs_rd.cpu()
            hmsIn = outputs_2d[0]
            # Rescale heatmaps / PAFs to the value ranges expected by dapalib.
            hmsIn[:cfg.DATASET.KEYPOINT.NUM] /= 255
            hmsIn[cfg.DATASET.KEYPOINT.NUM:] /= 127
            rDepth = outputs_rd[0][0]
            pred_bodys_2d = dapalib.connect(hmsIn, rDepth, cfg.DATASET.ROOT_IDX, distFlag=True)
            if len(pred_bodys_2d) > 0:
                print('working ..')
                pred_bodys_2d[:, :, :2] *= cfg.dataset.STRIDE  # resize poses to the input-net shape
                pred_bodys_2d = pred_bodys_2d.numpy()
                K = scales['K']
                # Part relative-depth map, (c, h, w) -> (h, w, c).
                pafs_3d = outputs_3d[0].numpy().transpose(1, 2, 0)
                root_d = outputs_rd[0][0].numpy()
                # Upsample the network outputs to the input resolution for more
                # accurate depth sampling.
                paf_3d_upsamp = cv2.resize(
                    pafs_3d, (cfg.INPUT_SHAPE[1], cfg.INPUT_SHAPE[0]), interpolation=cv2.INTER_NEAREST)
                root_d_upsamp = cv2.resize(
                    root_d, (cfg.INPUT_SHAPE[1], cfg.INPUT_SHAPE[0]), interpolation=cv2.INTER_NEAREST)
                pred_rdepths = generate_relZ(pred_bodys_2d, paf_3d_upsamp, root_d_upsamp, scales)
                pred_bodys_3d = gen_3d_pose(pred_bodys_2d, pred_rdepths, scales, scales['pad_value'])
                # Optional refinement stage; falls back to the raw 3D poses.
                if self.refine_model is not None:
                    new_pred_bodys_3d = lift_and_refine_3d_pose(pred_bodys_2d, pred_bodys_3d, self.refine_model,
                                                                device=self.device, root_n=cfg.DATASET.ROOT_IDX)
                else:
                    new_pred_bodys_3d = pred_bodys_3d  # shape --> (pnum, 15, 4)
                print(new_pred_bodys_3d[:, 2, 2])
                # Bug fix: the original branched on the *global* `refine_model`
                # (a NameError when this class is imported without running
                # __main__) and both branches were identical anyway -- draw the
                # projected skeleton unconditionally.
                refine_pred_2d = project_to_pixel(new_pred_bodys_3d, K)
                draw_lines(self.image, refine_pred_2d, cfg.SHOW.BODY_EADGES, color=(0, 0, 255))
                draw_cicles(refine_pred_2d, self.image)
        cv2.imwrite('./results/img.jpg', self.image)
        self.out.write(self.image)
    def aug_croppad(self, img):
        """Resize `img` to fit the network input while keeping its aspect
        ratio, pad the short side with gray (128), and return the padded image
        together with the scaling metadata needed to map results back.
        """
        scale = dict()
        crop_x = self.net_input_shape[0]  # target width
        crop_y = self.net_input_shape[1]  # target height
        # Keep the aspect ratio: use the smaller of the two scale factors.
        scale['scale'] = min(crop_x / img.shape[1], crop_y / img.shape[0])
        img_scale = cv2.resize(img, (0, 0), fx=scale['scale'], fy=scale['scale'])
        scale['img_width'] = img.shape[1]
        scale['img_height'] = img.shape[0]
        scale['net_width'] = crop_x
        scale['net_height'] = crop_y
        pad_value = [0, 0]  # [left, top] padding in pixels
        # (Removed an unused `center` local that relied on np.int, which was
        # removed in NumPy 1.24.)
        if img_scale.shape[1] < crop_x:  # pad left and right
            margin_l = (crop_x - img_scale.shape[1]) // 2
            margin_r = crop_x - img_scale.shape[1] - margin_l
            pad_l = np.ones((img_scale.shape[0], margin_l, 3), dtype=np.uint8) * 128
            pad_r = np.ones((img_scale.shape[0], margin_r, 3), dtype=np.uint8) * 128
            pad_value[0] = margin_l
            # Concatenate along axis 1 (width).
            img_scale = np.concatenate((pad_l, img_scale, pad_r), axis=1)
        elif img_scale.shape[0] < crop_y:  # pad top and bottom
            margin_u = (crop_y - img_scale.shape[0]) // 2
            margin_d = crop_y - img_scale.shape[0] - margin_u
            pad_u = np.ones((margin_u, img_scale.shape[1], 3), dtype=np.uint8) * 128
            pad_d = np.ones((margin_d, img_scale.shape[1], 3), dtype=np.uint8) * 128
            pad_value[1] = margin_u
            # Concatenate along axis 0 (height).
            img_scale = np.concatenate((pad_u, img_scale, pad_d), axis=0)
        scale['pad_value'] = pad_value
        return img_scale, scale
if __name__ == '__main__':
    # Entry point: load the SMAP (and optional RefineNet) checkpoints, then
    # subscribe to the Kinect color topic and process frames until shutdown.
    rospy.init_node('Image_sub', anonymous=True)
    parser = argparse.ArgumentParser()
    # /home/xuchengjun/ZXin/human_pose/pretrained/SMAP_model.pth
    # /media/xuchengjun/zx/human_pose/pth/main/12.16/train.pth
    parser.add_argument('--SMAP_path', type=str,
                        default='/media/xuchengjun/zx/human_pose/pth/main/12.16/train.pth')
    # /home/xuchengjun/ZXin/human_pose/pretrained/RefineNet.pth
    # /media/xuchengjun/zx/human_pose/pth/main/12.8/RefineNet_epoch_300.pth
    parser.add_argument('--RefineNet_path', type=str,
                        default='/media/xuchengjun/zx/human_pose/pth/main/12.8/RefineNet_epoch_300.pth')
    parser.add_argument('--device', default='cuda:0')
    args = parser.parse_args()
    device = torch.device(args.device)
    # main model
    model = SMAP(cfg, run_efficient=cfg.RUN_EFFICIENT)
    model.to(device)
    # refine model
    refine_model = RefineNet()
    refine_model.to(device)
    smap_model_path = args.SMAP_path
    refine_model_path = args.RefineNet_path
    # smap
    # Load on CPU first so the checkpoint opens even without the saving GPU.
    state_dict = torch.load(smap_model_path, map_location=torch.device('cpu'))
    state_dict = state_dict['model']
    model.load_state_dict(state_dict)
    # The refine stage is optional: fall back to None when no checkpoint exists.
    if Path(refine_model_path).exists():
        print('using refine net..')
        refine_state_dict = torch.load(refine_model_path)
        refine_model.load_state_dict(refine_state_dict)
    else:
        refine_model = None
    cam = CameraTopic('/kinect2_1/hd/image_color', cfg, model, refine_model, device)
    rospy.spin()
| 44.560606 | 131 | 0.591976 | 6,816 | 0.765757 | 0 | 0 | 0 | 0 | 0 | 0 | 1,539 | 0.172902 |
df33b007b3652a76cfc302baf9ffe1c7ec0e5ef1 | 3,176 | py | Python | bouncer.py | pard68/epub-bouncer | 6a4e09e24816ac4fc25450148665901db9166202 | [
"MIT"
] | null | null | null | bouncer.py | pard68/epub-bouncer | 6a4e09e24816ac4fc25450148665901db9166202 | [
"MIT"
] | null | null | null | bouncer.py | pard68/epub-bouncer | 6a4e09e24816ac4fc25450148665901db9166202 | [
"MIT"
] | null | null | null | from typing import Dict
import correct_spellings
import epub_handling
import xml_handling
import argparse
import string
import re
# ---------------------------------------------------------------------------------------------------
def correct_file_contents(corrections: Dict[str, str], file_contents: str) -> str:
    """Apply every spelling correction to *file_contents* and return the result.

    For each (misspelling, correction) pair the lowercase, Capitalized and
    UPPERCASE forms are replaced exactly, then a final case-insensitive pass
    catches any remaining MiXeD-case occurrences.

    Bug fixes vs. the original:
    * corrections now accumulate -- each replacement operates on the already
      corrected text instead of restarting from the original string, which
      previously discarded all but the last correction;
    * ``re.sub`` is called with ``flags=re.IGNORECASE`` -- the flag value was
      previously passed as the positional ``count`` argument, which limited
      the number of substitutions instead of ignoring case;
    * the misspelling is escaped with ``re.escape`` so words containing regex
      metacharacters cannot corrupt the pattern.
    """
    corrected = file_contents
    for orig, corr in corrections.items():
        # Exact-case replacements: (orig, corr), (Orig, Corr) and (ORIG, CORR).
        corrected = corrected.replace(orig, corr)
        corrected = corrected.replace(string.capwords(orig), string.capwords(corr))
        corrected = corrected.replace(orig.upper(), corr.upper())
        # Catch-all for any remaining MiXeD-case occurrences.
        corrected = re.sub(re.escape(orig), corr, corrected, flags=re.IGNORECASE)
    return corrected
def apply_corrections(corrections, text_files):
    """Rewrite each file in *text_files* in place with *corrections* applied."""
    for path in text_files:
        with open(path, 'r') as handle:
            original_text = handle.read()
        corrected_text = correct_file_contents(corrections, original_text)
        with open(path, 'w') as handle:
            handle.write(corrected_text)
def main():
    """Command-line entry point: extract an ePub, spell-correct its text
    files and write the result to ``<name>_corrected.epub``."""
    # Read arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--epub-name', type=str, default='book', metavar='N',
                        help='the file name of the epub to be corrected')
    parser.add_argument('--dict-lang', type=str, default='en_US', metavar='N',
                        help='the language code of the dictionary to use')
    parser.add_argument('--temp-folder', type=str, default='temp', metavar='N',
                        help='name of the temporary folder created')
    args = parser.parse_args()
    # set path
    path = args.epub_name + '.epub'
    path_corrected = args.epub_name + '_corrected.epub'
    # Only the en_US dictionary is supported for now; bail out early otherwise.
    if args.dict_lang != 'en_US':
        print(f'Bouncer currently only supports the en_US dictionary')
        quit()
    epub_handling.extract_from_epub_file(path, args.temp_folder)
    print('Extracted the ePub files')
    # Locate the content text files via the ePub container/contents manifests.
    opt_contents_path = xml_handling.get_contents_path_from_container_file(args.temp_folder)
    text_files = xml_handling.get_text_file_paths_from_contents_file(opt_contents_path, args.temp_folder)
    # Read dict of unique words (with occurance count) from text files
    unique_words = xml_handling.unique_words_from_text_files(text_files)
    print('Read all words from ePub')
    # Get dict of corrections [original,correction] to be applied to all text_files
    corrections = correct_spellings.corrections_for_words(unique_words, args.dict_lang)
    print(f'Applying corrections to ePub extracted files')
    # Apply corrections
    apply_corrections(corrections, text_files)
    print(f'Applied corrections, writing ePub extracted files back to .ePub file')
    epub_handling.write_epub_file(path_corrected, args.temp_folder)
    print('Wrote corrected ePub!')
# Run the ePub correction pipeline when executed as a script.
if __name__ == '__main__':
    main()
df33dc1de2ad7edcc4ac043bb10df37be20e5ae7 | 2,119 | py | Python | detect.py | MahmudulAlam/Object-Detection-Using-GPM | 52662eb9f8bf98fc8b2282bb8edd409a49e99bf9 | [
"MIT"
] | 1 | 2022-02-15T21:18:18.000Z | 2022-02-15T21:18:18.000Z | detect.py | MahmudulAlam/Object-Detection-Using-GPM | 52662eb9f8bf98fc8b2282bb8edd409a49e99bf9 | [
"MIT"
] | 2 | 2022-01-14T16:09:36.000Z | 2022-03-07T14:54:36.000Z | detect.py | MahmudulAlam/Object-Detection-Using-GPM | 52662eb9f8bf98fc8b2282bb8edd409a49e99bf9 | [
"MIT"
] | null | null | null | import cv2
import pickle
import numpy as np
from flag import Flag
# Global experiment configuration shared by detector() (thresholds, sizes).
flag = Flag()
# Per-class drawing colors, unpickled from the assets directory
# (presumably BGR tuples indexed by class id -- confirm against training code).
with open('assets/colors.h5', 'rb') as f:
    colors = pickle.loads(f.read())
# Class names, one per line; detector() indexes this with `i - 1`.
with open('label.txt', 'r') as f:
    classes = f.readlines()
def detector(image, label):
    """Draw bounding boxes and class names for every active channel of
    ``label`` onto ``image`` and return the annotated image.

    ``image`` is assumed to be a float image in [0, 1] and ``label`` an
    (H, W, C) score volume with one channel per class -- TODO confirm shapes
    against the dataset generator.
    """
    image = np.asarray(image * 255., np.uint8)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # A class is considered present when any value in its channel exceeds 0.5.
    indices = np.squeeze(np.max(np.max(label, axis=0, keepdims=True), axis=1, keepdims=True))
    indices = np.where(indices > 0.5)[0]
    for i in indices:
        # Fix: np.float was removed in NumPy 1.24 -- use np.float64 explicitly.
        output = np.asarray(label[:, :, i], dtype=np.float64)
        # Binarize the channel at the configured threshold.
        output[output > flag.threshold] = 255.
        output[output <= flag.threshold] = 0.
        output = np.asarray(output, dtype=np.uint8)
        # Light 2x2 box blur to smooth the mask before contour extraction.
        kernel = np.ones((2, 2), np.float32) / 4
        output = cv2.filter2D(output, -1, kernel)
        # NOTE(review): the 3-value unpacking is the OpenCV 3 findContours API;
        # OpenCV 4 returns only (contours, hierarchy) -- confirm the pinned version.
        _, contours, _ = cv2.findContours(output, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_L1)
        for contour in contours:
            col_wise = contour[:, :, 0]
            row_wise = contour[:, :, 1]
            # Rescale the bounding box from label resolution to image resolution.
            x1 = min(col_wise)[0] / flag.y_size * flag.x_size
            y1 = min(row_wise)[0] / flag.y_size * flag.x_size
            x2 = max(col_wise)[0] / flag.y_size * flag.x_size
            y2 = max(row_wise)[0] / flag.y_size * flag.x_size
            c = colors[i]
            image = cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), (int(c[0]), int(c[1]), int(c[2])), 2)
            font = cv2.FONT_HERSHEY_SIMPLEX
            # classes[i - 1][:-1] strips the trailing newline from label.txt.
            cv2.putText(image, classes[i - 1][:-1], (int(x1), int(y1)), font, .8, (int(c[0]), int(c[1]), int(c[2])), 2,
                        cv2.LINE_AA)
    return image
if __name__ == '__main__':
    # Quick visual check: run the detector on a single validation sample.
    flag = Flag()
    images = np.load('dataset/valid_x.npy')
    labels = np.load('dataset/valid_y.npy')
    # print(images.shape)
    image = images[100]
    label = labels[100]
    image = detector(image, label)
    cv2.imshow('image', image)
    cv2.waitKey(0)
| 34.737705 | 119 | 0.568193 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 254 | 0.119868 |
df340b5a3a3411a900f306d493ae8fc3d70d7cb5 | 760 | py | Python | instabot/liking.py | jakerobinson19/instabot | a5a2d23cc3791d79a2cfeb469144eb92c24e402c | [
"MIT"
] | 1 | 2020-03-03T03:54:56.000Z | 2020-03-03T03:54:56.000Z | instabot/liking.py | jakerobinson19/instabot | a5a2d23cc3791d79a2cfeb469144eb92c24e402c | [
"MIT"
] | null | null | null | instabot/liking.py | jakerobinson19/instabot | a5a2d23cc3791d79a2cfeb469144eb92c24e402c | [
"MIT"
] | null | null | null | import retrieve
import validation
from time_functions import time_delay
from selenium.webdriver import ActionChains
def like_pic(browser):
    """Click the like button of the currently open picture.

    The click is gated on validation.already_liked(...) being truthy --
    presumably that helper returns True when the pic has NOT been liked yet;
    TODO confirm the naming against the validation module.
    """
    like_button = retrieve.like_button(browser)
    time_delay()
    if validation.already_liked(like_button):
        like_button.click()
def like_pic_in_feed(browser, number = 1):
    # Inspect up to `number` posts in the feed, liking each one that passes
    # the validation check.  `loop` counts every inspected heart, liked or
    # not, so `number` bounds inspections rather than successful likes.
    # NOTE(review): gating the click on validation.already_liked() suggests
    # the helper actually returns True for *not yet liked* posts -- confirm.
    loop = 1
    while loop <= number:
        # Re-fetch the like buttons each pass; the feed DOM may have changed.
        hearts = retrieve.feed_like_buttons(browser)
        for h in range(len(hearts)):
            #print('liking the pic {}'.format(str(self.loop + 1)))
            time_delay()
            if validation.already_liked(hearts[h]):
                actions = ActionChains(browser)
                actions.move_to_element(hearts[h])
                actions.click(hearts[h])
                actions.perform()
            loop = loop + 1
            if loop > number:
                break
| 24.516129 | 60 | 0.663158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.071053 |
df342c2f47ed154f18acbafb3221d18351eb0056 | 1,261 | py | Python | tollan/utils/qt/colors.py | toltec-astro/tollan | 36a78224ceef4145be1c5acca734b5c317eb7ba8 | [
"BSD-3-Clause"
] | null | null | null | tollan/utils/qt/colors.py | toltec-astro/tollan | 36a78224ceef4145be1c5acca734b5c317eb7ba8 | [
"BSD-3-Clause"
] | null | null | null | tollan/utils/qt/colors.py | toltec-astro/tollan | 36a78224ceef4145be1c5acca734b5c317eb7ba8 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
import matplotlib.colors as mc
import numpy as np
import re
class Palette(object):
    """Named color palette with converters between hex strings, float RGB
    triples in [0, 1] and 8-bit integer RGB triples."""
    black = "#000000"
    white = "#ffffff"
    blue = "#73cef4"
    green = "#bdffbf"
    orange = "#ffa500"
    purple = "#af00ff"
    red = "#ff6666"
    yellow = "#ffffa0"
    @staticmethod
    def _is_hex(c):
        # Accept 3- or 6-digit hex color strings such as "#abc" or "#aabbcc".
        m = re.match("^#(?:[0-9a-fA-F]{3}){1,2}$", c)
        return m is not None
    @classmethod
    def _color(cls, c):
        """Normalize *c* to a hex string or a float RGB(A) array.

        Accepts a hex string, a palette attribute name, or a length-3/4
        ndarray (float arrays pass through unchanged, integer arrays are
        scaled from 0..255 down to 0..1).  Raises ValueError otherwise.
        """
        # (Renamed the first parameter from `self` to `cls`: this is a
        # classmethod, so it receives the class object.)
        if isinstance(c, str):
            if cls._is_hex(c):
                return c
            if hasattr(cls, c) and cls._is_hex(getattr(cls, c)):
                return getattr(cls, c)
        if isinstance(c, np.ndarray) and len(c) in (3, 4):
            # Fix: `c.dtype in (np.float_, np.double)` broke on NumPy 2.0
            # (np.float_ was removed) and missed float32 arrays, which were
            # then wrongly divided by 255.  issubdtype covers all floats.
            if np.issubdtype(c.dtype, np.floating):
                return c
            else:
                return c / 255.
        raise ValueError(f"unknown color {c}")
    def rgb(self, c):
        """Return *c* as a float RGB triple in [0, 1]."""
        return np.array(mc.to_rgb(self._color(c)))
    def irgb(self, c):
        """Return *c* as an 8-bit integer RGB triple."""
        return (self.rgb(c) * 255.).astype(np.uint8)
    def hex(self, c):
        """Return *c* as a hex string."""
        return mc.to_hex(self._color(c))
    def blend(self, c1, c2, a, fmt='hex'):
        """Linearly blend *c1* and *c2* with weight *a* on *c2*.

        *fmt* selects the output converter method: 'hex', 'rgb' or 'irgb'.
        """
        c1 = self.rgb(c1)
        c2 = self.rgb(c2)
        c = c1 * (1. - a) + c2 * a
        return getattr(self, fmt)(c)
| 24.25 | 67 | 0.516257 | 1,174 | 0.931007 | 0 | 0 | 580 | 0.459952 | 0 | 0 | 147 | 0.116574 |
df3491f1b4f4fb22d702912bfddd659bd45a728d | 8,941 | py | Python | mom/itertools.py | ands904/ands904-tinypyclone | 6a5427940a03058abf08de2a20aa45a929fd21ad | [
"ECL-2.0",
"Apache-2.0"
] | 16 | 2015-01-06T19:21:19.000Z | 2021-11-17T16:57:26.000Z | mom/itertools.py | ands904/ands904-tinypyclone | 6a5427940a03058abf08de2a20aa45a929fd21ad | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-04-15T11:18:32.000Z | 2019-04-15T11:18:32.000Z | mom/itertools.py | ands904/ands904-tinypyclone | 6a5427940a03058abf08de2a20aa45a929fd21ad | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2015-06-17T04:15:35.000Z | 2019-04-15T11:16:43.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""":synopsis: Implements :mod:`itertools` for older versions of Python.
:module: mom.itertools
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, PSF
Borrowed from brownie.itools.
"""
from __future__ import absolute_import
import itertools
from mom import builtins
try:
# Python 2.x
from itertools import izip
except ImportError:
# Python 3.x
izip = zip
__author__ = "yesudeep@google.com (Yesudeep Mangalapilly)"
__all__ = [
"chain",
"combinations_with_replacement",
"compress",
"count",
"flatten",
"grouped",
"izip_longest",
"permutations",
"product",
"starmap",
"unique",
]
# Use the stdlib implementation when it already provides chain.from_iterable
# (Python >= 2.6); otherwise fall back to a pure-Python equivalent.
if getattr(itertools.chain, "from_iterable", None):
    chain = itertools.chain
else:
    class chain(object):
        """An iterator which yields elements from the given `iterables` until each
        iterable is exhausted.
        .. versionadded:: 0.2
        """
        @classmethod
        def from_iterable(cls, iterable):
            """Alternative constructor which takes an `iterable` yielding iterators,
            this can be used to chain an infinite number of iterators.
            """
            # Bypass __init__ so the iterable of iterables is consumed lazily.
            rv = object.__new__(cls)
            rv._init(iterable)
            return rv
        def __init__(self, *iterables):
            self._init(iterables)
        def _init(self, iterables):
            # Current iterable starts empty so the first next() advances
            # straight into the first real iterable.
            self.iterables = iter(iterables)
            self.current_iterable = iter([])
        def __iter__(self):
            return self
        # NOTE(review): `next()` uses the Python 2 iterator protocol; on
        # Python 3 this fallback would need __next__, but the branch is only
        # reachable on interpreters lacking chain.from_iterable -- confirm.
        def next(self):
            try:
                return self.current_iterable.next()
            except StopIteration:
                # Advance to the next iterable; StopIteration propagates when
                # the outer iterable is exhausted, terminating iteration.
                self.current_iterable = iter(self.iterables.next())
                return self.current_iterable.next()
def izip_longest(*iterables, **kwargs):
    """Make an iterator that aggregates elements from each of the iterables. If
    the iterables are of uneven length, missing values are filled-in with
    `fillvalue`. Iteration continues until the longest iterable is exhausted.
    If one of the iterables is potentially infinite, then the
    :func:`izip_longest` function should be wrapped with something that limits
    the number of calls (for example :func:`itertools.islice` or
    :func:`itertools.takewhile`.) If not specified, `fillvalue` defaults to
    ``None``.
    .. note:: Software and documentation for this function are taken from
        CPython, :ref:`license details <psf-license>`.
    """
    fillvalue = kwargs.get("fillvalue")
    # Each exhausted input first yields one sentinel value, then infinite
    # fillers.  The shared pop-based counter raises IndexError once every
    # input has produced its sentinel, which cleanly ends the iteration.
    def sentinel(counter=([fillvalue] * (len(iterables) - 1)).pop):
        yield counter()
    fillers = itertools.repeat(fillvalue)
    iters = [chain(it, sentinel(), fillers) for it in iterables]
    try:
        for tup in izip(*iters):
            yield tup
    except IndexError:
        # Raised by the final sentinel: all iterables are exhausted.
        pass
def permutations(iterable, r=None):
    """Yield successive `r`-length permutations of the elements in `iterable`.

    When `r` is omitted or ``None`` it defaults to the length of the
    iterable, producing every full-length permutation.  Tuples are emitted in
    lexicographic order relative to the input, and elements are treated as
    unique by position, mirroring ``itertools.permutations``.  The number of
    tuples is ``n! / (n - r)!`` for ``0 <= r <= n`` and zero for ``r > n``.
    """
    items = tuple(iterable)
    total = len(items)
    width = total if r is None else r
    for candidate in product(builtins.range(total), repeat=width):
        # Keep only index tuples without repeated positions.
        if len(set(candidate)) == width:
            yield tuple(items[index] for index in candidate)
def product(*iterables, **kwargs):
    """Cartesian product of the input iterables.

    Equivalent to nested for-loops in a generator expression: ``product(A, B)``
    yields the same tuples as ``((x, y) for x in A for y in B)``, with the
    rightmost element advancing fastest, so sorted inputs produce tuples in
    lexicographic order.  The optional ``repeat`` keyword computes the product
    of the inputs with themselves, e.g. ``product(A, repeat=4)`` ==
    ``product(A, A, A, A)``.

    Bug fix: on Python 3 ``map`` returns an iterator, so the original
    ``map(tuple, iterables) * repeat`` raised ``TypeError``; the mapping is
    now materialized with ``list`` first, which works on Python 2 and 3.
    """
    pools = list(map(tuple, iterables)) * kwargs.get("repeat", 1)
    result = [[]]
    for pool in pools:
        result = [x + [y] for x in result for y in pool]
    for prod in result:
        yield tuple(prod)
# Prefer the C implementation from the stdlib when available; fall back to a
# pure-Python generator otherwise.
try:
    from itertools import starmap
    starmap = starmap
except ImportError:
    def starmap(function, iterable):
        """Make an iterator that computes the function using arguments obtained from
        the iterable. Used instead of :func:`itertools.imap` when an argument
        parameters are already grouped in tuples from a single iterable (the data
        has been "pre-zipped"). The difference between :func:`itertools.imap` and
        :func:`starmap` parallels the distinction between ``function(a, b)`` and
        ``function(*c)``.
        .. note:: Software and documentation for this function are taken from
            CPython, :ref:`license details <psf-license>`.
        """
        for args in iterable:
            yield function(*args)
def combinations_with_replacement(iterable, r):
    """Yield the `r`-length subsequences of elements from `iterable`, allowing
    individual elements to be repeated.

    Tuples are emitted in lexicographic sort order, so a sorted input produces
    sorted output. Elements are treated as unique based on position, not
    value, so unique inputs yield unique combinations. For ``n > 0`` the
    number of items produced is ``(n + r - 1)! / r! / (n - 1)!``.

    .. note:: Software and documentation for this function are taken from
              CPython, :ref:`license details <psf-license>`.
    """
    pool = tuple(iterable)
    pool_size = len(pool)
    for indices in product(builtins.range(pool_size), repeat=r):
        # A combination with replacement corresponds exactly to a
        # non-decreasing tuple of pool indices.
        if sorted(indices) == list(indices):
            yield tuple(pool[index] for index in indices)
def compress(data, selectors):
    """Yield the elements of `data` whose corresponding element in
    `selectors` evaluates to ``True``, stopping as soon as either iterable
    is exhausted.

    .. note:: Software and documentation for this function are taken from
              CPython, :ref:`license details <psf-license>`.
    """
    return (item for item, selected in izip(data, selectors) if selected)
def count(start=0, step=1):
    """Yield evenly spaced values beginning at `start`.

    Often used as an argument to :func:`imap` to generate consecutive data
    points, or with :func:`izip` to add sequence numbers. When counting with
    floating point numbers, better accuracy can sometimes be achieved with
    multiplicative code such as ``(start + step * i for i in count())``.

    .. note:: Software and documentation for this function are taken from
              CPython, :ref:`license details <psf-license>`.
    """
    value = start
    while True:
        yield value
        value += step
def grouped(n, iterable, fillvalue=None):
    """Collect the items of `iterable` into tuples of size `n`, padding the
    final group with `fillvalue` so every tuple has exactly `n` items.
    """
    # The same iterator repeated n times makes izip_longest pull n
    # consecutive items per output tuple.
    repeated_iterator = [iter(iterable)] * n
    return izip_longest(fillvalue=fillvalue, *repeated_iterator)
def unique(iterable, seen=None):
    """Yield each item of `iterable` exactly once, preserving order.

    :param seen:
        An iterable specifying already "seen" items which will be excluded
        from the result.

    .. versionadded:: 0.5

    .. versionchanged:: 0.5
       Items don't have to be hashable any more.
    """
    hashable_seen = set() if seen is None else set(seen)
    unhashable_seen = []
    for item in iterable:
        try:
            is_new = item not in hashable_seen
            if is_new:
                hashable_seen.add(item)
        except TypeError:
            # Unhashable items fall back to a linear scan over a list.
            is_new = item not in unhashable_seen
            if is_new:
                unhashable_seen.append(item)
        if is_new:
            yield item
def flatten(iterable, ignore=None):
    """Flatten a nested `iterable`, yielding its leaves depth-first.

    :param ignore:
        Types of iterable objects which should be yielded as-is instead of
        being descended into.

    .. versionadded:: 0.5
    """
    stack = [iter(iterable)]
    while stack:
        try:
            item = next(stack[-1])
        except StopIteration:
            # The iterator on top of the stack is exhausted.
            stack.pop()
            continue
        if ignore and isinstance(item, ignore):
            yield item
        elif builtins.is_bytes_or_unicode(item) and len(item) == 1:
            # Single characters are leaves; descending into them would
            # loop forever.
            yield item
        else:
            try:
                stack.append(iter(item))
            except TypeError:
                # Not iterable, so it is a leaf.
                yield item
| 30.515358 | 80 | 0.683369 | 870 | 0.097294 | 6,449 | 0.721203 | 276 | 0.030866 | 0 | 0 | 5,734 | 0.641244 |
df36ad0e12226dcc75dd2b44297a2b45a5264918 | 6,377 | py | Python | Sampling_based_Planning/rrt_3D/env3D.py | CodesHub/PathPlanning | 8271d9a0e30d7d9d0f20d61a2f85b8fe199209fa | [
"MIT"
] | 3,693 | 2020-07-15T15:41:07.000Z | 2022-03-31T17:26:46.000Z | Sampling_based_Planning/rrt_3D/env3D.py | Alanaab/PathPlanning | 8c12192d6952fcd2c3f8ba3c98e3593b27049a40 | [
"MIT"
] | 26 | 2020-08-27T04:56:59.000Z | 2022-03-14T02:17:05.000Z | Sampling_based_Planning/rrt_3D/env3D.py | Alanaab/PathPlanning | 8c12192d6952fcd2c3f8ba3c98e3593b27049a40 | [
"MIT"
] | 799 | 2020-07-17T04:02:05.000Z | 2022-03-31T12:56:29.000Z | # this is the three dimensional configuration space for rrt
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: yue qi
"""
import numpy as np
# from utils3D import OBB2AABB
def R_matrix(z_angle, y_angle, x_angle):
    """Build a rotation matrix in SO(3) as Rz @ Ry @ Rx (ZYX intrinsic
    rotation: z angle = yaw, y angle = pitch, x angle = roll).

    The angles are passed straight to np.cos/np.sin, i.e. radians. The
    result gives (r1, r2, r3) in R^3x3 in the world frame {W} and is used
    for obb.O; [[R p], [0^T 1]] transforms body coordinates to world.
    """
    cz, sz = np.cos(z_angle), np.sin(z_angle)
    cy, sy = np.cos(y_angle), np.sin(y_angle)
    cx, sx = np.cos(x_angle), np.sin(x_angle)
    rot_z = np.array([[cz, -sz, 0.0], [sz, cz, 0.0], [0.0, 0.0, 1.0]])
    rot_y = np.array([[cy, 0.0, sy], [0.0, 1.0, 0.0], [-sy, 0.0, cy]])
    rot_x = np.array([[1.0, 0.0, 0.0], [0.0, cx, -sx], [0.0, sx, cx]])
    return rot_z @ rot_y @ rot_x
def getblocks():
    """Return the static axis-aligned blocks as an (n, 6) float array.

    Each row is [xmin, ymin, zmin, xmax, ymax, zmax].
    """
    blocks = [[4.00e+00, 1.20e+01, 0.00e+00, 5.00e+00, 2.00e+01, 5.00e+00],
              [5.5e+00, 1.20e+01, 0.00e+00, 1.00e+01, 1.30e+01, 5.00e+00],
              [1.00e+01, 1.20e+01, 0.00e+00, 1.40e+01, 1.30e+01, 5.00e+00],
              [1.00e+01, 9.00e+00, 0.00e+00, 2.00e+01, 1.00e+01, 5.00e+00],
              [9.00e+00, 6.00e+00, 0.00e+00, 1.00e+01, 1.00e+01, 5.00e+00]]
    return np.array(blocks)
def getballs():
    """Return the spherical obstacles as an (n, 4) array of [x, y, z, radius]."""
    spheres = [[2.0, 6.0, 2.5, 1.0],
               [14.0, 14.0, 2.5, 2]]
    return np.array(spheres)
def getAABB(blocks):
    """Convert each block row into a (2, 3) [min-corner, max-corner] array.

    This representation is consumed by the pyrr package for collision
    detection. The offsets are currently zero; make them nonzero to
    inflate the boxes slightly.
    """
    return [np.array([np.add(block[0:3], -0), np.add(block[3:6], 0)])
            for block in blocks]
def getAABB2(blocks):
    """Wrap every block row in an ``aabb`` instance (used in lineAABB)."""
    return [aabb(block) for block in blocks]
def add_block(block=None):
    """Return the extents of a block to append to the environment.

    :param block: optional [xmin, ymin, zmin, xmax, ymax, zmax] extents;
        when omitted, a fresh copy of the default block is returned.

    The previous version used a mutable list as the default argument, so
    every call without arguments returned the *same* list object and a
    caller mutating it would silently change all future defaults. A new
    list is now built per call; values and the call signature's behavior
    are unchanged.
    """
    if block is None:
        block = [1.51e+01, 0.00e+00, 2.10e+00, 1.59e+01, 5.00e+00, 6.00e+00]
    return block
class aabb(object):
    """Axis-aligned bounding box built from a block's extents.

    Attributes:
        P: center point of the box
        E: half-extents along each world axis
        O: orientation in SO(3), always the identity (world axes)
    """
    def __init__(self, AABB):
        lower, upper = AABB[0:3], AABB[3:6]
        self.P = [(upper[k] + lower[k]) / 2 for k in range(3)]  # center point
        self.E = [(upper[k] - lower[k]) / 2 for k in range(3)]  # extents
        self.O = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
class obb(object):
    """Oriented bounding box.

    Attributes:
        P: center point (world frame)
        E: half-extents along the body axes
        O: rotation matrix in SO(3) expressing the body axes in {W}
        T: 4x4 homogeneous transform built from O and P
    """
    def __init__(self, P, E, O):
        self.P = P
        self.E = E
        self.O = O
        rotation_transpose = self.O.T
        upper_rows = np.column_stack([rotation_transpose, -rotation_transpose @ self.P])
        self.T = np.vstack([upper_rows, [0, 0, 0, 1]])
class env():
    """3D configuration space for the sampling-based planners.

    Holds the workspace boundary, the static obstacles (AABB blocks in
    several representations, spheres, and two OBBs), the start and goal
    points, and a scalar time ``t``, plus helpers to move the start, a
    block, or an OBB.
    """
    def __init__(self, xmin=0, ymin=0, zmin=0, xmax=20, ymax=20, zmax=5, resolution=1):
        # def __init__(self, xmin=-5, ymin=0, zmin=-5, xmax=10, ymax=5, zmax=10, resolution=1):
        self.resolution = resolution
        # Workspace bounds packed as [xmin, ymin, zmin, xmax, ymax, zmax].
        self.boundary = np.array([xmin, ymin, zmin, xmax, ymax, zmax])
        # The same static blocks in three representations: raw extents,
        # aabb objects, and pyrr-style corner pairs.
        self.blocks = getblocks()
        self.AABB = getAABB2(self.blocks)
        self.AABB_pyrr = getAABB(self.blocks)
        self.balls = getballs()
        # NOTE(review): R_matrix feeds these angles to np.cos/np.sin, which
        # take radians; 135 and 45 look like degrees -- confirm intent.
        self.OBB = np.array([obb([5.0,7.0,2.5],[0.5,2.0,2.5],R_matrix(135,0,0)),
                             obb([12.0,4.0,2.5],[0.5,2.0,2.5],R_matrix(45,0,0))])
        self.start = np.array([2.0, 2.0, 2.0])
        self.goal = np.array([6.0, 16.0, 0.0])
        self.t = 0 # time
    def New_block(self):
        """Append the default block from add_block() and rebuild both AABB lists."""
        newblock = add_block()
        self.blocks = np.vstack([self.blocks,newblock])
        self.AABB = getAABB2(self.blocks)
        self.AABB_pyrr = getAABB(self.blocks)
    def move_start(self, x):
        """Set a new start point."""
        self.start = x
    def move_block(self, a = [0,0,0], s = 0, v = [0.1,0,0], block_to_move = 0, mode = 'translation'):
        """Translate one block by offset `a` and advance time by `s`.

        Returns a pair of resolution-inflated extents: the block's new
        range and its old range (only in 'translation' mode; other modes
        fall through and return None).

        NOTE(review): the default argument lists are shared across calls;
        they are only read here, but mutating them at a call site would
        leak into later calls.  `v` is accepted but never used in this
        method -- confirm whether a velocity mode was planned.
        """
        # t is time , v is velocity in R3, a is acceleration in R3, s is increment ini time,
        # R is an orthorgonal transform in R3*3, is the rotation matrix
        # (s',t') = (s + tv, t) is uniform transformation
        # (s',t') = (s + a, t + s) is a translation
        if mode == 'translation':
            ori = np.array(self.blocks[block_to_move])
            self.blocks[block_to_move] = \
            np.array([ori[0] + a[0],\
                      ori[1] + a[1],\
                      ori[2] + a[2],\
                      ori[3] + a[0],\
                      ori[4] + a[1],\
                      ori[5] + a[2]])
            # Keep the matching aabb object's center point in sync.
            self.AABB[block_to_move].P = \
            [self.AABB[block_to_move].P[0] + a[0], \
             self.AABB[block_to_move].P[1] + a[1], \
             self.AABB[block_to_move].P[2] + a[2]]
            self.t += s
            # return a range of block that the block might moved
            # (note: `a` is rebound here, shadowing the offset parameter)
            a = self.blocks[block_to_move]
            return np.array([a[0] - self.resolution, a[1] - self.resolution, a[2] - self.resolution, \
                             a[3] + self.resolution, a[4] + self.resolution, a[5] + self.resolution]), \
                   np.array([ori[0] - self.resolution, ori[1] - self.resolution, ori[2] - self.resolution, \
                             ori[3] + self.resolution, ori[4] + self.resolution, ori[5] + self.resolution])
            # return a,ori
    # (s',t') = (Rx, t)
    def move_OBB(self, obb_to_move = 0, theta=[0,0,0], translation=[0,0,0]):
        """Rotate and translate one OBB in place; return (moved_obb, ori).

        NOTE(review): `ori` stores a *reference* to the obb that is then
        mutated, so the returned "original" reflects the new state --
        confirm whether a copy was intended.
        """
        # theta stands for rotational angles around three principle axis in world frame
        # translation stands for translation in the world frame
        ori = [self.OBB[obb_to_move]]
        # move obb position
        self.OBB[obb_to_move].P = \
        [self.OBB[obb_to_move].P[0] + translation[0],
         self.OBB[obb_to_move].P[1] + translation[1],
         self.OBB[obb_to_move].P[2] + translation[2]]
        # Calculate orientation
        self.OBB[obb_to_move].O = R_matrix(z_angle=theta[0],y_angle=theta[1],x_angle=theta[2])
        # generating transformation matrix
        self.OBB[obb_to_move].T = np.vstack([np.column_stack([self.OBB[obb_to_move].O.T,\
            -self.OBB[obb_to_move].O.T@self.OBB[obb_to_move].P]),[translation[0],translation[1],translation[2],1]])
        return self.OBB[obb_to_move], ori[0]
if __name__ == '__main__':
    # Smoke test: build the default environment.
    newenv = env()
| 42.798658 | 125 | 0.538341 | 4,213 | 0.660655 | 0 | 0 | 0 | 0 | 0 | 0 | 1,377 | 0.215932 |
df3715d9a9064f79f11dad87ba56f6a3d27743a1 | 289 | py | Python | tests/test_wrong_url.py | alexella1/python-ascii_magic | df189da5dc59d4cd6f6fe003a75c99539247851f | [
"MIT"
] | null | null | null | tests/test_wrong_url.py | alexella1/python-ascii_magic | df189da5dc59d4cd6f6fe003a75c99539247851f | [
"MIT"
] | null | null | null | tests/test_wrong_url.py | alexella1/python-ascii_magic | df189da5dc59d4cd6f6fe003a75c99539247851f | [
"MIT"
] | null | null | null | from context import ascii_magic
# Fetch a remote wallpaper, render it as ASCII art in the terminal, and
# report HTTP failures instead of letting the test script die.
try:
    output = ascii_magic.from_url('https://wow.zamimg.com/uploads/blog/images/20516-afterlives-ardenweald-4k-desktop-wallpapers.jpg')
    ascii_magic.to_terminal(output)
# NOTE(review): the handler below formats e.code / e.msg, which exist on
# urllib.error.HTTPError (an OSError subclass) but not on a generic
# OSError -- a plain network failure would raise AttributeError. Confirm
# the intended exception scope.
except OSError as e:
print(f'Could not load the image, server said: {e.code} {e.msg}') | 41.285714 | 130 | 0.782007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.539792 |
df38488be49b126da2ba8c4b38d9a97d5b60ae28 | 2,706 | py | Python | book/code/imdb - project4+5 scrape popular film list and poster.py | marcus-pham/test | bcbc7c34a672ae6c7e9bdc811934c4003134ae0d | [
"MIT"
] | null | null | null | book/code/imdb - project4+5 scrape popular film list and poster.py | marcus-pham/test | bcbc7c34a672ae6c7e9bdc811934c4003134ae0d | [
"MIT"
] | null | null | null | book/code/imdb - project4+5 scrape popular film list and poster.py | marcus-pham/test | bcbc7c34a672ae6c7e9bdc811934c4003134ae0d | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
from selenium import webdriver
import requests
import time
class Film(object):
    """Lightweight record for one film scraped from an IMDb chart page.

    All fields start out as empty strings and are filled in by the
    scraper: title, rank, year_of_production, and the relative IMDb link.
    """
    def __init__(self):
        for attribute in ('title', 'rank', 'year_of_production', 'link'):
            setattr(self, attribute, "")
def create_phantom_driver():
    """Start a headless PhantomJS browser from the local Windows install."""
    phantomjs_binary = r'C:\phantomjs-2.1.1-windows\bin\phantomjs.exe'
    return webdriver.PhantomJS(executable_path=phantomjs_binary)
def get_popular_film_list(url):
    """Scrape the IMDb chart page at `url` and return a list of Film objects.

    Fetches the page with a headless PhantomJS browser, parses the chart
    table with BeautifulSoup, and prints every scraped film for debugging.
    Python 2 syntax (print statements).
    """
    driver = create_phantom_driver()
    # url = 'http://www.imdb.com/chart/top?ref_=nv_mv_250_6'
    # download html
    driver.get(url)
    # print driver.page_source
    # create soup
    soup = BeautifulSoup(driver.page_source,'lxml')
    # soup = BeautifulSoup(open('imdb.html'),'lxml')
    # search
    table = soup.find('table',class_='chart')
    film_list =[]
    for td in table.find_all('td',class_='titleColumn'):
        a = td.find('a')
        # print a['href']
        new_film = Film()
        # Cell text presumably looks like "<title> (<year>) <rank info>"
        # once newlines are stripped -- TODO confirm against the live page.
        full_des = td.text.strip().replace('\n','')
        # print full_des
        title = full_des.split('(')[0]
        # print title
        year = full_des.split('(')[1][:4]
        # print year
        # The rank text is taken between the closing paren of the year and
        # the next opening paren (if any).
        start_rank = full_des.find(')')
        end_rank = full_des.find('(',start_rank,len(full_des))
        rank = full_des[start_rank+1:end_rank]
        # print rank
        new_film.rank = rank
        new_film.title = title
        new_film.year_of_production = year
        new_film.link = a['href'].strip()
        film_list.append(new_film)
    driver.quit()
    # Debug dump of everything that was scraped.
    for film in film_list:
        print film.title
        print film.rank
        print film.year_of_production
        print film.link
        print "\n"
    return film_list
# when ever we have the film list
def poster_scrap(film_list):
    """Download a poster JPEG for every Film in `film_list`.

    For each film: open its IMDb page, follow the poster link, locate the
    full-size image in the photo viewer, and save it as "<title>.jpg" in
    the current directory. Python 2 syntax (print statements and the
    old except clause).
    """
    driver = create_phantom_driver()
    for film in film_list:
        url = 'http://www.imdb.com' + film.link
        print film.title
        driver.get(url)
        soup = BeautifulSoup(driver.page_source, 'lxml')
        div = soup.find('div', class_='poster')
        # find the link lead to poster image
        a = div.find('a')
        # link to download poster image
        poster_url = 'http://www.imdb.com' + a['href']
        print poster_url
        driver.get(poster_url)
        soup = BeautifulSoup(driver.page_source, 'lxml')
        # print soup.prettify()
        divs = soup.find_all('div',class_='pswp__zoom-wrap')
        # Presumably the large image sits in the second zoom wrapper; fall
        # back to the first when that lookup fails -- TODO confirm against
        # the current page markup.
        try:
            imgs = divs[1].find_all('img')
            download_link = imgs[1]['src']
            print download_link
        except Exception, e:
            imgs = divs[0].find_all('img')
            download_link = imgs[1]['src']
            print download_link
        # ':' is stripped from the title so it is a valid file name.
        f = open('{0}.jpg'.format(film.title.encode('utf8').replace(':','')),'wb')
        f.write(requests.get(download_link).content)
        # time.sleep(2)
        f.close()
    driver.quit()
# url for current popular and hot film
url = 'http://www.imdb.com/chart/moviemeter/?ref_=nv_mv_mpm_7'
# get_popular_film_list(url)
# Scrape the chart, then download a poster image for every listed film.
poster_scrap(get_popular_film_list(url))
| 20.5 | 96 | 0.682927 | 149 | 0.055063 | 0 | 0 | 0 | 0 | 0 | 0 | 785 | 0.290096 |
df38d7c15904593405fbc7ec4f71c2079a0aa639 | 1,409 | py | Python | mqtt_io/modules/sensor/mcp3008.py | DominicWindisch/mqtt-io | f3f8b17232e4c11401d7b5bf124e8722ec3a0f3a | [
"MIT"
] | 231 | 2016-12-27T09:51:11.000Z | 2021-03-03T18:14:37.000Z | mqtt_io/modules/sensor/mcp3008.py | DominicWindisch/mqtt-io | f3f8b17232e4c11401d7b5bf124e8722ec3a0f3a | [
"MIT"
] | 173 | 2017-02-26T16:33:42.000Z | 2021-03-02T18:24:55.000Z | mqtt_io/modules/sensor/mcp3008.py | DominicWindisch/mqtt-io | f3f8b17232e4c11401d7b5bf124e8722ec3a0f3a | [
"MIT"
] | 92 | 2017-02-26T16:25:23.000Z | 2021-03-02T16:05:53.000Z | """
MCP3008 analog to digital converter
"""
import logging
from typing import cast
from mqtt_io.types import ConfigType, SensorValueType
from . import GenericSensor
# Python package(s) required by this sensor module.
REQUIREMENTS = ("adafruit-mcp3008",)
# Schema describing this module's configuration options and defaults.
CONFIG_SCHEMA = {
    "spi_port": dict(type="integer", required=False, empty=False, default=0),
    "spi_device": dict(type="integer", required=False, empty=False, default=0),
    # NOTE(review): chip_addr is accepted but never read in this module --
    # confirm whether it is used by the framework or is vestigial.
    "chip_addr": dict(type="integer", required=False, empty=False, default=0),
}
_LOG = logging.getLogger(__name__)
class Sensor(GenericSensor):
    """
    MCP3008 analog-to-digital converter read over SPI.
    """

    # Per-sensor options: which of the chip's 8 channels to sample.
    SENSOR_SCHEMA = {
        "channel": dict(
            type="integer",
            required=True,
            min=0,
            max=7,
        )
    }

    def setup_module(self) -> None:
        """
        Open the configured SPI device and create the MCP3008 driver.
        """
        # pylint: disable=import-outside-toplevel,import-error
        import Adafruit_GPIO.SPI as SPI  # type: ignore
        import Adafruit_MCP3008  # type: ignore

        spi_bus = SPI.SpiDev(self.config["spi_port"], self.config["spi_device"])
        self.mcp = Adafruit_MCP3008.MCP3008(spi=spi_bus)

    def get_value(self, sens_conf: ConfigType) -> SensorValueType:
        """
        Read the configured channel and return its raw ADC value.
        """
        # read_adc() returns the raw conversion result (an integer in
        # 0-1023 for the 10-bit MCP3008).
        raw_reading = self.mcp.read_adc(sens_conf["channel"])
        return cast(int, raw_reading)
| 25.618182 | 79 | 0.625976 | 906 | 0.643009 | 0 | 0 | 0 | 0 | 0 | 0 | 468 | 0.33215 |
df3a00f301a09ed853ce444e464928c561356ba2 | 1,031 | py | Python | aristaflow/worklist_model.py | riuns/aristaflowpy | 58ad48310366484379fc519d649c564c762f89f9 | [
"MIT"
] | null | null | null | aristaflow/worklist_model.py | riuns/aristaflowpy | 58ad48310366484379fc519d649c564c762f89f9 | [
"MIT"
] | null | null | null | aristaflow/worklist_model.py | riuns/aristaflowpy | 58ad48310366484379fc519d649c564c762f89f9 | [
"MIT"
] | 1 | 2021-07-13T10:35:36.000Z | 2021-07-13T10:35:36.000Z | """
Worklist model classes
"""
# AristaFlow REST Libraries
from af_worklist_manager.models.qualified_agent import QualifiedAgent
from af_worklist_manager.models.worklist_revision import WorklistRevision
from af_worklist_manager.models.worklist_update_configuration import WorklistUpdateConfiguration
class Worklist(object):
    """Bundles the handful of worklist properties the client code needs."""

    # Class-level defaults so the attributes exist even without an instance.
    # NOTE: several are annotated with concrete types but defaulted to None
    # (effectively Optional until __init__ runs).
    worklist_id: str = None
    revision: WorklistRevision = None
    client_worklist_id: int = None
    wu_conf: WorklistUpdateConfiguration = None
    agent: QualifiedAgent = None

    def __init__(self, worklist_id: str, revision: WorklistRevision,
                 client_worklist_id: int, wu_conf: WorklistUpdateConfiguration,
                 agent: QualifiedAgent):
        """Store the given worklist properties on this instance."""
        self.worklist_id = worklist_id
        self.revision = revision
        self.client_worklist_id = client_worklist_id
        self.wu_conf = wu_conf
        self.agent = agent
| 28.638889 | 96 | 0.706111 | 728 | 0.706111 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.152279 |
df3ca98f6de6dd3416b0ed3855860078017dafec | 163,982 | py | Python | tests/examples/minlplib/arki0019.py | ouyang-w-19/decogo | 52546480e49776251d4d27856e18a46f40c824a1 | [
"MIT"
] | 2 | 2021-07-03T13:19:10.000Z | 2022-02-06T10:48:13.000Z | tests/examples/minlplib/arki0019.py | ouyang-w-19/decogo | 52546480e49776251d4d27856e18a46f40c824a1 | [
"MIT"
] | 1 | 2021-07-04T14:52:14.000Z | 2021-07-15T10:17:11.000Z | tests/examples/minlplib/arki0019.py | ouyang-w-19/decogo | 52546480e49776251d4d27856e18a46f40c824a1 | [
"MIT"
] | null | null | null | # NLP written by GAMS Convert at 04/21/18 13:51:02
#
# Equation counts
# Total E G L N X C B
# 3 2 0 1 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 511 511 0 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 1022 512 510 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x1 = Var(within=Reals,bounds=(0,None),initialize=40)
m.x2 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x3 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x4 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x5 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x6 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x7 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x8 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x9 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x10 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x11 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x12 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x13 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x14 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x15 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x16 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x17 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x18 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x19 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x20 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x21 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x22 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x23 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x24 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x25 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x26 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x27 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x28 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x29 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x30 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x31 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x32 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x33 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x34 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x35 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x36 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x37 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x38 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x39 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x40 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x41 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x42 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x43 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x44 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x45 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x46 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x47 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x48 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x49 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x50 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x51 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x52 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x53 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x54 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x55 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x56 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x57 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x58 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x59 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x60 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x61 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x62 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x63 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x64 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x65 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x66 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x67 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x68 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x69 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x70 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x71 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x72 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x73 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x74 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x75 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x76 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x77 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x78 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x79 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x80 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x81 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x82 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x83 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x84 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x85 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x86 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x87 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x88 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x89 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x90 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x91 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x92 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x93 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x94 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x95 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x96 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x97 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x98 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x99 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x100 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x101 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x102 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x103 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x104 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x105 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x106 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x107 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x108 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x109 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x110 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x111 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x112 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x113 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x114 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x115 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x116 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x117 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x118 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x119 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x120 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x121 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x122 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x123 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x124 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x125 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x126 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x127 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x128 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x129 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x130 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x131 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x132 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x133 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x134 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x135 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x136 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x137 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x138 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x139 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x140 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x141 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x142 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x143 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x144 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x145 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x146 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x147 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x148 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x149 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x150 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x151 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x152 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x153 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x154 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x155 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x156 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x157 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x158 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x159 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x160 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x161 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x162 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x163 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x164 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x165 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x166 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x167 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x168 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x169 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x170 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x171 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x172 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x173 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x174 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x175 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x176 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x177 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x178 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x179 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x180 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x181 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x182 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x183 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x184 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x185 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x186 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x187 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x188 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x189 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x190 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x191 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x192 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x193 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x194 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x195 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x196 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x197 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x198 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x199 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x200 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x201 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x202 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x203 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x204 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
m.x205 = Var(within=Reals,bounds=(5E-5,None),initialize=0.05)
# Declare decision variables m.x206 through m.x510.
# Each is a real-valued variable with the same specification:
#   lower bound 5e-5, no upper bound, initial value 0.05.
# This replaces 305 identical hand-written `m.xN = Var(...)` statements
# with a single loop; the resulting model attributes are identical.
for _idx in range(206, 511):
    setattr(m, 'x%d' % _idx, Var(within=Reals, bounds=(5E-5, None), initialize=0.05))
m.obj = Objective(expr=-1000*(5.1896555300528e-7*log(100 + 0.77*m.x2*(3115.6025 + m.x1)/(0.000697151847870123 + m.x2) -
m.x1) + 2.38853851131484e-6*log(100 + 0.77*m.x3*(3115.6025 + m.x1)/(0.00441135490801732 + m.x3)
- m.x1) + 1.75006368705556e-6*log(100 + 0.77*m.x4*(3115.6025 + m.x1)/(0.000853304974954616 +
m.x4) - m.x1) + 5.4205853966032e-7*log(116.725409814076 - 0.994631725384071*m.x1) +
1.28420497011416e-6*log(100 + 0.77*m.x5*(3115.6025 + m.x1)/(0.00111268459826716 + m.x5) - m.x1)
+ 1.13642040985436e-6*log(100 + 0.77*m.x6*(3115.6025 + m.x1)/(0.000326187439886633 + m.x6) -
m.x1) + 8.3264643528916e-7*log(100 + 0.77*m.x7*(3115.6025 + m.x1)/(0.00019264225638747 + m.x7) -
m.x1) + 2.5790096715064e-7*log(273.722164695496 - 0.944241229522862*m.x1) + 6.1099986822816e-7*
log(100 + 0.77*m.x8*(3115.6025 + m.x1)/(0.000292444426062126 + m.x8) - m.x1) + 7.2368781007106e-6
*log(100 + 0.77*m.x9*(3115.6025 + m.x1)/(0.000340072769701281 + m.x9) - m.x1) +
2.24152501608832e-6*log(122.167672752509 - 0.992884948335833*m.x1) + 5.31045518239344e-6*log(100
+ 0.77*m.x10*(3115.6025 + m.x1)/(0.000928640395934794 + m.x10) - m.x1) + 1.40112029745028e-6*
log(100 + 0.77*m.x11*(3115.6025 + m.x1)/(0.000520524158336466 + m.x11) - m.x1) +
3.31943054456628e-6*log(100 + 0.77*m.x12*(3115.6025 + m.x1)/(0.000148520174019557 + m.x12) - m.x1
) + 6.4780110964124e-7*log(100 + 0.77*m.x13*(3115.6025 + m.x1)/(0.00104750410938401 + m.x13) -
m.x1) + 9.559575451052e-8*log(369.775187215178 - 0.913411551308237*m.x1) + 7.004225000328e-8*log(
463.468636910111 - 0.883339213872723*m.x1) + 2.169464931432e-8*log(448.707925466966 -
0.88807688867018*m.x1) + 5.139736486672e-8*log(809.359927930429 - 0.772320144199901*m.x1) +
5.8738086293776e-7*log(100 + 0.77*m.x14*(3115.6025 + m.x1)/(0.000494204879879725 + m.x14) - m.x1)
+ 1.8193327718832e-7*log(147.784367425591 - 0.984662880638467*m.x1) + 4.3102285130624e-7*log(100
+ 0.77*m.x15*(3115.6025 + m.x1)/(0.000378101608644323 + m.x15) - m.x1) + 1.152774843986e-7*log(
439.112527740056 - 0.891156677483711*m.x1) + 2.7310693619852e-7*log(100 + 0.77*m.x16*(3115.6025
+ m.x1)/(7.68864972863027e-5 + m.x16) - m.x1) + 5.447264353996e-8*log(642.105475210568 -
0.826003004166749*m.x1) + 2.4063538402192e-7*log(100 + 0.77*m.x17*(3115.6025 + m.x1)/(
0.000285184441727289 + m.x17) - m.x1) + 7.453356083372e-8*log(493.516505615775 -
0.873694893486645*m.x1) + 1.7657938873836e-7*log(100 + 0.77*m.x18*(3115.6025 + m.x1)/(
0.000288214605454492 + m.x18) - m.x1) + 4.7303472465e-8*log(440.16955630314 - 0.890817408092611*
m.x1) + 1.120678412982e-7*log(295.420037315374 - 0.937276967355311*m.x1) + 2.2412317111e-8*log(
573.716341402481 - 0.847953536626549*m.x1) + 6.7932840206568e-7*log(100 + 0.77*m.x19*(3115.6025
+ m.x1)/(0.000204377628677949 + m.x19) - m.x1) + 1.60941456154132e-6*log(100 + 0.77*m.x20*(
3115.6025 + m.x1)/(6.19657352352374e-5 + m.x20) - m.x1) + 3.1708525773332e-7*log(100 + 0.77*m.x21
*(3115.6025 + m.x1)/(0.000484370227279492 + m.x21) - m.x1) + 1.7433187894212e-7*log(100 + 0.77*
m.x22*(3115.6025 + m.x1)/(2.04333582846966e-5 + m.x22) - m.x1) + 4.3774137231148e-7*log(
119.000522100771 - 0.993901493499004*m.x1) + 2.01470428953719e-6*log(103.022920309924 -
0.999029747758283*m.x1) + 1.47615824512421e-6*log(115.545995934311 - 0.99501027620362*m.x1) +
4.5722003637812e-7*log(101.873352153701 - 0.99939871913901*m.x1) + 1.08321186770806e-6*log(
111.940081528697 - 0.996167649265689*m.x1) + 9.5855732013751e-7*log(140.246799210435 -
0.987082177777674*m.x1) + 7.0232752660181e-7*log(167.363613619287 - 0.978378623839438*m.x1) +
2.1753644847374e-7*log(120.667450462708 - 0.993366467492979*m.x1) + 5.1537124044456e-7*log(
144.803854659423 - 0.985619521534142*m.x1) + 6.10422200994085e-6*log(100 + 0.77*m.x23*(3115.6025
+ m.x1)/(0.00305512052568459 + m.x23) - m.x1) + 1.89070012630112e-6*log(102.487967250958 -
0.999201449077359*m.x1) + 4.47930681656604e-6*log(100 + 0.77*m.x24*(3115.6025 + m.x1)/(
0.0083426507129411 + m.x24) - m.x1) + 1.18182857846273e-6*log(125.379696264084 -
0.991854000545935*m.x1) + 2.79990082859373e-6*log(100 + 0.77*m.x25*(3115.6025 + m.x1)/(
0.00133426452380755 + m.x25) - m.x1) + 5.4641265701959e-7*log(112.679121242756 -
0.995930443231203*m.x1) + 8.063390050507e-8*log(133.363509020125 - 0.989291474435482*m.x1) +
5.907981841698e-8*log(146.753601064768 - 0.984993720776393*m.x1) + 1.829918287962e-8*log(
144.573259758642 - 0.985693534474105*m.x1) + 4.335307594052e-8*log(207.104998133011 -
0.965623022149645*m.x1) + 4.9544888581316e-7*log(126.716260334795 - 0.991425009982886*m.x1) +
1.5345859077612e-7*log(105.414841252999 - 0.998262024358692*m.x1) + 3.6356273231584e-7*log(
134.800988433026 - 0.988830093558782*m.x1) + 9.723520939885e-8*log(143.170525043829 -
0.986143763511607*m.x1) + 2.3036250546307e-7*log(261.936397566447 - 0.948024050704014*m.x1) +
4.594703752211e-8*log(175.505916825191 - 0.975765227809006*m.x1) + 2.0297313110372e-7*log(
145.922600022893 - 0.985260443197458*m.x1) + 6.286818655627e-8*log(151.27829160512 -
0.983541452542447*m.x1) + 1.4894264850651e-7*log(145.448936430143 - 0.985412472730349*m.x1) +
3.989992559625e-8*log(143.324491011074 - 0.986094345793125*m.x1) + 9.452791299495e-8*log(
123.450262944899 - 0.992473281509788*m.x1) + 1.890452726975e-8*log(163.953187166908 -
0.979473252070215*m.x1) + 5.7300555932538e-7*log(163.598133703279 - 0.979587211878512*m.x1) +
1.35752235328037e-6*log(100 + 0.77*m.x26*(3115.6025 + m.x1)/(0.000556683176287833 + m.x26) - m.x1
) + 2.6745770515237e-7*log(127.252545117543 - 0.991252881226812*m.x1) + 1.4704690028817e-7*log(
100 + 0.77*m.x27*(3115.6025 + m.x1)/(0.000183567688642283 + m.x27) - m.x1) + 8.573813856476e-8*
log(175.098075010055 - 0.975896130841449*m.x1) + 3.9460970899603e-7*log(112.189454534394 -
0.996087609207402*m.x1) + 2.8912748067577e-7*log(161.708775393222 - 0.980193630158783*m.x1) +
8.955332375044e-8*log(107.565006489971 - 0.997571896129249*m.x1) + 2.1216310607822e-7*log(
147.609223184625 - 0.984719095845948*m.x1) + 1.8774766456787e-7*log(254.987449685228 -
0.950254421196148*m.x1) + 1.3756126014697e-7*log(351.179935246506 - 0.919379980197568*m.x1) +
4.260773904838e-8*log(181.517913158427 - 0.973835586164016*m.x1) + 1.0094309932872e-7*log(
271.59122797394 - 0.944925186067883*m.x1) + 1.19560239361145e-6*log(249.052415932832 -
0.952159360530481*m.x1) + 3.7032165490144e-7*log(110.039136596835 - 0.99677778644842*m.x1) +
8.7733865886348e-7*log(156.821256781618 - 0.981762353579567*m.x1) + 2.3147865116101e-7*log(
199.523689768056 - 0.968056358355068*m.x1) + 5.4840209400801e-7*log(415.971843439801 -
0.898584032000295*m.x1) + 1.0702302104483e-7*log(150.509314160132 - 0.983788267546925*m.x1) +
1.579334504759e-8*log(229.558267186124 - 0.958416304009859*m.x1) + 1.157165846826e-8*log(
278.639758958691 - 0.942662852864352*m.x1) + 3.58416630594e-9*log(270.755418624658 -
0.945193451788327*m.x1) + 8.49134275924e-9*log(481.615742418209 - 0.877514624404683*m.x1) +
9.704101076692e-8*log(204.592823836379 - 0.966429342691701*m.x1) + 3.005714047644e-8*log(
121.768604799103 - 0.993013035263933*m.x1) + 7.120915200608e-8*log(234.90398420317 -
0.956700514843222*m.x1) + 1.904495755745e-8*log(265.661094609679 - 0.946828552548126*m.x1) +
4.511991249359e-8*log(643.645574905152 - 0.825508685750139*m.x1) + 8.99940859807e-9*log(
378.882844396923 - 0.910488310239537*m.x1) + 3.975529739764e-8*log(275.639640374999 -
0.943625786545299*m.x1) + 1.231366654199e-8*log(294.870657652889 - 0.937453299112166*m.x1) +
2.917262622087e-8*log(273.926918368189 - 0.944175510718011*m.x1) + 7.81499206125e-9*log(
266.221094994647 - 0.946648811908885*m.x1) + 1.851469341315e-8*log(192.176519000713 -
0.970414544538107*m.x1) + 3.70273198075e-9*log(339.418986006275 - 0.923154835699909*m.x1) +
1.1223163527906e-7*log(338.189154866992 - 0.923549568705574*m.x1) + 2.6589088213369e-7*log(
739.647257673155 - 0.794695485809517*m.x1) + 5.238555739769e-8*log(206.622092527959 -
0.965778018046924*m.x1) + 2.880131582229e-8*log(1358.00873885992 - 0.596222965265974*m.x1) +
8.2928697456672e-7*log(117.708072269849 - 0.994316324925966*m.x1) + 3.81679258712616e-6*log(
102.816009888922 - 0.999096158804301*m.x1) + 2.79653439796344e-6*log(114.487098874626 -
0.995350145317118*m.x1) + 8.6618867820768e-7*log(101.745069142043 - 0.999439893522347*m.x1) +
2.05211010291984e-6*log(111.125650567867 - 0.996429053267268*m.x1) + 1.81595606503464e-6*log(100
+ 0.77*m.x28*(3115.6025 + m.x1)/(0.00314596400504048 + m.x28) - m.x1) + 1.33053694836984e-6*log(
100 + 0.77*m.x29*(3115.6025 + m.x1)/(0.00185796732288463 + m.x29) - m.x1) + 4.1211581683536e-7*
log(119.262530850324 - 0.99381739780658*m.x1) + 9.7635426715584e-7*log(141.787032240127 -
0.986587816565134*m.x1) + 1.15642525996044e-5*log(100 + 0.77*m.x30*(3115.6025 + m.x1)/(
0.00327988316455864 + m.x30) - m.x1) + 3.58187068147968e-6*log(102.317637448576 -
0.999256119017565*m.x1) + 8.48590293956256e-6*log(100 + 0.77*m.x31*(3115.6025 + m.x1)/(
0.00895641248557199 + m.x31) - m.x1) + 2.23893629499672e-6*log(100 + 0.77*m.x32*(3115.6025 + m.x1
)/(0.00502027382308052 + m.x32) - m.x1) + 5.30432221878072e-6*log(100 + 0.77*m.x33*(3115.6025 +
m.x1)/(0.00143242523884508 + m.x33) - m.x1) + 1.03516123415976e-6*log(111.814530003852 -
0.996207946936796*m.x1) + 1.5275833546248e-7*log(131.106832242588 - 0.990015789163544*m.x1) +
1.1192481901872e-7*log(143.607926262619 - 0.986003372939064*m.x1) + 3.466721440368e-8*log(
141.571690034213 - 0.986656933920738*m.x1) + 8.213101036128e-8*log(200.071509157679 -
0.967880527391514*m.x1) + 9.3861200598624e-7*log(124.904463963847 - 0.992006533579349*m.x1) +
2.9072237287968e-7*log(105.044555247892 - 0.998380873282811*m.x1) + 6.8875792286976e-7*log(
132.448415533481 - 0.989585187605453*m.x1) + 1.842089821164e-7*log(140.261800370076 -
0.987077362927371*m.x1) + 4.3641436997448e-7*log(100 + 0.77*m.x34*(3115.6025 + m.x1)/(
0.000741543429815745 + m.x34) - m.x1) + 8.704518728904e-8*log(170.483695524706 -
0.977377186106153*m.x1) + 3.8452607968608e-7*log(142.831818536397 - 0.986252476515731*m.x1) +
1.1910176081928e-7*log(147.834377406707 - 0.984646829174548*m.x1) + 2.8216706525064e-7*log(
142.389460178795 - 0.986394458157356*m.x1) + 7.558912791e-8*log(140.405570410136 -
0.987031217746765*m.x1) + 1.790800960068e-7*log(121.857913496112 - 0.992984370279549*m.x1) +
3.5814019914e-8*log(159.679650282215 - 0.980844908719192*m.x1) + 1.08554063371632e-6*log(100 +
0.77*m.x35*(3115.6025 + m.x1)/(0.00197115089250469 + m.x35) - m.x1) + 2.57178251010168e-6*log(100
+ 0.77*m.x36*(3115.6025 + m.x1)/(0.00059763788778525 + m.x36) - m.x1) + 5.0669003470968e-7*log(
125.40476939922 - 0.991845952941936*m.x1) + 2.7857563112088e-7*log(100 + 0.77*m.x37*(3115.6025 +
m.x1)/(0.000197072608583864 + m.x37) - m.x1) + 1.29788901763516e-6*log(100 + 0.77*m.x38*(
3115.6025 + m.x1)/(0.00103350340834749 + m.x38) - m.x1) + 5.97353308727723e-6*log(100 + 0.77*
m.x39*(3115.6025 + m.x1)/(0.00653968048251619 + m.x39) - m.x1) + 4.37676147566657e-6*log(100 +
0.77*m.x40*(3115.6025 + m.x1)/(0.00126499499738795 + m.x40) - m.x1) + 1.35564262688804e-6*log(
111.307816465541 - 0.996370584352291*m.x1) + 3.21168816976702e-6*log(100 + 0.77*m.x41*(3115.6025
+ m.x1)/(0.00164951628291331 + m.x41) - m.x1) + 2.84209146604267e-6*log(100 + 0.77*m.x42*(
3115.6025 + m.x1)/(0.000483561553932482 + m.x42) - m.x1) + 2.08237841158577e-6*log(100 + 0.77*
m.x43*(3115.6025 + m.x1)/(0.000285585456276797 + m.x43) - m.x1) + 6.4498853722358e-7*log(100 +
0.77*m.x44*(3115.6025 + m.x1)/(0.000949480964356804 + m.x44) - m.x1) + 1.52805906703752e-6*log(
100 + 0.77*m.x45*(3115.6025 + m.x1)/(0.00043353870754387 + m.x45) - m.x1) + 1.80988209226695e-5*
log(100 + 0.77*m.x46*(3115.6025 + m.x1)/(0.000504146073263974 + m.x46) - m.x1) +
5.60586475207904e-6*log(100 + 0.77*m.x47*(3115.6025 + m.x1)/(0.00794759125316883 + m.x47) - m.x1)
+ 1.32809999044427e-5*log(100 + 0.77*m.x48*(3115.6025 + m.x1)/(0.00137667714323634 + m.x48) -
m.x1) + 3.50408352908141e-6*log(100 + 0.77*m.x49*(3115.6025 + m.x1)/(0.00077165899138256 + m.x49)
- m.x1) + 8.30161544180841e-6*log(100 + 0.77*m.x50*(3115.6025 + m.x1)/(0.00022017600114885 +
m.x50) - m.x1) + 1.62009586367803e-6*log(100 + 0.77*m.x51*(3115.6025 + m.x1)/(0.00155288847130484
+ m.x51) - m.x1) + 2.3907690827119e-7*log(288.890309631698 - 0.939372782750143*m.x1) +
1.7516975167866e-7*log(357.894740601638 - 0.917224761309686*m.x1) + 5.425648566354e-8*log(
346.901414191408 - 0.920753236591829*m.x1) + 1.2854046864884e-7*log(100 + 0.77*m.x52*(3115.6025
+ m.x1)/(0.000176557293751724 + m.x52) - m.x1) + 1.46898992960372e-6*log(100 + 0.77*m.x53*(
3115.6025 + m.x1)/(0.000732641574913837 + m.x53) - m.x1) + 4.5499976065404e-7*log(
132.443353443726 - 0.989586812360137*m.x1) + 1.07795174808928e-6*log(100 + 0.77*m.x54*(3115.6025
+ m.x1)/(0.000560522506580771 + m.x54) - m.x1) + 2.8829925245545e-7*log(100 + 0.77*m.x55*(
3115.6025 + m.x1)/(0.000450253536640847 + m.x55) - m.x1) + 6.8301738155719e-7*log(100 + 0.77*
m.x56*(3115.6025 + m.x1)/(0.00011398156261661 + m.x56) - m.x1) + 1.3623148088087e-7*log(100 +
0.77*m.x57*(3115.6025 + m.x1)/(0.000253899142154748 + m.x57) - m.x1) + 6.0180877202324e-7*log(100
+ 0.77*m.x58*(3115.6025 + m.x1)/(0.000422776032844626 + m.x58) - m.x1) + 1.8640214074159e-7*log(
100 + 0.77*m.x59*(3115.6025 + m.x1)/(0.00037775801625084 + m.x59) - m.x1) + 4.4161013781567e-7*
log(100 + 0.77*m.x60*(3115.6025 + m.x1)/(0.000427268145358538 + m.x60) - m.x1) +
1.1830198951125e-7*log(340.563369854733 - 0.922787528301594*m.x1) + 2.8027220613915e-7*log(
235.410795115064 - 0.956537846174195*m.x1) + 5.605131221075e-8*log(441.491937984964 -
0.890392969582941*m.x1) + 1.69894295932146e-6*log(100 + 0.77*m.x61*(3115.6025 + m.x1)/(
0.000302982738228342 + m.x61) - m.x1) + 4.02500989160129e-6*log(100 + 0.77*m.x62*(3115.6025 +
m.x1)/(9.1862050946334e-5 + m.x62) - m.x1) + 7.9300344942529e-7*log(100 + 0.77*m.x63*(3115.6025
+ m.x1)/(0.000718062043907347 + m.x63) - m.x1) + 4.3598930563389e-7*log(100 + 0.77*m.x64*(
3115.6025 + m.x1)/(3.02917441813244e-5 + m.x64) - m.x1) + 2.386749707546e-7*log(100 + 0.77*m.x65*
(3115.6025 + m.x1)/(0.000480318835957082 + m.x65) - m.x1) + 1.09850134760005e-6*log(
138.827733020974 - 0.98753764865031*m.x1) + 8.0486343825295e-7*log(100 + 0.77*m.x66*(3115.6025 +
m.x1)/(0.000587904132419288 + m.x66) - m.x1) + 2.492955560374e-7*log(124.199690905835 -
0.992232741209498*m.x1) + 5.906125790237e-7*log(100 + 0.77*m.x67*(3115.6025 + m.x1)/(
0.000766609702979113 + m.x67) - m.x1) + 5.2264568720645e-7*log(100 + 0.77*m.x68*(3115.6025 + m.x1
)/(0.000224734355806163 + m.x68) - m.x1) + 3.8293844830495e-7*log(100 + 0.77*m.x69*(3115.6025 +
m.x1)/(0.000132725323223145 + m.x69) - m.x1) + 1.186100029873e-7*log(344.164724732999 -
0.921631618689162*m.x1) + 2.810020334412e-7*log(100 + 0.77*m.x70*(3115.6025 + m.x1)/(
0.000201486328606083 + m.x70) - m.x1) + 3.32827806978575e-6*log(100 + 0.77*m.x71*(3115.6025 +
m.x1)/(0.00023430097385907 + m.x71) - m.x1) + 1.0308890726224e-6*log(132.041291503744 -
0.989715860253757*m.x1) + 2.4423060991458e-6*log(100 + 0.77*m.x72*(3115.6025 + m.x1)/(
0.000639808207294919 + m.x72) - m.x1) + 6.4438254924835e-7*log(100 + 0.77*m.x73*(3115.6025 + m.x1
)/(0.000358627117726995 + m.x73) - m.x1) + 1.52662345999335e-6*log(100 + 0.77*m.x74*(3115.6025 +
m.x1)/(0.000102326397497417 + m.x74) - m.x1) + 2.9792711674805e-7*log(255.436528988183 -
0.950110282364909*m.x1) + 4.396498723265e-8*log(472.644004866244 - 0.880394240001334*m.x1) +
3.22127969271e-8*log(593.778869946635 - 0.841514163008075*m.x1) + 9.9774826299e-9*log(
574.960968571768 - 0.847554054610058*m.x1) + 2.36379167854e-8*log(1008.34110667307 -
0.708454109061388*m.x1) + 2.701395294382e-7*log(100 + 0.77*m.x75*(3115.6025 + m.x1)/(
0.000340493844136466 + m.x75) - m.x1) + 8.36720652474e-8*log(168.737850847238 - 0.977937541503694
*m.x1) + 1.982296625168e-7*log(100 + 0.77*m.x76*(3115.6025 + m.x1)/(0.000260501819069085 + m.x76)
- m.x1) + 5.301671769575e-8*log(562.675437637781 - 0.851497282584097*m.x1) + 1.2560330764265e-7*
log(100 + 0.77*m.x77*(3115.6025 + m.x1)/(5.29727246513075e-5 + m.x77) - m.x1) + 2.505225352345e-8
*log(813.995783375217 - 0.770832195899439*m.x1) + 1.106694710494e-7*log(100 + 0.77*m.x78*(
3115.6025 + m.x1)/(0.000196484395045369 + m.x78) - m.x1) + 3.427837425665e-8*log(631.785089268111
- 0.829315488972643*m.x1) + 8.120978396145e-8*log(582.55897619313 - 0.845115358524353*m.x1) +
2.175511426875e-8*log(564.030854811577 - 0.851062240830922*m.x1) + 5.154058605525e-8*log(
373.579196696883 - 0.91219059661915*m.x1) + 1.030754180125e-8*log(731.294065299251 -
0.797376569925319*m.x1) + 3.124266833451e-7*log(100 + 0.77*m.x79*(3115.6025 + m.x1)/(
0.000140810678480119 + m.x79) - m.x1) + 7.4017817017615e-7*log(100 + 0.77*m.x80*(3115.6025 + m.x1
)/(4.26927216908973e-5 + m.x80) - m.x1) + 1.4582916761615e-7*log(412.601143991731 -
0.899665909244927*m.x1) + 8.017614245715e-8*log(100 + 0.77*m.x81*(3115.6025 + m.x1)/(
1.40780332089543e-5 + m.x81) - m.x1) + 4.674802317404e-8*log(390.022082245375 - 0.906913002462485
*m.x1) + 2.1515773644787e-7*log(151.027826575194 - 0.983621843102516*m.x1) + 1.5764440881433e-7*
log(342.309228866923 - 0.922227168303106*m.x1) + 4.882822188676e-8*log(131.865551273648 -
0.989772266753012*m.x1) + 1.1568020913038e-7*log(290.304903067674 - 0.938918747475753*m.x1) +
1.0236788809523e-7*log(644.934834943347 - 0.825094878135658*m.x1) + 7.500415899913e-8*log(
897.171525613417 - 0.744135676610409*m.x1) + 2.323152340102e-8*log(412.345505235737 -
0.899747960391052*m.x1) + 5.503840444488e-8*log(692.287531223752 - 0.80989631019241*m.x1) +
6.5189248727705e-7*log(627.577976915564 - 0.830665825657938*m.x1) + 2.0191487236576e-7*log(
142.147090668079 - 0.986472250337429*m.x1) + 4.7836177274892e-7*log(324.491484308978 -
0.927946044365744*m.x1) + 1.2621185309029e-7*log(473.12785885208 - 0.880238939706821*m.x1) +
2.9901178435329e-7*log(1041.09237573213 - 0.697942091222442*m.x1) + 5.835343234307e-8*log(
301.153639757546 - 0.935436680463074*m.x1) + 8.61119301911e-9*log(568.894701148243 -
0.849501115386753*m.x1) + 6.30935272554e-9*log(711.870091064714 - 0.803610989827902*m.x1) +
1.95423754626e-9*log(689.945276050275 - 0.810648092608003*m.x1) + 4.62983562196e-9*log(
1169.90196090969 - 0.656598696107834*m.x1) + 5.291082237268e-8*log(489.770582229685 -
0.874897204560054*m.x1) + 1.638841154076e-8*log(189.977742290068 - 0.971120275359239*m.x1) +
3.882621134432e-8*log(585.248078101017 - 0.844252250375002*m.x1) + 1.038410831105e-8*log(
675.57524783692 - 0.815260371681907*m.x1) + 2.460126555311e-8*log(1431.31698254577 -
0.572693569688118*m.x1) + 4.90685439103e-9*log(960.954196259431 - 0.723663658550977*m.x1) +
2.167625277556e-8*log(703.571997904625 - 0.80627438901316*m.x1) + 6.71392659671e-9*log(
755.836939027607 - 0.789499161389296*m.x1) + 1.590613733223e-8*log(698.810231655749 -
0.807802750300865*m.x1) + 4.26106090125e-9*log(677.162823229841 - 0.814750815217974*m.x1) +
1.009498609635e-8*log(448.644758163218 - 0.888097163176876*m.x1) + 2.01888451675e-9*log(
868.998821335345 - 0.753178134458634*m.x1) + 6.119338691874e-8*log(866.013581015413 -
0.754136292734579*m.x1) + 1.4497484232601e-7*log(1557.17087140146 - 0.532298850254018*m.x1) +
2.856279938201e-8*log(496.376909904565 - 0.872776803233222*m.x1) + 1.570368335541e-8*log(
2077.53138725867 - 0.365281229791454*m.x1) + 4.5216208910888e-7*log(100 + 0.77*m.x82*(3115.6025
+ m.x1)/(0.000609790965265749 + m.x82) - m.x1) + 2.08107562619314e-6*log(100 + 0.77*m.x83*(
3115.6025 + m.x1)/(0.0038585630601252 + m.x83) - m.x1) + 1.52478800997526e-6*log(100 + 0.77*m.x84
*(3115.6025 + m.x1)/(0.000746376368266585 + m.x84) - m.x1) + 4.7228244782872e-7*log(
119.10247354294 - 0.993868770633308*m.x1) + 1.11889661802836e-6*log(100 + 0.77*m.x85*(3115.6025
+ m.x1)/(0.000973252839086021 + m.x85) - m.x1) + 9.9013551795506e-7*log(100 + 0.77*m.x86*(
3115.6025 + m.x1)/(0.00028531252471569 + m.x86) - m.x1) + 7.2546462764086e-7*log(100 + 0.77*m.x87
*(3115.6025 + m.x1)/(0.000168502038447399 + m.x87) - m.x1) + 2.2470285246244e-7*log(
296.570952433757 - 0.936907563646596*m.x1) + 5.3234935394736e-7*log(100 + 0.77*m.x88*(3115.6025
+ m.x1)/(0.000255797885926617 + m.x88) - m.x1) + 6.3053162231951e-6*log(100 + 0.77*m.x89*(
3115.6025 + m.x1)/(0.000297457868225251 + m.x89) - m.x1) + 1.95298633636672e-6*log(
125.309989139262 - 0.991876374107653*m.x1) + 4.62687069591624e-6*log(100 + 0.77*m.x90*(3115.6025
+ m.x1)/(0.000812271422864163 + m.x90) - m.x1) + 1.22076210476638e-6*log(100 + 0.77*m.x91*(
3115.6025 + m.x1)/(0.000455296690277536 + m.x91) - m.x1) + 2.89213925855238e-6*log(100 + 0.77*
m.x92*(3115.6025 + m.x1)/(0.000129908943874298 + m.x92) - m.x1) + 5.6441338228754e-7*log(100 +
0.77*m.x93*(3115.6025 + m.x1)/(0.000916240190616427 + m.x93) - m.x1) + 8.329026044042e-8*log(
403.534122457219 - 0.902576107684719*m.x1) + 6.102611224188e-8*log(506.712574347542 -
0.869459414560252*m.x1) + 1.890202133772e-8*log(490.532628751666 - 0.874652614140711*m.x1) +
4.478127640312e-8*log(878.027135329109 - 0.750280359792654*m.x1) + 5.1177068795896e-7*log(100 +
0.77*m.x94*(3115.6025 + m.x1)/(0.000432275510222909 + m.x94) - m.x1) + 1.5851404821672e-7*log(
154.474683040325 - 0.982515522105171*m.x1) + 3.7553974781504e-7*log(100 + 0.77*m.x95*(3115.6025
+ m.x1)/(0.000330721270564153 + m.x95) - m.x1) + 1.004384739431e-7*log(479.999644229205 -
0.878033335693753*m.x1) + 2.3795144418842e-7*log(100 + 0.77*m.x96*(3115.6025 + m.x1)/(
6.72517637862611e-5 + m.x96) - m.x1) + 4.746069206266e-8*log(700.334669156876 - 0.807313458903414
*m.x1) + 2.0965976898232e-7*log(100 + 0.77*m.x97*(3115.6025 + m.x1)/(0.000249447658398885 + m.x97
) - m.x1) + 6.493928234762e-8*log(539.563417544517 - 0.858915436887563*m.x1) + 1.5384933516906e-7
*log(100 + 0.77*m.x98*(3115.6025 + m.x1)/(0.000252098108899402 + m.x98) - m.x1) +
4.12143673275e-8*log(481.160537164744 - 0.877660729452893*m.x1) + 9.76419898197e-8*log(
320.839421249599 - 0.929118229540001*m.x1) + 1.95273078685e-8*log(626.683196813309 -
0.830953018938292*m.x1) + 5.9188234689228e-7*log(100 + 0.77*m.x99*(3115.6025 + m.x1)/(
0.000178766838029624 + m.x99) - m.x1) + 1.40224384099222e-6*log(100 + 0.77*m.x100*(3115.6025 +
m.x1)/(5.42007392190642e-5 + m.x100) - m.x1) + 2.7626868822422e-7*log(100 + 0.77*m.x101*(
3115.6025 + m.x1)/(0.000423673249007549 + m.x101) - m.x1) + 1.5189113443902e-7*log(100 + 0.77*
m.x102*(3115.6025 + m.x1)/(1.78728311631289e-5 + m.x102) - m.x1) + 7.0766353293472e-7*log(100 +
0.77*m.x103*(3115.6025 + m.x1)/(0.000276679422907153 + m.x103) - m.x1) + 3.25702080163016e-6*log(
100 + 0.77*m.x104*(3115.6025 + m.x1)/(0.00175073928860366 + m.x104) - m.x1) + 2.38639394169944e-6
*log(100 + 0.77*m.x105*(3115.6025 + m.x1)/(0.00033865208670899 + m.x105) - m.x1) +
7.3915322319968e-7*log(141.701349131272 - 0.986615317861867*m.x1) + 1.75114710581584e-6*log(100
+ 0.77*m.x106*(3115.6025 + m.x1)/(0.000441592363940183 + m.x106) - m.x1) + 1.54962748005064e-6*
log(100 + 0.77*m.x107*(3115.6025 + m.x1)/(0.000129454369092067 + m.x107) - m.x1) +
1.13540005626584e-6*log(100 + 0.77*m.x108*(3115.6025 + m.x1)/(7.64541448002395e-5 + m.x108) -
m.x1) + 3.5167480481936e-7*log(100 + 0.77*m.x109*(3115.6025 + m.x1)/(0.000254185756096936 +
m.x109) - m.x1) + 8.3316189845184e-7*log(100 + 0.77*m.x110*(3115.6025 + m.x1)/(
0.000116062741972903 + m.x110) - m.x1) + 9.8682363299644e-6*log(100 + 0.77*m.x111*(3115.6025 +
m.x1)/(0.000134965055252807 + m.x111) - m.x1) + 3.05655260327168e-6*log(100 + 0.77*m.x112*(
3115.6025 + m.x1)/(0.00212765138815038 + m.x112) - m.x1) + 7.24135822522656e-6*log(100 + 0.77*
m.x113*(3115.6025 + m.x1)/(0.000368550538337489 + m.x113) - m.x1) + 1.91057332036472e-6*log(100
+ 0.77*m.x114*(3115.6025 + m.x1)/(0.000206580996920193 + m.x114) - m.x1) + 4.52638895374872e-6*
log(100 + 0.77*m.x115*(3115.6025 + m.x1)/(5.89433652988846e-5 + m.x115) - m.x1) +
8.8334422050376e-7*log(100 + 0.77*m.x116*(3115.6025 + m.x1)/(0.000415724111415154 + m.x116) -
m.x1) + 1.3035475857448e-7*log(100 + 0.77*m.x117*(3115.6025 + m.x1)/(0.000156618059539867 +
m.x117) - m.x1) + 9.550989618672e-8*log(100 + 0.77*m.x118*(3115.6025 + m.x1)/(
0.000111130510572367 + m.x118) - m.x1) + 2.958291179568e-8*log(819.669890654164 -
0.769011004884556*m.x1) + 7.008565519328e-8*log(100 + 0.77*m.x119*(3115.6025 + m.x1)/(
4.72661916261924e-5 + m.x119) - m.x1) + 8.0095492704224e-7*log(100 + 0.77*m.x120*(3115.6025 +
m.x1)/(0.000196135635845714 + m.x120) - m.x1) + 2.4808495467168e-7*log(216.865217149611 -
0.962490331436821*m.x1) + 5.8774450821376e-7*log(100 + 0.77*m.x121*(3115.6025 + m.x1)/(
0.000150057602514548 + m.x121) - m.x1) + 1.571928452764e-7*log(100 + 0.77*m.x122*(3115.6025 +
m.x1)/(0.00012053747251679 + m.x122) - m.x1) + 3.7240972588648e-7*log(100 + 0.77*m.x123*(
3115.6025 + m.x1)/(3.05140289931351e-5 + m.x123) - m.x1) + 7.427911766504e-8*log(100 + 0.77*
m.x124*(3115.6025 + m.x1)/(6.79713947342661e-5 + m.x124) - m.x1) + 3.2813138563808e-7*log(100 +
0.77*m.x125*(3115.6025 + m.x1)/(0.000113181463981294 + m.x125) - m.x1) + 1.0163426585128e-7*log(
100 + 0.77*m.x126*(3115.6025 + m.x1)/(0.000101129680938306 + m.x126) - m.x1) + 2.4078437066664e-7
*log(100 + 0.77*m.x127*(3115.6025 + m.x1)/(0.000114384048402347 + m.x127) - m.x1) +
6.450320691e-8*log(100 + 0.77*m.x128*(3115.6025 + m.x1)/(0.000120101258419391 + m.x128) - m.x1)
+ 1.528161629268e-7*log(100 + 0.77*m.x129*(3115.6025 + m.x1)/(0.000223759740569752 + m.x129) -
m.x1) + 3.0561526514e-8*log(1018.11416141955 - 0.705317298525872*m.x1) + 9.2633496432432e-7*log(
100 + 0.77*m.x130*(3115.6025 + m.x1)/(8.11115749466991e-5 + m.x130) - m.x1) + 2.19460422369368e-6
*log(100 + 0.77*m.x131*(3115.6025 + m.x1)/(2.45924096985208e-5 + m.x131) - m.x1) +
4.3237874350168e-7*log(100 + 0.77*m.x132*(3115.6025 + m.x1)/(0.000192232546419447 + m.x132) -
m.x1) + 2.3771965719288e-7*log(100 + 0.77*m.x133*(3115.6025 + m.x1)/(8.10940944291689e-6 + m.x133
) - m.x1) + 3.69643743012e-8*log(259.243438352797 - 0.948888396914306*m.x1) + 1.701285009261e-7*
log(126.655917760543 - 0.991444377849696*m.x1) + 1.246518363399e-7*log(231.702010264842 -
0.957728237069767*m.x1) + 3.86092191228e-8*log(116.581295018683 - 0.994677981219144*m.x1) +
9.14701041714e-8*log(202.310059251124 - 0.967162030698356*m.x1) + 8.09438490669e-8*log(
416.457222584263 - 0.898428242182928*m.x1) + 5.93069314839e-8*log(590.940569459868 -
0.842425158710115*m.x1) + 1.83695195706e-8*log(272.323136981283 - 0.944690268742151*m.x1) +
4.35197051064e-8*log(447.679121507101 - 0.888407098945677*m.x1) + 5.154613236615e-7*log(
405.179826008594 - 0.902047894104401*m.x1) + 1.596571664928e-7*log(121.977097201104 -
0.992946116457056*m.x1) + 3.782479433076e-7*log(221.559135870346 - 0.960983746845002*m.x1) +
9.97976355387e-8*log(308.580883515259 - 0.93305279363614*m.x1) + 2.364331744287e-7*log(
700.293486922057 - 0.80732667696792*m.x1) + 4.61409481821e-8*log(208.388630306009 -
0.96521102088408*m.x1) + 6.8090015433e-9*log(367.695674982814 - 0.914079002381461*m.x1) +
4.9889013462e-9*log(460.789282500999 - 0.884199193414115*m.x1) + 1.5452454078e-9*log(
446.118879639336 - 0.888907882299062*m.x1) + 3.6608815788e-9*log(805.013912804639 -
0.773715063842503*m.x1) + 4.18373935404e-8*log(318.676489558247 - 0.929812455357111*m.x1) +
1.29585667428e-8*log(147.377954877314 - 0.984793324925977*m.x1) + 3.07004769696e-8*log(
378.042793763629 - 0.910757937264581*m.x1) + 8.2108726815e-9*log(436.583029316824 -
0.89196855846764*m.x1) + 1.94525955633e-8*log(1040.3513804883 - 0.698179924913946*m.x1) +
3.8799245409e-9*log(638.457618384797 - 0.827173839286367*m.x1) + 1.71397433868e-8*log(
455.219495877816 - 0.885986901128171*m.x1) + 5.3088040713e-9*log(490.658159781153 -
0.874612323047901*m.x1) + 1.25772251769e-8*log(452.032892933289 - 0.887009689800516*m.x1) +
3.3692857875e-9*log(437.633465661542 - 0.89163140494927*m.x1) + 7.9822593405e-9*log(
293.861493987256 - 0.937777205536568*m.x1) + 1.5963627525e-9*log(570.412097376226 -
0.849014083992991*m.x1) + 4.83865435422e-8*log(568.252989540977 - 0.849707082485337*m.x1) +
1.146338170503e-7*log(1166.15178222077 - 0.657802372985397*m.x1) + 2.25850407303e-8*log(
322.704323762715 - 0.928519660719647*m.x1) + 1.24171417323e-8*log(1798.72481850053 -
0.454768437725759*m.x1) + 3.5753099129492e-7*log(120.525902665119 - 0.993411899411071*m.x1) +
1.64553607990201e-6*log(103.26736425089 - 0.998951289758276*m.x1) + 1.20567155418859e-6*log(
116.796000913132 - 0.99460906809738*m.x1) + 3.7344044494348e-7*log(102.024916542162 -
0.99935007224376*m.x1) + 8.8472746087274e-7*log(112.901715639768 - 0.995858998174585*m.x1) +
7.8291422871929e-7*log(143.446715021488 - 0.986055116138375*m.x1) + 5.7363519347899e-7*log(
172.653107945594 - 0.976680880200348*m.x1) + 1.7767573956946e-7*log(122.32539836666 -
0.992834323901505*m.x1) + 4.2093620145624e-7*log(148.358661660843 - 0.984478552170618*m.x1) +
4.98570316708715e-6*log(100 + 0.77*m.x134*(3115.6025 + m.x1)/(0.00282626697367363 + m.x134) -
m.x1) + 1.54425405766048e-6*log(102.68920156283 - 0.99913685986488*m.x1) + 3.65853242974116e-6*
log(100 + 0.77*m.x135*(3115.6025 + m.x1)/(0.00771771783949399 + m.x135) - m.x1) +
9.6527394924367e-7*log(127.411304883875 - 0.991201924865616*m.x1) + 2.28685562319267e-6*log(100
+ 0.77*m.x136*(3115.6025 + m.x1)/(0.00123431718195035 + m.x136) - m.x1) + 4.4628968445161e-7*
log(113.699934767029 - 0.995602797607516*m.x1) + 6.585879289253e-8*log(136.024511029641 -
0.988437385375817*m.x1) + 4.825421442942e-8*log(150.459788227524 - 0.983804163648115*m.x1) +
1.494609696198e-8*log(148.110144892852 - 0.984558317406392*m.x1) + 3.540919181308e-8*log(
215.360654511061 - 0.9629732436949*m.x1) + 4.0466435773564e-7*log(128.853558453294 -
0.990739011650782*m.x1) + 1.2533931118548e-7*log(105.852231884397 - 0.998121637184334*m.x1) +
2.9694461685536e-7*log(137.574819672958 - 0.987939790241869*m.x1) + 7.941812906915e-8*log(
146.598306222914 - 0.985043565017388*m.x1) + 1.8815158937453e-7*log(274.097414622119 -
0.944120787352649*m.x1) + 3.752784386269e-8*log(181.412433739765 - 0.973869441387415*m.x1) +
1.6578095962588e-7*log(149.564301239472 - 0.984091583814215*m.x1) + 5.134841365733e-8*log(
155.334716319722 - 0.982239481345993*m.x1) + 1.2165085627029e-7*log(149.05385875812 -
0.984255418090684*m.x1) + 3.258878610375e-8*log(146.764254564814 - 0.98499030137355*m.x1) +
7.720690932105e-8*log(125.329071289493 - 0.991870249401362*m.x1) + 1.544051991025e-8*log(
168.982812305731 - 0.977858917398567*m.x1) + 4.6800978523302e-7*log(168.600655860411 -
0.977981576320981*m.x1) + 1.10877413782123e-6*log(100 + 0.77*m.x137*(3115.6025 + m.x1)/(
0.000514983046565565 + m.x137) - m.x1) + 2.1844957890923e-7*log(129.432213686124 -
0.990553283454444*m.x1) + 1.2010247911743e-7*log(100 + 0.77*m.x138*(3115.6025 + m.x1)/(
0.000169816965151329 + m.x138) - m.x1) + 5.595595833396e-7*log(100 + 0.77*m.x139*(3115.6025 +
m.x1)/(0.000969631097815696 + m.x139) - m.x1) + 2.5753725010113e-6*log(119.392186516001 -
0.993775782849063*m.x1) + 1.8869555057667e-6*log(100 + 0.77*m.x140*(3115.6025 + m.x1)/(
0.00118681610349971 + m.x140) - m.x1) + 5.844589276524e-7*log(112.048952823725 -
0.996132705367991*m.x1) + 1.3846568309562e-6*log(100 + 0.77*m.x141*(3115.6025 + m.x1)/(
0.00154757330391728 + m.x141) - m.x1) + 1.2253124072577e-6*log(100 + 0.77*m.x142*(3115.6025 +
m.x1)/(0.000453676607753738 + m.x142) - m.x1) + 8.977769135187e-7*log(100 + 0.77*m.x143*(
3115.6025 + m.x1)/(0.000267935777718076 + m.x143) - m.x1) + 2.780742515298e-7*log(
227.498426217162 - 0.959077441291961*m.x1) + 6.587929193112e-7*log(100 + 0.77*m.x144*(3115.6025
+ m.x1)/(0.000406745260389136 + m.x144) - m.x1) + 7.8029543025795e-6*log(100 + 0.77*m.x145*(
3115.6025 + m.x1)/(0.000472988967941613 + m.x145) - m.x1) + 2.4168594558624e-6*log(
115.979755437715 - 0.994871054495008*m.x1) + 5.7258445613508e-6*log(100 + 0.77*m.x146*(3115.6025
+ m.x1)/(0.00129159609823484 + m.x146) - m.x1) + 1.5107173979271e-6*log(100 + 0.77*m.x147*(
3115.6025 + m.x1)/(0.000723969121833849 + m.x147) - m.x1) + 3.5790798862971e-6*log(100 + 0.77*
m.x148*(3115.6025 + m.x1)/(0.00020656874083075 + m.x148) - m.x1) + 6.984727924593e-7*log(100 +
0.77*m.x149*(3115.6025 + m.x1)/(0.0014569172593482 + m.x149) - m.x1) + 1.030733547789e-7*log(
300.294195813365 - 0.935712532066152*m.x1) + 7.55210283246e-8*log(372.950133675651 -
0.912392503961705*m.x1) + 2.33916275574e-8*log(361.393387088831 - 0.916101817517212*m.x1) +
5.54177207004e-8*log(656.239567646576 - 0.821466452268357*m.x1) + 6.333264106332e-7*log(100 +
0.77*m.x150*(3115.6025 + m.x1)/(0.000687363049653602 + m.x150) - m.x1) + 1.961642891124e-7*log(
134.549709596937 - 0.988910745322313*m.x1) + 4.647379111968e-7*log(100 + 0.77*m.x151*(3115.6025
+ m.x1)/(0.000525881239497159 + m.x151) - m.x1) + 1.242946102395e-7*log(353.903089657918 -
0.918505942379389*m.x1) + 2.944696474389e-7*log(100 + 0.77*m.x152*(3115.6025 + m.x1)/(
0.000106937303542527 + m.x152) - m.x1) + 5.87335508997e-8*log(516.195273474239 -
0.866415798076218*m.x1) + 2.594581363644e-7*log(100 + 0.77*m.x153*(3115.6025 + m.x1)/(
0.000396647737730021 + m.x153) - m.x1) + 8.03636542029e-8*log(396.605249264855 -
0.904800034900198*m.x1) + 1.903916138877e-7*log(100 + 0.77*m.x154*(3115.6025 + m.x1)/(
0.000400862229867344 + m.x154) - m.x1) + 5.10035997375e-8*log(354.727360960765 -
0.918241379970402*m.x1) + 1.208339054865e-7*log(243.796027825877 - 0.953846478225038*m.x1) +
2.41654320825e-8*log(460.605646202893 - 0.88425813427647*m.x1) + 7.324661827926e-7*log(100 + 0.77
*m.x155*(3115.6025 + m.x1)/(0.000284257877346811 + m.x155) - m.x1) + 1.7353046580099e-6*log(100
+ 0.77*m.x156*(3115.6025 + m.x1)/(8.61848162156681e-5 + m.x156) - m.x1) + 3.418879994499e-7*log(
100 + 0.77*m.x157*(3115.6025 + m.x1)/(0.000673684559054268 + m.x157) - m.x1) + 1.879683015159e-7*
log(100 + 0.77*m.x158*(3115.6025 + m.x1)/(2.84196616363883e-5 + m.x158) - m.x1) +
4.074501436232e-8*log(249.07063798668 - 0.952153511885204*m.x1) + 1.8752889355546e-7*log(
124.859074951742 - 0.992021101872995*m.x1) + 1.3740096939214e-7*log(223.191842620706 -
0.960459704785605*m.x1) + 4.255809052408e-8*log(115.459144825851 - 0.995038152387588*m.x1) +
1.0082547800804e-7*log(195.619011271494 - 0.969309624295303*m.x1) + 8.922261921434e-8*log(
397.575610857661 - 0.904488582591116*m.x1) + 6.537272227054e-8*log(563.967667251912 -
0.851082521839063*m.x1) + 2.024831619316e-8*log(261.374980189838 - 0.948204246148269*m.x1) +
4.797081089904e-8*log(427.227345312113 - 0.894971407516808*m.x1) + 5.681816460539e-7*log(
386.878396754352 - 0.9079220161255*m.x1) + 1.7598657260608e-7*log(120.492923527267 -
0.993422484566864*m.x1) + 4.1693436380136e-7*log(213.671510258415 - 0.963515400228876*m.x1) +
1.1000473212982e-7*log(295.531454568911 - 0.937241206293514*m.x1) + 2.6061507248382e-7*log(
669.104546649669 - 0.817337241625121*m.x1) + 5.086014931706e-8*log(201.317582629125 -
0.967480581162351*m.x1) + 7.50541219538e-9*log(351.372276733356 - 0.91931824527251*m.x1) +
5.49915589932e-9*log(439.694024130733 - 0.890970037374558*m.x1) + 1.70328992508e-9*log(
425.744308464212 - 0.895447410745045*m.x1) + 4.03530900568e-9*log(770.412001886567 -
0.784821073328011*m.x1) + 4.611643597144e-8*log(305.054577441609 - 0.934184615193495*m.x1) +
1.428394225608e-8*log(144.210295969209 - 0.985810033221758*m.x1) + 3.384045851456e-8*log(
361.165768375904 - 0.916174875204426*m.x1) + 9.0506638259e-9*log(416.6832011542 -
0.898355710924548*m.x1) + 2.144216696738e-8*log(1000.33799452779 - 0.711022829604292*m.x1) +
4.27675523074e-9*log(609.571269923759 - 0.836445352087194*m.x1) + 1.889276103448e-8*log(
434.3964722351 - 0.892670367213051*m.x1) + 5.85177761618e-9*log(468.1319046228 -
0.881842467188032*m.x1) + 1.386359786034e-8*log(431.366382166974 - 0.89364292069769*m.x1) +
3.7138893975e-9*log(417.681098644165 - 0.898035420550547*m.x1) + 8.7986683833e-9*log(
281.656582108581 - 0.941694557598865*m.x1) + 1.7596354465e-9*log(544.304427698094 -
0.857393737584273*m.x1) + 5.333541954492e-8*log(542.237671927793 - 0.858057094277016*m.x1) +
1.2635832772558e-7*log(1124.54527292762 - 0.671156614835294*m.x1) + 2.489499217358e-8*log(
308.855541867342 - 0.932964637861428*m.x1) + 1.368714141078e-8*log(1763.23723209658 -
0.466158718226546*m.x1) + 6.37686849336e-8*log(569.736214727577 - 0.849231018806932*m.x1) +
2.934953175558e-7*log(188.889418976798 - 0.971469589276296*m.x1) + 2.150417483922e-7*log(
498.037793762171 - 0.872243717302778*m.x1) + 6.66062709384e-8*log(155.847309756665 -
0.982074956687618*m.x1) + 1.577986470492e-7*log(417.53184038923 - 0.898083327257174*m.x1) +
1.396393934982e-7*log(921.108334776294 - 0.736452793712839*m.x1) + 1.023127024242e-7*log(
1223.70253679607 - 0.63933058315492*m.x1) + 3.16899752268e-8*log(602.595974199814 -
0.838684179320111*m.x1) + 7.50775419792e-8*log(981.05542553819 - 0.717211863343225*m.x1) +
8.89242449397e-7*log(898.744559240035 - 0.743630787547502*m.x1) + 2.754308097984e-7*log(
173.625982517328 - 0.976368621312466*m.x1) + 6.525302911128e-7*log(470.737308077767 -
0.881006223329912*m.x1) + 1.721647964586e-7*log(689.934119679135 - 0.81065167341497*m.x1) +
4.078800978786e-7*log(1379.48685669706 - 0.589329236737658*m.x1) + 7.95995507238e-8*log(
434.53489173985 - 0.892625939368116*m.x1) + 1.17464743374e-8*log(821.528017445544 -
0.768414610835129*m.x1) + 8.6065484436e-9*log(1005.40092452386 - 0.70939780523226*m.x1) +
2.6657631684e-9*log(978.126239585041 - 0.718152030117757*m.x1) + 6.3155297064e-9*log(
1509.82444407999 - 0.547495406079565*m.x1) + 7.21753206312e-8*log(713.319521316552 -
0.803145773147713*m.x1) + 2.23553292984e-8*log(254.833607664751 - 0.950303799132029*m.x1) +
5.29625911488e-8*log(843.299200702345 - 0.761426818503854*m.x1) + 1.4164896957e-8*log(
960.073904810529 - 0.723946201477714*m.x1) + 3.35584318974e-8*log(1751.11759537046 -
0.470048699931889*m.x1) + 6.6934092702e-9*log(1294.134940734 - 0.61672423207582*m.x1) +
2.95684403304e-8*log(995.115859229992 - 0.712698953338883*m.x1) + 9.1584251214e-9*log(
1059.13998011045 - 0.692149438155075*m.x1) + 2.16974620782e-8*log(989.193128395073 -
0.714599943864767*m.x1) + 5.812486425e-9*log(962.075202498917 - 0.723303854551755*m.x1) +
1.3770507159e-8*log(655.123209981677 - 0.821824764236877*m.x1) + 2.753947695e-9*log(
1191.8558240632 - 0.649552269885778*m.x1) + 8.34735149316e-8*log(1188.45417529629 -
0.650644080784922*m.x1) + 1.977592722834e-7*log(1857.44137536029 - 0.435922465924234*m.x1) +
3.89623353234e-8*log(722.540865408159 - 0.800186042536505*m.x1) + 2.14212958794e-8*log(
2241.25089526186 - 0.312732964085803*m.x1) + 1.37450609492688e-6*log(100 + 0.77*m.x159*(3115.6025
+ m.x1)/(0.000940324545070664 + m.x159) - m.x1) + 6.32616311960964e-6*log(100 + 0.77*m.x160*(
3115.6025 + m.x1)/(0.00595007430547527 + m.x160) - m.x1) + 4.63513077204876e-6*log(100 + 0.77*
m.x161*(3115.6025 + m.x1)/(0.00115094525652066 + m.x161) - m.x1) + 1.43566901937072e-6*log(
112.422531061639 - 0.996012799751689*m.x1) + 3.40128077544936e-6*log(100 + 0.77*m.x162*(3115.6025
+ m.x1)/(0.00150079877413969 + m.x162) - m.x1) + 3.00986601268356e-6*log(100 + 0.77*m.x163*(
3115.6025 + m.x1)/(0.000439964488305142 + m.x163) - m.x1) + 2.20530552287436e-6*log(100 + 0.77*
m.x164*(3115.6025 + m.x1)/(0.000259837570039233 + m.x164) - m.x1) + 6.8306354667144e-7*log(100 +
0.77*m.x165*(3115.6025 + m.x1)/(0.000863877417965782 + m.x165) - m.x1) + 1.61826355914336e-6*log(
100 + 0.77*m.x166*(3115.6025 + m.x1)/(0.000394451614430133 + m.x166) - m.x1) +
1.91672318134926e-5*log(100 + 0.77*m.x167*(3115.6025 + m.x1)/(0.000458693143304774 + m.x167) -
m.x1) + 5.93679056095872e-6*log(100 + 0.77*m.x168*(3115.6025 + m.x1)/(0.00723105029860804 +
m.x168) - m.x1) + 1.40650048404302e-5*log(100 + 0.77*m.x169*(3115.6025 + m.x1)/(
0.00125255833504483 + m.x169) - m.x1) + 3.71093683852188e-6*log(100 + 0.77*m.x170*(3115.6025 +
m.x1)/(0.000702087563679832 + m.x170) - m.x1) + 8.79167699815788e-6*log(100 + 0.77*m.x171*(
3115.6025 + m.x1)/(0.000200325317210912 + m.x171) - m.x1) + 1.71573347854404e-6*log(100 + 0.77*
m.x172*(3115.6025 + m.x1)/(0.0014128827573583 + m.x172) - m.x1) + 2.5319011341492e-7*log(100 +
0.77*m.x173*(3115.6025 + m.x1)/(0.00053228318911196 + m.x173) - m.x1) + 1.8551038498488e-7*log(
100 + 0.77*m.x174*(3115.6025 + m.x1)/(0.000377688899663851 + m.x174) - m.x1) + 5.745935840472e-8*
log(368.627863474912 - 0.913779802309533*m.x1) + 1.3612847878512e-7*log(100 + 0.77*m.x175*(
3115.6025 + m.x1)/(0.000160639196334586 + m.x175) - m.x1) + 1.55570744816496e-6*log(100 + 0.77*
m.x176*(3115.6025 + m.x1)/(0.000666587889373534 + m.x176) - m.x1) + 4.8185933905872e-7*log(
135.61051678515 - 0.988570263124019*m.x1) + 1.14158547275904e-6*log(100 + 0.77*m.x177*(3115.6025
+ m.x1)/(0.000509986775800951 + m.x177) - m.x1) + 3.053181545406e-7*log(100 + 0.77*m.x178*(
3115.6025 + m.x1)/(0.000409659463712101 + m.x178) - m.x1) + 7.2333731246292e-7*log(100 + 0.77*
m.x179*(3115.6025 + m.x1)/(0.00010370518389028 + m.x179) - m.x1) + 1.4427350740116e-7*log(100 +
0.77*m.x180*(3115.6025 + m.x1)/(0.000231008038688752 + m.x180) - m.x1) + 6.3733479048432e-7*log(
100 + 0.77*m.x181*(3115.6025 + m.x1)/(0.000384659283695111 + m.x181) - m.x1) + 1.9740584524212e-7
*log(100 + 0.77*m.x182*(3115.6025 + m.x1)/(0.000343700012896749 + m.x182) - m.x1) +
4.6767929904756e-7*log(100 + 0.77*m.x183*(3115.6025 + m.x1)/(0.000388746395185924 + m.x183) -
m.x1) + 1.25285601015e-7*log(361.799939823277 - 0.915971328234819*m.x1) + 2.968172550522e-7*log(
248.001154118093 - 0.952496778996007*m.x1) + 5.9360137281e-8*log(470.110557767541 -
0.881207388372701*m.x1) + 1.79923508157528e-6*log(100 + 0.77*m.x184*(3115.6025 + m.x1)/(
0.000275666343417648 + m.x184) - m.x1) + 4.26261456332172e-6*log(100 + 0.77*m.x185*(3115.6025 +
m.x1)/(8.35799287817407e-5 + m.x185) - m.x1) + 8.3981608575372e-7*log(100 + 0.77*m.x186*(
3115.6025 + m.x1)/(0.00065332282343346 + m.x186) - m.x1) + 4.6172665749852e-7*log(100 + 0.77*
m.x187*(3115.6025 + m.x1)/(2.75606934013358e-5 + m.x187) - m.x1) + 1.1756568780872e-6*log(100 +
0.77*m.x188*(3115.6025 + m.x1)/(0.000592771973025286 + m.x188) - m.x1) + 5.4109597701466e-6*log(
100 + 0.77*m.x189*(3115.6025 + m.x1)/(0.00375087229637144 + m.x189) - m.x1) + 3.9645683588494e-6*
log(100 + 0.77*m.x190*(3115.6025 + m.x1)/(0.000725545338711303 + m.x190) - m.x1) +
1.2279713880568e-6*log(119.646430506762 - 0.99369417937405*m.x1) + 2.9092189206884e-6*log(100 +
0.77*m.x191*(3115.6025 + m.x1)/(0.000946089789024766 + m.x191) - m.x1) + 2.5744299665114e-6*log(
100 + 0.77*m.x192*(3115.6025 + m.x1)/(0.000277349580164474 + m.x192) - m.x1) + 1.8862649033134e-6
*log(100 + 0.77*m.x193*(3115.6025 + m.x1)/(0.000163799222157577 + m.x193) - m.x1) +
5.842450315636e-7*log(100 + 0.77*m.x194*(3115.6025 + m.x1)/(0.000544580404908056 + m.x194) - m.x1
) + 1.3841500527984e-6*log(100 + 0.77*m.x195*(3115.6025 + m.x1)/(0.00024865868170142 + m.x195) -
m.x1) + 1.6394316473819e-5*log(100 + 0.77*m.x196*(3115.6025 + m.x1)/(0.000289155952586038 +
m.x196) - m.x1) + 5.0779175752768e-6*log(100 + 0.77*m.x197*(3115.6025 + m.x1)/(
0.00455838781941915 + m.x197) - m.x1) + 1.20302265242856e-5*log(100 + 0.77*m.x198*(3115.6025 +
m.x1)/(0.000789601291900758 + m.x198) - m.x1) + 3.1740771717622e-6*log(100 + 0.77*m.x199*(
3115.6025 + m.x1)/(0.000442589563933731 + m.x199) - m.x1) + 7.5197887961022e-6*log(100 + 0.77*
m.x200*(3115.6025 + m.x1)/(0.000126283243538118 + m.x200) - m.x1) + 1.4675190400826e-6*log(100 +
0.77*m.x201*(3115.6025 + m.x1)/(0.000890668337993608 + m.x201) - m.x1) + 2.165612065298e-7*log(
100 + 0.77*m.x202*(3115.6025 + m.x1)/(0.00033554644284477 + m.x202) - m.x1) + 1.586726758572e-7*
log(100 + 0.77*m.x203*(3115.6025 + m.x1)/(0.000238091619980701 + m.x203) - m.x1) +
4.91467372668e-8*log(499.876211729988 - 0.871653649099977*m.x1) + 1.164348291928e-7*log(100 +
0.77*m.x204*(3115.6025 + m.x1)/(0.00010126547675015 + m.x204) - m.x1) + 1.3306439080024e-6*log(
100 + 0.77*m.x205*(3115.6025 + m.x1)/(0.00042021089468532 + m.x205) - m.x1) + 4.121489517768e-7*
log(156.002187849084 - 0.982025246208692*m.x1) + 9.764327840576e-7*log(100 + 0.77*m.x206*(
3115.6025 + m.x1)/(0.000321490988290235 + m.x206) - m.x1) + 2.61147905939e-7*log(100 + 0.77*
m.x207*(3115.6025 + m.x1)/(0.000258245570474664 + m.x207) - m.x1) + 6.186924086498e-7*log(100 +
0.77*m.x208*(3115.6025 + m.x1)/(6.53747972334079e-5 + m.x208) - m.x1) + 1.234015199554e-7*log(100
+ 0.77*m.x209*(3115.6025 + m.x1)/(0.000145625349881665 + m.x209) - m.x1) + 5.451318352408e-7*
log(100 + 0.77*m.x210*(3115.6025 + m.x1)/(0.000242485686174776 + m.x210) - m.x1) +
1.688472248978e-7*log(100 + 0.77*m.x211*(3115.6025 + m.x1)/(0.000216665337347236 + m.x211) - m.x1
) + 4.000203321714e-7*log(100 + 0.77*m.x212*(3115.6025 + m.x1)/(0.000245062163791024 + m.x212) -
m.x1) + 1.07160585975e-7*log(490.323466912148 - 0.874719747813738*m.x1) + 2.53876827993e-7*log(
100 + 0.77*m.x213*(3115.6025 + m.x1)/(0.000479394172170398 + m.x213) - m.x1) + 5.0772531265e-8*
log(638.410997400727 - 0.82718880300015*m.x1) + 1.5389405014332e-6*log(100 + 0.77*m.x214*(
3115.6025 + m.x1)/(0.000173777535789056 + m.x214) - m.x1) + 3.6459439128718e-6*log(100 + 0.77*
m.x215*(3115.6025 + m.x1)/(5.26880209061672e-5 + m.x215) - m.x1) + 7.183202469518e-7*log(100 +
0.77*m.x216*(3115.6025 + m.x1)/(0.000411848718720832 + m.x216) - m.x1) + 3.949288567638e-7*log(
100 + 0.77*m.x217*(3115.6025 + m.x1)/(1.73740084645212e-5 + m.x217) - m.x1) + 2.3027003834052e-7*
log(310.644288882208 - 0.932390512306301*m.x1) + 1.05981765339381e-6*log(135.946817505807 -
0.988462322293743*m.x1) + 7.7652019480479e-7*log(274.907218484447 - 0.943860868488696*m.x1) +
2.4051662000988e-7*log(122.393839487144 - 0.992812356683131*m.x1) + 5.6981417358594e-7*log(
236.453447120506 - 0.956203191157888*m.x1) + 5.0424073396149e-7*log(509.329744330999 -
0.868619394055885*m.x1) + 3.6945328156719e-7*log(719.780347180692 - 0.801072072839622*m.x1) +
1.1443315505226e-7*log(327.517012158458 - 0.926974955194555*m.x1) + 2.7110655469944e-7*log(
547.744519196025 - 0.856289587906023*m.x1) + 3.21107285072415e-6*log(495.371037182199 -
0.873099653379339*m.x1) + 9.9458634278688e-7*log(129.657584748611 - 0.990480947184819*m.x1) +
2.35630035822996e-6*log(261.674211430671 - 0.948108203331243*m.x1) + 6.2169063581427e-7*log(
373.959049138861 - 0.912068677201645*m.x1) + 1.47286345758327e-6*log(846.602134340364 -
0.760366691726443*m.x1) + 2.8743562167141e-7*log(244.433016330596 - 0.953642027078038*m.x1) +
4.241676143793e-8*log(448.653645634299 - 0.888094310607884*m.x1) + 3.107842418502e-8*log(
563.774842073764 - 0.851144412012199*m.x1) + 9.62612585838e-9*log(545.832820860844 -
0.856903176557072*m.x1) + 2.280550820748e-8*log(964.57779102352 - 0.722500610708998*m.x1) +
2.6062657347084e-7*log(386.804719169038 - 0.90794566406689*m.x1) + 8.072555581188e-8*log(
163.697577366872 - 0.979555293922485*m.x1) + 1.9124901050016e-7*log(461.599091032222 -
0.88393927305161*m.x1) + 5.114973546615e-8*log(534.130728924762 - 0.860659140912629*m.x1) +
1.2118019067993e-7*log(1218.02524091695 - 0.641152797599519*m.x1) + 2.417003911689e-8*log(
775.353772232083 - 0.783234936988245*m.x1) + 1.0677224872428e-7*log(556.971636607364 -
0.853328004260054*m.x1) + 3.307126226673e-8*log(600.07781259564 - 0.839492421579569*m.x1) +
7.834998369249e-8*log(553.074587453001 - 0.854578821446895*m.x1) + 2.098900852875e-8*log(
535.421327803972 - 0.860244903576765*m.x1) + 4.972558576005e-8*log(355.163421699796 -
0.918101419645222*m.x1) + 9.94456200525e-9*log(695.545998372881 - 0.808850455610791*m.x1) +
3.0142458643662e-7*log(692.989141812062 - 0.809671117605002*m.x1) + 7.1411281663263e-7*log(
1347.27032802532 - 0.599669621517727*m.x1) + 1.4069379756063e-7*log(391.919406907936 -
0.906304027260237*m.x1) + 7.735274184483e-8*log(1939.03334836773 - 0.409734281453512*m.x1) +
2.2272443732888e-6*log(100 + 0.77*m.x218*(3115.6025 + m.x1)/(0.000274060001623156 + m.x218) -
m.x1) + 1.02508903122814e-5*log(100 + 0.77*m.x219*(3115.6025 + m.x1)/(0.00173416442478794 +
m.x219) - m.x1) + 7.5107480204026e-6*log(100 + 0.77*m.x220*(3115.6025 + m.x1)/(
0.000335445948448057 + m.x220) - m.x1) + 2.3263525400872e-6*log(100 + 0.77*m.x221*(3115.6025 +
m.x1)/(0.00279966370191667 + m.x221) - m.x1) + 5.5114222461836e-6*log(100 + 0.77*m.x222*(
3115.6025 + m.x1)/(0.000437411653915557 + m.x222) - m.x1) + 4.8771752747006e-6*log(100 + 0.77*
m.x223*(3115.6025 + m.x1)/(0.000128228779107299 + m.x223) - m.x1) + 3.5734685610586e-6*log(100 +
0.77*m.x224*(3115.6025 + m.x1)/(7.57303265558781e-5 + m.x224) - m.x1) + 1.1068335357244e-6*log(
100 + 0.77*m.x225*(3115.6025 + m.x1)/(0.000251779290257831 + m.x225) - m.x1) + 2.6222280278736e-6
*log(100 + 0.77*m.x226*(3115.6025 + m.x1)/(0.00011496393522606 + m.x226) - m.x1) +
3.1058508482201e-5*log(100 + 0.77*m.x227*(3115.6025 + m.x1)/(0.000133687293666454 + m.x227) -
m.x1) + 9.6199525204672e-6*log(100 + 0.77*m.x228*(3115.6025 + m.x1)/(0.00210750816509285 + m.x228
) - m.x1) + 2.27908795797624e-5*log(100 + 0.77*m.x229*(3115.6025 + m.x1)/(0.000365061340932758 +
m.x229) - m.x1) + 6.0131877361138e-6*log(100 + 0.77*m.x230*(3115.6025 + m.x1)/(
0.000204625222058021 + m.x230) - m.x1) + 1.42459994889738e-5*log(100 + 0.77*m.x231*(3115.6025 +
m.x1)/(5.83853277549572e-5 + m.x231) - m.x1) + 2.7801679092254e-6*log(100 + 0.77*m.x232*(
3115.6025 + m.x1)/(0.000411788305223751 + m.x232) - m.x1) + 4.102682829542e-7*log(100 + 0.77*
m.x233*(3115.6025 + m.x1)/(0.000155135301356022 + m.x233) - m.x1) + 3.006003121188e-7*log(100 +
0.77*m.x234*(3115.6025 + m.x1)/(0.000110078398992706 + m.x234) - m.x1) + 9.31069226772e-8*log(100
+ 0.77*m.x235*(3115.6025 + m.x1)/(0.000115570007588387 + m.x235) - m.x1) + 2.205820618312e-7*
log(100 + 0.77*m.x236*(3115.6025 + m.x1)/(4.68187059871874e-5 + m.x236) - m.x1) +
2.5208623469896e-6*log(100 + 0.77*m.x237*(3115.6025 + m.x1)/(0.000194278750885906 + m.x237) -
m.x1) + 7.808030139672e-7*log(100 + 0.77*m.x238*(3115.6025 + m.x1)/(0.000967158090120206 + m.x238
) - m.x1) + 1.8498206957504e-6*log(100 + 0.77*m.x239*(3115.6025 + m.x1)/(0.000148636954481809 +
m.x239) - m.x1) + 4.94736359681e-7*log(100 + 0.77*m.x240*(3115.6025 + m.x1)/(0.000119396301924079
+ m.x240) - m.x1) + 1.1720929904342e-6*log(100 + 0.77*m.x241*(3115.6025 + m.x1)/(
3.02251419621976e-5 + m.x241) - m.x1) + 2.337802347766e-7*log(100 + 0.77*m.x242*(3115.6025 + m.x1
)/(6.73278856644583e-5 + m.x242) - m.x1) + 1.0327348356232e-6*log(100 + 0.77*m.x243*(3115.6025 +
m.x1)/(0.000112109935305285 + m.x243) - m.x1) + 3.198756700262e-7*log(100 + 0.77*m.x244*(
3115.6025 + m.x1)/(0.000100172250725714 + m.x244) - m.x1) + 7.578257318406e-7*log(100 + 0.77*
m.x245*(3115.6025 + m.x1)/(0.0001133011344372 + m.x245) - m.x1) + 2.03012304525e-7*log(100 + 0.77
*m.x246*(3115.6025 + m.x1)/(0.000118964217618766 + m.x246) - m.x1) + 4.80961534947e-7*log(100 +
0.77*m.x247*(3115.6025 + m.x1)/(0.000221641328507187 + m.x247) - m.x1) + 9.6186937435e-8*log(100
+ 0.77*m.x248*(3115.6025 + m.x1)/(7.98854666975635e-5 + m.x248) - m.x1) + 2.9154735846228e-6*
log(100 + 0.77*m.x249*(3115.6025 + m.x1)/(8.03436631751567e-5 + m.x249) - m.x1) +
6.9071241929722e-6*log(100 + 0.77*m.x250*(3115.6025 + m.x1)/(2.43595847174932e-5 + m.x250) - m.x1
) + 1.3608347452922e-6*log(100 + 0.77*m.x251*(3115.6025 + m.x1)/(0.000190412613378249 + m.x251)
- m.x1) + 7.481800944402e-7*log(100 + 0.77*m.x252*(3115.6025 + m.x1)/(8.03263481518263e-6 +
m.x252) - m.x1) + 3.48578463039596e-6*log(100 + 0.77*m.x253*(3115.6025 + m.x1)/(
0.000115610604617764 + m.x253) - m.x1) + 1.60433207630746e-5*log(100 + 0.77*m.x254*(3115.6025 +
m.x1)/(0.000731547093588758 + m.x254) - m.x1) + 1.17548169955132e-5*log(100 + 0.77*m.x255*(
3115.6025 + m.x1)/(0.000141505906323334 + m.x255) - m.x1) + 3.64089546094324e-6*log(100 + 0.77*
m.x256*(3115.6025 + m.x1)/(0.00118102171563895 + m.x256) - m.x1) + 8.62574003453462e-6*log(100 +
0.77*m.x257*(3115.6025 + m.x1)/(0.000184519541255673 + m.x257) - m.x1) + 7.63310161030727e-6*log(
100 + 0.77*m.x258*(3115.6025 + m.x1)/(5.40925585426253e-5 + m.x258) - m.x1) + 5.59271444872837e-6
*log(100 + 0.77*m.x259*(3115.6025 + m.x1)/(3.19463941807333e-5 + m.x259) - m.x1) +
1.73226762788398e-6*log(100 + 0.77*m.x260*(3115.6025 + m.x1)/(0.000106211617180695 + m.x260) -
m.x1) + 4.10396015209512e-6*log(100 + 0.77*m.x261*(3115.6025 + m.x1)/(4.84968619353583e-5 +
m.x261) - m.x1) + 4.86086182588105e-5*log(100 + 0.77*m.x262*(3115.6025 + m.x1)/(
5.63952009010913e-5 + m.x262) - m.x1) + 1.50558614237142e-5*log(100 + 0.77*m.x263*(3115.6025 +
m.x1)/(0.000889039961177141 + m.x263) - m.x1) + 3.56692326648611e-5*log(100 + 0.77*m.x264*(
3115.6025 + m.x1)/(0.000153998985980601 + m.x264) - m.x1) + 9.41103618516721e-6*log(100 + 0.77*
m.x265*(3115.6025 + m.x1)/(8.63199500184675e-5 + m.x265) - m.x1) + 2.22959306391542e-5*log(100 +
0.77*m.x266*(3115.6025 + m.x1)/(2.46295081463161e-5 + m.x266) - m.x1) + 4.35114650377943e-6*log(
100 + 0.77*m.x267*(3115.6025 + m.x1)/(0.000173710481863398 + m.x267) - m.x1) + 6.4209697517339e-7
*log(100 + 0.77*m.x268*(3115.6025 + m.x1)/(6.54429171754531e-5 + m.x268) - m.x1) +
4.7045935346946e-7*log(100 + 0.77*m.x269*(3115.6025 + m.x1)/(4.64359271237298e-5 + m.x269) - m.x1
) + 1.4571848690874e-7*log(100 + 0.77*m.x270*(3115.6025 + m.x1)/(4.87525300074436e-5 + m.x270) -
m.x1) + 3.4522550380804e-7*log(100 + 0.77*m.x271*(3115.6025 + m.x1)/(1.97501965793709e-5 + m.x271
) - m.x1) + 3.94531616281732e-6*log(100 + 0.77*m.x272*(3115.6025 + m.x1)/(8.19553518254294e-5 +
m.x272) - m.x1) + 1.22200831578924e-6*log(100 + 0.77*m.x273*(3115.6025 + m.x1)/(
0.000407989968975871 + m.x273) - m.x1) + 2.89509163321568e-6*log(100 + 0.77*m.x274*(3115.6025 +
m.x1)/(6.27016276523772e-5 + m.x274) - m.x1) + 7.7429509727645e-7*log(100 + 0.77*m.x275*(
3115.6025 + m.x1)/(5.03666298358569e-5 + m.x275) - m.x1) + 1.83440298713939e-6*log(100 + 0.77*
m.x276*(3115.6025 + m.x1)/(1.27502988988239e-5 + m.x276) - m.x1) + 3.6588151666147e-7*log(100 +
0.77*m.x277*(3115.6025 + m.x1)/(2.8401873761961e-5 + m.x277) - m.x1) + 1.61629826545444e-6*log(
100 + 0.77*m.x278*(3115.6025 + m.x1)/(4.72929188044174e-5 + m.x278) - m.x1) + 5.0062656239579e-7*
log(100 + 0.77*m.x279*(3115.6025 + m.x1)/(4.22570765661979e-5 + m.x279) - m.x1) +
1.18604735082027e-6*log(100 + 0.77*m.x280*(3115.6025 + m.x1)/(4.77954191731146e-5 + m.x280) -
m.x1) + 3.1772767253625e-7*log(100 + 0.77*m.x281*(3115.6025 + m.x1)/(5.01843576053702e-5 + m.x281
) - m.x1) + 7.5273658626615e-7*log(100 + 0.77*m.x282*(3115.6025 + m.x1)/(9.34980947428967e-5 +
m.x282) - m.x1) + 1.5053891354575e-7*log(100 + 0.77*m.x283*(3115.6025 + m.x1)/(
3.36992156840782e-5 + m.x283) - m.x1) + 4.56290882737626e-6*log(100 + 0.77*m.x284*(3115.6025 +
m.x1)/(3.38925031813217e-5 + m.x284) - m.x1) + 1.08101058154415e-5*log(100 + 0.77*m.x285*(
3115.6025 + m.x1)/(1.02759479703261e-5 + m.x285) - m.x1) + 2.12979630638549e-6*log(100 + 0.77*
m.x286*(3115.6025 + m.x1)/(8.03244443885601e-5 + m.x286) - m.x1) + 1.17095129086209e-6*log(100 +
0.77*m.x287*(3115.6025 + m.x1)/(3.38851989402637e-6 + m.x287) - m.x1) + 1.8183738183868e-7*log(
193.364173093206 - 0.970033348896977*m.x1) + 8.3690639350379e-7*log(115.254735565526 -
0.995103760648052*m.x1) + 6.1319483935361e-7*log(176.825857180065 - 0.975341572880345*m.x1) +
1.8992880179492e-7*log(109.471992936955 - 0.996959819830368*m.x1) + 4.4996525907646e-7*log(
159.35999423299 - 0.980947507189062*m.x1) + 3.9818386942891e-7*log(291.087251996003 -
0.938667640690363*m.x1) + 2.9174623809521e-7*log(406.623520914578 - 0.901584518270679*m.x1) +
9.036444975734e-8*log(201.277457067861 - 0.967493460071411*m.x1) + 2.1408476092296e-7*log(
311.19442233872 - 0.932213938607791*m.x1) + 2.53568846505985e-6*log(283.883103312431 -
0.940979921760741*m.x1) + 7.8539517293792e-7*log(112.566524343699 - 0.995966582918168*m.x1) +
1.86070011997164e-6*log(170.777277381972 - 0.97728295654469*m.x1) + 4.9093055416493e-7*log(
223.415319381483 - 0.960387976520919*m.x1) + 1.16307634663593e-6*log(483.165743746284 -
0.877017127908235*m.x1) + 2.2697933812219e-7*log(162.956712613236 - 0.979793085731175*m.x1) +
3.349525149487e-8*log(260.157811542521 - 0.948594914934585*m.x1) + 2.454170471418e-8*log(
319.709287061812 - 0.929480963293035*m.x1) + 7.60146450642e-9*log(310.18391133055 -
0.932538277482269*m.x1) + 1.800882969332e-8*log(559.688690006188 - 0.852455924654641*m.x1) +
2.0580903229556e-7*log(229.632746466765 - 0.958392398752163*m.x1) + 6.374656391292e-8*log(
127.215287247597 - 0.991264839706735*m.x1) + 1.5102363989344e-7*log(266.673373771106 -
0.946503646157972*m.x1) + 4.039142063785e-8*log(304.020979998872 - 0.934516364010212*m.x1) +
9.569238257287e-8*log(744.280921668034 - 0.793208240888228*m.x1) + 1.908635905751e-8*log(
439.474870738743 - 0.891040377988289*m.x1) + 8.431486050452e-8*log(316.086561642226 -
0.930643732105676*m.x1) + 2.611538951407e-8*log(339.269581293755 - 0.923202789414325*m.x1) +
6.187064545791e-8*log(314.017393171395 - 0.931307863191343*m.x1) + 1.657439407125e-8*log(
304.698766948572 - 0.934298817981892*m.x1) + 3.926681208795e-8*log(214.392130109138 -
0.96328410632963*m.x1) + 7.85292403475e-9*log(392.620228975356 - 0.9060790877606*m.x1) +
2.3802600639858e-7*log(391.153960662165 - 0.906549708872629*m.x1) + 5.6391359401217e-7*log(
850.86177688669 - 0.758999494676651*m.x1) + 1.1110169596417e-7*log(232.119855144384 -
0.957594123401691*m.x1) + 6.108315331197e-8*log(1491.69160837479 - 0.55331541543737*m.x1) +
1.75878843630284e-6*log(100 + 0.77*m.x288*(3115.6025 + m.x1)/(0.00320020232823025 + m.x288) -
m.x1) + 8.09482226524927e-6*log(105.908941656468 - 0.998103435320627*m.x1) + 5.93101364389693e-6*
log(100 + 0.77*m.x289*(3115.6025 + m.x1)/(0.00391700685565556 + m.x289) - m.x1) +
1.83705119893396e-6*log(103.663543949195 - 0.998824129859571*m.x1) + 4.35220572579398e-6*log(100
+ 0.77*m.x290*(3115.6025 + m.x1)/(0.00510766177101759 + m.x290) - m.x1) + 3.85135981387583e-6*
log(100 + 0.77*m.x291*(3115.6025 + m.x1)/(0.00149732917979604 + m.x291) - m.x1) +
2.82186151553773e-6*log(100 + 0.77*m.x292*(3115.6025 + m.x1)/(0.000884304042641739 + m.x292) -
m.x1) + 8.7403342304542e-7*log(140.116885035848 - 0.987123875707556*m.x1) + 2.07069524479848e-6*
log(100 + 0.77*m.x293*(3115.6025 + m.x1)/(0.00134243541922925 + m.x293) - m.x1) +
2.45259775812781e-5*log(100 + 0.77*m.x294*(3115.6025 + m.x1)/(0.00156106832778344 + m.x294) -
m.x1) + 7.59658951379296e-6*log(104.864298820897 - 0.998438729324137*m.x1) + 1.79972776848313e-5*
log(100 + 0.77*m.x295*(3115.6025 + m.x1)/(0.00426282619236897 + m.x295) - m.x1) +
4.74843496404409e-6*log(100 + 0.77*m.x296*(3115.6025 + m.x1)/(0.00238941147254734 + m.x296) -
m.x1) + 1.12496407961671e-5*log(100 + 0.77*m.x297*(3115.6025 + m.x1)/(0.000681766258152549 +
m.x297) - m.x1) + 2.19541565728847e-6*log(100 + 0.77*m.x298*(3115.6025 + m.x1)/(
0.00480845758341298 + m.x298) - m.x1) + 3.2397662353331e-7*log(164.437067242418 -
0.979317943401824*m.x1) + 2.3737509868434e-7*log(189.824673314027 - 0.971169405174753*m.x1) +
7.352375918346e-8*log(185.708911713758 - 0.972490421446973*m.x1) + 1.7418707361316e-7*log(
301.02255994106 - 0.935478752523449*m.x1) + 1.99064979064228e-6*log(100 + 0.77*m.x299*(3115.6025
+ m.x1)/(0.00226859558939085 + m.x299) - m.x1) + 6.1657684646796e-7*log(110.574382290427 -
0.996605991203812*m.x1) + 1.46074821781472e-6*log(100 + 0.77*m.x300*(3115.6025 + m.x1)/(
0.00173563571837019 + m.x300) - m.x1) + 3.9067854379205e-7*log(183.057292642768 -
0.973341498909836*m.x1) + 9.2556686754731e-7*log(100 + 0.77*m.x301*(3115.6025 + m.x1)/(
0.00035293938957031 + m.x301) - m.x1) + 1.8460927704763e-7*log(243.449327510641 -
0.953957756963335*m.x1) + 8.1551989015876e-7*log(100 + 0.77*m.x302*(3115.6025 + m.x1)/(
0.00130910922373505 + m.x302) - m.x1) + 2.5259627378291e-7*log(198.343400264271 -
0.968435190219461*m.x1) + 5.9843237225283e-7*log(100 + 0.77*m.x303*(3115.6025 + m.x1)/(
0.00132301887203383 + m.x303) - m.x1) + 1.6031276042625e-7*log(183.348480295548 -
0.973248037804711*m.x1) + 3.7980097564335e-7*log(145.468442132924 - 0.985406212078427*m.x1) +
7.595595495175e-8*log(222.047016072243 - 0.960827154275219*m.x1) + 2.30226251258154e-6*log(100 +
0.77*m.x304*(3115.6025 + m.x1)/(0.000938174036447789 + m.x304) - m.x1) + 5.45434991525821e-6*log(
100 + 0.77*m.x305*(3115.6025 + m.x1)/(0.000284447198664313 + m.x305) - m.x1) +
1.07461060063421e-6*log(100 + 0.77*m.x306*(3115.6025 + m.x1)/(0.00222345064966968 + m.x306) -
m.x1) + 5.9081550015561e-7*log(100 + 0.77*m.x307*(3115.6025 + m.x1)/(9.37971848687272e-5 + m.x307
) - m.x1) + 2.75262012741516e-6*log(100 + 0.77*m.x308*(3115.6025 + m.x1)/(0.000497933292970139 +
m.x308) - m.x1) + 1.26689317687422e-5*log(100 + 0.77*m.x309*(3115.6025 + m.x1)/(
0.00315076332727193 + m.x309) - m.x1) + 9.28242828710157e-6*log(100 + 0.77*m.x310*(3115.6025 +
m.x1)/(0.000609464003265626 + m.x310) - m.x1) + 2.87510652270804e-6*log(100 + 0.77*m.x311*(
3115.6025 + m.x1)/(0.00508664437731854 + m.x311) - m.x1) + 6.81148956417702e-6*log(100 + 0.77*
m.x312*(3115.6025 + m.x1)/(0.000794723140654341 + m.x312) - m.x1) + 6.02763261502767e-6*log(100
+ 0.77*m.x313*(3115.6025 + m.x1)/(0.000232975909860184 + m.x313) - m.x1) + 4.41639974662077e-6*
log(100 + 0.77*m.x314*(3115.6025 + m.x1)/(0.000137592682829811 + m.x314) - m.x1) +
1.36792006511358e-6*log(100 + 0.77*m.x315*(3115.6025 + m.x1)/(0.000457451982621508 + m.x315) -
m.x1) + 3.24077489419752e-6*log(100 + 0.77*m.x316*(3115.6025 + m.x1)/(0.000208875321100784 +
m.x316) - m.x1) + 3.83847756451445e-5*log(100 + 0.77*m.x317*(3115.6025 + m.x1)/(
0.000242893359006603 + m.x317) - m.x1) + 1.1889164588399e-5*log(100 + 0.77*m.x318*(3115.6025 +
m.x1)/(0.00382908295406458 + m.x318) - m.x1) + 2.81669288763827e-5*log(100 + 0.77*m.x319*(
3115.6025 + m.x1)/(0.000663271526491096 + m.x319) - m.x1) + 7.43161450573641e-6*log(100 + 0.77*
m.x320*(3115.6025 + m.x1)/(0.000371778844197042 + m.x320) - m.x1) + 1.76064312469634e-5*log(100
+ 0.77*m.x321*(3115.6025 + m.x1)/(0.000106078954747077 + m.x321) - m.x1) + 3.43597058154303e-6*
log(100 + 0.77*m.x322*(3115.6025 + m.x1)/(0.000748168669679125 + m.x322) - m.x1) +
5.0704482491619e-7*log(100 + 0.77*m.x323*(3115.6025 + m.x1)/(0.000281861749261525 + m.x323) -
m.x1) + 3.7150771570866e-7*log(100 + 0.77*m.x324*(3115.6025 + m.x1)/(0.000199998903052942 +
m.x324) - m.x1) + 1.1506954173354e-7*log(100 + 0.77*m.x325*(3115.6025 + m.x1)/(
0.000209976479990677 + m.x325) - m.x1) + 2.7261428086884e-7*log(100 + 0.77*m.x326*(3115.6025 +
m.x1)/(8.50638265588891e-5 + m.x326) - m.x1) + 3.11549846886372e-6*log(100 + 0.77*m.x327*(
3115.6025 + m.x1)/(0.000352980579470926 + m.x327) - m.x1) + 9.6498350947404e-7*log(100 + 0.77*
m.x328*(3115.6025 + m.x1)/(0.00175720721660966 + m.x328) - m.x1) + 2.28616749032928e-6*log(100 +
0.77*m.x329*(3115.6025 + m.x1)/(0.000270055052776176 + m.x329) - m.x1) + 6.1143773793045e-7*log(
100 + 0.77*m.x330*(3115.6025 + m.x1)/(0.000216928385876834 + m.x330) - m.x1) +
1.44857331120219e-6*log(100 + 0.77*m.x331*(3115.6025 + m.x1)/(5.49153629810655e-5 + m.x331) -
m.x1) + 2.8892571796587e-7*log(100 + 0.77*m.x332*(3115.6025 + m.x1)/(0.000122326481861876 +
m.x332) - m.x1) + 1.27634251944324e-6*log(100 + 0.77*m.x333*(3115.6025 + m.x1)/(
0.000203689954501239 + m.x333) - m.x1) + 3.9532986058659e-7*log(100 + 0.77*m.x334*(3115.6025 +
m.x1)/(0.000182000650852623 + m.x334) - m.x1) + 9.3658620830067e-7*log(100 + 0.77*m.x335*(
3115.6025 + m.x1)/(0.000205854216716901 + m.x335) - m.x1) + 2.5090006388625e-7*log(100 + 0.77*
m.x336*(3115.6025 + m.x1)/(0.000216143341872927 + m.x336) - m.x1) + 5.9441362496415e-7*log(100 +
0.77*m.x337*(3115.6025 + m.x1)/(0.000402695015355116 + m.x337) - m.x1) + 1.1887608883575e-7*log(
100 + 0.77*m.x338*(3115.6025 + m.x1)/(0.000145142061072707 + m.x338) - m.x1) +
3.60319297075146e-6*log(100 + 0.77*m.x339*(3115.6025 + m.x1)/(0.000145974547679888 + m.x339) -
m.x1) + 8.53641805279629e-6*log(100 + 0.77*m.x340*(3115.6025 + m.x1)/(4.42583673718464e-5 +
m.x340) - m.x1) + 1.68183660262029e-6*log(100 + 0.77*m.x341*(3115.6025 + m.x1)/(
0.000345956283445014 + m.x341) - m.x1) + 9.2466530012889e-7*log(100 + 0.77*m.x342*(3115.6025 +
m.x1)/(1.45943088413546e-5 + m.x342) - m.x1) + 1.9937603495584e-7*log(337.116032640652 -
0.923894003602625*m.x1) + 9.1762802938952e-7*log(140.874328942254 - 0.986880762567672*m.x1) +
6.7233895742168e-7*log(297.292766602765 - 0.936675886412735*m.x1) + 2.0824789184096e-7*log(
125.483573075406 - 0.991820659703731*m.x1) + 4.9336549127248e-7*log(254.25882364464 -
0.950488284803777*m.x1) + 4.3658966196808e-7*log(555.572368624705 - 0.853777120597154*m.x1) +
3.1988586492248e-7*log(781.652769600945 - 0.781213177996569*m.x1) + 9.908031842192e-8*log(
355.861242670046 - 0.917877443393358*m.x1) + 2.3473375136448e-7*log(597.242366314084 -
0.840402501181045*m.x1) + 2.7802617203068e-6*log(540.385754980185 - 0.858651495182654*m.x1) +
8.6114842762496e-7*log(133.73528945573 - 0.98917214585117*m.x1) + 2.04016912480032e-6*log(
282.505104930273 - 0.941422211296122*m.x1) + 5.3828198766584e-7*log(407.270363915862 -
0.901376904173154*m.x1) + 1.27525785951384e-6*log(915.333422034018 - 0.738306339774083*m.x1) +
2.4887204156872e-7*log(263.204514879424 - 0.9476170291687*m.x1) + 3.672594911656e-8*log(
489.383061907709 - 0.875021585100247*m.x1) + 2.690881119984e-8*log(614.577491492312 -
0.834838529147312*m.x1) + 8.33464405296e-9*log(595.172961948805 - 0.841066707980622*m.x1) +
1.974582466016e-8*log(1038.00461477439 - 0.698933155056079*m.x1) + 2.2565980879328e-7*log(
421.441706101908 - 0.896828396401047*m.x1) + 6.989507342496e-8*log(172.31269511839 -
0.976790140873751*m.x1) + 1.6559023343872e-7*log(503.543143364888 - 0.870476691630307*m.x1) +
4.42872703708e-8*log(582.495725195754 - 0.845135659893791*m.x1) + 1.0492214318056e-7*log(
1296.19682040876 - 0.616062440440088*m.x1) + 2.092728432488e-8*log(840.465204251466 -
0.762336432760127*m.x1) + 9.244723172576e-8*log(607.224377039528 - 0.837198623046577*m.x1) +
2.863428168616e-8*log(653.71932095886 - 0.822275363767085*m.x1) + 6.783821812008e-8*log(
603.009773354627 - 0.838551364188908*m.x1) + 1.817303427e-8*log(583.894696018611 -
0.844686638934649*m.x1) + 4.30541906196e-8*log(386.497448150095 - 0.908044287372958*m.x1) +
8.610357458e-9*log(755.890960453142 - 0.78948182239129*m.x1) + 2.6098418758704e-7*log(
753.168856574781 - 0.790355523024911*m.x1) + 6.1830441735896e-7*log(1425.13050916184 -
0.574679212395729*m.x1) + 1.2181772193496e-7*log(427.078436558279 - 0.895019202045743*m.x1) +
6.697477046136e-8*log(1993.11512355808 - 0.392375913307915*m.x1) + 3.1203660011952e-7*log(
825.500626441516 - 0.767139541568119*m.x1) + 1.43614818364956e-6*log(253.821676807616 -
0.950628593728624*m.x1) + 1.05225466264404e-6*log(727.460064530513 - 0.798607150774044*m.x1) +
3.2592163931088e-7*log(197.663258929557 - 0.968653491923454*m.x1) + 7.7214942381144e-7*log(
612.435114895593 - 0.835526157494227*m.x1) + 6.8329151895324e-7*log(1253.77546341785 -
0.629678220049622*m.x1) + 5.0064240538644e-7*log(1565.13156178004 - 0.529743745622224*m.x1) +
1.5506721109176e-7*log(869.119411862432 - 0.753139429095197*m.x1) + 3.6737375043744e-7*log(
1319.24310584124 - 0.608665384675599*m.x1) + 4.3512923448354e-6*log(1228.83305190904 -
0.637683866311882*m.x1) + 1.34775389436288e-6*log(228.026463886737 - 0.95890795957227*m.x1) +
3.19299878499696e-6*log(689.056338131733 - 0.810933410750655*m.x1) + 8.4244669312452e-7*log(
981.250272507588 - 0.717149324245443*m.x1) + 1.99586237556852e-6*log(1708.54761640287 -
0.483712182024867*m.x1) + 3.8950110394716e-7*log(637.181320846455 - 0.827583486389405*m.x1) +
5.747852444268e-8*log(1140.45340526994 - 0.666050657851911*m.x1) + 4.211405830152e-8*log(
1345.26734846551 - 0.600312508265894*m.x1) + 1.304426579688e-8*log(1316.09032651707 -
0.609677317142649*m.x1) + 3.090351352848e-8*log(1820.87972878856 - 0.447657482368638*m.x1) +
3.5317243386384e-7*log(1010.3724312703 - 0.707802124542429*m.x1) + 1.0939038426288e-7*log(
362.464060898813 - 0.915758168476623*m.x1) + 2.5915959993216e-7*log(1165.73400069356 -
0.657936466319577*m.x1) + 6.93124892274e-8*log(1296.55636528799 - 0.615947039043656*m.x1) +
1.6421005083468e-7*log(2012.48192595162 - 0.386159843577085*m.x1) + 3.275257556364e-8*log(
1631.26886676703 - 0.508515971865144*m.x1) + 1.4468599440528e-7*log(1334.31202758498 -
0.603828785095344*m.x1) + 4.481453303148e-8*log(1401.59969118774 - 0.582231786247527*m.x1) +
1.0617127050924e-7*log(1327.97752237393 - 0.605861940868924*m.x1) + 2.8441993185e-8*log(
1298.7307493202 - 0.615249137423596*m.x1) + 6.73826383638e-8*log(937.204525824623 -
0.731286476428035*m.x1) + 1.3475775399e-8*log(1534.49263325395 - 0.539577775645657*m.x1) +
4.0845740862312e-7*log(1531.19227562112 - 0.540637075615032*m.x1) + 9.6768705563988e-7*log(
2090.8347738355 - 0.361011305570753*m.x1) + 1.9065274216788e-7*log(1021.75459043966 -
0.704148847473432*m.x1) + 1.0481991816708e-7*log(2347.09064275551 - 0.278762087668273*m.x1) +
6.80740688226184e-6*log(100 + 0.77*m.x343*(3115.6025 + m.x1)/(0.000162399166573692 + m.x343) -
m.x1) + 3.133108432017e-5*log(100 + 0.77*m.x344*(3115.6025 + m.x1)/(0.00102761021535188 + m.x344)
- m.x1) + 2.29560430719712e-5*log(100 + 0.77*m.x345*(3115.6025 + m.x1)/(0.000198774509727227 +
m.x345) - m.x1) + 7.11032362765496e-6*log(100 + 0.77*m.x346*(3115.6025 + m.x1)/(
0.00165899091142481 + m.x346) - m.x1) + 1.68452524472295e-5*log(100 + 0.77*m.x347*(3115.6025 +
m.x1)/(0.000259196116269397 + m.x347) - m.x1) + 1.49067237206526e-5*log(100 + 0.77*m.x348*(
3115.6025 + m.x1)/(7.59842616013031e-5 + m.x348) - m.x1) + 1.0922041051192e-5*log(100 + 0.77*
m.x349*(3115.6025 + m.x1)/(4.48753624906532e-5 + m.x349) - m.x1) + 3.38295443417492e-6*log(100 +
0.77*m.x350*(3115.6025 + m.x1)/(0.000149196331665396 + m.x350) - m.x1) + 8.01464506449648e-6*log(
100 + 0.77*m.x351*(3115.6025 + m.x1)/(6.81239405829684e-5 + m.x351) - m.x1) + 9.49280226858643e-5
*log(100 + 0.77*m.x352*(3115.6025 + m.x1)/(7.92188022489237e-5 + m.x352) - m.x1) +
2.9402669855289e-5*log(100 + 0.77*m.x353*(3115.6025 + m.x1)/(0.00124884174097374 + m.x353) - m.x1
) + 6.96586294547383e-5*log(100 + 0.77*m.x354*(3115.6025 + m.x1)/(0.00021632364140928 + m.x354)
- m.x1) + 1.83788613724093e-5*log(100 + 0.77*m.x355*(3115.6025 + m.x1)/(0.000121254343302067 +
m.x355) - m.x1) + 4.35418385737073e-5*log(100 + 0.77*m.x356*(3115.6025 + m.x1)/(
3.45972725365983e-5 + m.x356) - m.x1) + 8.49737657262922e-6*log(100 + 0.77*m.x357*(3115.6025 +
m.x1)/(0.000244012541695467 + m.x357) - m.x1) + 1.25395451278306e-6*log(100 + 0.77*m.x358*(
3115.6025 + m.x1)/(9.1928203667674e-5 + m.x358) - m.x1) + 9.1876251123084e-7*log(100 + 0.77*
m.x359*(3115.6025 + m.x1)/(6.52289285131175e-5 + m.x359) - m.x1) + 2.8457438879196e-7*log(100 +
0.77*m.x360*(3115.6025 + m.x1)/(6.8483079625303e-5 + m.x360) - m.x1) + 6.7419267675416e-7*log(100
+ 0.77*m.x361*(3115.6025 + m.x1)/(2.77432634727662e-5 + m.x361) - m.x1) + 7.70482839509528e-6*
log(100 + 0.77*m.x362*(3115.6025 + m.x1)/(0.000115123356345274 + m.x362) - m.x1) +
2.38646637733896e-6*log(100 + 0.77*m.x363*(3115.6025 + m.x1)/(0.000573106862914264 + m.x363) -
m.x1) + 5.65383946468672e-6*log(100 + 0.77*m.x364*(3115.6025 + m.x1)/(8.80774917424437e-5 +
m.x364) - m.x1) + 1.5121249110283e-6*log(100 + 0.77*m.x365*(3115.6025 + m.x1)/(
7.07504189214491e-5 + m.x365) - m.x1) + 3.58241510694706e-6*log(100 + 0.77*m.x366*(3115.6025 +
m.x1)/(1.79104496648944e-5 + m.x366) - m.x1) + 7.1453191137938e-7*log(100 + 0.77*m.x367*(
3115.6025 + m.x1)/(3.98963455240416e-5 + m.x367) - m.x1) + 3.15647726485976e-6*log(100 + 0.77*
m.x368*(3115.6025 + m.x1)/(6.64327517710652e-5 + m.x368) - m.x1) + 9.7767620999266e-7*log(100 +
0.77*m.x369*(3115.6025 + m.x1)/(5.93588627866824e-5 + m.x369) - m.x1) + 2.31623802235458e-6*log(
100 + 0.77*m.x370*(3115.6025 + m.x1)/(6.7138618169301e-5 + m.x370) - m.x1) + 6.204920194575e-7*
log(100 + 0.77*m.x371*(3115.6025 + m.x1)/(7.04943796210888e-5 + m.x371) - m.x1) +
1.4700231830721e-6*log(100 + 0.77*m.x372*(3115.6025 + m.x1)/(0.000131337542197591 + m.x372) -
m.x1) + 2.939882249705e-7*log(100 + 0.77*m.x373*(3115.6025 + m.x1)/(4.73375652638053e-5 + m.x373)
- m.x1) + 8.91092831259804e-6*log(100 + 0.77*m.x374*(3115.6025 + m.x1)/(4.76090777999194e-5 +
m.x374) - m.x1) + 2.11111117090605e-5*log(100 + 0.77*m.x375*(3115.6025 + m.x1)/(
1.44347085775828e-5 + m.x375) - m.x1) + 4.15929025203646e-6*log(100 + 0.77*m.x376*(3115.6025 +
m.x1)/(0.000112832407263379 + m.x376) - m.x1) + 2.28675684857286e-6*log(100 + 0.77*m.x377*(
3115.6025 + m.x1)/(4.75988174724677e-6 + m.x377) - m.x1) + 8.43089005274e-8*log(268.6379716465 -
0.945873078595071*m.x1) + 3.8803164215845e-7*log(128.327406131493 - 0.990907888239436*m.x1) +
2.8430778199855e-7*log(239.572993467521 - 0.955201925320216*m.x1) + 8.80604873206e-8*log(
117.625736786455 - 0.994342751751401*m.x1) + 2.086263885053e-7*log(208.508539802713 -
0.965172534107701*m.x1) + 1.8461794764005e-7*log(433.744528262518 - 0.892879618544882*m.x1) +
1.3526814078655e-7*log(615.400559449551 - 0.834574352970396*m.x1) + 4.18974763537e-8*log(
282.426455742256 - 0.941447454949001*m.x1) + 9.92603975628e-8*log(466.371952966359 -
0.882407350434993*m.x1) + 1.17567193504175e-6*log(421.946311777256 - 0.896666435536223*m.x1) +
3.641484651856e-7*log(123.358083734547 - 0.992502867829081*m.x1) + 8.627135946402e-7*log(
228.858395401842 - 0.958640938501673*m.x1) + 2.2761994722115e-7*log(320.599393198273 -
0.929195270193077*m.x1) + 5.3926033812615e-7*log(728.407398947384 - 0.798303089387243*m.x1) +
1.0523896817045e-7*log(214.936952702276 - 0.96310923723348*m.x1) + 1.553007306785e-8*log(
382.680820667042 - 0.909269292001453*m.x1) + 1.13787611799e-8*log(480.056384794622 -
0.878015123946453*m.x1) + 3.5244189531e-9*log(464.742740831163 - 0.882930270844512*m.x1) +
8.3497937326e-9*log(836.025395487855 - 0.763761456897067*m.x1) + 9.54233560558e-8*log(
331.215308112915 - 0.925787930869578*m.x1) + 2.95560938106e-8*log(150.321278984262 -
0.98384862029599*m.x1) + 7.00221093392e-8*log(393.527383487514 - 0.905787922725215*m.x1) +
1.872748183175e-8*log(454.782462930071 - 0.886127173498522*m.x1) + 4.436777235785e-8*log(
1075.75197813101 - 0.686817564778881*m.x1) + 8.84939021305e-9*log(664.584532393001 -
0.818788008934708*m.x1) + 3.90925843486e-8*log(474.243770813553 - 0.879880770793594*m.x1) +
1.210840012385e-8*log(511.198542742875 - 0.868019574787581*m.x1) + 2.868632423505e-8*log(
470.917477630236 - 0.880948395172287*m.x1) + 7.68471766875e-9*log(455.879896829577 -
0.885774935400271*m.x1) + 1.820605708725e-8*log(305.111286054903 - 0.934166413701715*m.x1) +
3.64100816125e-9*log(594.114163141561 - 0.841406545558504*m.x1) + 1.103607558219e-7*log(
591.874012592349 - 0.842125555942278*m.x1) + 2.6145853301935e-7*log(1202.70807178794 -
0.646069075953064*m.x1) + 5.151230037935e-8*log(335.44915647061 - 0.924429012856868*m.x1) +
2.832120350835e-8*log(1828.83738966731 - 0.445103350100885*m.x1) + 8.1546287696304e-7*log(100 +
0.77*m.x378*(3115.6025 + m.x1)/(0.00029817724214172 + m.x378) - m.x1) + 3.75316718979612e-6*log(
100 + 0.77*m.x379*(3115.6025 + m.x1)/(0.00188677064343949 + m.x379) - m.x1) + 2.74991656161108e-6
*log(100 + 0.77*m.x380*(3115.6025 + m.x1)/(0.00036496514341188 + m.x380) - m.x1) +
8.5174943437776e-7*log(138.743343180511 - 0.987564734852886*m.x1) + 2.01790171520088e-6*log(100
+ 0.77*m.x381*(3115.6025 + m.x1)/(0.000475903816218067 + m.x381) - m.x1) + 1.78568433201948e-6*
log(100 + 0.77*m.x382*(3115.6025 + m.x1)/(0.000139512893129108 + m.x382) - m.x1) +
1.30835708397588e-6*log(100 + 0.77*m.x383*(3115.6025 + m.x1)/(8.23945843435174e-5 + m.x383) -
m.x1) + 4.0524590394552e-7*log(100 + 0.77*m.x384*(3115.6025 + m.x1)/(0.000273935831397647 +
m.x384) - m.x1) + 9.6007857840288e-7*log(100 + 0.77*m.x385*(3115.6025 + m.x1)/(
0.000125080744904183 + m.x385) - m.x1) + 1.13714781300258e-5*log(100 + 0.77*m.x386*(3115.6025 +
m.x1)/(0.000145451756180262 + m.x386) - m.x1) + 3.52216139938176e-6*log(100 + 0.77*m.x387*(
3115.6025 + m.x1)/(0.00229296857891227 + m.x387) - m.x1) + 8.34444412724592e-6*log(100 + 0.77*
m.x388*(3115.6025 + m.x1)/(0.000397186686153369 + m.x388) - m.x1) + 2.20161354084804e-6*log(100
+ 0.77*m.x389*(3115.6025 + m.x1)/(0.000222632212013906 + m.x389) - m.x1) + 5.21589991103604e-6*
log(100 + 0.77*m.x390*(3115.6025 + m.x1)/(6.35232281558983e-5 + m.x390) - m.x1) +
1.01790524151132e-6*log(100 + 0.77*m.x391*(3115.6025 + m.x1)/(0.000448025616546069 + m.x391) -
m.x1) + 1.5021187542636e-7*log(100 + 0.77*m.x392*(3115.6025 + m.x1)/(0.000168787185445507 +
m.x392) - m.x1) + 1.1005904797704e-7*log(100 + 0.77*m.x393*(3115.6025 + m.x1)/(
0.000119765282188656 + m.x393) - m.x1) + 3.408931679976e-8*log(782.545765405689 -
0.780926557413634*m.x1) + 8.076189793296e-8*log(100 + 0.77*m.x394*(3115.6025 + m.x1)/(
5.09387453448955e-5 + m.x394) - m.x1) + 9.2296547543568e-7*log(100 + 0.77*m.x395*(3115.6025 +
m.x1)/(0.000211375252874563 + m.x395) - m.x1) + 2.8587607168176e-7*log(208.821730508207 -
0.965072010788216*m.x1) + 6.7727642485632e-7*log(100 + 0.77*m.x396*(3115.6025 + m.x1)/(
0.00016171698498591 + m.x396) - m.x1) + 1.811382442098e-7*log(100 + 0.77*m.x397*(3115.6025 + m.x1
)/(0.000129903159230785 + m.x397) - m.x1) + 4.2913940361036e-7*log(100 + 0.77*m.x398*(3115.6025
+ m.x1)/(3.28849500848452e-5 + m.x398) - m.x1) + 8.559415622028e-8*log(100 + 0.77*m.x399*(
3115.6025 + m.x1)/(7.32527298684983e-5 + m.x399) - m.x1) + 3.7811608384656e-7*log(100 + 0.77*
m.x400*(3115.6025 + m.x1)/(0.000121975593403019 + m.x400) - m.x1) + 1.1711635116396e-7*log(100 +
0.77*m.x401*(3115.6025 + m.x1)/(0.000108987394306425 + m.x401) - m.x1) + 2.7746337983148e-7*log(
100 + 0.77*m.x402*(3115.6025 + m.x1)/(0.000123271617886315 + m.x402) - m.x1) + 7.4329067745e-8*
log(100 + 0.77*m.x403*(3115.6025 + m.x1)/(0.000129433051569077 + m.x403) - m.x1) +
1.760948559126e-7*log(100 + 0.77*m.x404*(3115.6025 + m.x1)/(0.000241145733370368 + m.x404) - m.x1
) + 3.5217005223e-8*log(976.093683297966 - 0.718804409966301*m.x1) + 1.06744482354024e-6*log(100
+ 0.77*m.x405*(3115.6025 + m.x1)/(8.74138939182849e-5 + m.x405) - m.x1) + 2.52891125621076e-6*
log(100 + 0.77*m.x406*(3115.6025 + m.x1)/(2.65032246506635e-5 + m.x406) - m.x1) +
4.9824358286676e-7*log(100 + 0.77*m.x407*(3115.6025 + m.x1)/(0.000207168895825208 + m.x407) -
m.x1) + 2.7393181440516e-7*log(100 + 0.77*m.x408*(3115.6025 + m.x1)/(8.73950551754048e-6 + m.x408
) - m.x1) + 1.27625335484744e-6*log(100 + 0.77*m.x409*(3115.6025 + m.x1)/(0.000344826568804241 +
m.x409) - m.x1) + 5.87395496790682e-6*log(100 + 0.77*m.x410*(3115.6025 + m.x1)/(
0.00218195272860088 + m.x410) - m.x1) + 4.30380135804238e-6*log(100 + 0.77*m.x411*(3115.6025 +
m.x1)/(0.000422063324591525 + m.x411) - m.x1) + 1.33304421798136e-6*log(133.575365188442 -
0.989223475976656*m.x1) + 3.15814969207268e-6*log(100 + 0.77*m.x412*(3115.6025 + m.x1)/(
0.000550358165662165 + m.x412) - m.x1) + 2.79471412349978e-6*log(100 + 0.77*m.x413*(3115.6025 +
m.x1)/(0.000161339449973174 + m.x413) - m.x1) + 2.04766539953518e-6*log(100 + 0.77*m.x414*(
3115.6025 + m.x1)/(9.52850781070775e-5 + m.x414) - m.x1) + 6.3423665142772e-7*log(100 + 0.77*
m.x415*(3115.6025 + m.x1)/(0.000316792630231961 + m.x415) - m.x1) + 1.50258649561968e-6*log(100
+ 0.77*m.x416*(3115.6025 + m.x1)/(0.000144649416498018 + m.x416) - m.x1) + 1.77971156296763e-5*
log(100 + 0.77*m.x417*(3115.6025 + m.x1)/(0.000168207438132896 + m.x417) - m.x1) +
5.51241562217536e-6*log(100 + 0.77*m.x418*(3115.6025 + m.x1)/(0.00265169964603287 + m.x418) -
m.x1) + 1.30596071416471e-5*log(100 + 0.77*m.x419*(3115.6025 + m.x1)/(0.000459325873353868 +
m.x419) - m.x1) + 3.44567085389494e-6*log(100 + 0.77*m.x420*(3115.6025 + m.x1)/(
0.000257462646118264 + m.x420) - m.x1) + 8.16322845351294e-6*log(100 + 0.77*m.x421*(3115.6025 +
m.x1)/(7.34613300701079e-5 + m.x421) - m.x1) + 1.59308904929402e-6*log(100 + 0.77*m.x422*(
3115.6025 + m.x1)/(0.000518118468667565 + m.x422) - m.x1) + 2.3509152331346e-7*log(100 + 0.77*
m.x423*(3115.6025 + m.x1)/(0.000195193655951906 + m.x423) - m.x1) + 1.7224969177644e-7*log(100 +
0.77*m.x424*(3115.6025 + m.x1)/(0.000138502358605079 + m.x424) - m.x1) + 5.335203619836e-8*log(
713.834896128684 - 0.802980355764677*m.x1) + 1.2639771360856e-7*log(100 + 0.77*m.x425*(3115.6025
+ m.x1)/(5.89080261468274e-5 + m.x425) - m.x1) + 1.44450203401048e-6*log(100 + 0.77*m.x426*(
3115.6025 + m.x1)/(0.00024444455470623 + m.x426) - m.x1) + 4.4741496622536e-7*log(194.68097059979
- 0.969610702713266*m.x1) + 1.05998241465152e-6*log(100 + 0.77*m.x427*(3115.6025 + m.x1)/(
0.000187017334554172 + m.x427) - m.x1) + 2.834933366003e-7*log(100 + 0.77*m.x428*(3115.6025 +
m.x1)/(0.000150226289413102 + m.x428) - m.x1) + 6.7163155923746e-7*log(100 + 0.77*m.x429*(
3115.6025 + m.x1)/(3.80297450657429e-5 + m.x429) - m.x1) + 1.3396051753858e-7*log(100 + 0.77*
m.x430*(3115.6025 + m.x1)/(8.47129959170147e-5 + m.x430) - m.x1) + 5.9177668801816e-7*log(100 +
0.77*m.x431*(3115.6025 + m.x1)/(0.000141058469281279 + m.x431) - m.x1) + 1.8329483818706e-7*log(
100 + 0.77*m.x432*(3115.6025 + m.x1)/(0.000126038288340387 + m.x432) - m.x1) + 4.3424854688178e-7
*log(100 + 0.77*m.x433*(3115.6025 + m.x1)/(0.000142557254609265 + m.x433) - m.x1) +
1.163299087575e-7*log(100 + 0.77*m.x434*(3115.6025 + m.x1)/(0.000149682634200548 + m.x434) - m.x1
) + 2.756001002361e-7*log(100 + 0.77*m.x435*(3115.6025 + m.x1)/(0.000278872576668224 + m.x435) -
m.x1) + 5.51169431905e-8*log(100 + 0.77*m.x436*(3115.6025 + m.x1)/(0.000100513140244829 + m.x436)
- m.x1) + 1.67062177279164e-6*log(100 + 0.77*m.x437*(3115.6025 + m.x1)/(0.000101089650199842 +
m.x437) - m.x1) + 3.95791343300686e-6*log(100 + 0.77*m.x438*(3115.6025 + m.x1)/(
3.06496094500486e-5 + m.x438) - m.x1) + 7.7978417182286e-7*log(100 + 0.77*m.x439*(3115.6025 +
m.x1)/(0.000239580120190449 + m.x439) - m.x1) + 4.2872141333526e-7*log(100 + 0.77*m.x440*(
3115.6025 + m.x1)/(1.0106786416741e-5 + m.x440) - m.x1) + 9.273520298852e-8*log(358.539933407436
- 0.917017676867496*m.x1) + 4.2681369198781e-7*log(144.935947672355 - 0.98557712427296*m.x1) +
3.1272308985079e-7*log(315.477181856525 - 0.930839321814473*m.x1) + 9.686174432188e-8*log(
128.034017517148 - 0.991002055776644*m.x1) + 2.2947767514194e-7*log(268.780873913804 -
0.945827211939327*m.x1) + 2.0306969658749e-7*log(592.240115646012 - 0.842008049600033*m.x1) +
1.4878759437319e-7*log(829.685004457404 - 0.765796501813885*m.x1) + 4.608494417626e-8*log(
378.760424559288 - 0.910527602748012*m.x1) + 1.0918103615544e-7*log(636.338426930868 -
0.827854026009137*m.x1) + 1.29317515543415e-6*log(576.130636396736 - 0.847178631934999*m.x1) +
4.0054349689888e-7*log(137.098673227772 - 0.988092616684005*m.x1) + 9.4893801033396e-7*log(
299.450015262575 - 0.935983484651018*m.x1) + 2.5036955621227e-7*log(434.053417972084 -
0.892780475695444*m.x1) + 5.9315702858127e-7*log(968.024268423618 - 0.721394411378339*m.x1) +
1.1575713850541e-7*log(278.501752054383 - 0.942707148278902*m.x1) + 1.708223531993e-8*log(
521.879880291747 - 0.864591237074772*m.x1) + 1.251601813302e-8*log(654.639033329393 -
0.821980168096093*m.x1) + 3.87666907038e-9*log(634.152014126405 - 0.828555788446567*m.x1) +
9.18431875948e-9*log(1093.6490504942 - 0.681073227250844*m.x1) + 1.0496049928684e-7*log(
449.2537526685 - 0.887901697129688*m.x1) + 3.251009492388e-8*log(179.393118324343 -
0.974517571376855*m.x1) + 7.702051008416e-8*log(536.964765902902 - 0.859749513648515*m.x1) +
2.059921097615e-8*log(620.749808952132 - 0.83285742999881*m.x1) + 4.880213536193e-8*log(
1354.01132259096 - 0.597505996804482*m.x1) + 9.73384770289e-9*log(890.73349355316 -
0.746202060900529*m.x1) + 4.299971559628e-8*log(646.879588813031 - 0.824470679808149*m.x1) +
1.331858126873e-8*log(695.864610493836 - 0.808748192205573*m.x1) + 3.155339571849e-8*log(
642.42996136499 - 0.825898855401166*m.x1) + 8.45277127875e-9*log(622.229472198603 -
0.832382509579254*m.x1) + 2.002567213005e-8*log(411.739691140382 - 0.89994240563731*m.x1) +
4.00491085525e-9*log(802.850834872808 - 0.774409336597718*m.x1) + 1.2139082622462e-7*log(
800.012103104391 - 0.775320470726163*m.x1) + 2.8759015929463e-7*log(1482.01660573493 -
0.556420754658229*m.x1) + 5.666072742263e-8*log(455.294784271713 - 0.885962736173272*m.x1) +
3.115178278683e-8*log(2030.55035493416 - 0.380360506536326*m.x1) + 1.4513664887868e-7*log(
244.798336775476 - 0.953524771925984*m.x1) + 6.6799130162379e-7*log(124.108480080755 -
0.992262016710811*m.x1) + 4.8943205843361e-7*log(219.621784363451 - 0.961605569271609*m.x1) +
1.5159495555492e-7*log(114.990579362077 - 0.995188545598459*m.x1) + 3.5914754795646e-7*log(
192.815416502162 - 0.970209480669578*m.x1) + 3.1781733690891e-7*log(389.595030572337 -
0.907050071190937*m.x1) + 2.3286230197521e-7*log(552.485529158339 - 0.854767888664122*m.x1) +
7.212594727734e-8*log(256.774613290639 - 0.949680803860364*m.x1) + 1.7087545180296e-7*log(
418.572098212241 - 0.897749440690126*m.x1) + 2.02390357085985e-6*log(379.146592813175 -
0.910403656174632*m.x1) + 6.2687673069792e-7*log(119.873056718799 - 0.993621440245089*m.x1) +
1.48514995789164e-6*log(210.363968705052 - 0.964577005986787*m.x1) + 3.9184470620493e-7*log(
290.040074396276 - 0.939003748264974*m.x1) + 9.2832948667593e-7*log(655.767806524211 -
0.821617871174448*m.x1) + 1.8116748144219e-7*log(198.35403749885 - 0.968431776037267*m.x1) +
2.673481385487e-8*log(344.486646749132 - 0.921528292922755*m.x1) + 1.958838575418e-8*log(
430.76167441788 - 0.893837010845292*m.x1) + 6.06724026642e-9*log(417.122283752445 -
0.898214780687702*m.x1) + 1.437405865332e-8*log(755.551665629421 - 0.789590724224473*m.x1) +
1.6427003597556e-7*log(299.31993900287 - 0.936025234604585*m.x1) + 5.088042167292e-8*log(
142.88596479926 - 0.986235097449286*m.x1) + 1.2054212821344e-7*log(354.043606427674 -
0.918460841385358*m.x1) + 3.223911043785e-8*log(408.265351965367 - 0.90105754762831*m.x1) +
7.637852893287e-8*log(982.98447692449 - 0.716592704966538*m.x1) + 1.523410733751e-8*log(
597.250560461113 - 0.840399871144951*m.x1) + 6.729736306452e-8*log(425.581399511489 -
0.895499698850707*m.x1) + 2.084444947407e-8*log(458.581981464026 - 0.884907660247408*m.x1) +
4.938312493791e-8*log(422.618704492563 - 0.896450620869458*m.x1) + 1.322913907125e-8*log(
409.240663257227 - 0.900744506638049*m.x1) + 3.134148468795e-8*log(276.523652448133 -
0.943342049427636*m.x1) + 6.26794703475e-9*log(533.199969835709 - 0.860957882195913*m.x1) +
1.8998457063858e-7*log(531.173474307521 - 0.861608316751729*m.x1) + 4.5009738077217e-7*log(
1106.40538864313 - 0.676978886541806*m.x1) + 8.867773872417e-8*log(303.024440850973 -
0.934836218403672*m.x1) + 4.875457447197e-8*log(1747.34237541307 - 0.471260414185356*m.x1) +
3.14345285277512e-6*log(100 + 0.77*m.x441*(3115.6025 + m.x1)/(0.000157156288200091 + m.x441) -
m.x1) + 1.44677390510339e-5*log(100 + 0.77*m.x442*(3115.6025 + m.x1)/(0.000994434950427626 +
m.x442) - m.x1) + 1.06004004654177e-5*log(100 + 0.77*m.x443*(3115.6025 + m.x1)/(
0.000192357293430744 + m.x443) - m.x1) + 3.28333056596728e-6*log(100 + 0.77*m.x444*(3115.6025 +
m.x1)/(0.00160543221555821 + m.x444) - m.x1) + 7.77862375156964e-6*log(100 + 0.77*m.x445*(
3115.6025 + m.x1)/(0.000250828254899286 + m.x445) - m.x1) + 6.88347031632794e-6*log(100 + 0.77*
m.x446*(3115.6025 + m.x1)/(7.35311933356927e-5 + m.x446) - m.x1) + 5.04346540383214e-6*log(100 +
0.77*m.x447*(3115.6025 + m.x1)/(4.34266107976881e-5 + m.x447) - m.x1) + 1.56214516787956e-6*log(
100 + 0.77*m.x448*(3115.6025 + m.x1)/(0.000144379692287175 + m.x448) - m.x1) +
3.70091862110064e-6*log(100 + 0.77*m.x449*(3115.6025 + m.x1)/(6.59246341312025e-5 + m.x449) -
m.x1) + 4.38348652991099e-5*log(100 + 0.77*m.x450*(3115.6025 + m.x1)/(7.66613103980958e-5 +
m.x450) - m.x1) + 1.35772560733293e-5*log(100 + 0.77*m.x451*(3115.6025 + m.x1)/(
0.00120852425970865 + m.x451) - m.x1) + 3.21662302940158e-5*log(100 + 0.77*m.x452*(3115.6025 +
m.x1)/(0.000209339870709147 + m.x452) - m.x1) + 8.48679757374262e-6*log(100 + 0.77*m.x453*(
3115.6025 + m.x1)/(0.000117339780268179 + m.x453) - m.x1) + 2.01062929022566e-5*log(100 + 0.77*
m.x454*(3115.6025 + m.x1)/(3.34803376668285e-5 + m.x454) - m.x1) + 3.92382930685946e-6*log(100 +
0.77*m.x455*(3115.6025 + m.x1)/(0.000236134865321051 + m.x455) - m.x1) + 5.7903794479058e-7*log(
100 + 0.77*m.x456*(3115.6025 + m.x1)/(8.89604027786557e-5 + m.x456) - m.x1) + 4.2425650279212e-7*
log(100 + 0.77*m.x457*(3115.6025 + m.x1)/(6.31230843400847e-5 + m.x457) - m.x1) +
1.3140777240828e-7*log(100 + 0.77*m.x458*(3115.6025 + m.x1)/(6.62721787647855e-5 + m.x458) - m.x1
) + 3.1132161331288e-7*log(100 + 0.77*m.x459*(3115.6025 + m.x1)/(2.68476027428296e-5 + m.x459) -
m.x1) + 3.55785473346904e-6*log(100 + 0.77*m.x460*(3115.6025 + m.x1)/(0.000111406725478175 +
m.x460) - m.x1) + 1.10199737897928e-6*log(100 + 0.77*m.x461*(3115.6025 + m.x1)/(
0.000554604738545467 + m.x461) - m.x1) + 2.61077060645696e-6*log(100 + 0.77*m.x462*(3115.6025 +
m.x1)/(8.52340068502479e-5 + m.x462) - m.x1) + 6.982531597619e-7*log(100 + 0.77*m.x463*(3115.6025
+ m.x1)/(6.84663195069478e-5 + m.x463) - m.x1) + 1.65425002244258e-6*log(100 + 0.77*m.x464*(
3115.6025 + m.x1)/(1.73322305077971e-5 + m.x464) - m.x1) + 3.2994904140034e-7*log(100 + 0.77*
m.x465*(3115.6025 + m.x1)/(3.86083359144677e-5 + m.x465) - m.x1) + 1.45756491929368e-6*log(100 +
0.77*m.x466*(3115.6025 + m.x1)/(6.42880434889493e-5 + m.x466) - m.x1) + 4.5146105184338e-7*log(
100 + 0.77*m.x467*(3115.6025 + m.x1)/(5.74425272256582e-5 + m.x467) - m.x1) + 1.06956806681394e-6
*log(100 + 0.77*m.x468*(3115.6025 + m.x1)/(6.49711217673195e-5 + m.x468) - m.x1) +
2.865242877975e-7*log(100 + 0.77*m.x469*(3115.6025 + m.x1)/(6.821854615363e-5 + m.x469) - m.x1)
+ 6.788118660153e-7*log(100 + 0.77*m.x470*(3115.6025 + m.x1)/(0.000127097454183856 + m.x470) -
m.x1) + 1.357547948065e-7*log(100 + 0.77*m.x471*(3115.6025 + m.x1)/(4.58093240639472e-5 + m.x471)
- m.x1) + 4.11479488586172e-6*log(100 + 0.77*m.x472*(3115.6025 + m.x1)/(4.60720711166307e-5 +
m.x472) - m.x1) + 9.74846743772878e-6*log(100 + 0.77*m.x473*(3115.6025 + m.x1)/(
1.39686998964588e-5 + m.x473) - m.x1) + 1.92063336809678e-6*log(100 + 0.77*m.x474*(3115.6025 +
m.x1)/(0.000109189737166214 + m.x474) - m.x1) + 1.05595456002198e-6*log(100 + 0.77*m.x475*(
3115.6025 + m.x1)/(4.60621420325593e-6 + m.x475) - m.x1) + 6.944196785648e-8*log(438.328502947439
- 0.891408322163229*m.x1) + 3.1960659732844e-7*log(160.672269148748 - 0.98052631259965*m.x1) +
2.3417328105796e-7*log(383.737721647126 - 0.908930063560058*m.x1) + 7.253200423312e-8*log(
137.946787402231 - 0.987820401542806*m.x1) + 1.7183745576056e-7*log(323.76453259191 -
0.928179370573778*m.x1) + 1.5206263520876e-7*log(723.154378888364 - 0.799989126055598*m.x1) +
1.1141511543556e-7*log(994.136523516822 - 0.713013286028361*m.x1) + 3.450932449624e-8*log(
463.72887675709 - 0.883255685936479*m.x1) + 8.175693543456e-8*log(774.830149075393 -
0.783403001802896*m.x1) + 9.683553244346e-7*log(704.117904521328 - 0.806099171983163*m.x1) +
2.9993495178112e-7*log(150.148891888383 - 0.983903950555829*m.x1) + 7.1058369085104e-7*log(
363.245448869307 - 0.915507370125263*m.x1) + 1.8748171260148e-7*log(532.438202944837 -
0.861202382863399*m.x1) + 4.4416780235748e-7*log(1144.15218362596 - 0.664863478692818*m.x1) +
8.668125191084e-8*log(336.316624140497 - 0.924150585917011*m.x1) + 1.279151819132e-8*log(
639.374735800217 - 0.826879476505679*m.x1) + 9.37224377448e-9*log(796.090332382495 -
0.776579222676033*m.x1) + 2.90292704712e-9*log(772.282933330948 - 0.784220569430488*m.x1) +
6.87740089552e-9*log(1275.69246176927 - 0.622643626146379*m.x1) + 7.859651332816e-8*log(
551.137259264743 - 0.855200636389031*m.x1) + 2.434420688112e-8*log(206.64947926604 -
0.965769227856879*m.x1) + 5.767449267584e-8*log(657.476234564291 - 0.821069525215655*m.x1) +
1.54250996426e-8*log(756.635274889761 - 0.789242923354388*m.x1) + 3.654401139932e-8*log(
1535.04726916935 - 0.5393997568145*m.x1) + 7.28889911836e-9*log(1061.02290920942 -
0.691545083427869*m.x1) + 3.219904385872e-8*log(787.089182577275 - 0.779468278582626*m.x1) +
9.97321904252e-9*log(843.590944378369 - 0.761333178934614*m.x1) + 2.362781145276e-8*log(
781.918774588875 - 0.781127799650669*m.x1) + 6.329603565e-9*log(758.365707096016 -
0.788687514823853*m.x1) + 1.49956223262e-8*log(504.84109778787 - 0.870060093420817*m.x1) +
2.998957051e-9*log(964.386178144426 - 0.722562111776317*m.x1) + 9.089986953288e-8*log(
961.226291333032 - 0.723576325499472*m.x1) + 2.1535324185412e-7*log(1656.41268033145 -
0.500445682550503*m.x1) + 4.242868172612e-8*log(558.546332794591 - 0.852822581573037*m.x1) +
2.332707568692e-8*log(2135.61678696677 - 0.34663783747549*m.x1) + 1.0868132303496e-7*log(
952.855197136797 - 0.726263155477377*m.x1) + 5.0020569578538e-7*log(292.358397193926 -
0.938259647309332*m.x1) + 3.6649684319742e-7*log(845.268759609223 - 0.760794658622458*m.x1) +
1.1351743658424e-7*log(222.900839352747 - 0.960553106709618*m.x1) + 2.6893710843012e-7*log(
716.158724669567 - 0.802234487657021*m.x1) + 2.3798819199402e-7*log(1397.99930308635 -
0.583387385558217*m.x1) + 1.7437210559262e-7*log(1698.31981573055 - 0.486994950180406*m.x1) +
5.400940035348e-8*log(1000.01299130032 - 0.711127144332334*m.x1) + 1.2795507075312e-7*log(
1462.69503446476 - 0.562622306772202*m.x1) + 1.5155408332467e-6*log(1373.12671852222 -
0.591370619800755*m.x1) + 4.6941825513024e-7*log(260.562902565146 - 0.948464894810828*m.x1) +
1.11211099040808e-6*log(802.513881672884 - 0.774517486851136*m.x1) + 2.9342141646246e-7*log(
1119.29289935334 - 0.672842444004541*m.x1) + 6.9515231062446e-7*log(1830.61326417666 -
0.444533356172151*m.x1) + 1.3566195531018e-7*log(744.203496969942 - 0.793233091522445*m.x1) +
2.001958129314e-8*log(1283.98039223831 - 0.619983488831353*m.x1) + 1.466818819596e-8*log(
1488.17838576561 - 0.554443037657848*m.x1) + 4.54327493724e-9*log(1459.59879689333 -
0.563616091303903*m.x1) + 1.076359226904e-8*log(1931.71652847139 - 0.412082726062971*m.x1) +
1.2300879883032e-7*log(1149.82151209152 - 0.663043821510761*m.x1) + 3.810031158024e-8*log(
424.287526310851 - 0.895914987129825*m.x1) + 9.026443752768e-8*log(1309.64325245314 -
0.611746603601347*m.x1) + 2.41413123627e-8*log(1440.37165493351 - 0.569787334894773*m.x1) +
5.719382140914e-8*log(2099.27686813522 - 0.358301687029966*m.x1) + 1.140761450322e-8*log(
1759.78384606747 - 0.467267134986742*m.x1) + 5.039365667544e-8*log(1477.46685899388 -
0.55788106506081*m.x1) + 1.560875467554e-8*log(1542.89054316818 - 0.536882338755287*m.x1) +
3.697910483202e-8*log(1471.26264848806 - 0.559872400767407*m.x1) + 9.9062528175e-9*log(
1442.51559884057 - 0.56909920349577*m.x1) + 2.34691516449e-8*log(1072.76969944935 -
0.687774772471984*m.x1) + 4.6935683145e-9*log(1669.57680330368 - 0.496220457101417*m.x1) +
1.4226437397276e-7*log(1666.47040697755 - 0.497217502239919*m.x1) + 3.3704222341374e-7*log(
2166.0852268286 - 0.336858528381396*m.x1) + 6.640372395774e-8*log(1161.70370083739 -
0.659230052345448*m.x1) + 3.650843324934e-8*log(2377.97102001738 - 0.268850561001483*m.x1) +
2.34922704076588e-6*log(100 + 0.77*m.x476*(3115.6025 + m.x1)/(0.000455053041852194 + m.x476) -
m.x1) + 1.08123154344204e-5*log(100 + 0.77*m.x477*(3115.6025 + m.x1)/(0.00287943075201725 +
m.x477) - m.x1) + 7.92209986363301e-6*log(100 + 0.77*m.x478*(3115.6025 + m.x1)/(
0.000556979122506819 + m.x478) - m.x1) + 2.45376320581172e-6*log(100 + 0.77*m.x479*(3115.6025 +
m.x1)/(0.00464860058445214 + m.x479) - m.x1) + 5.81327416474486e-6*log(100 + 0.77*m.x480*(
3115.6025 + m.x1)/(0.000726284399317671 + m.x480) - m.x1) + 5.14429048527031e-6*log(100 + 0.77*
m.x481*(3115.6025 + m.x1)/(0.000212912849887539 + m.x481) - m.x1) + 3.76918180763861e-6*log(100
+ 0.77*m.x482*(3115.6025 + m.x1)/(0.000125743688446365 + m.x482) - m.x1) + 1.16745306574094e-6*
log(100 + 0.77*m.x483*(3115.6025 + m.x1)/(0.000418057838534042 + m.x483) - m.x1) +
2.76584332820136e-6*log(100 + 0.77*m.x484*(3115.6025 + m.x1)/(0.000190887718448796 + m.x484) -
m.x1) + 3.27595340894288e-5*log(100 + 0.77*m.x485*(3115.6025 + m.x1)/(0.0002219762434489 + m.x485
) - m.x1) + 1.01468221731747e-5*log(100 + 0.77*m.x486*(3115.6025 + m.x1)/(0.0034993358956939 +
m.x486) - m.x1) + 2.40391001695772e-5*log(100 + 0.77*m.x487*(3115.6025 + m.x1)/(
0.000606152932460819 + m.x487) - m.x1) + 6.34252056051713e-6*log(100 + 0.77*m.x488*(3115.6025 +
m.x1)/(0.00033976256726883 + m.x488) - m.x1) + 1.50262304503281e-5*log(100 + 0.77*m.x489*(
3115.6025 + m.x1)/(9.6943810979624e-5 + m.x489) - m.x1) + 2.93243332817479e-6*log(100 + 0.77*
m.x490*(3115.6025 + m.x1)/(0.000683739034450168 + m.x490) - m.x1) + 4.3273803083467e-7*log(100 +
0.77*m.x491*(3115.6025 + m.x1)/(0.000257588813991856 + m.x491) - m.x1) + 3.1706371791138e-7*log(
100 + 0.77*m.x492*(3115.6025 + m.x1)/(0.000182775706075957 + m.x492) - m.x1) + 9.820624223322e-8*
log(100 + 0.77*m.x493*(3115.6025 + m.x1)/(0.000191894049436265 + m.x493) - m.x1) +
2.3266299404612e-7*log(100 + 0.77*m.x494*(3115.6025 + m.x1)/(7.77384311788353e-5 + m.x494) - m.x1
) + 2.65892600857796e-6*log(100 + 0.77*m.x495*(3115.6025 + m.x1)/(0.00032258314250264 + m.x495)
- m.x1) + 8.2356636564972e-7*log(100 + 0.77*m.x496*(3115.6025 + m.x1)/(0.00160588275652981 +
m.x496) - m.x1) + 1.95113246267104e-6*log(100 + 0.77*m.x497*(3115.6025 + m.x1)/(
0.000246798868379189 + m.x497) - m.x1) + 5.2183229112685e-7*log(100 + 0.77*m.x498*(3115.6025 +
m.x1)/(0.000198247281816642 + m.x498) - m.x1) + 1.23628667803267e-6*log(100 + 0.77*m.x499*(
3115.6025 + m.x1)/(5.01862464746856e-5 + m.x499) - m.x1) + 2.4658401014291e-7*log(100 + 0.77*
m.x500*(3115.6025 + m.x1)/(0.000111792158620858 + m.x500) - m.x1) + 1.08929609650532e-6*log(100
+ 0.77*m.x501*(3115.6025 + m.x1)/(0.000186148897250143 + m.x501) - m.x1) + 3.3739475682187e-7*
log(100 + 0.77*m.x502*(3115.6025 + m.x1)/(0.000166327399591117 + m.x502) - m.x1) +
7.9933065395931e-7*log(100 + 0.77*m.x503*(3115.6025 + m.x1)/(0.000188126780871317 + m.x503) -
m.x1) + 2.1413096879625e-7*log(100 + 0.77*m.x504*(3115.6025 + m.x1)/(0.000197529843021106 +
m.x504) - m.x1) + 5.0730304093095e-7*log(100 + 0.77*m.x505*(3115.6025 + m.x1)/(
0.000368016347296246 + m.x505) - m.x1) + 1.0145494454975e-7*log(100 + 0.77*m.x506*(3115.6025 +
m.x1)/(0.000132642940980837 + m.x506) - m.x1) + 3.07514948237178e-6*log(100 + 0.77*m.x507*(
3115.6025 + m.x1)/(0.000133403736790733 + m.x507) - m.x1) + 7.28541650959397e-6*log(100 + 0.77*
m.x508*(3115.6025 + m.x1)/(4.04469935718446e-5 + m.x508) - m.x1) + 1.43536552162597e-6*log(100 +
0.77*m.x509*(3115.6025 + m.x1)/(0.000316163753964881 + m.x509) - m.x1) + 7.8915674018577e-7*log(
100 + 0.77*m.x510*(3115.6025 + m.x1)/(1.33374986684954e-5 + m.x510) - m.x1) + 1.7351096111916e-7*
log(1756.34518082976 - 0.468370826885087*m.x1) + 7.9858404930423e-7*log(725.200186930332 -
0.799332492854807*m.x1) + 5.8511635425957e-7*log(1648.94114589024 - 0.502843785145813*m.x1) +
1.8123187108404e-7*log(529.899097644463 - 0.862017347320634*m.x1) + 4.2936113456502e-7*log(
1498.32765799351 - 0.551185474400694*m.x1) + 3.7995083952567e-7*log(2083.00079499621 -
0.363525740207165*m.x1) + 2.7838703825877e-7*log(2234.54546771311 - 0.314885173024123*m.x1) +
8.622661836558e-8*log(1799.10804348668 - 0.454645435838916*m.x1) + 2.0428171728552e-7*log(
2119.22302589951 - 0.351899664382889*m.x1) + 2.41957807697445e-6*log(2068.46991167032 -
0.368189648175491*m.x1) + 7.4943155217504e-7*log(639.346029121697 - 0.826888690350679*m.x1) +
1.77549777117468e-6*log(1601.95443552502 - 0.517924884344193*m.x1) + 4.6845060919041e-7*log(
1897.31212561191 - 0.423125342333654*m.x1) + 1.10981852421741e-6*log(2289.83683300728 -
0.297138568540986*m.x1) + 2.1658584562503e-7*log(1533.35146924954 - 0.539944049586061*m.x1) +
3.196148790219e-8*log(2013.37867265892 - 0.385872019084936*m.x1) + 2.341792831266e-8*log(
2132.89963200708 - 0.347509949678408*m.x1) + 7.25338980954e-9*log(2117.53920216988 -
0.352440113214097*m.x1) + 1.718419676484e-8*log(2328.32819868025 - 0.28478417940663*m.x1) +
1.9638493823172e-7*log(1920.39808889742 - 0.415715551358873*m.x1) + 6.082757825004e-8*log(
1029.01261405065 - 0.701819274425846*m.x1) + 1.4410819516128e-7*log(2029.74189826765 -
0.380619992997292*m.x1) + 3.854187816045e-8*log(2106.97326999007 - 0.355831409818786*m.x1) +
9.131058258819e-8*log(2385.97240833931 - 0.266282393745893*m.x1) + 1.821238554387e-8*log(
2260.97728055389 - 0.306401480755683*m.x1) + 8.045404269924e-8*log(2127.19009521653 -
0.3493425123338*m.x1) + 2.491955333259e-8*log(2161.20757843742 - 0.338424083804843*m.x1) +
5.903755899867e-8*log(2123.85717760185 - 0.350412262924476*m.x1) + 1.581544463625e-8*log(
2108.16088165946 - 0.355450227793995*m.x1) + 3.746876597415e-8*log(1860.5916573208 -
0.434911335024029*m.x1) + 7.49333488575e-9*log(2221.71201851028 - 0.319004263698504*m.x1) +
2.2712668167546e-7*log(2220.3062861031 - 0.319455454890955*m.x1) + 5.3809172072229e-7*log(
2407.06872653783 - 0.259511209617455*m.x1) + 1.0601429614629e-7*log(1929.17750011989 -
0.412897665822297*m.x1) + 5.828612649489e-8*log(2467.89538756556 - 0.239987967795776*m.x1))
, sense=minimize)
# Balance constraint: the aggregate variable x1 must equal the sum of all
# component variables x2..x510.  Built with a generator sum instead of the
# generated 509-term literal chain; the resulting constraint is identical.
m.c2 = Constraint(expr= m.x1 - sum(getattr(m, 'x%d' % i) for i in range(2, 511)) == 0)
# Upper bound on the aggregate variable x1.
m.c3 = Constraint(expr= m.x1 <= 100)
| 97.260973 | 120 | 0.592053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 697 | 0.00425 |
df3d6de4c0225786e943add2dd3dc174ee168668 | 1,304 | py | Python | bin/build_upset_input.py | NCBI-Hackathons/AssesSV | 73c61db1e915df6e76933a75fac7904e25b07884 | [
"MIT"
] | 4 | 2018-11-12T02:08:45.000Z | 2021-11-21T15:24:03.000Z | bin/build_upset_input.py | NCBI-Hackathons/AssesSV | 73c61db1e915df6e76933a75fac7904e25b07884 | [
"MIT"
] | null | null | null | bin/build_upset_input.py | NCBI-Hackathons/AssesSV | 73c61db1e915df6e76933a75fac7904e25b07884 | [
"MIT"
] | 2 | 2018-11-10T21:04:11.000Z | 2018-11-10T21:44:39.000Z | #!/usr/bin/env python3
from glob import glob
import pandas as pd
import os
import gzip
import sys
# Build an UpSet-plot input matrix: for every structural variant (SV) in the
# truth set, record which condition VCFs ("*tp-base.vcf" files) contain it
# (1) or miss it (0), and write the matrix to upset_input.txt.
path_to_vcf_files = sys.argv[1]   # directory containing the condition *tp-base.vcf files
true_variants = sys.argv[2]       # gzipped VCF holding the full truth set

vcf_files = glob(os.path.join(path_to_vcf_files, "*tp-base.vcf"))
print(vcf_files)

all_variants = []
summary = {}

## Build master list of True Variants (VCF ID column, i.e. 3rd field)
with gzip.open(true_variants, 'rb') as f:
    for line in f:
        if line.startswith(b'#'):
            continue
        all_variants.append(line.split()[2].decode("utf-8"))

## Iterate through each of the condition VCFs
for vcf in vcf_files:
    sample_name = os.path.splitext(os.path.basename(vcf))[0]
    ## Collect the variant IDs present in this condition.  A set makes the
    ## membership test below O(1) per variant instead of O(n) with a list,
    ## which was quadratic over (true variants x condition variants).
    sample_variants = set()
    with open(vcf, 'r') as v:
        for line in v:
            if line.startswith('#'):
                continue
            sample_variants.add(line.split()[2])
    ## Score every true variant as present (1) or absent (0) in this condition
    for variant in all_variants:
        if variant not in summary:
            summary[variant] = {}
        summary[variant][sample_name] = 1 if variant in sample_variants else 0

## Convert summary dict to data frame and write to file
pd.DataFrame(summary).transpose().to_csv('upset_input.txt', sep=';', index_label='SV')
| 30.325581 | 86 | 0.654141 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 329 | 0.252301 |
df3f58815ff66d9dcb98bc560bc4e1a7dda2766a | 3,448 | py | Python | Apple EFI Package Extractor/Linux_Pre-Alpha/Apple_EFI_Package.py | Coool/BIOSUtilities | 319246a09adc207ea18201357ecf6af82df25e4d | [
"BSD-2-Clause-Patent"
] | null | null | null | Apple EFI Package Extractor/Linux_Pre-Alpha/Apple_EFI_Package.py | Coool/BIOSUtilities | 319246a09adc207ea18201357ecf6af82df25e4d | [
"BSD-2-Clause-Patent"
] | null | null | null | Apple EFI Package Extractor/Linux_Pre-Alpha/Apple_EFI_Package.py | Coool/BIOSUtilities | 319246a09adc207ea18201357ecf6af82df25e4d | [
"BSD-2-Clause-Patent"
] | null | null | null | #!/usr/bin/env python3
"""
Apple EFI Package
Apple EFI Package Extractor
Copyright (C) 2019-2021 Plato Mavropoulos
"""
print('Apple EFI Package Extractor v2.0_Linux_a1')
import os
import sys
import zlib
import shutil
import subprocess
# Collect the input packages: either every path given on the command line,
# or (interactively) every file under a user-supplied directory tree.
if len(sys.argv) >= 2 :
    pkg = sys.argv[1:]
else :
    pkg = []
    in_path = input('\nEnter the full folder path: ')
    print('\nWorking...')
    for root, dirs, files in os.walk(in_path):
        for name in files :
            pkg.append(os.path.join(root, name))

# All extracted payloads end up under ./AppleEFI; start from a clean slate.
final_path = os.path.join(os.getcwd(), 'AppleEFI')
if os.path.exists(final_path) : shutil.rmtree(final_path)

for input_file in pkg :
    file_path = os.path.abspath(input_file)
    file_name = os.path.basename(input_file)
    file_dir = os.path.dirname(file_path)      # NOTE(review): unused below
    file_ext = os.path.splitext(file_path)[1]  # NOTE(review): unused below

    print('\nFile: %s\n' % file_name)

    # Adler-32 of the whole package; used to make the output dir names unique.
    with open(input_file, 'rb') as in_buff : file_adler = zlib.adler32(in_buff.read()) & 0xFFFFFFFF

    pkg_payload = os.path.join(final_path, '%s_%0.8X' % (file_name, file_adler))
    pkg_temp = os.path.join(final_path, '__TEMP_%s_%0.8X' % (file_name, file_adler))

    os.makedirs(pkg_temp)

    # Unpack the package with bsdtar into the temp dir (requires bsdtar on PATH).
    subprocess.run(['bsdtar', '-xf', file_path, '-C', pkg_temp], check = True)
    #subprocess.run(['7z', 'x', '-aou', '-bso0', '-bse0', '-bsp0', '-o' + pkg_temp, file_path])

    if os.path.isfile(os.path.join(pkg_temp, 'Scripts')) :
        # Layout A: EFI payloads are inside a gzip-compressed "Scripts" cpio.
        scripts_init = os.path.join(pkg_temp, 'Scripts')
        scripts_cpgz = os.path.join(pkg_temp, 'Scripts.gz')
        efi_path = os.path.join(pkg_temp, 'Tools', 'EFIPayloads', '')

        os.replace(scripts_init, scripts_cpgz)
        # Shells out to gunzip/cpio; NOTE(review): paths containing spaces or
        # shell metacharacters would break these unquoted commands.
        os.system('gunzip -k -q %s' % scripts_cpgz)
        os.system('(cd %s && cpio --quiet -id < %s)' % (pkg_temp, scripts_init))

        shutil.copytree(efi_path, pkg_payload)
    elif os.path.isfile(os.path.join(pkg_temp, 'Payload')) :
        # Layout B: pbzx-wrapped "Payload" cpio containing a bridgeOS bundle
        # with a zipped UpdateBundle that holds the MacEFI firmware files.
        payload_init = os.path.join(pkg_temp, 'Payload')
        payload_pbzx = os.path.join(pkg_temp, 'Payload.pbzx')
        payload_cpio = os.path.join(pkg_temp, 'Payload.cpio')
        zip_path = os.path.join(pkg_temp, 'usr', 'standalone', 'firmware', 'bridgeOSCustomer.bundle', 'Contents', 'Resources', 'UpdateBundle')
        efi_path = os.path.join(zip_path, 'boot', 'Firmware', 'MacEFI', '')

        os.replace(payload_init, payload_pbzx)
        # External helper script decodes the pbzx stream into a plain cpio.
        subprocess.run(['python', 'parse_pbzx_fix.py', payload_pbzx, payload_cpio], check = True, stdout=subprocess.DEVNULL)
        os.system('(cd %s && cpio --quiet -id < %s)' % (pkg_temp, payload_cpio))

        shutil.unpack_archive(zip_path + '.zip', zip_path)

        if os.path.exists(efi_path) : shutil.copytree(efi_path, pkg_payload)

    shutil.rmtree(pkg_temp)

    # Split any IM4P containers into raw EFI images (helper script), then
    # delete the original .im4p files.
    im4p_files = []
    for root, dirs, files in os.walk(pkg_payload):
        for name in files :
            if name.endswith('.im4p') :
                im4p_files.append(os.path.join(root, name))

    if im4p_files : subprocess.run(['python', 'Apple_EFI_Split.py', '-skip', *im4p_files], check = True, stdout=subprocess.DEVNULL)

    for im4p in im4p_files : os.remove(im4p)

    # Rename every extracted file via the helper script.
    final_files = []
    for root, dirs, files in os.walk(pkg_payload):
        for name in files :
            final_files.append(os.path.join(root, name))

    if final_files : subprocess.run(['python', 'Apple_EFI_Rename.py', '-skip', *final_files], check = True, stdout=subprocess.DEVNULL)

    # Flatten the per-package results into AppleEFI/, skipping name collisions
    # (first package to produce a given file name wins).
    for root, dirs, files in os.walk(pkg_payload):
        for name in files :
            if not os.path.isfile(os.path.join(final_path, name)) :
                shutil.copy2(os.path.join(root, name), os.path.join(final_path, name))

    shutil.rmtree(pkg_payload)

print('\nDone!')
df4048ff5c3a54d37d423b8ae11ce64e3577a99f | 3,344 | py | Python | sourcing_code_pro.py | kiwi0fruit/open-fonts | deb211dd1e08becf3b9300b02365cd79415c64d4 | [
"MIT"
] | 30 | 2018-02-21T10:19:45.000Z | 2022-02-23T15:45:34.000Z | sourcing_code_pro.py | kiwi0fruit/open-fonts | deb211dd1e08becf3b9300b02365cd79415c64d4 | [
"MIT"
] | 33 | 2018-02-09T15:38:46.000Z | 2021-12-08T21:40:26.000Z | sourcing_code_pro.py | kiwi0fruit/open-fonts | deb211dd1e08becf3b9300b02365cd79415c64d4 | [
"MIT"
] | 4 | 2019-11-18T03:14:07.000Z | 2022-02-23T03:59:00.000Z | # -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import subprocess
from helper import rename_font
import os
import shutil
from os import path as p
# Resolve paths relative to this script; the Adobe sources live in
# ./Fonts/SourceCodePro and output goes to a sibling _SourcingCodePro folder.
here = p.dirname(p.abspath(__file__))
repos = p.dirname(here)
sourcecodepro = p.join(here, 'Fonts', 'SourceCodePro')
# Glyphs to strip from the renamed fonts: division slash plus dash look-alikes.
remove = (
    u'∕',
    # ---- Bad dashes: ----
    # [\u00AD \u1806 \uFE63 \uFF0D]
    u'­', u'᠆', u'﹣', u'-',
    # ---- OK dashes: ----
    # [u2E3A \u2E3B] (multiple of character width)
    # u'⸺', u'⸻',
)
# Unicode space glyphs to normalize (codepoints enumerated in the lists below).
spaces = (
    # ---- OK whitespaces: ----
    # [\u202F]
    # u' ',
    # ---- Bad whitespaces (don't touch): ----
    # [\u1680]
    # u' ',
    # ---- Bad whitespaces: ----
    # [\u205F \u3000]
    u' ', u' ',
    # ---- Bad whitespaces: ----
    # [\u2000 \u2001 \u2002 \u2003 \u2004 \u2005 \u2006 \u2009 \u200A]
    u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ',
)
# Output directory for the renamed family (created on first run).
dir_ = p.join(repos, '_SourcingCodePro')
if not p.exists(dir_):
    os.makedirs(dir_)
# Rename Source Code Pro:
# ---------------------------------
# (PostScript name, family name, style) triples, one per weight/style pair.
styles = [
    ('SourceCodePro-It', 'Source Code Pro', 'Italic'),
    ('SourceCodePro-MediumIt', 'Source Code Pro Medium', 'Italic'),
    ('SourceCodePro-SemiboldIt', 'Source Code Pro Semibold', 'Italic'),
    ('SourceCodePro-BoldIt', 'Source Code Pro', 'Bold Italic'),
    ('SourceCodePro-BlackIt', 'Source Code Pro Black', 'Italic'),
    ('SourceCodePro-LightIt', 'Source Code Pro Light', 'Italic'),
    ('SourceCodePro-ExtraLightIt', 'Source Code Pro ExtraLight', 'Italic'),
    ('SourceCodePro-Regular', 'Source Code Pro', 'Regular'),
    ('SourceCodePro-Medium', 'Source Code Pro Medium', 'Regular'),
    ('SourceCodePro-Semibold', 'Source Code Pro Semibold', 'Regular'),
    ('SourceCodePro-Bold', 'Source Code Pro', 'Bold'),
    ('SourceCodePro-Black', 'Source Code Pro Black', 'Regular'),
    ('SourceCodePro-Light', 'Source Code Pro Light', 'Regular'),
    ('SourceCodePro-ExtraLight', 'Source Code Pro ExtraLight', 'Regular'),
]
shutil.copy(p.join(sourcecodepro, 'LICENSE.txt'), p.join(dir_, 'LICENSE.txt'))
# Name replacements; the trademark sentence is shielded with a placeholder so
# "Source is a trademark" is not accidentally rewritten to "Sourcing ...".
reps = (('Source is a trademark', 'xxyyzz'), ('Source Code Pro', 'Sourcing Code Pro'), ('SourceCodePro', 'SourcingCodePro'), ('xxyyzz', 'Source is a trademark'))
def rep(s): return s.replace('Source', 'Sourcing')
for fn, ff, style in styles:
    clean_up = False
    ref = p.join(sourcecodepro, fn + '.ttf')
    of = ref  # Old Font
    rename_font(input=of, save_as=p.join(dir_, rep(fn).replace('It', 'Italic') +'.ttf'),
                fontname=rep(fn),  # Font Name
                familyname=rep(ff),  # Font Family
                fullname=rep(ff) + ((' ' + style) if style != 'Regular' else ''),
                reps=reps, sfnt_ref=ref, clean_up=clean_up, mono=True, remove=remove, spaces=spaces)
| 38 | 161 | 0.621711 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,230 | 0.659178 |
df4060697a7eb303c5884fc6599a1897ddcbd97e | 4,264 | bzl | Python | apple/bundling/debug_symbol_actions.bzl | kastiglione/rules_apple | 3745c3f03b9d29a04671fd4fac96468ca7031fd6 | [
"Apache-2.0"
] | 2 | 2019-09-01T06:06:40.000Z | 2020-11-10T00:37:01.000Z | apple/bundling/debug_symbol_actions.bzl | c-parsons/rules_apple | f75c4b1be219cb32704d900bd1a42ab200eab445 | [
"Apache-2.0"
] | null | null | null | apple/bundling/debug_symbol_actions.bzl | c-parsons/rules_apple | f75c4b1be219cb32704d900bd1a42ab200eab445 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Actions to manipulate debug symbol outputs."""
load("@build_bazel_rules_apple//apple/bundling:file_actions.bzl", "file_actions")
def _collect_linkmaps(ctx, debug_outputs, bundle_name):
    """Collects the available linkmaps from the binary.

    Symlinks each per-architecture linkmap produced by the linker into a
    file named "<bundle_name>_<arch>.linkmap" in this rule's output tree.

    Args:
      ctx: The current context.
      debug_outputs: dSYM bundle binary provider.
      bundle_name: Anticipated name of the dSYM bundle.

    Returns:
      A list of linkmap files, one per linked architecture.
    """
    outputs = []
    actions = ctx.actions
    # TODO(b/36174487): Iterate over .items() once the Map/dict problem is fixed.
    for arch in debug_outputs.outputs_map:
        arch_outputs = debug_outputs.outputs_map[arch]
        linkmap = arch_outputs["linkmap"]
        # Expose the linkmap under a stable, bundle-derived file name.
        out_linkmap = actions.declare_file("%s_%s.linkmap" % (bundle_name, arch))
        outputs.append(out_linkmap)
        file_actions.symlink(ctx, linkmap, out_linkmap)
    return outputs
def _create_symbol_bundle(ctx, debug_outputs, bundle_name, bundle_extension = ""):
    """Creates the .dSYM bundle next to the output archive.

    The generated bundle will have the same name as the bundle being built
    (including its extension), but with the ".dSYM" extension appended to it.
    If the target being built does not have a binary or if the build is not
    generating debug symbols (`--apple_generate_dsym` is not provided), then this
    function is a no-op that returns an empty list.
    This function assumes that the target has a user-provided binary in the
    `binary` attribute. It is the responsibility of the caller to check this.

    Args:
      ctx: The Skylark context.
      debug_outputs: dSYM bundle binary provider.
      bundle_name: Anticipated name of the dSYM bundle.
      bundle_extension: Anticipated extension of the dSYM bundle, empty string if
        it does not have one.

    Returns:
      A list of files that comprise the .dSYM bundle, which should be returned as
      additional outputs from the rule.
    """
    dsym_bundle_name = bundle_name + bundle_extension + ".dSYM"
    outputs = []
    actions = ctx.actions
    # TODO(b/36174487): Iterate over .items() once the Map/dict problem is fixed.
    for arch in debug_outputs.outputs_map:
        arch_outputs = debug_outputs.outputs_map[arch]
        dsym_binary = arch_outputs["dsym_binary"]
        # The DWARF data lives at Contents/Resources/DWARF/<bundle_name>_<arch>.
        out_symbols = actions.declare_file("%s/Contents/Resources/DWARF/%s_%s" % (
            dsym_bundle_name,
            bundle_name,
            arch,
        ))
        outputs.append(out_symbols)
        file_actions.symlink(ctx, dsym_binary, out_symbols)
    # If we found any outputs, create the Info.plist for the bundle as well;
    # otherwise, we just return the empty list. The plist generated by dsymutil
    # only varies based on the bundle name, so we regenerate it here rather than
    # propagate the other one from the apple_binary. (See
    # https://github.com/llvm-mirror/llvm/blob/master/tools/dsymutil/dsymutil.cpp)
    if outputs:
        out_plist = actions.declare_file("%s/Contents/Info.plist" %
                                         dsym_bundle_name)
        outputs.append(out_plist)
        actions.expand_template(
            template = ctx.file._dsym_info_plist_template,
            output = out_plist,
            substitutions = {
                "%bundle_name_with_extension%": bundle_name + bundle_extension,
            },
        )
    return outputs
# Define the loadable module that lists the exported symbols in this file.
# Consumers call these as debug_symbol_actions.collect_linkmaps(...) etc.
debug_symbol_actions = struct(
    collect_linkmaps = _collect_linkmaps,
    create_symbol_bundle = _create_symbol_bundle,
)
| 39.481481 | 82 | 0.695826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,706 | 0.634615 |
df43165460434d52e608ecfd552ac420215fb6a5 | 414 | py | Python | uplift/tree/_utils.py | Antiguru11/uplift | ffaa5bcd20d6aa264fa3b2d191327c86384e4f7c | [
"Apache-2.0"
] | null | null | null | uplift/tree/_utils.py | Antiguru11/uplift | ffaa5bcd20d6aa264fa3b2d191327c86384e4f7c | [
"Apache-2.0"
] | null | null | null | uplift/tree/_utils.py | Antiguru11/uplift | ffaa5bcd20d6aa264fa3b2d191327c86384e4f7c | [
"Apache-2.0"
] | null | null | null | import numpy as np
def group_stats(y, w, groups):
    """Per-group uplift statistics relative to the control group (``w == 0``).

    For every label in ``groups`` the uplift is the mean outcome of samples
    assigned to that group minus the mean outcome of the control samples.
    An absent group contributes ``-control_mean`` (its mean is taken as 0),
    and when there are no control samples the control mean itself is 0.

    Args:
        y: array of outcomes.
        w: array of treatment assignments (0 marks the control group).
        groups: iterable of group labels to report on.

    Returns:
        Tuple ``(group_sizes, n_control, uplifts)`` where ``group_sizes``
        and ``uplifts`` are tuples aligned with ``groups``.
    """
    control_mask = w == 0
    n_control = control_mask.sum()
    control_mean = y[control_mask].mean() if n_control != 0 else 0

    sizes = []
    uplifts = []
    for group in groups:
        group_mask = w == group
        n_group = group_mask.sum()
        sizes.append(n_group)
        if n_group == 0:
            # No samples in this group: treat its mean as zero.
            uplifts.append(-control_mean)
        else:
            uplifts.append(y[group_mask].mean() - control_mean)
    return tuple(sizes), n_control, tuple(uplifts)
| 18 | 49 | 0.442029 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
df4381be0a6af61c9a8d19296067917654a1c8b3 | 3,958 | py | Python | ipyannotator/ipytyping/annotations.py | itepifanio/ipyannotator | eac99f71d8d39e02a10c21807508b0c064077806 | [
"Apache-2.0"
] | null | null | null | ipyannotator/ipytyping/annotations.py | itepifanio/ipyannotator | eac99f71d8d39e02a10c21807508b0c064077806 | [
"Apache-2.0"
] | null | null | null | ipyannotator/ipytyping/annotations.py | itepifanio/ipyannotator | eac99f71d8d39e02a10c21807508b0c064077806 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00c_annotation_types.ipynb (unless otherwise specified).
__all__ = []
# Internal Cell
from pathlib import Path
from collections.abc import MutableMapping
from typing import Dict, Optional, Iterable, Any, Union
from ipywidgets import Layout
from ..mltypes import OutputImageLabel, OutputLabel
from ..custom_input.buttons import ImageButton, ImageButtonSetting, ActionButton
# Internal Cell
class AnnotationStore(MutableMapping):
    """Mutable mapping of annotations keyed by item identifier.

    Wraps a plain dict (shared by reference when one is passed in) and
    delegates the full MutableMapping protocol to it. Deleting a missing
    key is a silent no-op, unlike a plain dict.
    """

    def __init__(self, annotations: Optional[Dict] = None):
        # Keep a reference to the caller's dict when a truthy one is given;
        # otherwise start from a fresh, empty dict.
        self._annotations = annotations if annotations else {}

    def __getitem__(self, key: str):
        return self._annotations[key]

    def __delitem__(self, key: str):
        # Absent keys are ignored rather than raising KeyError.
        self._annotations.pop(key, None)

    def __setitem__(self, key: str, value: Any):
        self._annotations[key] = value

    def __iter__(self):
        return iter(self._annotations)

    def __len__(self):
        return len(self._annotations)

    def __repr__(self):
        return f"{type(self).__name__}({self._annotations!r})"
# Internal Cell
class LabelStore(AnnotationStore):
    """Annotation store restricted to string keys.

    A truthy value must be a dict of label state (e.g. ``{'answer': True}``);
    falsy values (None, empty dict) are stored unchecked as placeholders.
    NOTE(review): validation uses ``assert`` and so disappears under ``-O``.
    """

    def __getitem__(self, key: str):
        assert isinstance(key, str)
        return self._annotations[key]

    def __delitem__(self, key: str):
        assert isinstance(key, str)
        # Silent no-op when the key is absent, matching the base class.
        if key in self:
            del self._annotations[key]

    def __setitem__(self, key: str, value: Optional[Dict[str, bool]]):
        assert isinstance(key, str)
        if value:
            # Only validate the type when something truthy is supplied.
            assert isinstance(value, dict)
        self._annotations[key] = value
# Internal Cell
def _label_store_to_image_button(
    annotation: LabelStore,
    width: int = 150,
    height: int = 150,
    disabled: bool = False
) -> Iterable[ImageButton]:
    """Render every (path, state) entry of ``annotation`` as an ImageButton.

    The image label is hidden; the file stem becomes the button label and
    the stored ``'answer'`` flag drives the active state.
    """
    setting = ImageButtonSetting(
        display_label=False,
        image_width=f'{width}px',
        image_height=f'{height}px'
    )

    def _make_button(path, value):
        # One button per annotated image, sharing a single setting object.
        button = ImageButton(setting)
        button.image_path = str(path)
        button.label_value = Path(path).stem
        button.active = value.get('answer', False)
        button.disabled = disabled
        return button

    return [_make_button(path, value) for path, value in annotation.items()]
# Internal Cell
def _label_store_to_button(
    annotation: LabelStore,
    disabled: bool
) -> Iterable[ActionButton]:
    """Render every (label, state) entry of ``annotation`` as an ActionButton."""
    buttons = []
    for label, value in annotation.items():
        button = ActionButton(layout=Layout(width='auto', height='auto'))
        # Description, value and tooltip all carry the label text.
        button.description = button.value = button.tooltip = label
        button.disabled = disabled
        # 'answer' defaults to True here, so entries without an explicit
        # answer are highlighted as well.
        if value.get('answer', True):
            button.layout.border = 'solid 2px #f7f01e'
        buttons.append(button)
    return buttons
# Internal Cell
class LabelStoreCaster:  # pylint: disable=too-few-public-methods
    """Callable factory that renders a LabelStore as the widget list
    matching the configured output type (image buttons or text buttons)."""

    def __init__(
        self,
        output: Union[OutputImageLabel, OutputLabel],
        width: int = 150,
        height: int = 150,
        widgets_disabled: bool = False
    ):
        # width/height only apply to the image-button rendering path.
        self.width = width
        self.height = height
        self.output = output
        self.widgets_disabled = widgets_disabled

    def __call__(self, annotation: LabelStore) -> Iterable:
        """Dispatch on the output type; raises ValueError for anything else."""
        if isinstance(self.output, OutputImageLabel):
            return _label_store_to_image_button(
                annotation,
                self.width,
                self.height,
                self.widgets_disabled
            )
        if isinstance(self.output, OutputLabel):
            return _label_store_to_button(
                annotation,
                disabled=self.widgets_disabled
            )
        raise ValueError(
            f"output should have type OutputImageLabel or OutputLabel. {type(self.output)} given"
) | 29.102941 | 104 | 0.641991 | 2,212 | 0.558868 | 0 | 0 | 0 | 0 | 0 | 0 | 492 | 0.124305 |
df4403fc1193fbecc76726cf60c46006752e81e6 | 3,001 | py | Python | optapy-core/tests/test_inverse_relation.py | optapy/optapy | 0240f744f592d964110c86d1054ce63cf8c38452 | [
"Apache-2.0"
] | 85 | 2021-07-26T11:42:51.000Z | 2022-03-29T14:14:27.000Z | optapy-core/tests/test_inverse_relation.py | optapy/optapy | 0240f744f592d964110c86d1054ce63cf8c38452 | [
"Apache-2.0"
] | 22 | 2021-07-21T21:10:16.000Z | 2022-03-31T17:49:00.000Z | optapy-core/tests/test_inverse_relation.py | optapy/optapy | 0240f744f592d964110c86d1054ce63cf8c38452 | [
"Apache-2.0"
] | 9 | 2021-07-15T12:07:32.000Z | 2022-02-09T02:11:55.000Z | import optapy
import optapy.score
import optapy.config
import optapy.constraint
@optapy.planning_entity
class InverseRelationEntity:
    """Planning entity whose single planning variable selects a value."""

    def __init__(self, code, value=None):
        self.code = code
        self.value = value

    @optapy.planning_variable(object, ['value_range'])
    def get_value(self):
        """Planning variable: the value currently assigned by the solver."""
        return self.value

    def set_value(self, value):
        self.value = value
@optapy.planning_entity
class InverseRelationValue:
    """Shadow entity tracking which entities currently point at this value."""

    def __init__(self, code, entities=None):
        self.code = code
        if entities is None:
            self.entities = []
        else:
            self.entities = entities

    @optapy.inverse_relation_shadow_variable(InverseRelationEntity, source_variable_name='value')
    def get_entities(self):
        """Inverse-relation shadow variable, kept in sync by the solver."""
        return self.entities
@optapy.planning_solution
class InverseRelationSolution:
    """Planning solution holding all values, entities and the score."""

    def __init__(self, values, entities, score=None):
        self.values = values
        self.entities = entities
        self.score = score

    @optapy.planning_entity_collection_property(InverseRelationEntity)
    def get_entities(self):
        return self.entities

    @optapy.planning_entity_collection_property(InverseRelationValue)
    @optapy.value_range_provider('value_range')
    def get_values(self):
        # The values double as the value range for InverseRelationEntity.value.
        return self.values

    @optapy.planning_score(optapy.score.SimpleScore)
    def get_score(self):
        return self.score

    def set_score(self, score):
        self.score = score
@optapy.constraint_provider
def inverse_relation_constraints(constraint_factory):
    """Penalize every value referenced by more than one entity (1 per value)."""
    return [
        constraint_factory.forEach(optapy.get_class(InverseRelationValue))
        .filter(lambda value: len(value.entities) > 1)
        .penalize('Only one entity per value', optapy.score.SimpleScore.ONE)
    ]
def test_inverse_relation():
    """Solve until score 0: each value must end up used by exactly one entity."""
    termination = optapy.config.solver.termination.TerminationConfig()
    # Stop as soon as a zero-penalty (feasible) solution is found.
    termination.setBestScoreLimit('0')
    solver_config = optapy.config.solver.SolverConfig() \
        .withSolutionClass(optapy.get_class(InverseRelationSolution)) \
        .withEntityClasses(optapy.get_class(InverseRelationEntity), optapy.get_class(InverseRelationValue)) \
        .withConstraintProviderClass(optapy.get_class(inverse_relation_constraints)) \
        .withTerminationConfig(termination)
    solver = optapy.solver_factory_create(solver_config).buildSolver()
    solution = solver.solve(InverseRelationSolution(
        [
            InverseRelationValue('A'),
            InverseRelationValue('B'),
            InverseRelationValue('C')
        ],
        [
            InverseRelationEntity('1'),
            InverseRelationEntity('2'),
            InverseRelationEntity('3'),
        ]
    ))
    assert solution.score.getScore() == 0
    # Each value must be claimed by exactly one distinct, non-null entity.
    visited_set = set()
    for value in solution.values:
        assert len(value.entities) == 1
        assert value.entities[0] is not None
        assert value.entities[0] not in visited_set
        visited_set.add(value.entities[0])
| 31.260417 | 109 | 0.683439 | 1,292 | 0.430523 | 0 | 0 | 1,709 | 0.569477 | 0 | 0 | 81 | 0.026991 |
df4589c5936fa29d3d24eba4e44f5da8d49bf94e | 714 | py | Python | pizza_store/models/user.py | astsu-dev/pizza-store-backend | 902f6e5e2c88ba029b2bff61da8fc4684664ead9 | [
"MIT"
] | 2 | 2021-07-10T15:47:45.000Z | 2021-12-13T18:09:30.000Z | pizza_store/models/user.py | astsu-dev/pizza-store-backend | 902f6e5e2c88ba029b2bff61da8fc4684664ead9 | [
"MIT"
] | null | null | null | pizza_store/models/user.py | astsu-dev/pizza-store-backend | 902f6e5e2c88ba029b2bff61da8fc4684664ead9 | [
"MIT"
] | null | null | null | import datetime
import uuid
from pizza_store.enums.role import Role
from pydantic import BaseModel
class UserBase(BaseModel):
    """Fields shared by every user representation."""
    username: str
    email: str
class UserCreate(UserBase):
    """User register model"""
    # Password as submitted by the client at registration.
    password: str
class UserIn(BaseModel):
    """User login model."""
    username: str
    password: str
class User(UserBase):
    """Public user model: id and role, without any credential fields."""
    id: uuid.UUID
    role: Role
    class Config:
        # pydantic v1 flag: allow building instances from ORM objects.
        orm_mode = True
class UserInDB(User):
    """User as stored in the database, including the hashed password."""
    password_hash: str
class UserInToken(User):
    """User payload embedded inside an access token (same fields as User)."""
    pass
class TokenResponse(BaseModel):
    """OAuth2-style token response returned to the client."""
    access_token: str
    token_type: str
    # Token lifetime; presumably seconds — TODO confirm against the issuer.
    expires_in: int
class Token(BaseModel):
    """Decoded token payload: expiry/issued-at timestamps (field names match
    the JWT ``exp``/``iat`` claims) plus the embedded user."""
    exp: datetime.datetime
    iat: datetime.datetime
    user: UserInToken
| 13.730769 | 39 | 0.683473 | 590 | 0.826331 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.067227 |
df45b74210e776a21d70e6e881d80b85c80991df | 874 | py | Python | mautrix/types/event/batch.py | tulir/mautrix-appservice-python | d180603445bb0bc465a7b2ff918c4ac28a5dbfc2 | [
"MIT"
] | 1 | 2018-08-24T13:33:30.000Z | 2018-08-24T13:33:30.000Z | mautrix/types/event/batch.py | tulir/mautrix-appservice-python | d180603445bb0bc465a7b2ff918c4ac28a5dbfc2 | [
"MIT"
] | 4 | 2018-07-10T11:43:46.000Z | 2018-09-03T22:08:02.000Z | mautrix/types/event/batch.py | tulir/mautrix-appservice-python | d180603445bb0bc465a7b2ff918c4ac28a5dbfc2 | [
"MIT"
] | 2 | 2018-07-03T04:07:08.000Z | 2018-09-10T03:13:59.000Z | # Copyright (c) 2022 Tulir Asokan, Sumner Evans
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from typing import Any
from attr import dataclass
import attr
from ..primitive import UserID
from ..util import SerializableAttrs
from .base import BaseEvent
@dataclass
class BatchSendEvent(BaseEvent, SerializableAttrs):
    """Base event class for events sent via a batch send request."""

    sender: UserID
    # Serialized under the "origin_server_ts" key (see the attr metadata).
    timestamp: int = attr.ib(metadata={"json": "origin_server_ts"})
    content: Any
@dataclass
class BatchSendStateEvent(BatchSendEvent, SerializableAttrs):
    """
    State events to be used as initial state events on batch send events. These never need to be
    deserialized.
    """

    state_key: str
| 26.484848 | 96 | 0.735698 | 437 | 0.5 | 0 | 0 | 459 | 0.525172 | 0 | 0 | 458 | 0.524027 |
df46b6684766e46f0cbbc49ea5d3900fae1ae16f | 885 | py | Python | dbks/runtime.py | vincentlam/dbks | a6dedd6281100666b0e8eb6a5a838362d3260fc5 | [
"MIT"
] | null | null | null | dbks/runtime.py | vincentlam/dbks | a6dedd6281100666b0e8eb6a5a838362d3260fc5 | [
"MIT"
] | null | null | null | dbks/runtime.py | vincentlam/dbks | a6dedd6281100666b0e8eb6a5a838362d3260fc5 | [
"MIT"
] | null | null | null | class Runtime:
@staticmethod
def v3(major: str, feature: str, ml_type: str = None, scala_version: str = "2.12"):
if ml_type and ml_type.lower() not in ["cpu", "gpu"]:
raise ValueError('"ml_type" can only be "cpu" or "gpu"!')
return "".join(
[
f"{major}.",
f"{feature}.x",
"" if not ml_type else f"-{ml_type}-ml",
f"-scala{scala_version}",
]
)
    @staticmethod
    def v2(
        major: str,
        feature: str,
        maintenance: str,
        runtime_version: str,
        scala_version: str = "2.11",
    ):
        """2.x runtimes are intentionally unsupported: always raises ValueError."""
        raise ValueError("This version of runtime is no longer supported!")
@staticmethod
def light(major: str, feature: str, scala_version: str = "2.11"):
return f"apache-spark.{major}.{feature}.x-scala{scala_version}"
| 31.607143 | 87 | 0.531073 | 884 | 0.99887 | 0 | 0 | 853 | 0.963842 | 0 | 0 | 241 | 0.272316 |
df46e87e30c896bf3a86559f322c509578a51604 | 118 | py | Python | dvc/path/s3.py | zb0th/dvc | 5fdbc1882f73162419d5b84ed47a33e9e321f151 | [
"Apache-2.0"
] | null | null | null | dvc/path/s3.py | zb0th/dvc | 5fdbc1882f73162419d5b84ed47a33e9e321f151 | [
"Apache-2.0"
] | null | null | null | dvc/path/s3.py | zb0th/dvc | 5fdbc1882f73162419d5b84ed47a33e9e321f151 | [
"Apache-2.0"
] | null | null | null | from dvc.scheme import Schemes
from .base import PathCloudBASE
class PathS3(PathCloudBASE):
    """Cloud path for the S3 remote; only pins the URL scheme to ``s3``."""
    scheme = Schemes.S3
| 16.857143 | 31 | 0.779661 | 52 | 0.440678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
df471a58961450769299367eb0ca2a94b53911da | 2,729 | py | Python | filter_distance_01-1.py | jgpattis/Desres-sars-cov-2-apo-mpro | 90c07414040c0ea0bf54028e2f194d6509c8f526 | [
"MIT"
] | null | null | null | filter_distance_01-1.py | jgpattis/Desres-sars-cov-2-apo-mpro | 90c07414040c0ea0bf54028e2f194d6509c8f526 | [
"MIT"
] | null | null | null | filter_distance_01-1.py | jgpattis/Desres-sars-cov-2-apo-mpro | 90c07414040c0ea0bf54028e2f194d6509c8f526 | [
"MIT"
] | null | null | null | #! /usr/bin/env/ python
# filter out CA distances with large minimum
# filter out CA distances with small standard deviations
# save to file for later use
# will plot distances used
import mdtraj as md
import pyemma.coordinates as coor
import numpy as np
import pickle
from util.plot_structure_util import plot_vmd_cylinder_from_inds, plot_pymol_cylinder_from_inds
# Topology used both for featurization and for drawing the selected pairs.
pdb = '../DESRES_protease_chainid.pdb'
dis_cutoff = 1.2   # keep pairs whose minimum CA-CA distance dips below this
std_cutoff = 0.03  # keep pairs whose distance std-dev exceeds this
outfile = 'filtered_distance_featurization_01/filtered_dis_ind_12_03'
save = True
plot = 'all' # should be all, pymol, vmd, or none
traj_num = [f'{i:04d}' for i in range(100)]
traj_path = '../DESRES-Trajectory_sarscov2-10880334-no-water-no-ion-glueCA/sarscov2-10880334-no-water-no-ion-glueCA/sarscov2-10880334-no-water-no-ion-glueCA-'
traj_list = [ traj_path + str(i) + '.dcd' for i in traj_num]
# Chain A: all CA-CA pair distances (excluding 3 nearest neighbors).
feat = coor.featurizer(pdb)
feat.add_distances(feat.pairs(feat.select('name == CA and chainid == 0'), excluded_neighbors=3))
traj = coor.load(traj_list, feat, stride=5)
traj_cat = np.concatenate(traj)
# Chain B: same featurization on the second chain.
feat1 = coor.featurizer(pdb)
feat1.add_distances(feat1.pairs(feat1.select('name == CA and chainid == 1'), excluded_neighbors=3))
# BUGFIX: this previously loaded with the chain-A featurizer (feat) and
# concatenated the chain-A data (traj), so chain B was never measured.
traj1 = coor.load(traj_list, feat1, stride=5)
traj_cat1 = np.concatenate(traj1)
# Pool both chains so a pair is kept if it qualifies in either chain.
traj_cat_pair = np.concatenate((traj_cat, traj_cat1), axis=0)
min_dist = traj_cat_pair.min(axis=0)
std_dist = traj_cat_pair.std(axis=0)
new_dists = np.where((min_dist < dis_cutoff) & (std_dist > std_cutoff))[0]
print('new distances:', new_dists.shape)
if save:
    # Translate surviving pair indices into residue-number pairs per chain
    # (assumes describe() tokens 4 and 10 are the residue numbers — TODO confirm).
    out = np.zeros((len(new_dists), 2), dtype=np.int16)
    label = feat.describe()
    for i, j in enumerate(new_dists):
        tmp = label[j].split()
        out[i, 0] = int(tmp[4])
        out[i, 1] = int(tmp[10])
    with open(outfile+'chainA.npy', 'wb') as handle:
        np.save(handle, out)
    out1 = np.zeros((len(new_dists), 2), dtype=np.int16)
    label1 = feat1.describe()
    for i, j in enumerate(new_dists):
        tmp1 = label1[j].split()
        out1[i, 0] = int(tmp1[4])
        out1[i, 1] = int(tmp1[10])
    with open(outfile+'chainB.npy', 'wb') as handle:
        np.save(handle, out1)
# BUGFIX: `pdb` was previously used below without ever being defined.
if plot == 'all':
    plot_vmd_cylinder_from_inds(pdb, out, outfile + '_draw_cylinder')
    print(plot_vmd_cylinder_from_inds.__doc__)
    plot_pymol_cylinder_from_inds(pdb, out, outfile + '_draw_cylinder')
    print(plot_pymol_cylinder_from_inds.__doc__)
elif plot == 'vmd':
    plot_vmd_cylinder_from_inds(pdb, out, outfile + '_draw_cylinder')
    print(plot_vmd_cylinder_from_inds.__doc__)
elif plot == 'pymol':
    plot_pymol_cylinder_from_inds(pdb, out, outfile + '_draw_cylinder')
    print(plot_pymol_cylinder_from_inds.__doc__)
| 36.878378 | 158 | 0.706486 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 690 | 0.25284 |
df48425c447623027c723c6ed55ecb2c6c99a346 | 4,206 | py | Python | src/panoptoindexconnector/implementations/coveo_implementation.py | bschlintz/panopto-index-connector | 54599c972c00f44f1c2e7b40583e3e0a439f7f4e | [
"Apache-2.0"
] | null | null | null | src/panoptoindexconnector/implementations/coveo_implementation.py | bschlintz/panopto-index-connector | 54599c972c00f44f1c2e7b40583e3e0a439f7f4e | [
"Apache-2.0"
] | null | null | null | src/panoptoindexconnector/implementations/coveo_implementation.py | bschlintz/panopto-index-connector | 54599c972c00f44f1c2e7b40583e3e0a439f7f4e | [
"Apache-2.0"
] | null | null | null | """
Methods for the connector application to convert and sync content to the target endpoint
Start with this template to implement these methods for the connector application
"""
# Standard Library Imports
import json
import logging
import os
# Third party
import requests
# Global constants
DIR = os.path.dirname(os.path.realpath(__file__))  # directory containing this module
LOG = logging.getLogger(__name__)  # module-level logger
#########################################################################
#
# Exported methods to implement
#
#########################################################################
#
# Example Coveo Doc
# {
# 'author': 'Alice Smith',
# 'date': '2017-11-08T12:18:41.666Z',
# 'documenttype': 'Text',
# 'filename': 'mytext.txt',
# 'language': [
# 'English'
# ],
# 'permanentid': 'sample95829alice84720permanent93829id',
# 'sourcetype': 'Push',
# 'title': 'My Text',
# 'fileExtension': '.txt',
# 'data': 'This is a sample text written by Alice Smith.',
# 'permissions': [
# {
# 'allowAnonymous': false,
# 'allowedPermissions': [
# {
# 'identity': 'AlphaTeam',
# 'identityType': 'Group'
# }
# ],
# 'deniedPermissions': [
# {
# 'identity': 'bob@example.com',
# 'identityType': 'User'
# }
# ]
# }
# ]
# }
#
def convert_to_target(panopto_content, field_mapping):
    """
    Convert a Panopto content document into the Coveo push-document format.

    Args:
        panopto_content: dict from Panopto; video fields live under
            'VideoContent' ('Title', 'Language', 'Principals', plus the
            metadata fields named in the field mapping).
        field_mapping: connector configuration mapping with 'Id', 'Info'
            and 'Metadata' sections naming the target Coveo fields.

    Returns:
        dict ready to be pushed to the Coveo Push API.
    """
    target_content = {
        field_mapping['Id']: panopto_content['Id'],
        'documenttype': 'Panopto',
        field_mapping['Info']['Language']: panopto_content['VideoContent']['Language'],
        # Fixed source key: was 'TItle', which does not match the 'Title'
        # casing used everywhere else for VideoContent fields.
        field_mapping['Info']['Title']: panopto_content['VideoContent']['Title'],
    }

    # Concatenate all non-empty mapped metadata fields into the searchable body.
    target_content['data'] = ' '.join(
        panopto_content['VideoContent'][key]
        for key in field_mapping['Metadata']
        if panopto_content['VideoContent'][key]
    )

    # Principals: permitted identities, with the special 'Public' group
    # expressed as anonymous access instead of an explicit permission entry.
    target_content['permissions'] = [
        {
            'allowedPermissions': [
                {
                    'identityType': 'Group' if 'Groupname' in principal else 'User',
                    'identity': principal.get('Email', principal.get('Groupname'))
                }
                for principal in panopto_content['VideoContent']['Principals']
                if principal.get('Groupname') != 'Public'
            ]
        }
    ]
    target_content['permissions'][0]['allowAnonymous'] = any(
        principal.get('Groupname') == 'Public'
        for principal in panopto_content['VideoContent']['Principals']
    )

    LOG.debug('Converted document is %s', json.dumps(target_content, indent=2))

    return target_content
def push_to_target(target_content, target_address, target_credentials, config):
    """
    Push one converted document to the Coveo Push API (PUT upsert by id).

    Args:
        target_content: document dict produced by convert_to_target.
        target_address: base URL of the Coveo platform endpoint.
        target_credentials: dict with 'organization', 'source', 'oauthtoken'.
        config: connector configuration exposing field_mapping.

    Raises:
        requests.HTTPError: if the push request fails.
    """
    field_mapping = config.field_mapping

    # Consistency fix: convert_to_target stores the document id under
    # field_mapping['Id']; this lookup previously used 'id' (lower case),
    # which does not exist in the same mapping.
    url = '{coveo}/push/v1/organizations/{org}/sources/{source}/documents?documentId={id}'.format(
        coveo=target_address,
        org=target_credentials['organization'],
        source=target_credentials['source'],
        id=target_content[field_mapping['Id']])

    data = json.dumps(target_content)
    headers = {'Content-Type': 'application/json', 'Authorization': target_credentials['oauthtoken']}

    response = requests.put(url=url, data=data, headers=headers)
    if not response.ok:
        LOG.error('Failed response: %s, %s', response, response.text)
    # No-op on success; raises requests.HTTPError on a 4xx/5xx status.
    response.raise_for_status()
def delete_from_target(video_id, target_address, target_credentials):
    """
    Implement this method to push converted content to the target
    """
    url_template = '{coveo}/push/v1/organizations/{org}/sources/{source}/documents?documentId={id}'
    url = url_template.format(
        coveo=target_address, org=target_credentials['organization'], source=target_credentials['source'], id=video_id)
    headers = {'Content-Type': 'application/json', 'Authorization': target_credentials['oauthtoken']}
    response = requests.delete(url=url, headers=headers)
    if not response.ok:
        LOG.error('Failed response: %s, %s', response, response.text)
    # No-op on success; raises requests.HTTPError on a 4xx/5xx status.
    response.raise_for_status()
| 30.478261 | 119 | 0.612696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,031 | 0.482882 |
df4b2deb2486efb9c3adbc90f171c364722baebb | 6,255 | py | Python | dashboard/scripts/webscraper.py | lynetteoh/COVID19dashboard | 61193d35acf004999443c47a30f0b9f9c6220c03 | [
"MIT"
] | null | null | null | dashboard/scripts/webscraper.py | lynetteoh/COVID19dashboard | 61193d35acf004999443c47a30f0b9f9c6220c03 | [
"MIT"
] | null | null | null | dashboard/scripts/webscraper.py | lynetteoh/COVID19dashboard | 61193d35acf004999443c47a30f0b9f9c6220c03 | [
"MIT"
] | null | null | null | # script for daily update
from bs4 import BeautifulSoup
# from urllib.request import urlopen
import requests
import csv
import time
from datetime import datetime, timedelta
import os
from pathlib import Path
from dashboard.models import Case, Country, District, State, Zone
def run():
    """Script entry point: scrape and persist the latest per-state stats."""
    scrapeStateStats()
def scrapeCountryStats():
    """Scrape country-level COVID-19 counts from covid-19.livephotos123.com.

    Builds a dict mapping each case-type label to the list of numbers shown
    under it, then prints it; nothing is persisted yet (see todo below).
    """
    # scrape country level
    country_url = "http://covid-19.livephotos123.com/en"
    # page = urlopen(url)
    # html = page.read().decode("utf-8")
    # soup = BeautifulSoup(html, "html.parser")
    country_page = requests.get(country_url)
    country_soup = BeautifulSoup(country_page.content, "html.parser")
    # print(soup.prettify())
    # subtitle = soup.find_all('span', class_='sub-title')
    # print(soup.find_all('div', class_='col-xs-12 col-md-6 text-center'))
    # get country level covid information
    section = country_soup.find('div', class_='col-xs-12 col-md-6 text-center')
    types = section.find_all('div')
    # print(types)
    all_info = {}
    for t in types:
        info = t.find_all('span')
        # print(info)
        if info:
            # get type: the first span holds the case-type label
            key = info[0].get_text()
            cases_num = []
            for i in range(1, len(info)):
                # get changes: remaining spans hold the numbers
                cases_num.append(info[i].get_text())
            # todo : use pandas and save to csv file
            all_info[key] = cases_num
    print(all_info)
def scrapeStateStats():
    """Scrape per-state COVID-19 stats from Malaysiakini and persist them.

    Skips scraping entirely when data/state_fb.csv already has a row for
    today; otherwise, for each state named in data/state_name.csv, scrapes
    the state page, appends a CSV row, and upserts Case and Zone rows in
    the Django database.
    """
    BASE_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
    name_path = os.path.join( BASE_DIR, 'data/state_name.csv')
    s_path = os.path.join( BASE_DIR, 'data/state_fb.csv')
    c_path = os.path.join(BASE_DIR, 'data/state_stats_scrapped.csv')
    # Check the last row of each CSV to see if today's data is already there.
    updated = False
    with open(s_path, "r") as f:
        reader = list(csv.reader(f))
        today = time.strftime('%d/%m/%Y')
        if reader[len(reader)-1][0] == today:
            print("Updated state_fb.csv as of " + reader[len(reader)-1][0])
            updated = True
    state_stats_updated = False
    with open(c_path, "r") as fs:
        reader_fs = list(csv.reader(fs))
        today = time.strftime('%d/%m/%Y')
        if reader_fs[len(reader_fs)-1][0] == today:
            print("Updated state_stats_scrapped.csv as of " + reader_fs[len(reader_fs)-1][0])
            state_stats_updated = True
    if not updated:
        # get state info
        with open(name_path, "r") as state_name:
            reader = csv.reader(state_name)
            next(reader) # skip first row
            for row in reader:
                print(row)
                # get state cases
                case_type = ["Active_14", "In treatment", "Deaths"]
                state_url = "https://newslab.malaysiakini.com/covid-19/en/state/" + row[0]
                state_page = requests.get(state_url)
                state_soup = BeautifulSoup(state_page.content, "html.parser")
                # Parse the "As of <date>" banner into a datetime.
                # NOTE(review): str.strip("As of ") strips a *character set*,
                # not a prefix — month names starting with those letters
                # (e.g. "Apr") get mangled and strptime will fail; verify.
                date_soup = state_soup.find('div', class_="jsx-3636536621 uk-text-large")
                d = date_soup.get_text().strip("As of ").strip(" PM").strip(" AM")
                latest_date = datetime.strptime(d, '%b %d, %Y %H:%M')
                date_updated = latest_date.strftime('%d/%m/%Y')
                state_info = {}
                # Headline "infected" count, then the three per-type counters.
                infected = state_soup.find("div", class_="jsx-3636536621 uk-text-center uk-text-large")
                state_info['Infected'] = int(infected.find("strong", class_= "jsx-3636536621" ).get_text().replace(",", ""))
                stats = state_soup.find('ul', class_="jsx-3636536621 uk-grid uk-grid-divider uk-grid-small uk-flex-center uk-child-width-1-2 uk-child-width-1-3@s uk-child-width-1-4@m uk-child-width-1-5@l uk-margin")
                # get info
                stats = stats.find_all('span', class_= "jsx-3636536621 uk-heading-medium")
                for i in range(0, len(case_type)):
                    state_info[case_type[i]] = int(stats[i].get_text().replace(",", ""))
                state_info["Recovered"] = state_info.get("Infected") - state_info.get("In treatment") - state_info.get("Deaths")
                print(state_info)
                # Append the scraped row to the CSV.
                # NOTE(review): writerow() returns None, so csv_w is clobbered.
                with open(s_path, "a+") as state_stats:
                    csv_w = csv.writer(state_stats)
                    csv_w = csv_w.writerow([date_updated, row[0], state_info.get("Infected"), state_info.get("Recovered"), state_info.get("Deaths"), state_info.get("In treatment")])
                # Upsert the per-state Case row; dailies need yesterday's totals.
                state = State.objects.get(State_Name=row[0])
                date = latest_date.strftime('%Y-%m-%d')
                yesterday = latest_date - timedelta(1)
                try:
                    case = Case.objects.get(Ref_Key=2, Reference_ID = state.State_ID, Date=yesterday)
                except Case.DoesNotExist:
                    case = None
                if not case:
                    state_case, created = Case.objects.get_or_create(Reference_ID = state.State_ID, Date=date, Is_actual=True, defaults = { 'Ref_Key': 2, 'Total_infected' : state_info.get("Infected"), 'Total_deaths': state_info.get("Deaths"),'Total_recoveries': state_info.get("Recovered"), 'Active_cases': state_info.get("In treatment")})
                else:
                    state_case, created = Case.objects.get_or_create(Reference_ID = state.State_ID, Date=date, Is_actual=True, defaults = { 'Ref_Key': 2, 'Total_infected' : state_info.get("Infected"), 'Total_deaths': state_info.get("Deaths"),'Total_recoveries': state_info.get("Recovered"), 'Active_cases': state_info.get("In treatment"), 'Daily_infected': state_info.get("Infected") - case.Total_infected if int(state_info.get("Infected")) - case.Total_infected > 0 else 0, 'Daily_deaths' : state_info.get("Deaths") - case.Total_deaths if state_info.get("Deaths") - case.Total_deaths > 0 else 0})
                if not created:
                    # Row for this date already existed: refresh its totals.
                    state_case.Total_infected = state_info.get("Infected")
                    state_case.Total_deaths = state_info.get("Deaths")
                    state_case.Total_recoveries = state_info.get("Recovered")
                    state_case.Active_cases = state_info.get("In treatment")
                    if case:
                        # Clamp daily deltas at zero.
                        state_case.Daily_deaths = state_info.get("Deaths") - case.Total_deaths if state_info.get("Deaths") - case.Total_deaths > 0 else 0
                        state_case.Daily_infected = state_info.get("Infected") - case.Total_infected if state_info.get("Infected") - case.Total_infected > 0 else 0
                    state_case.save()
                # Zone colour: 1 = no active cases, 3 = more than 40, else 2.
                if state_case.Active_cases == 0:
                    zone_colour = "1"
                elif state_case.Active_cases > 40:
                    zone_colour = "3"
                else:
                    zone_colour = "2"
                zone = Zone(Ref_Key=1, Reference_ID=state.State_ID, Date=state_case.Date, Zone_colour=zone_colour)
                zone.save()
                if not state_stats_updated:
                    date = latest_date.strftime('%d/%m/%Y')
                    with open(c_path, "a+") as state_stats:
                        csv_w = csv.writer(state_stats)
                        csv_w = csv_w.writerow([date, row[0], state_info.get("Infected"), state_info.get("Recovered"), state_info.get("Deaths"), state_info.get("In treatment")])
| 41.151316 | 582 | 0.692886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,802 | 0.28809 |
df4cf960f5cccdcad0be7331412c12f64dc1a259 | 9,022 | py | Python | GAN/Architectures/models/conditional_gan.py | FlipWebApps/computer-vision-playground | 94cf8f0ac49c5d01ea5d1ebb14f857d4af2b0aef | [
"MIT"
] | null | null | null | GAN/Architectures/models/conditional_gan.py | FlipWebApps/computer-vision-playground | 94cf8f0ac49c5d01ea5d1ebb14f857d4af2b0aef | [
"MIT"
] | 4 | 2020-01-28T22:52:04.000Z | 2021-08-25T15:38:35.000Z | GAN/Architectures/models/conditional_gan.py | FlipWebApps/computer-vision-playground | 94cf8f0ac49c5d01ea5d1ebb14f857d4af2b0aef | [
"MIT"
] | null | null | null | import numpy as np
from keras.layers import Concatenate, Input, Dense, Reshape, Flatten, Dropout, multiply, \
BatchNormalization, Activation, Embedding, ZeroPadding2D, Conv2DTranspose
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model, load_model
from keras.optimizers import Adam
from matplotlib import pyplot
from models.gan import GAN
class ConditionalGAN(GAN):
    """Class-conditional GAN (cGAN) built on the project's ``GAN`` base class.

    Both sub-networks take the integer class label as a second input: the
    label is passed through a 50-dimensional ``Embedding`` and merged with the
    image pathway (discriminator) or the latent-vector pathway (generator),
    so sampling can be steered toward a requested class.

    NOTE(review): this class reads ``self.in_shape``, ``self.n_classes`` and
    ``self.latent_dim`` and calls ``summarize_performance``, ``save_model``
    and ``grid_plot`` — presumably provided by the ``GAN`` base class in
    ``models/gan.py``; confirm against that file.  The hard-coded 7x7 / 28x28
    spatial sizes assume MNIST-shaped (28, 28, 1) images.
    """

    def __init__(self, *args, **kwargs):
        # No cGAN-specific state; all configuration is deferred to the base.
        super().__init__(*args, **kwargs)

    def build_discriminator(self):
        """Define and compile the standalone discriminator model.

        Inputs: an image of shape ``self.in_shape`` and an integer label.
        Output: a single sigmoid real/fake probability.  The compiled model
        is stored in ``self.d_model``; nothing is returned.
        """
        # label input
        in_label = Input(shape=(1,))
        # embedding for categorical input
        li = Embedding(self.n_classes, 50)(in_label)
        # scale up to image dimensions with linear activation
        n_nodes = self.in_shape[0] * self.in_shape[1]
        li = Dense(n_nodes)(li)
        # reshape to additional channel (label becomes one extra image plane)
        li = Reshape((self.in_shape[0], self.in_shape[1], 1))(li)
        # image input
        in_image = Input(shape=self.in_shape)
        # concat label as a channel
        merge = Concatenate()([in_image, li])
        # downsample
        fe = Conv2D(128, (3,3), strides=(2,2), padding='same')(merge)
        fe = LeakyReLU(alpha=0.2)(fe)
        # downsample
        fe = Conv2D(128, (3,3), strides=(2,2), padding='same')(fe)
        fe = LeakyReLU(alpha=0.2)(fe)
        # flatten feature maps
        fe = Flatten()(fe)
        # dropout
        fe = Dropout(0.4)(fe)
        # output
        out_layer = Dense(1, activation='sigmoid')(fe)
        # define model
        self.d_model = Model([in_image, in_label], out_layer)
        # compile model
        # NOTE(review): `lr=` is the legacy Keras spelling (newer versions use
        # `learning_rate=`) — consistent with the rest of this file.
        opt = Adam(lr=0.0002, beta_1=0.5)
        self.d_model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])

    def build_generator(self):
        """Define the standalone generator model (not compiled here).

        Inputs: a latent vector of length ``self.latent_dim`` and an integer
        label.  Output: a tanh-activated single-channel 28x28 image.  The
        model is stored in ``self.g_model``; nothing is returned.
        """
        # label input
        in_label = Input(shape=(1,))
        # embedding for categorical input
        li = Embedding(self.n_classes, 50)(in_label)
        # linear multiplication
        n_nodes = 7 * 7
        li = Dense(n_nodes)(li)
        # reshape to additional channel
        li = Reshape((7, 7, 1))(li)
        # image generator input
        in_lat = Input(shape=(self.latent_dim,))
        # foundation for 7x7 image
        n_nodes = 128 * 7 * 7
        gen = Dense(n_nodes)(in_lat)
        gen = LeakyReLU(alpha=0.2)(gen)
        gen = Reshape((7, 7, 128))(gen)
        # merge image gen and label input
        merge = Concatenate()([gen, li])
        # upsample to 14x14
        gen = Conv2DTranspose(128, (4,4), strides=(2,2), padding='same')(merge)
        gen = LeakyReLU(alpha=0.2)(gen)
        # upsample to 28x28
        gen = Conv2DTranspose(128, (4,4), strides=(2,2), padding='same')(gen)
        gen = LeakyReLU(alpha=0.2)(gen)
        # output
        out_layer = Conv2D(1, (7,7), activation='tanh', padding='same')(gen)
        # define model
        self.g_model = Model([in_lat, in_label], out_layer)

    def build_gan(self):
        """Define the combined (stacked) model used to update the generator.

        The discriminator's weights are frozen in the combined model so that
        only the generator is trained through it.  The compiled model is
        stored in ``self.gan_model``.
        """
        # make weights in the discriminator not trainable
        self.d_model.trainable = False
        # get noise and label inputs from generator model
        gen_noise, gen_label = self.g_model.input
        # get image output from the generator model
        gen_output = self.g_model.output
        # connect image output and label input from generator as inputs to discriminator
        gan_output = self.d_model([gen_output, gen_label])
        # define gan model as taking noise and label and outputting a classification
        self.gan_model = Model([gen_noise, gen_label], gan_output)
        # compile model
        opt = Adam(lr=0.0002, beta_1=0.5)
        self.gan_model.compile(loss='binary_crossentropy', optimizer=opt)

    def generate_real_samples(self, X, labels, n_samples):
        """Sample ``n_samples`` real (image, label) pairs with target y=1.

        Returns ``([images, labels], y)`` matching the discriminator's
        two-input signature.
        """
        # choose random instances
        ix = np.random.randint(0, X.shape[0], n_samples)
        # select images and labels
        selected_X, selected_labels = X[ix], labels[ix]
        # generate 'real' class labels (1)
        y = np.ones((n_samples, 1))
        return [selected_X, selected_labels], y

    def generate_latent_points(self, n_samples):
        """Sample generator inputs: latent points plus random class labels.

        Extends the base-class method (which returns only latent points) by
        also drawing uniform-random integer labels in [0, n_classes).
        Returns ``[latent_points, labels]``.
        """
        # generate points in the latent space
        latent_points = super().generate_latent_points(n_samples)
        # generate labels
        labels = np.random.randint(0, self.n_classes, n_samples)
        return [latent_points, labels]

    def generate_generator_prediction_samples(self, n_samples):
        """Use the generator to produce ``n_samples`` fake examples.

        Returns ``([images, labels], y)`` with target y=0 ('fake').
        """
        # generate points in latent space
        latent_points, labels = self.generate_latent_points(n_samples)
        # predict outputs
        X = self.generator_prediction(latent_points, labels)
        # create class labels
        y = np.zeros((n_samples, 1))
        return [X, labels], y

    def generator_prediction(self, latent_points, labels):
        """Run the generator forward on (latent_points, labels)."""
        return self.g_model.predict([latent_points, labels])

    def train(self, X, labels, n_epochs=100, n_batch=128, reporting_period=10):
        """Alternate discriminator / generator training for ``n_epochs``.

        Each batch trains the discriminator on a half-batch of real and a
        half-batch of generated samples, then trains the generator through
        the combined model with inverted ('real') targets.  Per-batch losses
        and per-epoch discriminator accuracies are stored on ``self`` for
        later plotting; every ``reporting_period`` epochs the base-class
        ``summarize_performance`` is called and the model is saved at the end.
        """
        bat_per_epo = int(X.shape[0] / n_batch)
        half_batch = int(n_batch / 2)
        # for recording metrics.
        self.g_loss = np.zeros((n_epochs * bat_per_epo, 1))
        self.d_loss_real = np.zeros((n_epochs * bat_per_epo, 1))
        self.d_loss_fake = np.zeros((n_epochs * bat_per_epo, 1))
        self.d_acc_real = np.zeros((n_epochs * bat_per_epo, 1))
        self.d_acc_fake = np.zeros((n_epochs * bat_per_epo, 1))
        self.d_acc_real_epochs = np.zeros((n_epochs, 1))
        self.d_acc_fake_epochs = np.zeros((n_epochs, 1))
        # manually enumerate epochs
        for i in range(n_epochs):
            # enumerate batches over the training set
            for j in range(bat_per_epo):
                # update discriminator model weights on randomly selected 'real' samples
                [X_real, labels_real], y_real = self.generate_real_samples(X, labels, half_batch)
                d_loss1, d_acc1 = self.d_model.train_on_batch([X_real, labels_real], y_real)
                # update discriminator model weights on generated 'fake' examples
                [X_fake, labels_fake], y_fake = self.generate_generator_prediction_samples(half_batch)
                d_loss2, d_acc2 = self.d_model.train_on_batch([X_fake, labels_fake], y_fake)
                # prepare points in latent space as input for the generator
                [z_input, labels_input] = self.generate_latent_points(n_batch)
                # create inverted labels for the fake samples
                y_gan = np.ones((n_batch, 1))
                # update the generator via the discriminator's error
                g_loss = self.gan_model.train_on_batch([z_input, labels_input], y_gan)
                # summarize loss on this batch
                print('>%d, %d/%d, d1=%.3f, d2=%.3f g=%.3f' %
                    (i+1, j+1, bat_per_epo, d_loss1, d_loss2, g_loss))
                # record losses for later
                self.g_loss[i*bat_per_epo + j] = g_loss
                self.d_loss_real[i*bat_per_epo + j] = d_loss1
                self.d_loss_fake[i*bat_per_epo + j] = d_loss2
                self.d_acc_real[i*bat_per_epo + j] = d_acc1
                self.d_acc_fake[i*bat_per_epo + j] = d_acc2
            # save per epoch metrics
            # evaluate discriminator on real examples
            n_samples = 100
            x_real, y_real = self.generate_real_samples(X, labels, n_samples)
            _, acc_real = self.d_model.evaluate(x_real, y_real, verbose=0)
            self.d_acc_real_epochs[i] = acc_real
            # evaluate discriminator on fake examples
            x_fake, y_fake = self.generate_generator_prediction_samples(n_samples)
            _, acc_fake = self.d_model.evaluate(x_fake, y_fake, verbose=0)
            self.d_acc_fake_epochs[i] = acc_fake
            # every reporting_period, plot out images.
            if i == 0 or (i+1) % reporting_period == 0 or (i+1) == n_epochs:
                self.summarize_performance(i+1, 'conditional-gan')
        # save the generator model
        self.save_model('conditional-gan')

    def plot_random_generated_images(self):
        """Plot a 10x10 grid of 100 randomly generated images.

        NOTE(review): the "reversed grayscale" rendering is presumably done
        inside the base-class ``grid_plot`` — confirm in models/gan.py.
        """
        dimensions=(10, 10)
        figsize=(10, 10)
        n_samples=100
        (X, _), _ = self.generate_generator_prediction_samples(n_samples)
        self.grid_plot(X, dimensions=dimensions, figsize=figsize)
df4d062b612a14812a64e95f016eeeac5bbd7645 | 9,704 | py | Python | ema.py | rsprouse/ema_head_correction | fda63950951cdcfd04d56604d18a7f448c9fde03 | [
"BSD-3-Clause"
] | null | null | null | ema.py | rsprouse/ema_head_correction | fda63950951cdcfd04d56604d18a7f448c9fde03 | [
"BSD-3-Clause"
] | 1 | 2019-04-09T22:00:44.000Z | 2019-04-09T22:00:44.000Z | ema.py | rsprouse/ema_head_correction | fda63950951cdcfd04d56604d18a7f448c9fde03 | [
"BSD-3-Clause"
] | 1 | 2019-03-27T19:54:47.000Z | 2019-03-27T19:54:47.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 21:19:42 2017
@author: ubuntu
"""
import numpy as np
from numpy import cross,dot
from numpy.linalg import norm
import pandas as pd
import os
def read_ndi_data(mydir, file_name, sensors, subcolumns):
    '''
    Read a tab-separated data file produced by NDI WaveFront software.

    The file's own header row is discarded and replaced by a constructed
    header of the form 'time' + '<sensor>_<subcolumn>' for every sensor and
    subcolumn.  Columns whose original header cell is a single space are
    placeholders and are kept under names 'EMPTY0', 'EMPTY1', ...  Rows
    whose per-sensor state column is not "OK" get their x/y/z set to NaN.

    Input
        mydir      - directory where the recording will be found
        file_name  - name of the recording file
        sensors    - a list of sensors expected in the recording
        subcolumns - a list of info fields recorded for each sensor
    Output
        df - a pandas dataframe representation of the whole file
    Raises
        ValueError if the file has more or fewer columns than the
        sensor/subcolumn lists predict.
    '''
    path = os.path.join(mydir, file_name)
    # Constructed header: leading time column, then one column per
    # sensor/subcolumn pair.
    header = ['time'] + [f'{sens}_{sub}' for sens in sensors for sub in subcolumns]
    # Inspect the file's first line to locate empty placeholder columns,
    # whose header cell is a single space.
    with open(path, 'r') as fh:
        raw_fields = fh.readline().rstrip().split('\t')
    n_empty = 0
    for pos, field in enumerate(raw_fields):
        if field == ' ':
            # Insert at the original file position; positions already account
            # for earlier placeholders, so in-order insertion lines up.
            header.insert(pos, f'EMPTY{n_empty}')
            n_empty += 1
    if len(raw_fields) > len(header):
        raise ValueError("too few sensors are specified")
    if len(raw_fields) < len(header):
        raise ValueError("too many sensors are specified")
    # Re-read the whole file, skipping its header row and imposing ours.
    df = pd.read_csv(path, sep='\t', index_col=False,
                     header=None, skiprows=1, names=header)
    # Clean up: x/y/z become NaN wherever the sensor state is not OK.
    for sens in sensors:
        state_col = f'{sens}_state'
        # Skip sensors that are entirely absent (e.g. cable not plugged in),
        # detected by a NaN state in the first row.
        if str(df.loc[0, state_col]) == 'nan':
            continue
        xyz_cols = list(df.loc[:, f'{sens}_x':f'{sens}_z'])
        df.loc[df.loc[:, state_col] != "OK", xyz_cols] = [np.nan, np.nan, np.nan]
    return df
def get_referenced_rotation(df):
    '''
    Given a dataframe representation of a biteplate recording, find the
    rotation matrix that puts the data on the occlusal plane coordinate
    system.

    Input
        df - a dataframe read from a biteplate calibration recording;
             sensor OS is the origin of the occlusal plane coordinate system,
             sensor MS is located on the biteplate some distance posterior
             to OS.  The global reference point REF is taken to be (0,0,0)
             (i.e. the data are assumed already head-corrected by the NDI
             software).
    Output
        OS - the origin of the occlusal plane coordinate system
        m  - a 3x3 rotation matrix (rows are the new x, y, z axes)
    '''
    # BUG FIX: Series.as_matrix() was deprecated in pandas 0.23 and removed
    # in pandas 1.0; use to_numpy() instead (the sibling functions in this
    # file already use .values for the same purpose).
    MS = df.loc[:, ['MS_x', 'MS_y', 'MS_z']].mean(skipna=True).to_numpy()
    OS = df.loc[:, ['OS_x', 'OS_y', 'OS_z']].mean(skipna=True).to_numpy()
    REF = np.array([0, 0, 0])
    ref_t = REF-OS  # the origin of this space is OS, we will rotate around this
    ms_t = MS-OS
    z = cross(ms_t,ref_t)  # z is perpendicular to ms and ref vectors
    z = z/norm(z)
    y = cross(z,ms_t)  # y is perpendicular to z and ms
    y = y/norm(y)
    x = cross(z,y)
    x = x/norm(x)
    m = np.array([x, y, z])  # rotation matrix directly
    return OS, m
def get_desired_head_location(df, protractor=False):
    '''Compute the desired (target) positions of the three head-tracking
    points -- nasion (REF), right mastoid (RMA), left mastoid (LMA) -- such
    that translating and rotating these points onto their targets corrects
    for head movement and puts the data in the occlusal plane coordinate
    system.

    The occlusal plane is given either by a wax biteplate (sensors OS and MS,
    with REF in the sagittal plane) or, when protractor=True, by a protractor
    with right/left/front occlusal sensors (RO, LO, FO); in that case the
    origin is the midpoint of RO and LO and FO plays the role of MS.

    Input
        df - a dataframe containing REF, RMA, LMA and either OS/MS or
             RO/LO/FO columns
        protractor - True when a protractor was used instead of a biteplate
    Output
        origin (OS), and the translated nasion plus the translated-and-
        rotated right and left mastoid positions.  Note only the mastoids
        are rotated; the nasion is returned translated only.
    '''
    # Relative sensor locations are fixed, so averaging over frames is safe.
    if protractor:
        right_occ = df.loc[:, ['RO_x', 'RO_y', 'RO_z']].mean(skipna=True).values
        left_occ = df.loc[:, ['LO_x', 'LO_y', 'LO_z']].mean(skipna=True).values
        molar = df.loc[:, ['FO_x', 'FO_y', 'FO_z']].mean(skipna=True).values
        origin = (right_occ + left_occ) / 2  # origin is midway between RO and LO
    else:
        molar = df.loc[:, ['MS_x', 'MS_y', 'MS_z']].mean(skipna=True).values
        origin = df.loc[:, ['OS_x', 'OS_y', 'OS_z']].mean(skipna=True).values
    nasion = df.loc[:, ['REF_x', 'REF_y', 'REF_z']].mean(skipna=True).values
    r_mastoid = df.loc[:, ['RMA_x', 'RMA_y', 'RMA_z']].mean(skipna=True).values
    l_mastoid = df.loc[:, ['LMA_x', 'LMA_y', 'LMA_z']].mean(skipna=True).values
    # 1) translate the space so the occlusal origin is at (0,0,0)
    ref_t = nasion - origin
    ms_t = molar - origin
    rma_t = r_mastoid - origin
    lma_t = l_mastoid - origin
    # 2) build the rotation matrix into the occlusal coordinate system:
    #    z perpendicular to the molar and nasion vectors, y perpendicular
    #    to z and the molar vector, x completing the right-handed frame.
    z_ax = cross(ms_t, ref_t)
    z_ax = z_ax / norm(z_ax)
    y_ax = cross(z_ax, ms_t)
    y_ax = y_ax / norm(y_ax)
    x_ax = cross(z_ax, y_ax)
    x_ax = x_ax / norm(x_ax)
    rot = np.array([x_ax, y_ax, z_ax])
    # 3) rotate the mastoid points into the new frame
    rma_t = dot(rma_t, rot.T)
    lma_t = dot(lma_t, rot.T)
    return origin, ref_t, rma_t, lma_t
def read_referenced_biteplate(my_dir, file_name, sensors, subcolumns):
    '''
    Read a biteplate calibration recording and compute the occlusal-plane
    rotation for data that have already been head-corrected (referenced)
    by the NDI software.

    Input
        my_dir     - directory where the biteplate file will be found
        file_name  - name of a biteplate calibration recording
        sensors    - a list of sensors in the recording
        subcolumns - a list of info fields recorded for each sensor
    Output
        OS - the origin of the occlusal plane coordinate system
        m  - a 3x3 rotation matrix into that coordinate system
    '''
    bpdata = read_ndi_data(my_dir, file_name, sensors, subcolumns)
    # BUG FIX: this previously called `get_rotation`, which is not defined
    # anywhere in this module (NameError at runtime); the helper defined
    # above is `get_referenced_rotation`.
    OS, m = get_referenced_rotation(bpdata)
    return OS, m
def read_3pt_biteplate(my_dir, file_name, sensors, subcolumns):
    '''
    Read a biteplate calibration recording and compute the desired positions
    of the head-tracking sensors: REF, RMA, LMA (nasion, right mastoid,
    left mastoid).

    Input
        my_dir     - directory where the biteplate file will be found
        file_name  - name of a biteplate calibration recording
        sensors    - a list of sensors in the recording
        subcolumns - a list of info fields recorded for each sensor
    Output
        desired positions of the head location sensors: REF, RMA, LMA
    '''
    bpdata = read_ndi_data(my_dir, file_name, sensors, subcolumns)
    # BUG FIX: get_desired_head_location returns FOUR values
    # (origin, ref_t, rma_t, lma_t); the old three-way unpack raised
    # "ValueError: too many values to unpack".  The origin is not part of
    # this function's documented output, so it is discarded here.
    _origin, REF, RMA, LMA = get_desired_head_location(bpdata)
    return REF, RMA, LMA
def rotate_referenced_data(df, m, origin, sensors):
    '''
    Translate and rotate already head-corrected (NDI-referenced) data into
    the occlusal coordinate system.

    Input
        df      - a pandas dataframe read by read_ndi_data (modified in place)
        m       - a 3x3 rotation matrix computed from a biteplate recording
        origin  - the translation vector (occlusal origin, OS)
        sensors - a list of the sensors to expect in the file; for each we
                  expect columns named "<sensor>_x", "<sensor>_y", "<sensor>_z"
    Output
        df - the same dataframe with the xyz locations translated and rotated
    '''
    # TODO: remove quaternion data, or fix it.
    for sensor in sensors:
        # Column labels for this sensor's coordinates; label slicing picks
        # up the contiguous x..z run.
        xyz_cols = list(df.loc[:, f'{sensor}_x':f'{sensor}_z'])
        coords = df.loc[:, xyz_cols].values
        if sensor == "REF":
            # The nasion reference is pinned to the global origin before the
            # transform (its coordinates collapse to a single point).
            coords = [0, 0, 0]
        # Translate to the occlusal origin, then rotate; write back in place.
        df.loc[:, xyz_cols] = dot(coords - origin, m.T)
    return df
def head_correct_and_rotate(df,REF,RMA,LMA):
    '''This function uses the previously calculated desired locations of three sensors
    on the head -- nasion (REF), right mastoid (RMA), and left mastoid (LMA) and based
    on the locations of those sensors in each frame, finds a translation and rotation
    for the frame's data, and then applies those to each sensor in that frame.

    NOTE(review): this function is an UNFINISHED STUB and will raise NameError
    if called: `rowan` is never imported in this module, and `hdvals`,
    `idealhd` and `allvals` are undefined.  The parameters `df`, `REF`, `RMA`
    and `LMA` are never used.  Presumably the intent is a per-frame loop that
    builds `hdvals` from the frame's REF/RMA/LMA coordinates, `idealhd` from
    the desired positions, and `allvals` from all sensor coordinates --
    complete and verify before use.
    '''
    # for each frame
    # 1) find the translation and rotation that will move the head into the occlusal coordinate system
    # this is where we use Horns direct method of fitting to an ideal triangle
    # (Kabsch/Horn absolute-orientation fit, via the third-party `rowan` package)
    R, t = rowan.mapping.kabsch(hdvals, idealhd)
    q = rowan.from_matrix(R) # Convert to quaternion
    # 2) apply the translation and rotation to each sensor in the frame.
    return rowan.rotate(q, allvals) + t
''' Question: should we smooth the head position sensors prior to head correction?
A reason to do this is that we can then avoid losing any data due to calibration sensor dropout
(assuming that missing frames are rare and can be interpolated). Smoothing might also produce more
accurate data because we constrain our estimate of the location of the head (a very slow-moving
structure) by neighboring points in time.
'''
def save_rotated(mydir, fname, df, myext='ndi'):
    '''
    Save rotated data next to the original file, with the extension replaced
    (by default producing a *.ndi file).

    Input
        mydir - directory where the data will be found
        fname - the name of the original .tsv file
        df    - a pandas dataframe containing the processed/rotated data
        myext - extension (without dot) for the output file
    '''
    full_path = os.path.join(mydir, fname)
    stem, _old_ext = os.path.splitext(full_path)
    out_path = f'{stem}.{myext}'
    # Tab-separated, no index column -- matches the input format.
    df.to_csv(out_path, sep="\t", index=False)
| 37.038168 | 110 | 0.629637 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,869 | 0.604802 |
df4d44c34dc573aeaadace851604ddc345ba3d4d | 2,879 | py | Python | applications/convection_diffusion_application/tests/test_apply_thermal_face_process.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
] | 2 | 2020-04-30T19:13:08.000Z | 2021-04-14T19:40:47.000Z | applications/convection_diffusion_application/tests/test_apply_thermal_face_process.py | Jacklwln/Kratos | 12ffe332622d7e8ea3e4a10bc061beb9d8e6e8de | [
"BSD-4-Clause"
] | 1 | 2020-04-30T19:19:09.000Z | 2020-05-02T14:22:36.000Z | applications/convection_diffusion_application/tests/test_apply_thermal_face_process.py | Jacklwln/Kratos | 12ffe332622d7e8ea3e4a10bc061beb9d8e6e8de | [
"BSD-4-Clause"
] | 1 | 2020-06-12T08:51:24.000Z | 2020-06-12T08:51:24.000Z | from __future__ import print_function, absolute_import, division
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as UnitTest
import KratosMultiphysics.ConvectionDiffusionApplication as ConvectionDiffusionApplication
class ApplyThermalFaceProcessTest(UnitTest.TestCase):
    """Checks that ApplyThermalFaceProcess assigns a single new Properties
    object (carrying the ambient radiation/convection settings) to all
    conditions of the face model part, without touching the five properties
    created on the root model part's sub model parts.
    """

    def runTest(self):
        # Create a model part containing some properties
        self.model = KratosMultiphysics.Model()
        root_model_part = self.model.CreateModelPart("MainModelPart")
        for i in range(5):
            sub_model_part = root_model_part.CreateSubModelPart("SubModelPart" + str(i))
            new_property_i = KratosMultiphysics.Properties(i)
            sub_model_part.AddProperties(new_property_i)
        # Create a fake interface condition (two ThermalFace2D2N conditions
        # on three collinear nodes)
        root_model_part.CreateNewNode(1, 0.0, 0.0, 0.0)
        root_model_part.CreateNewNode(2, 1.0, 0.0, 0.0)
        root_model_part.CreateNewNode(3, 2.0, 0.0, 0.0)
        root_model_part.CreateNewCondition("ThermalFace2D2N", 1, [1,2], root_model_part.GetProperties()[1])
        root_model_part.CreateNewCondition("ThermalFace2D2N", 2, [2,3], root_model_part.GetProperties()[2])
        # Create a fake interface model part
        interface_model_part = root_model_part.CreateSubModelPart("FaceModelPart")
        interface_model_part.AddCondition(root_model_part.GetCondition(1))
        interface_model_part.AddCondition(root_model_part.GetCondition(2))
        # Call the apply_thermal_face_process
        interface_model_part = self.model.GetModelPart("FaceModelPart")
        import apply_thermal_face_process
        settings = KratosMultiphysics.Parameters(r'''{
            "model_part_name": "FaceModelPart",
            "ambient_temperature": 300.0,
            "add_ambient_radiation": true,
            "emissivity": 0.1,
            "add_ambient_convection": true,
            "convection_coefficient": 0.0
        }''')
        apply_thermal_face_process.ApplyThermalFaceProcess(self.model, settings)

    def checkResults(self):
        # Check the interface properties: one shared Properties on the face
        # model part, six in total on the root (5 sub-model-part ones + 1 new).
        face_model_part = self.model.GetModelPart("FaceModelPart")
        face_properties = face_model_part.GetCondition(1).Properties
        self.assertEqual(face_model_part.NumberOfProperties(), 1)
        self.assertEqual(face_model_part.GetRootModelPart().NumberOfProperties(), 6)
        # BUG FIX: the third positional argument of assertAlmostEqual is
        # `places` (an int); passing 1e-12 there raises TypeError whenever the
        # compared values are not exactly equal.  An absolute tolerance is
        # expressed with the `delta` keyword instead.
        self.assertAlmostEqual(face_properties.GetValue(KratosMultiphysics.EMISSIVITY), 0.1, delta=1e-12)
        self.assertAlmostEqual(face_properties.GetValue(KratosMultiphysics.AMBIENT_TEMPERATURE), 300.0, delta=1e-12)
        self.assertAlmostEqual(face_properties.GetValue(KratosMultiphysics.CONVECTION_COEFFICIENT), 0.0, delta=1e-12)

    def testThermalFaceProcess(self):
        # Standard unittest entry point: run the process, then verify.
        self.runTest()
        self.checkResults()
if __name__ == '__main__':
    # Allow running this file directly, bypassing the test runner.
    tester = ApplyThermalFaceProcessTest()
    tester.runTest()
    tester.checkResults()
| 48.79661 | 111 | 0.72421 | 2,530 | 0.878777 | 0 | 0 | 0 | 0 | 0 | 0 | 579 | 0.201111 |