text stringlengths 8 6.05M |
|---|
"""
LeetCode - Easy
"""
class Solution(object):
    """LeetCode (Easy): remove all vowels from a string."""

    # Vowels in both cases; frozenset gives O(1) membership tests.
    _VOWELS = frozenset("aeiouAEIOU")

    def removeVowels(self, S):
        """
        Return S with every vowel (upper- or lower-case) removed.

        :type S: str
        :rtype: str

        Single O(n) pass.  The original rebuilt the whole string once per
        vowel (O(n^2)) with fragile index/break bookkeeping; this preserves
        the same result for every input, including "" and all-vowel strings.
        """
        return "".join(ch for ch in S if ch not in self._VOWELS)
if __name__ == '__main__':
    # Smoke-test the solver on a few sample inputs (results are discarded,
    # exactly as in the original driver).
    solver = Solution()
    for sample in ("leetcodeisacommunityforcoders", "aeiou", "njto"):
        solver.removeVowels(sample)
|
# Accumulates one row of QC metrics per parameter combination tried below.
# NOTE(review): relies on names defined elsewhere in the full file: pd, np,
# DEG_file, H3K27ac_norm, H3K4me3_norm, H3K4me3_window, output_dir, gtf_df,
# weight_dict, averaged_TSS_2_H3K27me3, genome_path_gtf_small, the helper
# functions, and the sklearn imports -- confirm against the complete file.
output = pd.DataFrame()
# these are all variables that can be tweaked to see which ones generate the best performing model:
param_grid = {
    "DEG_log2FC_cutoff": [0],
    "DEG_pvalue": [0.05],
    "H3K27ac_var_cutoff": [0],
    "H3K27ac_cutoff": [0],  # [2.3],
    "H3K4me3_cutoff": [0],  # [2.7],
    "H3K4me3_var_cutoff": [0.0],
    "distance_prom_regions": [20000],
    "min_counts":[100],
}
grid = ParameterGrid(param_grid)
# loop over all potential variable settings and run the code for all;
# append the output of each iteration to the 'output' frame to compare how
# well different versions of the model performed.
for params in grid:
    # First find all differential genes based on the parameters of the model,
    # using a DESeq2 result table and filtering on p-value.
    DEG_pd = pd.read_table(
        DEG_file,
        sep=",",
        index_col=0,
        dtype={
            "baseMean": float,
            "log2FoldChange": float,
            "lfcSE": float,
            "stat": float,
            "pvalue": float,
            "padj": float,
            "gene": str,
            "total_counts": float,
        },
    )
    # filter away genes with too few counts
    DEG_pd = DEG_pd[DEG_pd.total_counts > params['min_counts']]
    # Make a better annotation column. For now all DEGs get a 1; the two-branch
    # np.select setup is kept to allow up/down-regulated genes to be labelled
    # differently later (hence this 'over-engineered' solution).
    conditions = [(
        (DEG_pd.log2FoldChange > params["DEG_log2FC_cutoff"])
        & (DEG_pd.padj < params["DEG_pvalue"])
    ),
        ((DEG_pd.log2FoldChange < -params["DEG_log2FC_cutoff"])
         & (DEG_pd.padj < params["DEG_pvalue"] )
         ),
    ]
    choices = [
        1,
        1,
    ]
    DEG_pd["gene_annotation"] = np.select(conditions, choices, default=0)
    # Find the variable regions for the promoter and enhancer data. A threshold
    # on how high a cis-regulatory element's signal should be could be set here,
    # but it added little information, so both cutoffs are currently 0.
    H3K27ac_regions = find_active_and_variable_regions(
        H3K27ac_norm,
        params["H3K27ac_cutoff"],
        params["H3K27ac_var_cutoff"],
        "KC_enh",
        "LSC_enh",
    )
    H3K27ac_var_regions = H3K27ac_regions[
        (
            (H3K27ac_regions.loc[:, "variable"] == True)
            & (H3K27ac_regions.loc[:, "active"] == True)
        )
    ]
    make_bedfile_from_column(
        H3K27ac_var_regions, "ensmbl_loc", f"{output_dir}/variable_H3K27ac.bed"
    )
    ## H3K4me3: same active+variable filtering for the promoter mark.
    H3K4me3_regions = find_active_and_variable_regions(
        H3K4me3_norm,
        params["H3K4me3_cutoff"],
        params["H3K4me3_var_cutoff"],
        "KC_prom",
        "LSC_prom",
    )
    H3K4me3_var_regions = H3K4me3_regions[
        (
            (H3K4me3_regions.loc[:, "variable"] == True)
            & (H3K4me3_regions.loc[:, "active"] == True)
        )
    ]
    # Split the "chrom:start-end" location string into separate columns.
    H3K4me3_var_regions[["chrom", "start", "end"]] = H3K4me3_var_regions[
        "ensmbl_loc"
    ].str.split("[:-]", expand=True)
    # Get the summit location (window midpoint) of each H3K4me3 region.
    H3K4me3_var_regions["summit_start"] = (
        H3K4me3_var_regions["start"].astype(int) + H3K4me3_window / 2
    )
    H3K4me3_var_regions["summit_end"] = H3K4me3_var_regions["summit_start"] + 1
    H3K4me3_var_regions["summit_end"] = H3K4me3_var_regions["summit_end"].astype(int)
    H3K4me3_var_regions["summit_start"] = H3K4me3_var_regions["summit_start"].astype(
        int
    )
    # NOTE(review): bare expression below has no effect (notebook leftover).
    H3K4me3_var_regions
    H3K4me3_var_regions["ensmbl_loc_summit"] = (
        H3K4me3_var_regions["chrom"]
        + ":"
        + H3K4me3_var_regions["summit_start"].astype(str)
        + "-"
        + H3K4me3_var_regions["summit_end"].astype(str)
    )
    make_bedfile_from_column(
        H3K4me3_var_regions, "ensmbl_loc_summit", f"{output_dir}/variable_H3K4me3.bed"
    )
    # Link all TSSs to the variable bedfile locations:
    # the H3K4me3 regions are mapped to the closest TSS,
    # while the enhancers are mapped to all TSS regions within a window.
    TSS_2_H3K27ac = TSS_window_to_region(
        genome_path_gtf_small, f"{output_dir}/variable_H3K27ac.bed", f"100000"
    )
    TSS_2_H3K4me3 = TSS_to_region(
        genome_path_gtf_small, f"{output_dir}/variable_H3K4me3.bed", f"-k 1 -d"
    )
    # Load the output bedfile with the distance between each TSS and its closest
    # variable H3K4me3 mark, and add the variable histone intensity score.
    TSS_2_H3K27ac["loc"] = (
        TSS_2_H3K27ac["Chrom"]
        + ":"
        + TSS_2_H3K27ac["ChromStart"].astype(str)
        + "-"
        + TSS_2_H3K27ac["ChromEnd"].astype(str)
    )
    TSS_2_H3K27ac = TSS_2_H3K27ac.merge(
        H3K27ac_var_regions.iloc[:, [0, 1]],
        how="left",
        left_on="loc",
        right_index=True,
    )
    TSS_2_H3K4me3["loc"] = (
        TSS_2_H3K4me3["Chrom"]
        + ":"
        + TSS_2_H3K4me3["ChromStart"].astype(str)
        + "-"
        + TSS_2_H3K4me3["ChromEnd"].astype(str)
    )
    TSS_2_H3K4me3 = TSS_2_H3K4me3.merge(
        H3K4me3_var_regions.iloc[:, [0, 1, 12]],
        how="left",
        left_on="loc",
        right_on="ensmbl_loc_summit",
    )
    # Calculate the intensity of each histone mark. For the enhancers use a
    # distance weight matrix (from ANANSE). For the H3K27me3 a window across the
    # TSS is used, while promoter marks map each TSS to the closest (variable)
    # H3K4me3-ATAC region within the configured distance.
    ## H3K27ac
    weighted_TSS_2_H3K27ac = distance_weight_region_average(TSS_2_H3K27ac, weight_dict)
    weighted_TSS_2_H3K27ac["gene_name"] = weighted_TSS_2_H3K27ac.index
    weighted_TSS_2_H3K27ac = weighted_TSS_2_H3K27ac.rename(
        columns={
            0: "KC",
            1: "LSC",
            2: "n_enh",
            "mean_int": "mean_int_ac",
            "abs_FC": "abs_FC_ac",
            "FC": "FC_ac",
        }
    )
    weighted_TSS_2_H3K27ac = weighted_TSS_2_H3K27ac[
        ["gene_name", "mean_int_ac", "FC_ac", "abs_FC_ac", "n_enh"]
    ]
    ## calculate the mean H3K4me3 signal and the (absolute) FC between KC and LSC
    TSS_2_H3K4me3["mean_int_prom"] = np.mean(
        TSS_2_H3K4me3.loc[:, ["KC", "LSC"]], axis=1
    )
    TSS_2_H3K4me3["FC_prom"] = np.log2(
        TSS_2_H3K4me3["KC"].astype(float) / TSS_2_H3K4me3["LSC"]
    ).astype(float)
    TSS_2_H3K4me3["FC_abs_prom"] = abs(TSS_2_H3K4me3["FC_prom"])
    averaged_TSS_2_H3K4me3 = TSS_2_H3K4me3[
        ["gene_name", "mean_int_prom", "FC_prom", "FC_abs_prom", "gene_region_dist"]
    ]
    averaged_TSS_2_H3K4me3 = averaged_TSS_2_H3K4me3.drop_duplicates(subset="gene_name")
    # keep only promoter regions close enough to the TSS
    averaged_TSS_2_H3K4me3 = averaged_TSS_2_H3K4me3[
        averaged_TSS_2_H3K4me3["gene_region_dist"] < params["distance_prom_regions"]
    ]
    # Merge the per-mark tables into one feature frame per gene.
    # NOTE(review): averaged_TSS_2_H3K27me3 is built elsewhere in the file.
    TSS_2_all = weighted_TSS_2_H3K27ac.merge(
        averaged_TSS_2_H3K4me3, how="outer", on="gene_name"
    ).merge(averaged_TSS_2_H3K27me3, how="outer", left_on="gene_name", right_index=True)
    TSS_2_all = TSS_2_all.merge(
        DEG_pd[["log2FoldChange", "padj", "gene_annotation", "baseMean"]],
        how="left",
        left_on="gene_name",
        right_index=True,
    )
    TSS_2_all = TSS_2_all[TSS_2_all["gene_annotation"].notna()]
    # replace +/- inf (from log2 of zero signal) so the imputer can handle them
    TSS_2_all = TSS_2_all.replace([np.inf, -np.inf], np.nan)
    # lets make the X and y df for the logistic regression model:
    X = TSS_2_all.loc[
        :,
        [
            "gene_name",
            "mean_int_ac",
            "abs_FC_ac",
            "n_enh",
            "mean_int_prom",
            "FC_abs_prom",
            "gene_region_dist",
            "mean_int_repr",
            "FC_abs_repr",
        ],
    ]
    X = X.astype(
        {
            "gene_name": str,
            "mean_int_ac": np.float32,
            "abs_FC_ac": np.float32,
            "n_enh": np.float32,
            "mean_int_prom": np.float32,
            "FC_abs_prom": np.float32,
            "gene_region_dist": np.float32,
            "mean_int_repr": np.float32,
            "FC_abs_repr": np.float32,
        }
    )
    y = TSS_2_all.loc[:, ["gene_annotation", "gene_name"]]
    # next we will split the dataset in test and train data, using all genes on chrom 1 as a test dataset
    chr1_gene_names = gtf_df[gtf_df["Chrom_TSS"] == "1"]["gene"]
    X_test = X[X["gene_name"].isin(chr1_gene_names)].drop("gene_name", axis=1)
    y_test = y[y["gene_name"].isin(chr1_gene_names)].drop("gene_name", axis=1)
    # '-' negates the boolean mask (same as '~'): everything NOT on chr1
    X_train = X[-X["gene_name"].isin(chr1_gene_names)].drop("gene_name", axis=1)
    y_train = y[-y["gene_name"].isin(chr1_gene_names)].drop("gene_name", axis=1)
    X = X.drop("gene_name", axis=1)
    y = y.drop("gene_name", axis=1)
    # since it is an unbalanced dataset (way more genes are non-diff than diff), we calculate the ratio and
    # feed this to the model so it gives a higher weight to guessing the diff genes right.
    weights = {
        1: TSS_2_all["gene_annotation"].value_counts(normalize=True)[0],
        0: TSS_2_all["gene_annotation"].value_counts(normalize=True)[1],
    }
    # Create a logistic regression pipeline:
    # first impute the NaN values present in the DF, then
    # perform scaling (mostly for the n_enh and distance-to-promoter values),
    # finally run logistic regression.
    steps = [
        (
            "imputation",
            # fill_value is ignored with strategy='most_frequent'
            SimpleImputer(missing_values=np.nan, strategy='most_frequent', fill_value=0),
        ),
        ("scaler", StandardScaler()),
        (
            "model",
            LogisticRegression(
                solver="liblinear", class_weight=weights, penalty="l1", C=1,
            ),
        ),
    ]
    pipeline = Pipeline(steps)
    # train the entire pipeline based on all genes except the ones on chr1
    pipeline.fit(
        X_train, y_train,
    )
    # calculate the ROC AUC with 3-fold cross-validation over all the data
    cv_auc = cross_val_score(pipeline, X, y, cv=3, scoring="roc_auc")
    # predict if the genes on chr1 are differential or not
    y_pred = pipeline.predict(X_test)
    y_pred_prob = pipeline.predict_proba(X_test)[:, 1]
    # how precise was the model on the held-out chr1 genes
    av_PC = average_precision_score(y_test, y_pred_prob)
    # save all the QC metrics of the settings with this model.
    result = {
        "cutoff_vallues": str(params),
        "accuracy": str(metrics.accuracy_score(y_test, y_pred)),
        "cv_AUC": np.mean(cv_auc),
        "average_precision_score": av_PC,
    }
    print(result)
    # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
    # pd.concat is the modern replacement.
    output = output.append(result, ignore_index=True)
|
#-*- coding: utf-8 -*-
class ResultOutputer(object):
    """Collects scraped records and renders them as a Bootstrap-styled HTML table."""

    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        """Store one record (a dict with 'url', 'title', 'summary'); None is ignored."""
        if data is None:
            return
        self.datas.append(data)

    def output_html(self, fileName):
        """Write every collected record to <fileName>.html as a two-column table."""
        parts = []
        parts.append('<html>')
        parts.append("<head><meta http-equiv=\"content-type\" content=\"text/html;charset=utf-8\"></head>")
        parts.append('<body>')
        parts.append('<table class="table table-striped">')
        parts.append('<tr><th>词条名</th><th>简介</th></tr>')
        for data in self.datas:
            parts.append('<tr>')
            parts.append('<td><button type="button" class="btn btn-link"><a href="%s">%s</a></button></td>' % (data['url'], data['title']))
            parts.append('<td>%s</td>' % data['summary'])
            parts.append('</tr>')
        parts.append('</table>')
        parts.append('<link rel="stylesheet" href="https://cdn.bootcss.com/bootstrap/3.3.7/css/bootstrap.min.css" integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" crossorigin="anonymous">')
        parts.append('</body>')
        parts.append('</html>')
        # Single buffered write; the byte content matches the original
        # sequence of individual f.write() calls exactly.
        with open(fileName + '.html', 'w', encoding='utf-8') as f:
            f.write(''.join(parts))
|
#!/usr/bin/python
#Apply operation for every combination in collection.
def ForEachCombination(collection, operation, pre_seq=None):
    """Apply *operation* to every ordering of *collection*.

    Despite the name, this enumerates permutations, not combinations: each
    invocation of *operation* receives pre_seq plus one complete ordering of
    the remaining items (as a list).

    :param collection: items to permute; the internal set() difference
        collapses duplicates, as in the original implementation.
    :param operation: callable invoked once per complete sequence.
    :param pre_seq: recursion-internal prefix; callers normally omit it.
        The default is now None instead of a mutable list literal --
        the classic mutable-default-argument pitfall (the list object is
        shared across calls).  Passing an explicit list still works, so
        the interface is backward compatible.
    """
    if pre_seq is None:
        pre_seq = []
    if len(collection) == 0:
        return
    if len(collection) == 1:
        operation(pre_seq + list(collection))
        return
    for item in collection:
        # Recurse on the remaining items with `item` appended to the prefix.
        ForEachCombination(set(collection) - set([item]), operation, pre_seq + [item])
if __name__=='__main__':
    # Demo 1: print every ordering of [1, 2, 3].  (Python 2 print statements.)
    def Operation(x):
        print x
    ForEachCombination([1,2,3], Operation)
    # Demo 2: collect only the orderings of [1,2,3,4] where 2*x[1] + x[2] == 7.
    cmb= []
    ForEachCombination([1,2,3,4], lambda x:cmb.append(x) if 2*x[1]+x[2]==7 else None)
    print cmb
|
# Find the best selling item for each month where the biggest total invoice was paid.
# The best selling item is calculated using the formula (unitprice * quantity).
# Output the description of the item along with the amount paid.
# Import your libraries
import pandas as pd
# Start writing code
# NOTE(review): `online_retail` is a DataFrame supplied by the hosting
# exercise environment; it is not defined in this file.
online_retail['month'] = online_retail['invoicedate'].dt.month
online_retail['total_invoice'] = online_retail['quantity'] * online_retail['unitprice']
# Revenue per (month, description), then each month's maximum revenue.
or_max = online_retail[['month', 'total_invoice', 'description']]
or_max = or_max.groupby(['month','description'], as_index=False)['total_invoice'].sum()
or_mo_max = or_max.groupby('month', as_index=False)['total_invoice'].max()
# Join back on (month, max revenue) to recover the winning description(s).
or_max.merge(or_mo_max, on=['month','total_invoice'])
# Alternative ranking-based approach, kept for reference:
# df['rank'] = df.groupby('month')['total_amount'].rank(method='max',ascending=False)
# final=df[df['rank']==1][['month', 'description','total_amount']]
|
# Generated by Django 3.0.2 on 2020-06-03 19:55
import apps.bboard.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering the bboard Img.img ImageField."""

    dependencies = [
        ('bboard', '0007_img'),
    ]
    operations = [
        migrations.AlterField(
            model_name='img',
            name='img',
            # NOTE(review): Django's height_field/width_field expect the *name*
            # (a string) of a model field in which to store the image
            # dimensions, not an integer size -- passing 100 here looks like a
            # bug; confirm against the Img model before changing the migration.
            field=models.ImageField(height_field=100, upload_to=apps.bboard.models.get_timestamp_path, verbose_name='Изображение', width_field=100),
        ),
    ]
|
from .api import LinkedDataFrame
from .constants import LinkageSpecificationError, LinkAggregationRequired
|
# Scratch file experimenting with str.format; only the last four lines are live.
# (The misspelling 'capitel' is inside the string literal and left untouched.)
capital = 'London is the capitel of Great Britain'
#template = '{1} is the capital of {0}'
#print(template.format("London", "Great Britain"))
#print(template.format("Vaduz", "Liechtenstein"))
#print(template.format.__doc__)
#template = '{capital} is the capital of {country}'
#print(template.format(capital="London", country="GB"))
#print(template.format(country="Australia", capital="Kanberra"))
#import requests
#template - "Response from {0.url} with code {0.status_code}"
#res = requests.get("https://docs.python.org/3.5/")
#print(template.format(res))
#res = requests.get("https://docs.python.org/3.5/random")
#print(template.format(res))
from random import random
x = random()
print(x)
# Same value formatted to 3 significant digits.
print("{:.3}".format(x))
|
#!/usr/bin/env python
# pepXML post-processing: filter search hits by score, resolve heavy/light
# labels and markers, and emit ipi_name / all_scan / cross_scan tables.
# (Python 2 script: uses print statements below.)
import xml.etree.ElementTree as ET
from math import fabs,log
import sys
# XML namespace used by pepXML documents.
ns = {'pep': 'http://regis-web.systemsbiology.net/pepXML'}
# Tolerance (Da) when matching an observed modification mass to a configured one.
mass_tol = 0.01
# Monoisotopic residue masses for the standard amino acids
# ('n' = N-terminal hydrogen).
mass_norm_AA = {
    "G" : 57.02146,
    "A" : 71.03711,
    "S" : 87.03203,
    "P" : 97.05276,
    "V" : 99.06841,
    "T" :101.04768,
    "C" :103.00918,
    "L" :113.08406,
    "I" :113.08406,
    "N" :114.04293,
    "D" :115.02694,
    "Q" :128.05858,
    "K" :128.09496,
    "E" :129.04259,
    "M" :131.04048,
    "H" :137.05891,
    "F" :147.06841,
    "R" :156.10111,
    "Y" :163.06333,
    "W" :186.07931,
    "n" :1.0078
}
# Extra mass allowed on the protein N-terminus (42.0106 Da -- presumably
# acetylation; confirm against the search parameters).
mass_nPter_mod = 42.01060
# Fixed mass added to every cysteine ("blocked" C; presumably an alkylation
# reagent -- confirm value against the search parameters).
mass_C_blocker = 57.02416
# CLI: <score_cutoff> <mod_string> <style> <pepxml files...>
sc_cut = float(sys.argv[1])
modstr = sys.argv[2]
style = sys.argv[3]
# parsing the modification description string
# elements split by |
# each element has 3/4 components split by ,
# AA, diffmass, [H/L], [marker]
mod_lst = []
std_lst = {}
for mod in modstr.split('|'):
    elems = mod.split(',')
    AA = elems[0]
    mass = float(elems[1])
    if AA=="C": mass = mass + mass_C_blocker
    tag = elems[2]
    if mass == 0:
        # zero diff-mass: this residue's tag is its default (unlabeled) state
        std_lst[AA] = tag
    else:
        if len(elems)>3:
            mod_lst.append((AA,mass,tag,elems[3]))
        else:
            mod_lst.append((AA,mass,tag))
# Global accumulators filled by process_scan() across all input files.
ipi_lst = {}    # protein -> accepted-hit count
scan_lst = []   # one "key run scan HL" line per accepted scan
cross_tab = {}  # scan_key -> [(neutral_mass, scan_id, score), ...]
def get_label( labels, seq ):
    """Decide the heavy/light call for a peptide.

    First fills in the default (unlabeled) tag for every residue in *seq*
    that appears in std_lst but has no explicit label yet (mutating the
    caller's dict, as the original did).  Returns the single consistent
    label, "light" for an unlabeled peptide in pro/pos style, or "NULL"
    when the labels are mixed.
    """
    for pos, residue in enumerate(seq):
        if residue in std_lst and pos not in labels:
            labels[pos] = std_lst[residue]
    distinct = set(labels.values())
    if not distinct:
        return "light" if style in ["pro", "pos"] else "NULL"
    if len(distinct) == 1:
        return distinct.pop()
    return "NULL"
def mark_seq(uAA, seq, dAA, markers):
    """Format a peptide as 'prev.SEQUENCE.next', inserting each marker
    character immediately after its (0-based) residue index.

    *markers* is a list of (marker_char, index) pairs, assumed ordered by
    index as produced by process_scan().
    """
    if not markers:
        return uAA + "." + seq + "." + dAA
    pieces = []
    cursor = 0
    for marker, ndx in markers:
        pieces.append(seq[cursor:ndx + 1] + marker)
        cursor = ndx + 1
    pieces.append(seq[cursor:])
    return uAA + "." + "".join(pieces) + "." + dAA
def process_scan( hit, scan_id, neutral_mass, rt, charge ):
    """Filter one pepXML search hit and, if accepted, record it in the
    module-level accumulators ipi_lst, scan_lst and cross_tab.

    A hit is kept when -log(expect) clears sc_cut and its modification
    labels are consistent (get_label returns something other than "NULL").
    Also relies on per-file globals com_fn and fn_ndx set by the main loop.
    """
    score_map = {}
    for sc in hit.findall('pep:search_score', ns ):
        score_map[sc.attrib['name']]= float(sc.attrib['value'])
    #score = score_map["hyperscore"]
    # Score = -log(expectation value); larger is better.
    score = -log(score_map["expect"])
    if score < sc_cut: return
    #good scan
    pro = hit.attrib['protein'].strip()
    seq = hit.attrib['peptide']
    pep_mass = float( hit.attrib['calc_neutral_pep_mass'] )
    uAA = hit.attrib['peptide_prev_aa']
    dAA = hit.attrib['peptide_next_aa']
    num_tryp_ter = int(hit.attrib['num_tol_term'])
    #num_tot_proteins
    rank = int( hit.attrib['hit_rank'] )
    tot_num_ions = int( hit.attrib['tot_num_ions'] )
    num_matched_ions = int( hit.attrib['num_matched_ions'] )
    delta_mass = float(hit.attrib['massdiff'])
    num_miss_clv = int( hit.attrib['num_missed_cleavages'] )
    labels = {}   # residue index -> H/L tag
    markers = []  # (marker_char, residue index) pairs for mark_seq()
    # Match every reported modified residue against the configured mod list.
    for mod_info in hit.findall('pep:modification_info', ns):
        for mod in mod_info.findall('pep:mod_aminoacid_mass', ns):
            ndx = int(mod.attrib['position'])-1
            mass = float(mod.attrib['mass'])
            ideal_mass = mass_norm_AA[seq[ndx]]
            diff_mass = mass - ideal_mass
            #print seq[ndx], diff_mass
            for m in mod_lst:
                if m[0] == "n" and ndx == 0: #nTerm
                    #ideal_mass = mass_norm_AA[seq[0]]
                    # Protein N-terminus (uAA == "-") may additionally carry
                    # the fixed N-terminal modification mass.
                    if fabs(diff_mass-m[1])<mass_tol or ( uAA=="-" and fabs(diff_mass-mass_nPter_mod-m[1])<mass_tol ):
                        #print "nTerm"
                        if len(m[2])>0: labels[0]=m[2]
                        if len(m)>3: markers.append((m[3],0))
                elif m[0] == seq[ndx] and fabs(diff_mass-m[1])<mass_tol: #normal
                    #print "K-heavy"
                    if len(m[2])>0: labels[ndx]=m[2]
                    if len(m)>3: markers.append((m[3],ndx))
    # pep/pos styles require at least one marked residue.
    if style in ["pep", "pos"]:
        if len(markers)==0:
            return
    # check labels
    label = get_label( labels, seq )
    if label != "NULL":
        # add protein (count accepted hits per protein)
        if pro not in ipi_lst.keys():
            ipi_lst[pro] = 1
        else:
            ipi_lst[pro] = ipi_lst[pro]+1
        gene = pro.split()[0].split('|')[1]
        if pro[:7] == "Reverse": gene = "Reverse_" + gene
        # add scan
        all_seq = mark_seq(uAA, seq, dAA, markers)
        #print all_seq
        scan_key = gene + ":" + all_seq + ":" + str(charge) + ":" + fn_ndx
        scan_lst.append(scan_key + " " + com_fn + " " + str(scan_id) + " " + label + "\n")
        # add cross
        if scan_key not in cross_tab.keys():
            cross_tab[scan_key] = []
        cross_tab[scan_key].append((neutral_mass, scan_id, score))
def get_symbol(disc):
    """Extract a gene symbol from a protein description string.

    Prefers an explicit 'GN=<symbol>' token; otherwise falls back to the
    first whitespace-separated token, or "NULL" for an empty description.
    """
    gene = ""
    elems = disc.split()
    if "GN=" in disc:
        #get from record
        for elem in elems:
            if elem[:3] == "GN=":
                gene = elem[3:]
    elif len(elems)>0:
        gene = elems[0]
    else:
        gene = "NULL"
    # NOTE(review): looks like leftover debug output -- confirm it is wanted.
    print disc
    return gene
#loop all pepXML files given on the command line (argv[4:])
for fn in sys.argv[4:]:
    # Split "<common>_<index>.ext" into the shared run name and the file index.
    raw_fn = fn.split('/')[-1]
    elems = raw_fn.split('_')
    com_fn = ""
    for elem in elems[:-1]:
        com_fn = com_fn + elem + "_"
    com_fn = com_fn[:-1]
    fn_ndx = elems[-1].split(".")[0]
    try:
        tree = ET.parse(fn)
    except:
        print "Open pepXML file", fn, "failed!"
        sys.exit()
    root = tree.getroot()
    for summary in root.findall('pep:msms_run_summary', ns):
        for query in summary.findall('pep:spectrum_query', ns):
            #print query.attrib['spectrum'], query.attrib['index'], query.attrib['assumed_charge'], query.attrib['precursor_neutral_mass']
            scan_id = int(query.attrib['start_scan'])
            if scan_id == 0: #for fixed mgf
                scan_id = int(query.attrib['spectrum'].split('|')[-1])
            mass = float(query.attrib['precursor_neutral_mass'])
            rt = 0.0
            if 'retention_time_sec' in query.attrib.keys():
                rt = float(query.attrib['retention_time_sec'])
            charge = int(query.attrib['assumed_charge'])
            for result in query.findall('pep:search_result', ns):
                for hit in result.findall('pep:search_hit', ns):
                    process_scan(hit, scan_id, mass, rt, charge)
#save ipi_name.table: one row per protein that passed the hit-count filter
ipiout = open("ipi_name.table", 'w')
ipiout.write("name\n")
# "pro" style requires >=2 accepted hits per protein, other styles >=1.
if style == "pro":
    out_ipi_lst = [ p for p in ipi_lst.keys() if ipi_lst[p]>=2 ]
else:
    out_ipi_lst = [ p for p in ipi_lst.keys() if ipi_lst[p]>=1 ]
out_uni_lst = []
for pro in out_ipi_lst:
    #skip rev decoys ?
    elems = pro.split('|')
    ipi = elems[1] #uniprot actually
    out_uni_lst.append(ipi)
    disc = elems[2]
    tmp = disc.split()
    gene = tmp[0]
    disc = disc[len(gene)+1:]
    symbol = get_symbol(disc)
    if pro[:7] == "Reverse":
        ipi = "Reverse_" + ipi
    # Trim review markers and everything from the first KEY=VALUE token on.
    disc = disc.split("[REVIEWED]")[0]
    label = disc.find("[NOT_REVIEWED]")
    if label>0:
        disc = disc[:label]
    elems = disc.split()
    disc = ""
    for elem in elems:
        if "=" in elem: break
        disc = disc + " " + elem
    # Cap the description at 60 characters and strip quotes.
    disc = disc[:min(60,len(disc))]
    disc = disc.replace("'", "")
    #ipiout.write(pro)
    #ipiout.write("\n")
    ipiout.write(ipi)
    ipiout.write("\t")
    ipiout.write(symbol)
    ipiout.write(" ")
    ipiout.write(disc)
    ipiout.write("\n")
ipiout.close()
#save all_scan.table: only scans whose protein survived the filter above
scanout = open("all_scan.table", 'w')
scanout.write("key run scan HL\n")
for scan in scan_lst:
    uni = scan.split(":")[0]
    if uni in out_uni_lst: scanout.write(scan)
scanout.close()
#save cross_scan.table: for each key keep the best-scoring scan's mass/id
# NOTE(review): header uses com_fn from the *last* input file processed.
crossout = open("cross_scan.table", 'w')
crossout.write( "key mass %s\n" % (com_fn) )
for scan_core in cross_tab.keys():
    uni = scan_core.split(":")[0]
    if uni not in out_uni_lst: continue
    rank = sorted( cross_tab[scan_core], key=lambda x: x[2] )
    neutral_mass = rank[-1][0]
    id_scan = rank[-1][1]
    crossout.write(scan_core+" "+str(neutral_mass)+" "+str(id_scan)+"\n")
crossout.close()
|
#!/usr/bin/env python
'''
We shall say that an n-digit number is pandigital if it
makes use of all the digits 1 to n exactly once.
For example,2143 is a 4-digit pandigital and is also prime.
What is the largest n-digit pandigital prime that exists?
'''
from itertools import permutations
import math
def is_prime(n):
    """Trial-division primality test for positive integers.

    Handles 1 (not prime) and 2 (prime) explicitly, rejects other even
    numbers, then tries odd divisors up to sqrt(n).
    """
    if n == 1:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    divisor = 3
    limit = math.sqrt(n) + 1
    while divisor < limit:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
# Brute force (Python 2): for each length i in 1..9, test every permutation of
# the first i digits of '1234567890' and keep the largest prime found.
maxPrime = 0
for i in xrange(1, 10):
    for num in permutations(str('1234567890')[:i]):
        if is_prime(int(''.join(num))):
            maxPrime = max(maxPrime,int(''.join(num)))
print 'The n-digit pandigital prime number is',maxPrime #7652413
|
# Copyright, the authors - see LICENSE.rst
# Package __init__: expose the package version and an astropy-style test runner.
try:
    from .version import version as __version__
except ImportError:
    # TODO: Issue a warning...
    __version__ = ''
# The version number can be found in the "version" variable of version.py
# set up the test command
from astropy.tests.helper import TestRunner
_test_runner = TestRunner(__path__[0])
del TestRunner
test = _test_runner.run_tests
# NOTE(review): implicit relative import (Python 2 style); under Python 3 this
# would need to be 'from .survey import IPHAS' -- confirm the target module.
from survey import IPHAS
|
#The football.csv file contains the results from the English Premier League.
# The columns labeled 'Goals' and 'Goals Allowed' contain the total number of
# goals scored for and against each team in that season (so Arsenal scored 79 goals
# against opponents, and had 36 goals scored against them). Write a program to read the file,
# then print the name of the team with the smallest difference in 'for' and 'against' goals.
# The below skeleton is optional. You can use it or you can write the script with an approach of your choice.
# (Python 2 script: csv_f.next() and print statements.)
import csv
f= open('football.csv')
csv_f = csv.reader(f)
# Skip the header row.
csv_f.next()
parsed_data = [row for row in csv_f]
# Column 5 = goals for, column 6 = goals against.
diff = [int(row[5]) - int(row[6]) for row in parsed_data]
index_value = diff.index(min(diff)) # index value of the smallest difference in 'for' and 'against' goals
teams = [row[0] for row in parsed_data]
print "The team with the smallest differnece in 'for' and 'against' goals is ", teams[index_value]
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Job-scraper configuration: target city ("Changsha") and keyword ("crawler"),
# plus the CSV column headers (Chinese labels: job title, salary, company,
# company url, requirements, duties/skills, address, created/updated/deadline
# dates, highlights, job url).
from fake_useragent import UserAgent
CITY_NAME = '长沙'
JOB_NAME = '爬虫'
COLUMN = ['职业名', '月薪', '公司名', '公司url', '职位条件', '岗位职责与技能要求', '工作地址',
          '创建日期', '更新日期','截止日期', '职位亮点', '职位url']
# Randomized User-Agent chosen once per run to reduce blocking.
HEADERS = {'User-Agent': UserAgent().random}
# Accumulator filled elsewhere -- presumably one entry per scraped job; confirm.
WORK_LIST = []
|
class SlackError(Exception):
    """General exception class for all Slack-related errors"""
|
from django import forms
from django.forms import ModelForm
from smile.models import USER
'''
GENDER=[
('male', 'Male')
('female', 'Female')
]
class UserRegisterForm(UserCreationForm):
email = forms.EmailField()
gender = forms.CharField(label='Gender', widget=forms.Select(choices=GENDER))
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2', 'gender']
GENDER=[
('male', 'Male')
('female', 'Female')
]
'''
class UserForm(ModelForm):
    """Registration form for the custom USER model (uppercase field names
    follow the model's own convention)."""
    # Mask the password input.
    PASSWORD = forms.CharField(widget=forms.PasswordInput)
    # Gender select populated from the model's SEX_CHOICES.
    SEX_CD = forms.CharField(widget=forms.Select(choices=USER.SEX_CHOICES))

    class Meta:
        model = USER
        fields = ['EMAIL', 'PASSWORD', 'USER_NM', 'SEX_CD', 'USER_AGE']
class LoginForm(forms.Form):
    """Plain (non-model) login form: email plus masked password."""
    EMAIL = forms.CharField(widget=forms.TextInput)
    PASSWORD = forms.CharField(widget=forms.PasswordInput)
    # class Meta:
    #     model = USER
    #     fields = ['EMAIL', 'PASSWORD']
|
# Copyright 2016 Husky Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import zmq
# Single module-level DEALER socket shared by init()/send()/ask() below.
context = zmq.Context()
socket = zmq.Socket(context, zmq.DEALER)
# Message-type tags -- presumably mirrored from the Husky master's constants;
# confirm against the C++ side before changing.
TYPE_SESSION_BEGIN_PY = 0x3bfa1c58
TYPE_SESSION_END_PY = 0x2dfb1c58
NEW_TASK = 0x30fa1258
QUERY_TASK = 0x40fa1257
def init(identity, master_addr):
    """Set this worker's ZMQ identity and connect the DEALER socket to the master."""
    socket.setsockopt(zmq.IDENTITY, identity)
    socket.connect(master_addr)
def send(msg_type, content=None):
    """Send a framed message to the master: an empty delimiter frame, the
    packed message type, then (only when given) a content frame.

    :param msg_type: integer message-type tag, packed as a native-order int.
    :param content: optional payload frame; omitted entirely when None.
    """
    socket.send('', zmq.SNDMORE)
    # 'is not None' (identity test) instead of the original '!= None':
    # equality can be overridden by the payload type and is non-idiomatic.
    if content is not None:
        socket.send(struct.pack('=i', msg_type), zmq.SNDMORE)
        socket.send(content)
    else:
        socket.send(struct.pack('=i', msg_type))
def ask(msg_type, content=''):
    """Send a request (delimiter, packed type, content) and block for the reply.

    The first recv() consumes the empty delimiter frame; the second returns
    the actual reply payload.
    """
    socket.send('', zmq.SNDMORE)
    socket.send(struct.pack('=i', msg_type), zmq.SNDMORE)
    socket.send(content)
    socket.recv()
    return socket.recv()
|
import os
from unittest import TestCase
from testfixtures.comparison import compare
from testfixtures.tempdirectory import TempDirectory
import hex_generator as hg
class BoardGenerationTests(TestCase):
    """Shape tests for hex_generator's board constructors (1 = cell present)."""

    def test_generate_board_parallelogram(self):
        board = hg.generate_parallelogrammatic_board(5, 3)
        expected_board = [
            [1, 1, 1],
            [1, 1, 1],
            [1, 1, 1],
            [1, 1, 1],
            [1, 1, 1],
        ]
        self.assertEqual(board, expected_board)

    def test_generate_board_triangle(self):
        board = hg.generate_triangular_board(5)
        expected_board = [
            [1, 1, 1, 1, 1],
            [1, 1, 1, 1, 0],
            [1, 1, 1, 0, 0],
            [1, 1, 0, 0, 0],
            [1, 0, 0, 0, 0],
        ]
        self.assertEqual(board, expected_board)

    def test_generate_board_triangle_lower(self):
        # mirrored=True flips the triangle to the other diagonal.
        board = hg.generate_triangular_board(5, mirrored=True)
        expected_board = [
            [0, 0, 0, 0, 1],
            [0, 0, 0, 1, 1],
            [0, 0, 1, 1, 1],
            [0, 1, 1, 1, 1],
            [1, 1, 1, 1, 1],
        ]
        self.assertEqual(board, expected_board)

    def test_generate_board_hexagon(self):
        # radius 3 yields a (2*3+1) x (2*3+1) grid with clipped corners.
        board = hg.generate_hexagonal_board(3)
        expected_board = [
            [0, 0, 0, 1, 1, 1, 1],
            [0, 0, 1, 1, 1, 1, 1],
            [0, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 0],
            [1, 1, 1, 1, 1, 0, 0],
            [1, 1, 1, 1, 0, 0, 0],
        ]
        self.assertEqual(board, expected_board)
class BoardFileTests(TestCase):
    """Round-trip tests for the text-file format (columns written as rows;
    missing cells padded with 0)."""

    def test_write_board_to_text_file(self):
        board = [
            [0, 1, 2],
            [3, 4, 5],
            [6, 7, 8],
            [9, 9, 9],
        ]
        with TempDirectory() as d:
            hg.write_board_to_text_file(board, os.path.join(d.path, 'test.txt'))
            # File is the transpose of the board, space-separated.
            compare(d.read('test.txt', 'utf-8'), '0 3 6 9\n1 4 7 9\n2 5 8 9\n')

    def test_write_board_to_text_file_padding(self):
        # Short rows are padded with zeros on write.
        board = [
            [0, 1, 2],
            [3, 4],
            [6, 7, 8],
            [9],
        ]
        with TempDirectory() as d:
            hg.write_board_to_text_file(board, os.path.join(d.path, 'test.txt'))
            compare(d.read('test.txt', 'utf-8'), '0 3 6 9\n1 4 7 0\n2 0 8 0\n')

    def test_read_board_from_text_file(self):
        expected_board = [
            [0, 1, 2],
            [3, 4, 5],
            [6, 7, 8],
            [9, 9, 9],
        ]
        with TempDirectory() as d:
            d.write('test.txt', '0 3 6 9\n1 4 7 9\n2 5 8 9\n', 'utf-8')
            board = hg.read_board_from_text_file(os.path.join(d.path, 'test.txt'))
            self.assertEqual(board, expected_board)

    def test_read_board_from_text_file_padding(self):
        # Short lines in the file are padded with zeros on read.
        expected_board = [
            [0, 1, 2],
            [3, 4, 5],
            [6, 0, 8],
            [9, 0, 0],
        ]
        with TempDirectory() as d:
            d.write('test.txt', '0 3 6 9\n1 4\n2 5 8\n', 'utf-8')
            board = hg.read_board_from_text_file(os.path.join(d.path, 'test.txt'))
            self.assertEqual(board, expected_board)
class HexagonsTests(TestCase):
    """Tests for transform_board_into_hexagons: with trim_board=False every
    cell yields a hexagon (12); with trim_board=True only non-zero cells do (7)."""

    def test_board_no_trim(self):
        board = [
            [0, 1, 2],
            [3, 4, 5],
            [0, 0, 8],
            [9, 0, 0],
        ]
        hexagons = hg.transform_board_into_hexagons(board, 1, 1, trim_board=False)
        self.assertEqual(len(hexagons), 12)

    def test_board_trim(self):
        board = [
            [0, 1, 2],
            [3, 4, 5],
            [0, 0, 8],
            [9, 0, 0],
        ]
        hexagons = hg.transform_board_into_hexagons(board, 1, 1, trim_board=True)
        self.assertEqual(len(hexagons), 7)
|
from rest_framework import serializers
from rest_framework.serializers import Serializer, ModelSerializer
from TestOnline.exam.serializers import QuestionSerializer
from TestOnline.models import Paper, PaperPermission, Question, Type
class TypeSeiralizer(ModelSerializer):
    """Minimal Type serializer (id, type).  NOTE(review): the class name is a
    typo of 'TypeSerializer' but is kept -- renaming would break importers."""
    class Meta:
        model = Type
        fields =('id','type')
class PaperCreateSerializer(ModelSerializer):
    """Write serializer for creating a Paper; maps the flat request fields
    'permission' and 'type' onto default_permission / type_id."""
    questions = serializers.ListField()
    permission = serializers.IntegerField(source='default_permission')
    type = serializers.IntegerField(source='type_id')
    difficulty = serializers.IntegerField(required=False)

    class Meta:
        depth = 0
        model = Paper
        # fields = ("paper_type_id",'questions','permission')
        exclude = ['createdAt','updatedAt','deletedAt','default_permission']
class PaperSerializer(ModelSerializer):
    """Paper serializer exposing questions as primary keys; soft-deleted
    questions (deletedAt set) are excluded from the queryset."""
    questions = serializers.PrimaryKeyRelatedField(many=True,queryset=Question.objects.filter(deletedAt=None))

    class Meta:
        depth =0
        model = Paper
        exclude = ['createdAt','updatedAt','deletedAt']
class SimplePaperSerializer(ModelSerializer):
    """Lightweight Paper representation (id and name only), e.g. for lists."""
    class Meta:
        model = Paper
        fields = ('id','name')
class PaperDetailSerializer(ModelSerializer):
    """Read serializer for a single Paper with fully nested questions (depth=2)."""
    # type = TypeSeiralizer()
    type = serializers.IntegerField(source='type.id')
    questions = QuestionSerializer(many=True)

    class Meta:
        depth =2
        model = Paper
        exclude = ['createdAt','updatedAt','deletedAt']
class PermissionSerializer(ModelSerializer):
    """PaperPermission serializer hiding the soft-delete/bookkeeping columns."""
    class Meta:
        model = PaperPermission
        exclude = ['createdAt','updatedAt','deletedAt']
|
from django.db.models import *
# Create your models here.
class WJ(Model):
    """Campus-transport questionnaire response (questions q1-q5)."""
    id = AutoField(primary_key=True)
    # q1: whether public transport is needed
    q1 = BooleanField(default=True, verbose_name="是否需要公共交通")
    # q2: preferred mode (minibus / electric cart / rail)
    q2 = CharField(max_length=8, verbose_name="交通方式",
                   choices=(("minibus", "小型公交车"), ("ecar", "电瓶车"), ("railway", "轨道交通")))
    # q3: current location
    q3 = CharField(max_length=32, verbose_name="现在在何处")
    # q4: destination
    q4 = CharField(max_length=32, verbose_name="去往何处")
    # q5: most-wanted new route (free text)
    q5 = TextField(verbose_name="最希望开通的路线")

    def __str__(self):
        return "{}, {}, {}, {}, {}".format(self.q1, self.q2, self.q3, self.q4, self.q5)
class Site(Model):
    """A named campus location; the name itself is the primary key."""
    name = CharField(max_length=32, primary_key=True)
    location = TextField(default="")
class Station(Model):
    """A bus stop at a Site, tracking how many people are waiting there."""
    id = AutoField(primary_key=True)
    site = ForeignKey(Site, on_delete=CASCADE)
    # number of people currently waiting (Chinese verbose_name kept)
    waiting = PositiveIntegerField(verbose_name="正在等待的人数", default=0)
class Route(Model):
    """An ordered-less collection of Stations served by one bus line."""
    id = AutoField(primary_key=True)
    stations = ManyToManyField(Station)
class Bus(Model):
    """A bus on a route: current passenger count and current station."""
    id = AutoField(primary_key=True)
    # passengers currently on board
    payload = PositiveIntegerField(verbose_name="车上人数", default=0)
    route = ForeignKey(Route, on_delete=CASCADE)
    station = ForeignKey(Station, on_delete=CASCADE)
# Names of all campus stops (dormitories, canteens, teaching buildings, etc.).
site_names = ['丁香宿舍', '竹园宿舍', '海棠宿舍', '丁香食堂', '竹园食堂', '海棠食堂', 'A教学楼', 'B教学楼', 'C教学楼',
              'D教学楼', 'E教学楼', 'F教学楼', 'G教学楼', '信远楼', '工训中心', '新综', '老综', '行政楼', '家属区',
              '北操场', '南操场', '大学生活动中心', "北门", "东门", "图书馆"]

def site_init():
    """Idempotently create a Site row for every name in site_names."""
    for i in site_names:
        s, _ = Site.objects.get_or_create(name=i)
        s.save()
# site_init()
def create_wj(q1, q2, q3, q4, q5, q6):
    """Create and persist one questionnaire (WJ) record.

    Bug fix: the WJ model defines only q1-q5, so the original call
    WJ(..., q6=q6) raised TypeError on every invocation.  The q6 parameter
    is still accepted (keeping the signature unchanged for existing
    callers) but is ignored.
    """
    a = WJ(q1=q1, q2=q2, q3=q3, q4=q4, q5=q5)
    a.save()
#
#
# _locations = [
# ('丁香宿舍', '34.1207047372,108.8284850121'),
# ('竹园宿舍', '34.1269218732,108.8397073746'),
# ('海棠宿舍', '34.1292842650,108.8344931602'),
# ('ABC教学楼', '34.1268685854,108.8310170174'),
# ('D教学楼', '34.1248969134,108.8350939751'),
# ('EFG教学楼', '34.1238844153,108.8372182846'),
# ('信远楼', '34.1250212544,108.8389778137'),
# ('工训中心', '34.1263889937,108.8382267952'),
# ('综合楼', '34.1281297208,108.8365101814'),
# ('行政楼', '34.1221436007,108.8383340836'),
# ('家属区', '34.1208823760,108.8289141655'),
# ('北操场', '34.1301190794,108.8307595253'),
# ('南操场', '34.1250923063,108.8275408745'),
# ('北门', '34.1287869248,108.8375186920'),
# ('东门', '34.1219126737,108.8403725624'),
# ('E区家属区与远望谷', '34.1203761044,108.8363385201'),
# ('图书馆', '34.1239865538,108.8333237171'),
# ]
#
#
# class Location:
# def __init__(self, name, x, y):
# self.name = name
# self.x = x
# self.y = y
#
#
# locations = [Location for name, l in _locations]
|
from google.appengine.ext import ndb
class TaskListSettings(ndb.Model):
    """Per-user, per-task-list settings blob stored in the App Engine Datastore.

    (Also removes a stray trailing '|' character that had been fused onto the
    last line and made the module unparsable.)
    """
    user_id = ndb.StringProperty()  # id of the owning user
    list_id = ndb.StringProperty()  # id of the task list these settings apply to
    settings = ndb.JsonProperty()   # arbitrary JSON settings payload
#!/usr/bin/env python
#!-*-coding:utf-8 -*-
# Time :2020/5/19 15:05
# Author : zhoudong
# File : target_data.py
import numpy as np
import matplotlib.pyplot as plt
class Target_Position:
    """Container for per-target state over N time steps.

    Attributes:
        s: (4, N, 4) float32 array of state vectors (original comment: 状态向量).
        n: (4, N) array of flags -- per the original comment, 1 marks a step
           where a real target exists.
    """

    def __init__(self, N):
        # 4 targets x N time steps x 4 state components
        self.s = np.zeros((4, N, 4), dtype=np.float32)
        # presence flags, one row per target
        self.n = np.zeros((4, N))
def target_form_nonoise(F, N, G, q_noise):
    """
    Generate the true (noise-free) trajectories of 4 targets and plot them.
    (The original Chinese docstring said 5 targets; only 4 are produced.)

    :param F: state-transition matrix
    :param N: number of time steps
    :param G: process-noise transition matrix (unused in this function)
    :param q_noise: noise standard deviation (reshaped below but unused)
    :return: [state, state_flag] -- (4, N, 4) state array and (4, N) presence flags
    """
    #target = Target_Position(N)
    target = []
    state = np.zeros((4, N, 4))  # motion states
    state_flag = np.zeros((4, N))  # 1 = target exists at that step, 0 = absent
    state_flag[0, :8] = 1
    state_flag[1, 8:26] = 1
    state_flag[2, 12:38] = 1
    state_flag[3, 26:] = 1
    q_noise = np.reshape(q_noise, (-1, 1))  # column vector (not used afterwards)
    # Target 1: born at step 0, propagated through step 7.
    for i in range(N):
        if (i == 0):
            u = np.array([0, 2.6, 0, -1.2])
            state[0, i, :] = u
        elif i > 1 and i < 8:
            # NOTE(review): i == 1 is skipped ('i > 1'), leaving state[0, 1]
            # all zeros; the other targets use 'i > start', so this looks like
            # an off-by-one -- confirm intended behavior before changing.
            u = np.dot(F, u)
            state[0, i, :] = u
    # Target 2: steps 8..25.
    for i in range(N):
        if (i ==8):
            u = np.array([0, 0.6, 0, -2.1])
            state[1, i, :] = u
        elif i > 8 and i < 26:
            u = np.dot(F, u)
            state[1, i, :] = u
    # Target 3: steps 12..37.
    for i in range(N):
        if (i == 12):
            u = np.array([-10, 1.2, 0, 1.8])
            state[2, i, :] = u
        elif i > 12 and i < 38:
            u = np.dot(F, u)
            state[2, i, :] = u
    # target.s[2] = np.array(target.s[2])
    # Target 4: steps 26..N-1.
    for i in range(N):
        if (i == 26):
            u = np.array([0, 1.4, -5, -2.1])
            state[3, i, :] = u
        elif (i > 26):
            u = np.dot(F, u)
            state[3, i, :] = u
    # Plot the X/Y tracks; figures 1 and 2 draw identical content.
    plt.figure(1)
    plt.plot(state[0, :8, 0], state[0, :8, 2], color="blue", label="1")
    plt.plot(state[1, 8:26, 0], state[1, 8:26, 2], color="red", label="2")
    plt.plot(state[2, 12:38, 0], state[2, 12:38, 2], color="green", label="3")
    plt.plot(state[3, 26:, 0], state[3, 26:, 2], color="black", label="4")
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.legend(loc="upper left")
    plt.figure(2)
    plt.plot(state[0, :8, 0], state[0, :8, 2], color="blue", label="1")
    plt.plot(state[1, 8:26, 0], state[1, 8:26, 2], color="red", label="2")
    plt.plot(state[2, 12:38, 0], state[2, 12:38, 2], color="green", label="3")
    plt.plot(state[3, 26:, 0], state[3, 26:, 2], color="black", label="4")
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.legend(loc="upper left")
    target.append(state)
    target.append(state_flag)
    return target
def measures(H, r_noise, target, N, pd, r):
    """Generate measurement sets: noisy detections of true targets plus clutter.

    :param H: observation matrix mapping a 4-d state to a 2-d measurement
    :param r_noise: measurement-noise standard deviation
    :param target: [state, state_flag] as produced by target_form_nonoise
    :param N: number of time steps / scans (NOTE: the original docstring
        called this the number of targets, but it is iterated as scans below)
    :param pd: per-target detection probability
    :param r: mean number of clutter points per scan (Poisson rate)
    :return: (measuredatas, nummeasures, numtruetargets) — per-scan list of
        2x1 measurement vectors, per-scan measurement counts, and per-scan
        counts of truly present targets
    """
    # Surveillance-region bounds for uniformly distributed clutter.
    a = -100
    b = 100
    measuredatas = []
    nummeasures = []
    target_size = len(target[0])  # number of targets
    r_noise = np.reshape(r_noise, (-1, 1))  # column vector
    # First pass: clutter only.
    for i in range(N):
        clutter = np.random.poisson(r)  # Poisson-distributed clutter count
        measuredata = []
        for j in range(clutter):
            # Uniform clutter point inside [a, b] x [a, b].
            y = a + (b-a)*np.random.rand(2,1)
            measuredata.append(y)
        # print("i: ", measuredata)
        measuredatas.append(measuredata)
    #numtruetargets = [len(measuredatas[i]) for i in range(N)]
    # Second pass: append noisy detections of the targets that exist.
    numtruetargets = []
    for i in range(N):
        count = 0
        for j in range(target_size):
            if target[1][j, i] == 1:
                count +=1
                s = np.random.rand(1)
                if s < pd:  # detected with probability pd
                    y = np.dot(H, np.reshape(target[0][j, i, :], (np.shape(target[0][j, i, :])[0], 1))) + r_noise * np.random.randn(2,1)
                    measuredatas[i].append(y)
        nummeasures.append(len(measuredatas[i]))
        numtruetargets.append(count)
    # Overlay all measurements on figure 1 (shared with the trajectory plot).
    plt.figure(1)
    for i in range(N):
        for j in range(nummeasures[i]):
            plt.scatter(measuredatas[i][j][0], measuredatas[i][j][1], alpha=0.4, linestyle="-")
    #
    #
    # plt.show()
    return measuredatas, nummeasures, numtruetargets
# Build a dictionary with the dict() constructor: keyword-argument names
# become string keys (written with '=' instead of the ':' used in literals).
thisdict = dict(
    brand="Ford",
    model="Mustang",
    year=1964,
)
print(thisdict)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 11:46:11 2020
@author: matty
"""
"""
This script is used to examine specific regions of interest by comparing area-average regions between model output and observations.
The script compares multiple observations to the model output, in order to account for potential variation between observations.
Scripts to perform the pre-processing are include in the tool-box named:
INPUT:
- a netcdf file containing the model output a
- the netcdf files containing the observations datasets
- the title and co-oridantes of the region of interest
- the path and name of the mutliple netcdf files
- the name of observation and model dataset for headings and legends
- the name of the variables to be compared (! as written in netcdf file !)
- the name of the lon and lat dimesion of the model data (! as written in netcdf file !)
OUTPUT:
- A map of the region of interest
- A timeseries of the daily climatology for each dataset
- A timeseries of the daily anomalies for each dataset
- A taylor diagram using the an array of the median for each of the datasets as the taylor diagram observations
- A taylor diagram of the daily anomalies using the an array of the median for each of the datasets as the taylor diagram observations
REQUIRMENTS:
-The netcdf files need to be formatted so that the variabled used for comparsion are a 3 dimesional matrix of (time, longitude, latitude)
-The netcdf files MUST be regridded to identical grids
-The netcdf files MUST be sampled at a daily frequency and the time dimesion should have the same length (there can not be missing days in the data)
The inputs to the script should be changed where nessaccary is the section below called USER INPUTS
"""
# %% USER INPUTS
# Name of the region (used for graph titles and file saving)
position_name='Agulhas Bank '
# Coordinates (degrees) of the region of interest
west=20.1
east=21.1
south=-35.8
north=-34.8
# Path to where the netcdf data is stored
path_to_model = '/media/matthew/Seagate_Expansion_Drive/Model_output/'
path_to_satelite = '/media/matthew/Seagate_Expansion_Drive/Model_output/'
# The name of the netcdf file
model_dataset = "model_dataset_TD.nc"
satelite_dataset1 = "satelite_dataset_OSTIA.nc" #OSTIA
satelite_dataset2 = "satelite_dataset_ODYSSEA.nc" #ODYSSEA
satelite_dataset3 = "satelite_dataset_REMSS.nc" #REMSS
# The name of observations for legends and headings
model_name = "ROMS"
satelite_name1 = "OSTIA"
satelite_name2 = "ODYSSEA"
satelite_name3 = "REMSS"
# The variables to be analysed as written in netcdf files
model_variable = "temp"
satelite_variable= "analysed_sst"
# The lat and lon of the model netcdf file
model_lon_name = "lon_rho"
model_lat_name = "lat_rho"
# Path to where the figures should be stored
savepath= '/media/matthew/Seagate_Expansion_Drive/Model_evaluations_figures/Taylor_diagrams/'
# Name of figure (png file) which shows the position of the area of interest
savename_fig1 = 'Position_area_of_interest_'+str(position_name)
# Name of figure (png file) which shows the daily climatology for the region of interest
savename_fig2 = 'Daily_climatology_'+str(position_name)
# Name of figure (png file) which shows the timeseries of the daily anomaly for the region of interest
savename_fig3 = 'Daily_anomaly_'+str(position_name)
# Name of figure (png file) which shows the Taylor diagram of the full timeseries
savename_fig4 = 'Taylor_diagram'+str(position_name)
# Name of figure (png file) which shows the Taylor diagram of the daily anomalies
savename_fig5 = 'Taylor_diagram_daily_anomaly'+str(position_name)
# Extension
ext = '.png'
# %% Importing packages
import numpy as np
import xarray as xr
import pylab as plt
import scipy.stats as stat
import cartopy.crs as ccrs
import cartopy
import skill_metrics as sm
# %% The function used in the script below
#This function calculates the find a values in a array closest to the specificed element
#The script uses the function to find closest lat/lon points in the dataset so users do not need the exact lat/lon coordinates
def find_nearest(array, value):
    """Return the element of *array* that is closest to *value*."""
    arr = np.asarray(array)
    nearest_index = np.argmin(np.abs(arr - value))
    return arr[nearest_index]
#This function calculates the mean for each timestep of a 3d dimesional matrix, producing a array the same length of the matrix.
#The spript uses this function to average selected subset into arrays for statistical comparisions
def mean_matrix_3d(matrix):
    """Area-average each time step of a (time, lat, lon) matrix.

    Returns a 1-D array with one NaN-ignoring spatial mean per time step.
    Replaces the original per-step Python loop (which also used the fragile
    ``len(matrix[:,1,1])`` to get the time length) with a single vectorised
    ``np.nanmean`` over the two spatial axes — identical result.
    """
    return np.nanmean(np.asarray(matrix), axis=(1, 2))
# %% Loading netcdf files using xarray
# NOTE: these assignments rebind the filename strings to the opened Datasets.
model_dataset = xr.open_dataset(path_to_model+model_dataset)
satelite_dataset1 = xr.open_dataset(path_to_satelite+satelite_dataset1)
satelite_dataset2 = xr.open_dataset(path_to_satelite+satelite_dataset2)
satelite_dataset3 = xr.open_dataset(path_to_satelite+satelite_dataset3)
# Loading lon and lat: 1-D axes are taken from the 2-D curvilinear model grid
# (lon varies along axis 1, lat along axis 0) — assumes a rectilinear-enough grid.
longitude = model_dataset[model_lon_name].values[0,:]
latitude = model_dataset[model_lat_name].values[:,0]
# Loading time (taken from the REMSS dataset; all datasets must share it)
time = satelite_dataset3.time.values
# Time grouped by seasonal climatologies for the time dimension of scatter plot
#time_cat = np.zeros(len(time))
# Finding the exact coordinates within the model and satellite grids for subsetting,
# so users do not need to supply exact grid-point values.
west_co = find_nearest(longitude,west)
east_co = find_nearest(longitude,east)
south_co = find_nearest(latitude,south)
north_co = find_nearest(latitude,north)
# Subsetting the datasets to the region of interest (strict inequalities, so the
# boundary grid points themselves are excluded).
model_subset = model_dataset.where((model_dataset.lat_rho > south_co) &
    (model_dataset.lat_rho < north_co) & (model_dataset.lon_rho > west_co) &
    (model_dataset.lon_rho < east_co), drop=True)
satelite_subset1 = satelite_dataset1.where((satelite_dataset1.lat > south_co) &
    (satelite_dataset1.lat < north_co) & (satelite_dataset1.lon > west_co) &
    (satelite_dataset1.lon < east_co), drop=True)
satelite_subset2 = satelite_dataset2.where((satelite_dataset2.lat > south_co) &
    (satelite_dataset2.lat < north_co) & (satelite_dataset2.lon > west_co) &
    (satelite_dataset2.lon < east_co), drop=True)
satelite_subset3 = satelite_dataset3.where((satelite_dataset3.lat > south_co) &
    (satelite_dataset3.lat < north_co) & (satelite_dataset3.lon > west_co) &
    (satelite_dataset3.lon < east_co), drop=True)
# %% Calculating the daily anomalies and daily climatologies using xr groupby functions
# Anomaly = value minus that day-of-year's climatological mean.
model_daily_ano = model_subset.groupby("time.dayofyear") - model_subset.groupby("time.dayofyear").mean("time")
sat_daily_ano1 = satelite_subset1.groupby("time.dayofyear") - satelite_subset1.groupby("time.dayofyear").mean("time")
sat_daily_ano2= satelite_subset2.groupby("time.dayofyear") - satelite_subset2.groupby("time.dayofyear").mean("time")
sat_daily_ano3 = satelite_subset3.groupby("time.dayofyear") - satelite_subset3.groupby("time.dayofyear").mean("time")
# Calculating the daily climatology of satellite and model datasets
model_daily_clim = model_subset.groupby("time.dayofyear").mean("time")
sat_daily_clim1 = satelite_subset1.groupby("time.dayofyear").mean("time")
sat_daily_clim2 = satelite_subset2.groupby("time.dayofyear").mean("time")
sat_daily_clim3 = satelite_subset3.groupby("time.dayofyear").mean("time")
# %% Loading the variables from the dataset
# Mask the model wherever the REMSS satellite is NaN, so land/missing cells do
# not bias the area averages (model land is stored as zeros, not NaNs).
model_subset[model_variable].values[np.isnan(satelite_subset3[satelite_variable].values)] = np.nan
model_daily_ano[model_variable].values[np.isnan(sat_daily_ano3[satelite_variable].values)] = np.nan
model_daily_clim[model_variable].values[np.isnan(sat_daily_clim3[satelite_variable].values)] = np.nan
# %% #Finding the mean of the subset for each timestep using the function mean_matrix_3d.
# (Converts each 3d matrix into an area-averaged 1-D array.)
# Finding the mean for the full timeseries
model_array = mean_matrix_3d(model_subset[model_variable].values)
satelite_array1 = mean_matrix_3d(satelite_subset1[satelite_variable].values)
satelite_array2 = mean_matrix_3d(satelite_subset2[satelite_variable].values)
satelite_array3 = mean_matrix_3d(satelite_subset3[satelite_variable].values)
# Finding the mean of the daily anomalies
model_ano_array = mean_matrix_3d(model_daily_ano[model_variable].values)
satelite_ano1_array = mean_matrix_3d(sat_daily_ano1[satelite_variable].values)
satelite_ano2_array = mean_matrix_3d(sat_daily_ano2[satelite_variable].values)
satelite_ano3_array = mean_matrix_3d(sat_daily_ano3[satelite_variable].values)
# Finding the mean of the daily climatology
model_clim_array = mean_matrix_3d(model_daily_clim[model_variable].values)
satelite_clim1_array = mean_matrix_3d(sat_daily_clim1[satelite_variable].values)
satelite_clim2_array = mean_matrix_3d(sat_daily_clim2[satelite_variable].values)
satelite_clim3_array = mean_matrix_3d(sat_daily_clim3[satelite_variable].values)
# %% Calculating the median across the four datasets — used below as the
# "observation" reference of the Taylor diagrams.
# Median of timeseries
combined_array = np.stack([model_array, satelite_array1, satelite_array2,satelite_array3])
median_array = np.median(combined_array, axis = 0)
# Median of daily anomalies timeseries
combined_ano_array = np.stack([model_ano_array, satelite_ano1_array, satelite_ano2_array,satelite_ano3_array])
median_ano_array = np.median(combined_ano_array, axis = 0)
# Median of daily climatology timeseries
combined_clim_array = np.stack([model_clim_array, satelite_clim1_array, satelite_clim2_array,satelite_clim3_array])
median_clim_array = np.median(combined_clim_array, axis = 0)
# %% Taylor diagram stats
# NOTE(review): the crmsd_*, r_* and std_* values below are computed but never
# used — the sm.taylor_statistics results further down supersede them.
crmsd_model = sm.centered_rms_dev(model_array, median_array);
crmsd1 = sm.centered_rms_dev(satelite_array1, median_array)
crmsd2 = sm.centered_rms_dev(satelite_array2, median_array)
crmsd3 = sm.centered_rms_dev(satelite_array3, median_array)
r_model = stat.pearsonr(model_array, median_array)[0]
r1 = stat.pearsonr(satelite_array1, median_array)[0]
r2 = stat.pearsonr(satelite_array2, median_array)[0]
r3 = stat.pearsonr(satelite_array3, median_array)[0]
std_model = stat.tstd(model_array)
std1 = stat.tstd(satelite_array1)
std2 = stat.tstd(satelite_array2)
std3 = stat.tstd(satelite_array3)
# Element [0] of each statistic is the reference (median); elements [1:]
# (e.g. taylor_stats1[1:]) are those for the predicted series.
taylor_stats1 = sm.taylor_statistics(model_array,median_array,'data')
taylor_stats2 = sm.taylor_statistics(satelite_array1,median_array,'data')
taylor_stats3 = sm.taylor_statistics(satelite_array2,median_array,'data')
taylor_stats4 = sm.taylor_statistics(satelite_array3,median_array,'data')
# Store statistics in arrays: [reference, model, sat1, sat2, sat3]
sdev = np.array([taylor_stats1['sdev'][0], taylor_stats1['sdev'][1],
    taylor_stats2['sdev'][1], taylor_stats3['sdev'][1],taylor_stats4['sdev'][1]])
crmsd = np.array([taylor_stats1['crmsd'][0], taylor_stats1['crmsd'][1],
    taylor_stats2['crmsd'][1], taylor_stats3['crmsd'][1],taylor_stats4['crmsd'][1]])
ccoef = np.array([taylor_stats1['ccoef'][0], taylor_stats1['ccoef'][1],
    taylor_stats2['ccoef'][1], taylor_stats3['ccoef'][1], taylor_stats4['ccoef'][1]])
# Same statistics for the daily-anomaly series.
taylor_stats_ano1 = sm.taylor_statistics(model_ano_array,median_ano_array,'data')
taylor_stats_ano2 = sm.taylor_statistics(satelite_ano1_array,median_ano_array,'data')
taylor_stats_ano3 = sm.taylor_statistics(satelite_ano2_array,median_ano_array,'data')
taylor_stats_ano4 = sm.taylor_statistics(satelite_ano3_array,median_ano_array,'data')
# Store statistics in arrays
sdev_ano = np.array([taylor_stats_ano1['sdev'][0], taylor_stats_ano1['sdev'][1],
    taylor_stats_ano2['sdev'][1], taylor_stats_ano3['sdev'][1],taylor_stats_ano4['sdev'][1]])
crmsd_ano = np.array([taylor_stats_ano1['crmsd'][0], taylor_stats_ano1['crmsd'][1],
    taylor_stats_ano2['crmsd'][1], taylor_stats_ano3['crmsd'][1],taylor_stats_ano4['crmsd'][1]])
ccoef_ano = np.array([taylor_stats_ano1['ccoef'][0], taylor_stats_ano1['ccoef'][1],
    taylor_stats_ano2['ccoef'][1], taylor_stats_ano3['ccoef'][1], taylor_stats_ano4['ccoef'][1]])
# %% Selecting subset area
# Finding the array indices of the nearest grid points to the requested corners
west_index = np.where(longitude == find_nearest(longitude,west))[0][0]
east_index = np.where(longitude == find_nearest(longitude,east))[0][0]
south_index = np.where(latitude == find_nearest(latitude,south))[0][0]
north_index = np.where(latitude == find_nearest(latitude,north))[0][0]
# %% Plotting position of the selected area: filled SST field plus the black
# rectangle outlining the region of interest.
fig = plt.figure(figsize=(10, 5),facecolor='white')
ax = plt.axes(projection=ccrs.PlateCarree())
cs = plt.contourf(longitude,latitude,model_dataset[model_variable].values[1,:,:],60, transform=ccrs.PlateCarree());
ln0 = plt.plot([longitude[west_index], longitude[east_index]], [latitude[south_index], latitude[south_index]], '-k')
ln1 = plt.plot([longitude[west_index], longitude[east_index]], [latitude[north_index], latitude[north_index]], '-k')
ln2 = plt.plot([longitude[east_index], longitude[east_index]], [latitude[north_index], latitude[south_index]], '-k')
ln3 = plt.plot([longitude[west_index], longitude[west_index]], [latitude[north_index], latitude[south_index]], '-k')
ax.coastlines()
ax.add_feature(cartopy.feature.LAND, zorder=0)
plt.savefig(savepath+savename_fig1+ext)
# %% Plotting timeseries of the daily climatology (x axis = day of year)
fig,ax = plt.subplots()
line_sat1 = plt.plot(satelite_clim1_array,label= satelite_name1);
line_sat2 = plt.plot(satelite_clim2_array,label= satelite_name2);
line_sat3 = plt.plot(satelite_clim3_array,label= satelite_name3);
line_roms = plt.plot(model_clim_array,label = model_name);
plt.xlabel('Day of year')
plt.ylabel('SST ($^\circ$C)')
plt.title(str(position_name))
plt.legend()
plt.savefig(savepath+savename_fig2+ext,bbox_inches='tight', pad_inches=0.2)
# %% Plotting timeseries of the daily anomaly (x axis = calendar date)
fig,ax = plt.subplots()
line_sat1 = plt.plot(time,satelite_ano1_array,label = satelite_name1);
line_sat2 = plt.plot(time,satelite_ano2_array,label = satelite_name2);
line_sat3 = plt.plot(time,satelite_ano3_array,label = satelite_name3);
line_roms = plt.plot(time,model_ano_array,label = model_name);
plt.xlabel('Date in years')
plt.ylabel('SST ($^\circ$C)')
plt.title(str(position_name))
plt.legend()
plt.savefig(savepath+savename_fig3+ext,bbox_inches='tight', pad_inches=0.2)
# %% Taylor diagram of the full timeseries, with the inter-dataset median as
# the reference "observation".
label = ['Non-Dimensional Observation', model_name, satelite_name1 , satelite_name2, satelite_name3]
fig,ax = plt.subplots()
plt.title(str(position_name))
sm.taylor_diagram(sdev,crmsd,ccoef,markerLabel = label,markerLegend = 'on', styleOBS = '-', colOBS = 'k', markerobs = 'o',titleOBS = 'Median')
# Fix: append the file extension so figures 4 and 5 are saved as .png like
# figures 1-3 (their savename comments already promised a png file).
plt.savefig(savepath+savename_fig4+ext)
# %% Taylor diagram of the daily anomalies
label = ['Non-Dimensional Observation', model_name, satelite_name1 , satelite_name2, satelite_name3]
crmsd_ano = np.round(crmsd_ano,2)  # round RMSD labels for readability
fig,ax = plt.subplots()
plt.title(str(position_name)+"_daily_anomalies")
sm.taylor_diagram(sdev_ano,crmsd_ano,ccoef_ano, rmsLabelFormat= '0:.2f' , markerLabel = label,markerLegend = 'on', styleOBS = '-', colOBS = 'k', markerobs = 'o',titleOBS = 'Median')
plt.savefig(savepath+savename_fig5+ext)
|
from django import forms
from accounts.models import User
from payments.models import CreditCard
from django.contrib.auth.forms import UserCreationForm
class UserSignupForm(UserCreationForm):
    """Registration form: name/email plus the password pair that
    UserCreationForm validates and hashes."""
    class Meta:
        model = User
        fields = ['email', 'first_name', 'last_name', 'password1', 'password2']
class UserUpdateForm(forms.ModelForm):
    """Profile-edit form: identity fields only, no password handling."""
    class Meta:
        model = User
        fields = ['email', 'first_name', 'last_name']
|
"""Author Arianna Delgado
Created on May 28, 2020
"""
"""Showing List is mutable"""
l1 = [2,3,4,5,8,4,3,5,2,1,8,8,6,3,4,5,7,9]
set1 =(set(l1))
l2 = list(set1)
print(l2)
#list are mutable
l2[0] = 100
print(l2)
"""Showing tuples are immutable"""
#tuples are immutable
tuple1 = tuple(l2)
tuple1[0] = 1 #this line will trigger an error
print(tuple1)
|
# @Title: 相交链表 (Intersection of Two Linked Lists)
# @Author: 2464512446@qq.com
# @Date: 2019-11-06 15:28:23
# @Runtime: 212 ms
# @Memory: 40.7 MB
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def getIntersectionNode(self, headA, headB):
        """Return the node where two singly linked lists intersect, or None.

        Two-cursor technique: each cursor walks its own list and then the
        other one, so both travel lenA + lenB steps and meet either at the
        first shared node or simultaneously at None.

        :type headA: ListNode
        :type headB: ListNode
        :rtype: ListNode
        """
        if headA is None or headB is None:
            return None
        a, b = headA, headB
        while a is not b:
            a = headB if a is None else a.next
            b = headA if b is None else b.next
        return a
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/9/22 15:03
# @Author : TheTAO
# @Site :
# @File : build_inputs.py
# @Software: PyCharm
import pickle
import jieba.posseg as psg
from cnradical import Radical, RunOption
# 全角转半角
def full_to_half(s):
    """Convert full-width characters in *s* to their half-width forms.

    The ideographic space (U+3000) becomes an ASCII space; code points in
    the full-width ASCII block (U+FF01..U+FF5E) are shifted down by 0xFEE0;
    every other character is kept unchanged.
    """
    converted = []
    for ch in s:
        code = ord(ch)
        if code == 0x3000:              # full-width space
            converted.append(' ')
        elif 0xFF01 <= code <= 0xFF5E:  # full-width ASCII range
            converted.append(chr(code - 0xFEE0))
        else:
            converted.append(ch)
    return ''.join(converted)
# 简单清理数据
def replace_html(stxt):
    """Clean raw text: decode common HTML entities and drop fancy punctuation.

    Bug fixed: the entity replacements had degenerated into no-ops such as
    ``replace('"', '"')`` and ``replace('&', '&')`` — the HTML-escaped source
    strings (``&quot;``, ``&amp;``, ...) had been lost, so the function never
    decoded anything. The escaped forms are restored here.
    """
    stxt = stxt.replace('&quot;', '"')
    stxt = stxt.replace('&amp;', '&')
    stxt = stxt.replace('&lt;', '<')
    stxt = stxt.replace('&gt;', '>')
    stxt = stxt.replace('&nbsp;', ' ')
    stxt = stxt.replace("“", "")       # drop full-width opening quote
    stxt = stxt.replace("”", "")       # drop full-width closing quote
    stxt = stxt.replace("—", "")       # drop em dash
    stxt = stxt.replace("\xa0", " ")   # non-breaking space -> plain space
    return stxt
def input_from_line_with_feature(line):
    """
    Build the model inputs for a single sentence for entity recognition, in the form
    [[[raw_text]], [[word]], [[bound]], [[flag]], [[label]], [[radical]], [[pinyin]]]
    (one extra leading column; feed [1:] to the model at inference time).
    :param line: the input sentence (a plain string)
    :return: (raw_text, feature_id_lists) — the normalised sentence plus one
        id list per feature, indexed through the pickled vocabulary maps
    """
    # Vocabulary maps built at preprocessing time: feature -> (.., .., token->id).
    with open(f'datas/prepare_data/dict.pkl', 'rb') as f:
        map_dict = pickle.load(f)
    def item2id(data, w2i):
        # Map tokens to ids, falling back to the UNK id for unseen tokens.
        return [w2i[x] if x in w2i else w2i['UNK'] for x in data]
    inputs = list()
    feature_names = ['word', 'bound', 'flag', 'radical', 'pinyin', 'label']
    # Normalise: full-width -> half-width, then strip HTML artifacts.
    line = full_to_half(line)
    line = replace_html(line)
    chars = [[char for char in line]]
    # Labels: tag everything 'O' (this path is inference only).
    tag_list = ['O' for _ in line]
    # Word-boundary and part-of-speech features.
    word_bounds = ['M' for _ in tag_list]  # boundary tag per character (default Middle)
    word_flags = []  # part-of-speech tag per character
    # Iterate over the POS-tagged segmentation.
    for word, flag in psg.cut(line):
        if len(word) == 1:
            # Single-character word: tag it S(ingle).
            start = len(word_flags)
            word_bounds[start] = 'S'
            word_flags.append(flag)
        else:
            # Multi-character word: B(egin) ... E(nd).
            start = len(word_flags)
            word_bounds[start] = 'B'
            word_flags += [flag] * len(word)
            # end must be the index of the last character, hence the -1.
            end = len(word_flags) - 1
            word_bounds[end] = 'E'
    bounds = [word_bounds]
    flags = [word_flags]
    # Empty targets because this is inference, not training.
    targets = [[]]
    # Radical and pinyin features per character.
    radical = Radical(RunOption.Radical)
    pinyin = Radical(RunOption.Pinyin)
    # trans_ch returns None for characters without a radical/pinyin; use 'UNK'.
    radicals = [[radical.trans_ch(x) if radical.trans_ch(x) is not None else 'UNK' for x in line]]
    pinyins = [[pinyin.trans_ch(x) if pinyin.trans_ch(x) is not None else 'UNK' for x in line]]
    inputs.append(chars)
    inputs.append(bounds)
    inputs.append(flags)
    inputs.append(radicals)
    inputs.append(pinyins)
    inputs.append(targets)
    # Convert every feature column to id lists via the vocabulary maps.
    id_inputs = [[line]]
    for i, feature in enumerate(feature_names):
        id_inputs.append([item2id(inputs[i][0], map_dict[feature][2])])
    return id_inputs[0][0], id_inputs[1:]
if __name__ == '__main__':
    # Smoke test: run feature extraction on a short sample sentence and
    # print the first element of the returned raw text.
    lines = '我是中国人'
    id_input = input_from_line_with_feature(lines)
    print(id_input[0][0])
|
from django.db import models
class Term(models.Model):
    """A single terms-and-conditions entry."""
    # Free-text body of the term; also used as the display string.
    details=models.CharField(max_length=500)
    # NOTE(review): auto_now_add only stamps the creation time; for a field
    # named "last_updated_at", auto_now=True was probably intended — confirm
    # before changing, since it requires a migration.
    last_updated_at=models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return self.details
|
# coding:utf-8
__author__ = 'yann'
import datetime
import functions
import time
from flashsale.dinghuo.models_stats import SupplyChainStatsOrder, DailySupplyChainStatsOrder
from shopback.items.models import Product, ProductSku
from flashsale.dinghuo.models import OrderDetail
def get_daily_order_stats(prev_day):
    """For the day ``prev_day`` days ago, record per-SKU sales volume and the
    quantity-weighted average pay time (unix seconds) into SupplyChainStatsOrder."""
    today = datetime.date.today()
    target_day = today - datetime.timedelta(days=prev_day)
    start_dt = datetime.datetime(target_day.year, target_day.month, target_day.day)
    end_dt = datetime.datetime(target_day.year, target_day.month, target_day.day, 23, 59, 59)
    order_qs = functions.get_source_orders(start_dt, end_dt)
    # order_dict: {outer_id: {outer_sku_id: {"num": total qty,
    #                                        "pay_time": qty-weighted avg timestamp}}}
    order_dict = {}
    for order in order_qs:
        # Pay time as a unix timestamp; `or 0` guards a falsy mktime result.
        pay_time = time.mktime(order['pay_time'].timetuple()) or 0
        num = order["num"]
        if order["outer_id"] in order_dict:
            if order["outer_sku_id"] in order_dict[order["outer_id"]]:
                # Merge into the running quantity-weighted average.
                old_num = order_dict[order["outer_id"]][order["outer_sku_id"]]["num"]
                old_pay_time = order_dict[order["outer_id"]][order["outer_sku_id"]]["pay_time"]
                pay_time = (pay_time * num + old_pay_time * old_num) / (old_num + num)
                order_dict[order["outer_id"]][order["outer_sku_id"]]["num"] = old_num + num
                order_dict[order["outer_id"]][order["outer_sku_id"]]["pay_time"] = pay_time
            else:
                order_dict[order["outer_id"]][order["outer_sku_id"]] = {"num": num, 'pay_time': pay_time}
        else:
            order_dict[order["outer_id"]] = {order["outer_sku_id"]: {"num": num, 'pay_time': pay_time}}
    # Upsert one SupplyChainStatsOrder row per (product, sku, day); only for
    # products already on sale by the target day.
    for product_outer_id, product_dict in order_dict.items():
        pro_bean = Product.objects.filter(outer_id=product_outer_id)
        if pro_bean.count() > 0 and pro_bean[0].sale_time and not (pro_bean[0].sale_time > target_day):
            for outer_sku_id, product in product_dict.items():
                temp_new_data = SupplyChainStatsOrder.objects.filter(product_id=product_outer_id,
                                                                     outer_sku_id=outer_sku_id,
                                                                     sale_time=target_day)
                if temp_new_data.count() > 0:
                    stats_new = temp_new_data[0]
                    stats_new.sale_num = product['num']
                    stats_new.trade_general_time = product['pay_time']
                    stats_new.save()
                else:
                    stats_new = SupplyChainStatsOrder(product_id=product_outer_id, outer_sku_id=outer_sku_id,
                                                      sale_time=target_day, sale_num=product['num'],
                                                      shelve_time=pro_bean[0].sale_time,
                                                      trade_general_time=product['pay_time'])
                    stats_new.save()
def get_daily_out_order_stats(prev_day):
    """For the day ``prev_day`` days ago, record per-SKU shipped volume and the
    quantity-weighted average ship (weigh) time into SupplyChainStatsOrder."""
    today = datetime.date.today()
    target_day = today - datetime.timedelta(days=prev_day)
    start_dt = datetime.datetime(target_day.year, target_day.month, target_day.day)
    end_dt = datetime.datetime(target_day.year, target_day.month, target_day.day, 23, 59, 59)
    order_qs = functions.get_source_orders_consign(start_dt, end_dt)
    # {outer_id: {outer_sku_id: {"num": qty, "pay_time": weighted avg weigh time}}}
    order_dict = {}
    for order in order_qs:
        # NOTE: despite the name, pay_time here is the warehouse weigh time.
        pay_time = time.mktime(order['merge_trade__weight_time'].timetuple()) or 0
        sale_num = order["num"]
        if order["outer_id"] in order_dict:
            if order["outer_sku_id"] in order_dict[order["outer_id"]]:
                # Merge into the running quantity-weighted average.
                old_sale_num = order_dict[order["outer_id"]][order["outer_sku_id"]]["num"]
                old_pay_time = order_dict[order["outer_id"]][order["outer_sku_id"]]["pay_time"]
                pay_time = (pay_time * sale_num + old_pay_time * old_sale_num) / (old_sale_num + sale_num)
                order_dict[order["outer_id"]][order["outer_sku_id"]]["num"] = old_sale_num + sale_num
                order_dict[order["outer_id"]][order["outer_sku_id"]]["pay_time"] = pay_time
            else:
                order_dict[order["outer_id"]][order["outer_sku_id"]] = {"num": sale_num, 'pay_time': pay_time}
        else:
            order_dict[order["outer_id"]] = {order["outer_sku_id"]: {"num": sale_num, 'pay_time': pay_time}}
    # Upsert the shipped-goods columns of the per-(product, sku, day) stats row.
    for product_outer_id, product_dict in order_dict.items():
        pro_bean = Product.objects.filter(outer_id=product_outer_id)
        if pro_bean.count() > 0 and pro_bean[0].sale_time and not (pro_bean[0].sale_time > target_day):
            for outer_sku_id, product in product_dict.items():
                temp_new_data = SupplyChainStatsOrder.objects.filter(product_id=product_outer_id,
                                                                     outer_sku_id=outer_sku_id,
                                                                     sale_time=target_day)
                if temp_new_data.count() > 0:
                    stats_new = temp_new_data[0]
                    stats_new.goods_out_num = product['num']
                    stats_new.goods_out_time = product['pay_time']
                    stats_new.save()
                else:
                    stats_new = SupplyChainStatsOrder(product_id=product_outer_id, outer_sku_id=outer_sku_id,
                                                      sale_time=target_day, goods_out_num=product['num'],
                                                      shelve_time=pro_bean[0].sale_time,
                                                      goods_out_time=product['pay_time'])
                    stats_new.save()
def get_daily_ding_huo_stats(prev_day):
    """For the day ``prev_day`` days ago, record per-SKU purchase-order volume
    and the quantity-weighted average ordering time into SupplyChainStatsOrder."""
    today = datetime.date.today()
    target_day = today - datetime.timedelta(days=prev_day)
    start_dt = datetime.datetime(target_day.year, target_day.month, target_day.day)
    end_dt = datetime.datetime(target_day.year, target_day.month, target_day.day, 23, 59, 59)
    # Purchase-order lines created on the target day, excluding voided lists
    # (status '作废' means "cancelled").
    order_details_dict = OrderDetail.objects.values("outer_id", "chichu_id", "buy_quantity", "created"). \
        exclude(orderlist__status=u'作废').filter(created__gte=start_dt, created__lte=end_dt)
    # {outer_id: {sku_outer_id: {"num": qty, "order_deal_time": weighted avg}}}
    ding_huo_dict = {}
    for order_detail in order_details_dict:
        order_deal_time = time.mktime(order_detail['created'].timetuple()) or 0
        ding_huo_num = order_detail['buy_quantity']
        # Resolve the SKU row to get its outer id (one query per line).
        sku = ProductSku.objects.get(id=order_detail["chichu_id"])
        if order_detail["outer_id"] in ding_huo_dict:
            if sku.outer_id in ding_huo_dict[order_detail["outer_id"]]:
                # Merge into the running quantity-weighted average.
                old_num = ding_huo_dict[order_detail["outer_id"]][sku.outer_id]["num"]
                old_time = ding_huo_dict[order_detail["outer_id"]][sku.outer_id]["order_deal_time"]
                order_deal_time = (order_deal_time * ding_huo_num + old_time * old_num) / (old_num + ding_huo_num)
                ding_huo_dict[order_detail["outer_id"]][sku.outer_id]["num"] = old_num + ding_huo_num
                ding_huo_dict[order_detail["outer_id"]][sku.outer_id]["order_deal_time"] = order_deal_time
            else:
                ding_huo_dict[order_detail["outer_id"]][sku.outer_id] = {"num": ding_huo_num,
                                                                         "order_deal_time": order_deal_time}
        else:
            ding_huo_dict[order_detail["outer_id"]] = {
                sku.outer_id: {"num": ding_huo_num, "order_deal_time": order_deal_time}}
    # Upsert the purchase-order columns of the per-(product, sku, day) stats row.
    for product_outer_id, product_dict in ding_huo_dict.items():
        pro_bean = Product.objects.filter(outer_id=product_outer_id)
        if pro_bean.count() > 0 and pro_bean[0].sale_time and not (pro_bean[0].sale_time > target_day):
            for outer_sku_id, product in product_dict.items():
                temp_new_data = SupplyChainStatsOrder.objects.filter(product_id=product_outer_id,
                                                                     outer_sku_id=outer_sku_id,
                                                                     sale_time=target_day)
                if temp_new_data.count() > 0:
                    stats_new = temp_new_data[0]
                    stats_new.ding_huo_num = product['num']
                    stats_new.order_deal_time = product['order_deal_time']
                    stats_new.save()
                else:
                    stats_new = SupplyChainStatsOrder(product_id=product_outer_id, outer_sku_id=outer_sku_id,
                                                      sale_time=target_day, ding_huo_num=product['num'],
                                                      shelve_time=pro_bean[0].sale_time,
                                                      order_deal_time=product['order_deal_time'])
                    stats_new.save()
def get_daily_goods_arrival_stats(prev_day):
    """For the day ``prev_day`` days ago, record per-SKU arrived volume
    (good + inferior units) and the quantity-weighted average arrival time
    into SupplyChainStatsOrder."""
    today = datetime.date.today()
    target_day = today - datetime.timedelta(days=prev_day)
    start_dt = datetime.datetime(target_day.year, target_day.month, target_day.day)
    end_dt = datetime.datetime(target_day.year, target_day.month, target_day.day, 23, 59, 59)
    # Purchase-order lines whose goods arrived on the target day, excluding
    # voided lists (status '作废' means "cancelled").
    order_details_dict = OrderDetail.objects.values("outer_id", "chichu_id", "arrival_quantity", "inferior_quantity",
                                                    "updated").exclude(orderlist__status=u'作废').filter(
        arrival_time__gte=start_dt, arrival_time__lte=end_dt)
    # {outer_id: {sku_outer_id: {"num": qty, "order_deal_time": weighted avg}}}
    ding_huo_dict = {}
    for order_detail in order_details_dict:
        # NOTE(review): uses the row's `updated` time as the arrival timestamp,
        # even though the filter is on `arrival_time` — confirm this is intended.
        order_deal_time = time.mktime(order_detail['updated'].timetuple()) or 0
        ding_huo_num = order_detail['arrival_quantity'] + order_detail['inferior_quantity']
        sku = ProductSku.objects.get(id=order_detail["chichu_id"])
        if ding_huo_num > 0:  # skip lines with nothing actually arrived
            if order_detail["outer_id"] in ding_huo_dict:
                if sku.outer_id in ding_huo_dict[order_detail["outer_id"]]:
                    # Merge into the running quantity-weighted average.
                    old_num = ding_huo_dict[order_detail["outer_id"]][sku.outer_id]["num"]
                    old_time = ding_huo_dict[order_detail["outer_id"]][sku.outer_id]["order_deal_time"]
                    order_deal_time = (order_deal_time * ding_huo_num + old_time * old_num) / (old_num + ding_huo_num)
                    ding_huo_dict[order_detail["outer_id"]][sku.outer_id]["num"] = old_num + ding_huo_num
                    ding_huo_dict[order_detail["outer_id"]][sku.outer_id]["order_deal_time"] = order_deal_time
                else:
                    ding_huo_dict[order_detail["outer_id"]][sku.outer_id] = {"num": ding_huo_num,
                                                                             "order_deal_time": order_deal_time}
            else:
                ding_huo_dict[order_detail["outer_id"]] = {
                    sku.outer_id: {"num": ding_huo_num, "order_deal_time": order_deal_time}}
    # Upsert the arrival columns of the per-(product, sku, day) stats row.
    for product_outer_id, product_dict in ding_huo_dict.items():
        pro_bean = Product.objects.filter(outer_id=product_outer_id)
        if pro_bean.count() > 0 and pro_bean[0].sale_time and not (pro_bean[0].sale_time > target_day):
            for outer_sku_id, product in product_dict.items():
                temp_new_data = SupplyChainStatsOrder.objects.filter(product_id=product_outer_id,
                                                                     outer_sku_id=outer_sku_id,
                                                                     sale_time=target_day)
                if temp_new_data.count() > 0:
                    stats_new = temp_new_data[0]
                    stats_new.arrival_num = product['num']
                    stats_new.goods_arrival_time = product['order_deal_time']
                    stats_new.save()
                else:
                    stats_new = SupplyChainStatsOrder(product_id=product_outer_id, outer_sku_id=outer_sku_id,
                                                      sale_time=target_day, arrival_num=product['num'],
                                                      shelve_time=pro_bean[0].sale_time,
                                                      goods_arrival_time=product['order_deal_time'])
                    stats_new.save()
def daily_data_stats():
all_data = SupplyChainStatsOrder.objects.all()
all_data_dict = {}
for data in all_data:
if data.product_id in all_data_dict and data.shelve_time in all_data_dict[data.product_id]:
temp_var = all_data_dict[data.product_id][data.shelve_time]
if data.ding_huo_num > 0:
ding_huo_num = temp_var['ding_huo_num']
ding_huo_time = temp_var['order_deal_time']
ding_huo_time = (data.ding_huo_num * data.order_deal_time + ding_huo_num * ding_huo_time) / (
ding_huo_num + data.ding_huo_num)
temp_var['order_deal_time'] = ding_huo_time
temp_var['ding_huo_num'] += data.ding_huo_num
if data.sale_num > 0:
sale_num = temp_var['sale_num']
trade_general_time = temp_var['trade_general_time']
trade_general_time = (data.sale_num * data.trade_general_time + trade_general_time * sale_num) / (
sale_num + data.sale_num)
temp_var['trade_general_time'] = trade_general_time
temp_var['sale_num'] += data.sale_num
if data.arrival_num > 0:
arrival_num = temp_var['arrival_num']
goods_arrival_time = temp_var['goods_arrival_time']
goods_arrival_time = \
(data.arrival_num * data.goods_arrival_time + goods_arrival_time * arrival_num) / (
arrival_num + data.arrival_num)
temp_var['goods_arrival_time'] = goods_arrival_time
temp_var['arrival_num'] += data.arrival_num
if data.goods_out_num > 0:
goods_out_num = temp_var['goods_out_num']
goods_out_time = temp_var['goods_out_time']
goods_out_time = (data.goods_out_num * data.goods_out_time + goods_out_time * goods_out_num) / (
goods_out_num + data.goods_out_num)
temp_var['goods_out_time'] = goods_out_time
temp_var['goods_out_num'] += data.goods_out_num
else:
all_data_dict[data.product_id] = {data.shelve_time: {"sale_num": data.sale_num,
"trade_general_time": data.trade_general_time,
"ding_huo_num": data.ding_huo_num,
"order_deal_time": data.order_deal_time,
"arrival_num": data.arrival_num,
"goods_arrival_time": data.goods_arrival_time,
"goods_out_num": data.goods_out_num,
"goods_out_time": data.goods_out_time}}
for pro_id, temp_data in all_data_dict.items():
for shelve_time, data in temp_data.items():
product = DailySupplyChainStatsOrder.objects.filter(product_id=pro_id, sale_time=shelve_time)
pro_bean = ProductSku.objects.filter(product__outer_id=pro_id)
cost = 0
sale_price = 0
if pro_bean.count() > 0:
cost = pro_bean[0].cost * data['sale_num']
sale_price = pro_bean[0].agent_price * data['sale_num']
if product.count() > 0:
daily_order = product[0]
daily_order.return_num = get_return_num_by_product_id(pro_id)
daily_order.inferior_num = get_inferior_num_by_product_id(pro_id)
daily_order.sale_num = data['sale_num']
daily_order.ding_huo_num = data['ding_huo_num']
daily_order.cost_of_product = cost
daily_order.sale_cost_of_product = sale_price
daily_order.trade_general_time = data['trade_general_time']
daily_order.order_deal_time = data['order_deal_time']
daily_order.goods_arrival_time = data['goods_arrival_time']
daily_order.goods_out_time = data['goods_out_time']
daily_order.save()
else:
temp = DailySupplyChainStatsOrder(product_id=pro_id, sale_time=shelve_time, sale_num=data['sale_num'],
ding_huo_num=data['ding_huo_num'],
cost_of_product=cost, sale_cost_of_product=sale_price,
return_num=get_return_num_by_product_id(pro_id),
inferior_num=get_inferior_num_by_product_id(pro_id),
trade_general_time=data['trade_general_time'],
order_deal_time=data['order_deal_time'],
goods_arrival_time=data['goods_arrival_time'],
goods_out_time=data['goods_out_time'])
temp.save()
from django.db import connection
def get_return_num_by_product_id(outer_id):
    """Return the total returned quantity for a product, or 0 if none.

    Counts merge-order rows in closed/refunded/refunding status, excluding
    invalid / on-the-fly system statuses.

    Fix: the outer_id used to be spliced into the SQL with str.format
    (SQL-injectable); it is now passed as a bound parameter.

    :param outer_id: product outer id
    :return: summed return quantity (0 when no matching rows)
    """
    sql = "select sum(num) as return_num from " \
          "shop_trades_mergeorder where status in ('TRADE_CLOSED','TRADE_REFUNDED','TRADE_REFUNDING') and " \
          "sys_status not in('INVALID','ON_THE_FLY') and outer_id = %s group by outer_id"
    cursor = connection.cursor()
    # Parameter binding lets the driver escape outer_id safely.
    cursor.execute(sql, [outer_id])
    raw = cursor.fetchall()
    return raw[0][0] if len(raw) else 0
def get_inferior_num_by_product_id(outer_id):
    """Return the total inferior (defective) quantity for a product, or 0.

    Fix: the outer_id used to be spliced into the SQL with str.format
    (SQL-injectable); it is now passed as a bound parameter.

    :param outer_id: product outer id
    :return: summed inferior quantity (0 when no matching rows)
    """
    sql = "select sum(inferior_quantity) as inferior_num from " \
          "suplychain_flashsale_orderdetail where outer_id=%s group by outer_id"
    cursor = connection.cursor()
    # Parameter binding lets the driver escape outer_id safely.
    cursor.execute(sql, [outer_id])
    raw = cursor.fetchall()
    return raw[0][0] if len(raw) else 0
from django.shortcuts import render, HttpResponse
from apscheduler.schedulers.background import BackgroundScheduler
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from MeasureThickness.settings import Base_img_path
from django.utils.safestring import mark_safe
from django.db.models import Q
from django.http import JsonResponse
from django.core.cache import cache
from decimal import Decimal
from utils.handel_data import *
from utils import readfiles
from utils import file_type
from utils import auth
from utils.readfiles import *
from django.views import View
from thickness import models
import random
import json
# Module-level singletons shared by the views below.
scheduler = BackgroundScheduler()  # APScheduler background scheduler
handledataset = HandleDataSet()  # data-set helper (from utils.handel_data)
handleimgs = HandleImgs()  # image helper, used for compression (from utils.handel_data)
# Running count of files received by UploadFileView in the current batch;
# reset via callback_zero().  NOTE(review): a module global is not safe under
# concurrent uploads or multi-worker deployments — confirm single worker.
file_count = 0
@login_required
@csrf_exempt
def tag_manage(request):
    """
    Tag management page.

    GET renders the tag list template; POST returns one page of the list as
    JSON (pagination via ``pager``).

    :param request: HttpRequest
    :return: rendered page (GET) or JSON page payload (POST)
    """
    file_tag_obj = models.DataTag.objects.values('id', 'file_name', 'tag_content', 'create_time').all().order_by('-id')
    count = file_tag_obj.count()
    try:
        version = select_version()
        # Enrich each row with its thickness and a display-friendly timestamp.
        for item in file_tag_obj:
            file_id = item['id']
            true_thickness = get_most_true_thickness(file_id)
            item['true_thickness'] = true_thickness
            # Drop the microseconds part for display.
            item['create_time'] = str(item['create_time']).split('.')[0]
            if item['tag_content']:
                # NOTE(review): tag_content is the str() of a dict parsed back
                # with eval() — fragile, and unsafe if the column is ever
                # written from an untrusted path; json would be safer.
                tag_content_dict = eval(item['tag_content'])
                item['file_explain'] = tag_content_dict['file_explain']
                item['img_path'] = tag_content_dict['img_path']
    except:
        # Best-effort enrichment: render the bare list if anything fails.
        pass
    if request.method == "GET":
        # locals() hands every local above to the template context.
        return render(request, "thickness/tag_manage.html", locals())
    if request.method == "POST":
        # Pagination
        result = pager(request, file_tag_obj)
        result['data_list'] = result['data_list']
        return HttpResponse(json.dumps(result))
@csrf_exempt
def tag_manage_save_ajax(request):
    """
    Save a file's tag when the edit dialog is closed.

    Updates the free-text explanation, optionally replaces the attached image
    (written to disk, then compressed), and reruns the algorithm when the
    hand-measured thickness changed.

    :param request: POST with ``file-explain``, ``nid``, ``true-thickness``
        and an optional uploaded ``img_obj`` file
    :return: JSON {'status': bool, 'message': saved image path or None}
    """
    result = {'status': False, 'message': None}
    try:
        file_explain = request.POST.get('file-explain')
        file_id = request.POST.get('nid')
        true_thickness = request.POST.get('true-thickness')
        if true_thickness == 'null' or true_thickness == '':
            pass
        else:
            # Rerun the algorithm for the new hand-measured thickness
            restart_run_alg(file_id, true_thickness)
        img_obj = request.FILES.get('img_obj')
        # tag_content is stored as the str() of a dict and read back via eval().
        tag_content = models.DataTag.objects.values('tag_content').filter(id=file_id)[0]['tag_content']
        if tag_content:
            tag_content = eval(tag_content)
            old_img_path = tag_content['img_path']
        else:
            tag_content = {}
            old_img_path = ''
        if img_obj:
            # Write the upload to disk, then compress it in place
            with open(Base_img_path + img_obj.name, 'wb') as f:
                f.write(img_obj.read())
            handleimgs.compress_image(Base_img_path + img_obj.name)
            tag_content['file_explain'] = file_explain
            tag_content['img_path'] = '/' + Base_img_path + img_obj.name
        else:
            # No new image: keep whatever path was stored before
            tag_content['file_explain'] = file_explain
            tag_content['img_path'] = old_img_path
        models.DataTag.objects.filter(id=file_id).update(tag_content=tag_content)
        result = {'status': True, 'message': tag_content['img_path']}
    except Exception as e:
        print(e, '上传失败')
    return HttpResponse(json.dumps(result))
def restart_run_alg(file_id, true_thickness):
    """
    Rerun the algorithm for a whole file after its hand-measured thickness
    changed, under each of the five most recent algorithm versions.

    Fix: the @login_required decorator was removed.  This is a plain helper,
    not a view — its first argument is a file id, not an HttpRequest — so the
    decorator's wrapper treated ``file_id`` as the request and broke every
    call made from tag_manage_save_ajax.

    :param file_id: file ID
    :param true_thickness: hand-measured thickness value (string or number)
    :return: None
    """
    prev_true_thickness = get_most_true_thickness(file_id)
    # Only rerun when the value actually changed
    if prev_true_thickness != float(true_thickness):
        models.DataFile.objects.filter(file_name_id=file_id).update(true_thickness=true_thickness)
        version_obj = models.Version.objects.values('version').all().order_by('-id')[:5]
        version_list = [version['version'] for version in version_obj]
        data_id_list = models.DataFile.objects.values('nid').filter(file_name_id=file_id)
        data_id_list = [item['nid'] for item in data_id_list]  # int
        handle_alg_process(data_id_list, version_list, only_run_alg=False)
@csrf_exempt
def generate_dataset_by_file_ajax(request):
    """
    Build a data set from whole files.

    For each selected file id, records [create_time, first nid, last nid] and
    collects every data id belonging to the file, then persists both as a new
    DataSetCondition row.

    :param request: POST with ``selected_data_id_list`` (list literal of file ids)
    :return: JSON {'status': bool, 'message': str}
    """
    try:
        time_and_id = []
        data_set_id = []
        # SECURITY NOTE: eval() of client-supplied POST data can execute
        # arbitrary code — should be json.loads()/ast.literal_eval().
        selected_data_id_list = eval(request.POST.get('selected_data_id_list'))
        if selected_data_id_list:
            for file_id in selected_data_id_list:
                create_time = str(
                    models.DataFile.objects.values('create_time').filter(file_name_id=file_id)[0]['create_time'])
                temp_id = models.DataFile.objects.values('nid').filter(file_name_id=file_id)
                first_id = temp_id.first()['nid']
                last_id = temp_id.last()['nid']
                time_and_id_temp = [create_time, first_id, last_id]
                time_and_id.append(time_and_id_temp)
                for item in temp_id:
                    data_set_id.append(item['nid'])
            # Persist the data-set selection
            models.DataSetCondition.objects.create(time_and_id=time_and_id, data_set_id=data_set_id)
            result = {'status': True, 'message': '成功生成数据集'}
        else:
            result = {'status': False, 'message': '生成数据集失败'}
    except Exception as e:
        result = {'status': False, 'message': '生成数据集失败'}
        print(e)
    return HttpResponse(json.dumps(result))
@csrf_exempt
def remove_file_ajax(request):
    """
    Delete the DataTag rows (files) named in the posted id list.

    Fix: ``result`` was only assigned inside ``if selected_file_id_list:``,
    so an empty/falsy id list raised NameError at the return; it now has a
    failure default.

    :param request: POST with ``selected_file_id_list`` (list literal of ids)
    :return: JSON {'status': bool, 'message': str}
    """
    result = {'status': False, 'message': '删除文件失败'}
    try:
        # SECURITY NOTE: eval() of client-supplied POST data can execute
        # arbitrary code — should be json.loads()/ast.literal_eval().
        selected_file_id_list = eval(request.POST.get('selected_file_id_list'))
        if selected_file_id_list:
            for file_id in selected_file_id_list:
                models.DataTag.objects.filter(id=file_id).delete()
            result = {'status': True, 'message': '删除文件成功'}
    except Exception as e:
        result = {'status': False, 'message': '删除文件失败'}
        print(e)
    return HttpResponse(json.dumps(result))
@csrf_exempt
def search_file_ajax(request):
    """
    Search files whose stored tag content contains the given text.

    :param request: POST with ``search_value``
    :return: JSON {'status': bool, 'message': str, 'data_list': [...]}
    """
    try:
        search_value = request.POST.get('search_value')
        # OR query on tag_content (single child today; Q kept for extension).
        q = Q()
        q.connector = "OR"
        q.children.append(("tag_content__contains", search_value))
        search_obj = list(models.DataTag.objects.filter(q).values('id', 'file_name', 'tag_content', 'create_time').order_by('-id'))
        for item in search_obj:
            file_id = item['id']
            true_thickness = models.DataFile.objects.values('true_thickness').filter(file_name_id=file_id)[0][
                'true_thickness']
            item['true_thickness'] = true_thickness
            # Drop the microseconds part for display.
            item['create_time'] = str(item['create_time']).split('.')[0]
            if item['tag_content']:
                # NOTE(review): tag_content parsed with eval(); json would be safer.
                tag_content_dict = eval(item['tag_content'])
                item['file_explain'] = tag_content_dict['file_explain']
                item['img_path'] = tag_content_dict['img_path']
        result = {'status': True, 'message': 'success', 'data_list': search_obj}
    except Exception as e:
        result = {'status': False, 'message': 'fail', 'data_list': []}
        print(e)
    return HttpResponse(json.dumps(result))
@login_required
@csrf_exempt
def single_file_data(request, file_id, version):
    """
    Data listing for a single file.

    GET renders the list page; POST returns one page of rows, each joined
    with the algorithm thickness for the currently selected version.

    :param request: HttpRequest
    :param file_id: file ID
    :param version: algorithm version from the URL (the session value wins)
    :return: rendered page (GET) or JSON page payload (POST)
    """
    # Read selected_version from the session
    data_type = "file"
    selected_version = request.session.get('selected_version')
    if not selected_version:  # No version picked yet: default to the latest
        selected_version = select_version()
    version_obj = models.Version.objects.values('version').order_by('-id')
    data_obj = models.DataFile.objects.filter(file_name_id=file_id).order_by('nid')
    count = data_obj.count()
    try:
        file_obj = data_obj.values('file_name__file_name').first()
        file_name = file_obj['file_name__file_name']
        # file_id = file_obj['file_name_id']
    except:
        # File may have no rows; the template still renders without file_name.
        pass
    if request.method == "GET":
        # locals() hands every local above to the template context.
        return render(request, "thickness/single_file_list.html", locals())
    elif request.method == "POST":
        # Pagination
        result = pager(request, data_obj)
        single_file_obj = []
        for item in result['data_list']:
            try:
                versionTothcikness_obj = \
                    item.versiontothcikness_set.filter(version__version=selected_version).values('data_id',
                                                                                                 'version__version',
                                                                                                 'run_alg_thickness')[0]
            except:  # This row has not been run through the algorithm yet
                versionTothcikness_obj = {}
                versionTothcikness_obj['data_id'] = item.nid
                versionTothcikness_obj['version__version'] = selected_version
                versionTothcikness_obj['run_alg_thickness'] = None
            true_thickness = \
                models.DataFile.objects.values('true_thickness').filter(nid=versionTothcikness_obj['data_id'])[0][
                    'true_thickness']
            versionTothcikness_obj['true_thickness'] = true_thickness
            single_file_obj.append(versionTothcikness_obj)
        result['data_list'] = single_file_obj
        return HttpResponse(json.dumps(result))
@csrf_exempt
def single_file_run_alg_ajax(request):
    """
    Run the chosen algorithm version over every data row of one file.

    Also stores the chosen version in the session so subsequent list views
    use it.

    :param request: POST with ``file_id`` and ``version``
    :return: JSON {'status': bool, 'message': str}
    """
    result = {'status': False, 'message': '算法执行失败'}
    try:
        # `time` is not in this module's top-level imports; other views here
        # import it locally too (see dataset_run_alg_ajax), so do the same to
        # guard against a NameError at time.time() below.
        import time
        file_id = request.POST.get('file_id')
        selected_version = request.POST.get('version')
        request.session['selected_version'] = selected_version
        version_list = [selected_version]
        data_id_dict = models.DataFile.objects.values('nid').filter(file_name_id=file_id)
        data_id_list = [item['nid'] for item in data_id_dict]
        t1 = time.time()
        # Run the algorithm
        print(data_id_list)
        handle_alg_process(data_id_list, version_list, only_run_alg=True)
        t2 = time.time()
        print('文件算法时间:', t2 - t1)
        result = {'status': True, 'message': '算法执行成功'}
    except Exception as e:
        print(e, "单个文件跑算法出错!")
    return HttpResponse(json.dumps(result))
@login_required
@csrf_exempt
def dataset_condition_list(request):
    """
    List of stored data-set conditions.

    GET renders the page; POST returns one page of rows as JSON.

    :param request: HttpRequest
    :return: rendered page (GET) or JSON page payload (POST)
    """
    all_dataset_obj = models.DataSetCondition.objects.all().order_by('-id')
    count = all_dataset_obj.count()
    try:
        version_obj = models.Version.objects.values('version').order_by('-id')
        # Read selected_version from the session
        selected_version = request.session.get('selected_version')
        if not selected_version:
            selected_version = select_version()
    except:
        # Best effort: the template copes with missing version context.
        pass
    if request.method == "GET":
        # locals() hands every local above to the template context.
        return render(request, 'thickness/dataset_condition_list.html', locals())
    else:
        # Pagination
        result = pager(request, all_dataset_obj)
        result['data_list'] = list(result['data_list'].values('id', 'time_and_id', 'dataset_tag'))
        return HttpResponse(json.dumps(result))
@csrf_exempt
def save_dataset_tag_ajax(request):
    """
    Persist the tag text for a data set.

    Expects POST fields ``dataset_id`` and ``dataset-tag``; answers with a
    JSON ``{'status': ..., 'message': ...}`` payload.

    :param request: HttpRequest (POST)
    :return: JSON response
    """
    response = {'status': False, 'message': ''}
    try:
        target_id = request.POST.get('dataset_id')
        tag_text = request.POST.get('dataset-tag')
        matching = models.DataSetCondition.objects.filter(id=target_id)
        matching.update(dataset_tag=tag_text)
        response = {'status': True, 'message': '数据集tag修改成功'}
    except Exception as err:
        print(err)
    return HttpResponse(json.dumps(response))
@login_required
@csrf_exempt
def single_dataset_list(request, dataset_id):
    """
    Data listing for a single data set.

    GET renders the page; POST returns one page of rows joined with the
    algorithm thickness for the currently selected version.

    :param request: HttpRequest
    :param dataset_id: data-set ID
    :return: rendered page (GET) or JSON page payload (POST)
    """
    data_type = "data_set"
    version_obj = models.Version.objects.values('version').order_by('-id')
    data_time_condition_obj = models.DataSetCondition.objects.filter(id=dataset_id).values('time_and_id', 'data_set_id')[0]
    # NOTE(review): data_set_id is the str() of a list parsed back with
    # eval(); the value is server-written, but json would still be safer.
    data_set_id = eval(data_time_condition_obj['data_set_id'])
    # Drop ids whose underlying rows were deleted since the set was stored.
    data_id_list = true_data_id_list(data_set_id)
    print('data_id_list', data_id_list)
    count = len(data_id_list)
    # Read selected_version from the session
    selected_version = request.session.get('selected_version')
    if not selected_version:
        selected_version = select_version()
    if request.method == "GET":
        # locals() hands every local above to the template context.
        return render(request, 'thickness/single_dataset_list.html', locals())
    elif request.method == "POST":
        # Pagination
        result = pager(request, data_id_list)
        # Fill the page rows
        data_list = []
        for data_id in result['data_list']:
            try:  # A row may have been deleted meanwhile; skip it
                true_thickness = models.DataFile.objects.values('true_thickness').get(nid=data_id)['true_thickness']
                data_obj = models.DataFile.objects.get(nid=data_id)
                run_alg_thickness_obj = data_obj.versiontothcikness_set.filter(
                    version__version=selected_version).values('run_alg_thickness')
                if run_alg_thickness_obj:  # Already run: take the stored value
                    run_alg_thickness = run_alg_thickness_obj[0]['run_alg_thickness']
                else:
                    run_alg_thickness = None
                data_list.append(
                    {'data_id': data_id, 'run_alg_thickness': run_alg_thickness, 'true_thickness': true_thickness})
            except Exception as e:
                pass
        result['data_list'] = data_list
        return HttpResponse(json.dumps(result))
def true_data_id_list(id_list):
    """
    Filter *id_list* down to the data ids that still exist in DataFile.

    Guards against stale ids in stored data sets (individual rows or whole
    files may have been deleted since the set was created).

    :param id_list: iterable of candidate data ids
    :return: list of ids present in the DataFile table
    """
    # values_list avoids instantiating full model objects just to read .nid.
    return list(models.DataFile.objects.filter(nid__in=id_list).values_list('nid', flat=True))
@csrf_exempt
def dataset_run_alg_ajax(request):
    """
    Run the chosen algorithm version over every data id of a stored data set.

    :param request: POST with ``dataset_id`` and ``selected_version``
    :return: JSON {'status': bool, 'message': str}
    """
    result = {'status': False, 'message': '算法执行失败'}
    try:
        import time
        tt1 = time.time()
        dataset_id = request.POST.get('dataset_id')
        selected_version = request.POST.get('selected_version')
        version_list = [selected_version]
        data_set_id_obj = models.DataSetCondition.objects.filter(id=dataset_id).values('data_set_id')
        # NOTE(review): data_set_id is the str() of a list parsed back with
        # eval(); server-written, but json would still be safer.
        data_id_list = eval(data_set_id_obj[0]['data_set_id'])
        handle_alg_process(data_id_list, version_list, only_run_alg=True)
        tt2 = time.time()
        print('数据集跑算法时间:', tt2 - tt1)
        result = {'status': True, 'message': '算法执行成功'}
    except Exception as e:
        print(e, "跑算法出错!")
    return HttpResponse(json.dumps(result))
def handle_alg_process(data_id_list, version_list, only_run_alg):
    """
    Drive the algorithm run for a set of data ids across algorithm versions.

    For each version: rows that already have a VersionToThcikness record are
    updated in place, and the remaining ids get fresh records created.

    :param data_id_list: data ids to run the algorithm on
    :param version_list: selected version names
    :param only_run_alg: True to recompute run_alg_thickness for every row,
        False to only refresh the stored deviation
    :return: None
    """
    # data_id_list = list_to_str_tuple(data_id_list)
    for version_item in version_list:
        # print('version_item', version_item)
        update_data_id_set = set()
        version_id = models.Version.objects.values('id').get(version=version_item)['id']
        # data_id_list_obj = models.VersionToThcikness.objects.raw(
        #     "select id, data_id_id, run_alg_thickness from thickness_versiontothcikness where data_id_id in %s and version_id=%s order by data_id_id" % (
        #         data_id_list, version_id))
        data_id_list_obj = models.VersionToThcikness.objects.filter(data_id_id__in=data_id_list, version_id=version_id).order_by('data_id_id')
        print('data_id_list_obj', data_id_list_obj)
        # Update pass; fills update_data_id_set with the ids it touched
        update_devation(data_id_list_obj, version_item, version_id, update_data_id_set, only_run_alg)
        # Ids that still need records created
        create_data_id_set = set(data_id_list) - update_data_id_set
        print('create_data_id_set', create_data_id_set)
        # Create pass
        create_run_alg_thickness_and_devation(create_data_id_set, version_item, version_id)
def update_devation(data_id_list_obj, version_item, version_id, update_data_id_set, only_run_alg):
    """
    Refresh existing VersionToThcikness rows.

    only_run_alg=False: only recompute the stored deviation.
    only_run_alg=True: rerun the algorithm to recompute run_alg_thickness,
    then recompute the deviation.

    :param data_id_list_obj: queryset of the selected VersionToThcikness rows
    :param version_item: version name
    :param version_id: version id
    :param update_data_id_set: out-param; collects every data id updated here
        so the caller can create records for the rest
    :param only_run_alg: see above
    :return: None
    """
    for data_item in data_id_list_obj:
        try:
            data_id = data_item.data_id_id
            # Rows with no algorithm result fall into the except below, so the
            # caller will create them (via create_data_id_set) instead.
            run_alg_thickness = data_item.run_alg_thickness
            # print('run_alg_thickness', run_alg_thickness)
            update_data_id_set.add(data_id)
            true_thickness = models.DataFile.objects.values('true_thickness').get(nid=data_id)['true_thickness']
            if not true_thickness:
                true_thickness = 0
            # only_run_alg=True: every selected row is rerun to get a fresh
            # run_alg_thickness
            if only_run_alg:
                thickness_dict = handledataset.handle_data_and_run_alg([data_id, data_id], version_item)
                run_alg_thickness = thickness_dict[data_id]
            # Keep one decimal place
            deviation = export_result(abs(Decimal(str(true_thickness)) - Decimal(str(run_alg_thickness))))
            temp_dict = {'run_alg_thickness': run_alg_thickness, 'deviation': deviation}
            models.VersionToThcikness.objects.filter(data_id=data_id, version=version_id).update(**temp_dict)
            print('update', version_item, data_id)
        except:
            pass
def create_run_alg_thickness_and_devation(data_id_list, version_item, version_id):
    """
    Create VersionToThcikness rows for data that has not been run yet.

    :param data_id_list: data ids needing records
    :param version_item: version name
    :param version_id: version id
    :return: None
    """
    # print('create_data_id_set', create_data_id_set)
    # Run the algorithm, then create deviation and run_alg_thickness records
    if data_id_list:
        thickness_dict = handledataset.handle_data_and_run_alg(list(data_id_list), version_item)  # run the rows that have no result yet
        # print('thickness_dict', thickness_dict)
        for data_id in data_id_list:
            try:
                true_thickness = models.DataFile.objects.values('true_thickness').get(nid=data_id)['true_thickness']
                if not true_thickness:
                    true_thickness = 0
                run_alg_thickness = thickness_dict[data_id]
                deviation = export_result(abs(Decimal(str(true_thickness)) - Decimal(str(run_alg_thickness))))  # keep one decimal place
                temp_dict = {'data_id_id': data_id, 'run_alg_thickness': run_alg_thickness,
                             'version_id': version_id, 'deviation': deviation}
                models.VersionToThcikness.objects.create(**temp_dict)
                # print('create', version_item, data_id)
            except:
                pass
@csrf_exempt
def select_version_ajax(request):
    """
    Remember the algorithm version chosen by the user, via the session.

    :param request: POST with ``version``
    :return: JSON {'status': bool, 'message': str or None}
    """
    outcome = {'status': False, 'message': None}
    try:
        # Renamed local (was `select_version`) so it no longer shadows the
        # select_version() helper used elsewhere in this module.
        chosen_version = request.POST.get('version')
        request.session['selected_version'] = chosen_version
        outcome = {'status': True, 'message': 'success'}
    except Exception as err:
        print(err)
    return HttpResponse(json.dumps(outcome))
@csrf_exempt
def batch_save_true_thickness_ajax(request):
    """
    Batch-set the hand-measured thickness on many data rows.

    Rows whose value actually changed are rerun through the five most recent
    algorithm versions.

    :param request: POST with ``true_thickness`` and
        ``selected_data_id_list`` (list literal of row ids)
    :return: JSON {'status': bool, 'message': str}
    """
    try:
        true_thickness = float(request.POST.get('true_thickness'))
        # SECURITY NOTE: eval() of client-supplied POST data can execute
        # arbitrary code — should be json.loads()/ast.literal_eval().
        selected_data_id_list = eval(request.POST.get('selected_data_id_list'))
        for nid in selected_data_id_list:
            prev_true_thickness = models.DataFile.objects.values('true_thickness').filter(nid=nid)[0][
                'true_thickness']
            # Rerun the algorithm only when the value changed
            if prev_true_thickness != true_thickness:
                print('重跑')
                models.DataFile.objects.filter(nid=nid).update(true_thickness=true_thickness)
                version_obj = models.Version.objects.values('version').all().order_by('-id')[:5]
                version_list = [version['version'] for version in version_obj]
                data_id_list = [int(nid), int(nid)]  # downstream SQL used "where X in (...)", which needed at least two items
                handle_alg_process(data_id_list, version_list, only_run_alg=False)
        result = {'status': True, 'message': '批量设置成功'}
    except Exception as e:
        result = {'status': False, 'message': '厚度值类型错误,正确类型为:浮点型'}
        # print(e)
    return HttpResponse(json.dumps(result))
@csrf_exempt
def remove_data_ajax(request):
    """
    Delete individual data rows by id.

    :param request: POST with ``selected_data_id_list`` (list literal of ids)
    :return: JSON {'status': bool, 'message': str}
    """
    try:
        # SECURITY NOTE: eval() of client-supplied POST data can execute
        # arbitrary code — should be json.loads()/ast.literal_eval().
        selected_data_id_list = eval(request.POST.get('selected_data_id_list'))
        for nid in selected_data_id_list:
            models.DataFile.objects.filter(nid=nid).delete()
            print(nid)
        result = {'status': True, 'message': '删除成功'}
    except Exception as e:
        result = {'status': False, 'message': '删除失败'}
        # print(e)
    return HttpResponse(json.dumps(result))
@csrf_exempt
def remove_dataset_ajax(request):
    """
    Delete one DataSetCondition row.

    :param request: POST with ``dataset_id``
    :return: JSON {'status': bool, 'message': str}
    """
    payload = {'status': False, 'message': '删除数据集失败'}
    try:
        target = request.POST.get('dataset_id')
        models.DataSetCondition.objects.filter(id=target).delete()
        payload = {'status': True, 'message': '删除数据集成功'}
    except Exception as err:
        print(err, "删除数据集出错!")
    return HttpResponse(json.dumps(payload))
def data_2048_chart(request, data_id, thickness):
    """
    Waveform chart and detail view for a single data row.

    Unpacks the stored binary sample buffer into a list of shorts and hands
    everything to the template via locals().

    :param request: HttpRequest
    :param data_id: data row ID
    :param thickness: thickness computed by the algorithm (display only)
    :return: rendered chart page
    """
    data_obj = models.DataFile.objects.values('message_body_data', 'message_head', 'create_time',
                                              'message_body_param', 'file_name_id',
                                              'true_thickness').get(nid=data_id)
    # Parse the stored header (str() of a dict, read back with eval()).
    message_head = eval(data_obj['message_head'])
    # Sample count is the trailing field of Range, e.g. ' 3X,6144'.
    data_len = int(message_head.get('Range', '2048').strip('\n').split(',')[-1])  # ' 3X,6144'
    message_body_data = data_obj['message_body_data'].tobytes()
    # Little-endian signed 16-bit samples.  NOTE(review): `struct` is not in
    # this module's visible import block — presumably supplied by one of the
    # star imports; verify.
    data = list(struct.unpack("<%sh" % data_len, message_body_data))
    if data_obj['message_body_param']:
        message_body_param = eval(data_obj['message_body_param'])
    else:
        message_body_param = ""
    # Values handed to the template
    true_thickness = data_obj['true_thickness']
    data_list = json.dumps(list(enumerate(data)))
    create_time = str(data_obj['create_time'])
    file_name_id = data_obj['file_name_id']
    data_tag_obj = models.DataTag.objects.values('tag_content', 'file_name').filter(id=file_name_id)[0]
    tag_content_obj = data_tag_obj['tag_content']
    file_name = data_tag_obj['file_name']
    if tag_content_obj:
        file_explain = eval(tag_content_obj)['file_explain']
        img_path = eval(tag_content_obj)['img_path']
    else:
        img_path = ''
        file_explain = ''
    return render(request, 'thickness/data_2048_chart.html', locals())
class UploadFileView(View):
    """Upload endpoint: receives files one by one and reports batch progress."""
    @method_decorator(csrf_exempt)  # CSRF/auth decorators go on dispatch for CBVs
    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(UploadFileView, self).dispatch(request, *args, **kwargs)
    def get(self, request, *args, **kwargs):
        """
        Render the upload page.

        :param request: HttpRequest
        :return: rendered upload form
        """
        return render(request, "thickness/upload_file.html")
    def post(self, request, *args, **kwargs):
        """
        Read and persist one uploaded file, then report batch progress:
        rows stored, files processed so far, and names of files that failed.

        NOTE(review): progress is tracked in the module-level ``file_count``
        global — not safe with concurrent uploads or multiple worker
        processes; confirm single-worker usage.

        :param request: POST with ``file_num`` (total files in the batch) and
            the uploaded ``file``
        :return: JSON progress payload
        """
        result = {'status': False, 'code': 1, 'percent': 0, 'success_count': 0, 'file_fail_list': [],
                  'done_status': False}
        try:
            import time
            start = time.time()
            file_num = int(request.POST['file_num'])
            # print('file_num', file_num)
            file = request.FILES['file']
            read_file = ReadFlies(file)
            success_count, file_fail_list = read_file.handle_files()
            # print(success_count, file_fail_list)
            global file_count
            file_count += 1
            # print('file_count', file_count)
            percent = round(file_count / file_num * 100)
            # print('percent', percent)
            done_status = False
            if file_count >= file_num:
                done_status = True
            result = {'status': True, 'code': 0, 'percent': percent, 'success_count': success_count,
                      'file_fail_list': file_fail_list, 'done_status': done_status}
            end = time.time()
            print('总用时%s' % (end - start))
        except Exception as e:
            print(e, '上传失败')
        return HttpResponse(json.dumps(result))
def callback_zero(request):
    """
    Reset the upload-progress counters once a batch upload finishes.

    Zeros the module-level file counter plus the counters kept on the
    ``readfiles`` and ``file_type`` helper modules.

    :param request: HttpRequest
    :return: JSON {'status': bool, 'message': str}
    """
    global file_count
    try:
        file_count = 0
        readfiles.success_count = 0
        file_type.file_fail_list = []
        reply = {'status': True, 'message': 'success'}
    except Exception as err:
        print(err)
        reply = {'status': False, 'message': 'false'}
    return HttpResponse(json.dumps(reply))
class GenerateDataSetView(View):
    """Data-set builder: pick capture dates, then slide id ranges per date."""
    @method_decorator(csrf_exempt)
    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(GenerateDataSetView, self).dispatch(request, *args, **kwargs)
    def get(self, request):
        """
        Render the data-set generation page with the list of distinct capture
        dates, newest first, serialized as JSON for the date picker.

        :param request: HttpRequest
        :return: rendered page
        """
        i = 0
        data_time_list = []
        data_time_temp = []
        # {"value": "1", "title": "2019-09-20"}
        time_list = models.DataFile.objects.all().values('create_time').distinct()
        for item in time_list:
            data_time_temp.append(str(item['create_time']))
        data_time_temp = sorted(data_time_temp)  # ['2019-09-20', '2019-09-21', '2019-09-22', '2019-09-23']
        data_time_temp.reverse()
        for data_time in data_time_temp:
            i += 1
            data_time_dict = {}
            data_time_dict['value'] = str(i)
            data_time_dict['title'] = data_time
            data_time_list.append(data_time_dict)
        data_time_list = json.dumps(data_time_list)
        # print(data_time_list) #[{"value": "1", "title": "2019-09-08"}, {"value": "2", "title": "2019-09-17"}]
        choice_data_time_list = data_time_list
        return render(request, 'thickness/generate_dataset.html', locals())
    def post(self, request):
        """
        Build the slider HTML/JS for the dates the user picked.

        For each selected date a layui range slider is emitted whose min/max
        are the first/last data ids of that date.

        :param request: POST with ``selected_data`` (JSON list of
            {"title": date, ...} items)
        :return: JSON {'status', 'message': HTML, 'script': JS}
        """
        ele = ''
        script = """layui.use('slider', function(){
        var slider = layui.slider;"""
        selected_data = json.loads(request.POST.get('selected_data'))
        print('selected_data', selected_data)
        if selected_data == []:
            result = {'status': False, 'message': ele}
        else:
            for item in selected_data:
                start_id = models.DataFile.objects.filter(create_time=item['title']).values('nid').first()['nid']
                end_id = models.DataFile.objects.filter(create_time=item['title']).values('nid').last()['nid']
                ele += """
                <div style="width: 120px; height: 30px; margin-top: 30px; margin-left: 10px">
                <input type="text"id="data-time" value="%s" disabled style="line-height: 30px; border: none; background-color: white">
                </div>
                <div style="margin-top: -15px; width:500px; margin-left: 120px">
                <div id="slide-%s" class="demo-slider"></div>
                <div id="slider-tips-%s" style=" left: 30px; margin-top: 10px;"></div>
                <input type="text" style="display: none;" id="first-id-%s" value="">
                <input type="text" style="display: none;" id="second-id-%s" value="">
                </div>
                """ % (item['title'], item['title'], item['title'], item['title'], item['title'])
                script += """
                slider.render({
                elem: '#slide-%s'
                //,value: 40 //初始值
                ,range: true //范围选择
                ,min: %s
                ,max: %s
                ,input: true
                ,change: function(value) {
                $('#first-id-%s').val(value[0]);
                $('#second-id-%s').val(value[1]);
                }
                });""" % (item['title'], start_id, end_id, item['title'], item['title'],)
            script += "});"
            # param_list.append({'time': item['title'], 'start_id': start_id, 'end_id': end_id})
            result = {'status': True, 'message': mark_safe(ele), 'script': mark_safe(script)}
        return HttpResponse(json.dumps(result))
@csrf_exempt
def generate_dataset_ajax(request):
    """
    Persist the data set chosen with the sliders.

    Groups the flat input list into [date, first_id, second_id] triples,
    defaults untouched sliders to the date's first row, expands the ranges
    into concrete data ids, and stores a DataSetCondition row.

    :param request: POST with ``input_list`` (JSON flat list, 3 items/date)
    :return: JSON {'status': bool, 'message': str}
    """
    result = {'status': False, 'message': '生成id数据集失败'}
    try:
        import time
        result_list = []
        input_list = json.loads(request.POST.get(
            'input_list'))  # ['2019-09-18', '', '', '2019-09-08', '', '', '2019-09-19', '', '', '2019-09-17', '', '']
        for i in range(0, len(input_list), 3):
            result_list.append(input_list[i: i + 3])
        print(result_list)  # [['2019-09-20', '5', '7'], ] range(4,12) choose(5,7)
        # If a slider was never moved, default to that date's first data row
        for data_item in result_list:
            if data_item[1] == '' and data_item[2] == '':
                nid = int(models.DataFile.objects.filter(create_time=data_item[0]).values('nid')[0]['nid'])
                data_item[1] = data_item[2] = nid
        # Expand the per-date ranges into the selected data ids
        choose_dataset_id_list = handledataset.get_selected_data(result_list)  # choose_dataset_id_list = [5, 6, 7, 20, 21]
        # Persist
        if choose_dataset_id_list:
            start = time.time()
            models.DataSetCondition.objects.create(time_and_id=result_list, data_set_id=choose_dataset_id_list)
            end = time.time()
            print('use time:', end - start)
        result = {'status': True, 'message': '生成id数据集成功'}
    except Exception as e:
        print(e, "def(generate_dataset_ajax)出错")
    return HttpResponse(json.dumps(result))
class DeviationRate(View):
    """
    Bar-chart page of deviation rates for a data set or a file.
    """
    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        return super(DeviationRate, self).dispatch(request, *args, **kwargs)
    def get(self, request, *args, **kwargs):
        # Positional URL args: (nid, data_type).
        version_obj = models.Version.objects.values('version').order_by('-id')
        nid = args[0]  # data-set id or file id
        data_type = args[1]
        # locals() hands the locals above to the template context.
        return render(request, 'thickness/deviation_rate.html', locals())
@csrf_exempt
def deviation_rate_ajax(request, nid, data_type):
    """
    AJAX endpoint feeding the deviation-rate bar chart.

    :param request: POST with ``version`` (comma-separated version names)
    :param nid: data-set id or file id
    :param data_type: "data_set" or "file"
    :return: JSON produced by handle_deviation_rate
    """
    try:
        data_id_list = []
        selected_version_list = request.POST.get('version').split(',')
        if data_type == "data_set":
            # NOTE(review): eval() of a server-written str(list); json safer.
            data_id_list = eval(models.DataSetCondition.objects.values('data_set_id').get(id=nid)['data_set_id'])
            # data_id_list = list_to_str_tuple(data_id_list)
        elif data_type == "file":
            data_id_list = models.DataFile.objects.values('nid').filter(file_name_id=nid)
            data_id_list = [i['nid'] for i in data_id_list]
            # data_id_list = list_to_str_tuple(data_id_list)
        result = handle_deviation_rate(data_id_list, selected_version_list, nid)
    except Exception as e:
        result = {'status': False, 'message': 'false', 'data_list': []}
    return HttpResponse(json.dumps(result))
def handle_deviation_rate(data_id_list, selected_version_list, nid):
    """
    Bucket per-row deviations into 0.1-wide bins for the bar chart.

    Deviations are pre-rounded to one decimal place (see the export_result
    calls where they are stored), so exact-key lookup into deviation_range
    works; anything not matching a key falls into the 1.0 overflow bucket.
    Intermediate maps are cached for 10 minutes so the column-click
    drill-down (column_click_event_ajax) can reuse them.

    :param data_id_list: data ids to chart
    :param selected_version_list: version names to chart
    :param nid: data-set id or file id (string; part of the cache key)
    :return: {'status', 'message', 'data_list': [{'name': version, 'data': counts}]}
    """
    data_list = []
    for version_item in selected_version_list:
        deviation_range = {0.0: 0, 0.1: 0, 0.2: 0, 0.3: 0, 0.4: 0, 0.5: 0, 0.6: 0, 0.7: 0, 0.8: 0, 0.9: 0, 1.0: 0}
        data_id_and_deviation = {}
        data_id_and_devation_dict = {}
        version_id = models.Version.objects.values('id').get(version=version_item)['id']
        try:  # bulk lookup
            # dataset_id_list_obj = models.VersionToThcikness.objects.raw(
            #     "select id, data_id_id, deviation from thickness_versiontothcikness where data_id_id in %s and version_id=%s order by data_id_id" % (
            #         data_id_list, version_id))
            dataset_id_list_obj = models.VersionToThcikness.objects.filter(data_id_id__in=data_id_list, version_id=version_id).order_by('data_id_id')
            for data_item in dataset_id_list_obj:
                data_id = data_item.data_id_id
                deviation = data_item.deviation
                data_id_and_deviation[data_id] = deviation
        except:
            pass
        data_id_and_devation_dict[version_item] = data_id_and_deviation
        for k_data_id, v_deviation_item in data_id_and_devation_dict[version_item].items():
            judge_range = deviation_range.get(v_deviation_item)
            # 0 is a valid bucket count, so test explicitly, not by truthiness
            if judge_range or judge_range == 0:
                deviation_range[v_deviation_item] = deviation_range[v_deviation_item] + 1
            else:
                # Deviation outside the table: overflow bucket
                deviation_range[1.0] = deviation_range[1.0] + 1
        # print(deviation_range)
        cache.set('data_id_and_devation_dict_' + version_item + '_' + nid, data_id_and_devation_dict, 600)
        cache.set('deviation_range_' + version_item + '_' + nid, deviation_range, 600)
        deviation_num = [v for k, v in deviation_range.items()]
        data_list.append({'name': version_item, 'data': deviation_num})
    result = {'status': True, 'message': 'success', 'data_list': data_list}
    return result
@csrf_exempt
def column_click_event_ajax(request, nid):
    """
    Handle a click on a histogram column: return the rows whose deviation
    falls into the clicked bucket for the given version.

    Relies on the per-version caches written by handle_deviation_rate();
    when the cache has expired the client receives status=False.

    :param request: POST with 'version', 'deviation' (a label such as
                    '0.3-0.4'), plus the paging fields consumed by pager()
    :param nid: dataset id or file id (part of the cache key)
    :return: JSON response with the matching rows for the current page
    """
    selected_data_id = []
    version = request.POST.get('version')
    deviation = request.POST.get('deviation')
    data_id_and_devation_dict = cache.get('data_id_and_devation_dict_' + version + '_' + nid)
    deviation_range = cache.get('deviation_range_' + version + '_' + nid)
    if data_id_and_devation_dict and deviation_range:  # cache hit
        # The bucket label looks like '0.3-0.4'; its left edge is the key.
        selected_deviation = deviation.split('-')[0]
        for k, v in data_id_and_devation_dict[version].items():
            if float(selected_deviation) == 1.0:
                # The 1.0 bucket also collects every deviation >= 1.0.
                if v >= 1.0:
                    selected_data_id.append(k)
            if float(selected_deviation) == v and float(selected_deviation) != 1.0:
                selected_data_id.append(k)
        # Pagination
        result = pager(request, selected_data_id)
        # Fill in row details for the current page
        data_list = []
        for data_id in result['data_list']:
            try:  # guard against rows deleted since the cache was written
                true_thickness = models.DataFile.objects.values('true_thickness').get(nid=data_id)[
                    'true_thickness']
                data_obj = models.DataFile.objects.get(nid=data_id)
                run_alg_thickness_obj = data_obj.versiontothcikness_set.filter(
                    version__version=version).values('run_alg_thickness')
                if run_alg_thickness_obj:  # algorithm already ran: take its thickness
                    run_alg_thickness = run_alg_thickness_obj[0]['run_alg_thickness']
                else:
                    run_alg_thickness = None
                data_list.append({'data_id': data_id, 'version': version, 'run_alg_thickness': run_alg_thickness,
                                  'true_thickness': true_thickness})
            except:
                pass
        result = {'status': True, 'message': 'have cache', 'data_list': data_list}
    else:
        result = {'status': False, 'message': 'no cache', 'data_list': []}
    return HttpResponse(json.dumps(result))
@csrf_exempt
def submit_true_thickness(request):
    """
    Save a manually measured ("true") thickness for one data row.

    If the value actually changed, the algorithm is re-run for the five
    most recent versions so cached algorithm thicknesses stay consistent.

    :param request: POST with 'true_thickness' (float string) and 'nid'
    :return: JSON status response
    """
    try:
        true_thickness = float(request.POST.get('true_thickness'))
        data_id = request.POST.get('nid')
        prev_true_thickness = models.DataFile.objects.values('true_thickness').filter(nid=data_id)[0][
            'true_thickness']
        # Re-run the algorithm only when the value really changed.
        if prev_true_thickness != true_thickness:
            print('重跑')
            models.DataFile.objects.filter(nid=data_id).update(true_thickness=true_thickness)
            version_obj = models.Version.objects.values('version').all().order_by('-id')[:5]
            version_list = [version['version'] for version in version_obj]
            # Duplicate the id on purpose: the downstream raw SQL uses
            # "where XX in (...)" and must not get a one-element tuple.
            data_id_list = [int(data_id), int(data_id)]
            handle_alg_process(data_id_list, version_list, only_run_alg=False)
        result = {'status': True, 'message': '设置成功'}
    except Exception as e:
        # Any parse/DB failure is reported as a data-format error.
        result = {'status': False, 'message': '数据格式有误'}
        print(e)
    return HttpResponse(json.dumps(result))
class AlgAPI(View):
    """API endpoint receiving algorithm-update notifications from the algorithm service."""

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        # CSRF is disabled for the whole view: the caller is an external service.
        return super(AlgAPI, self).dispatch(request, *args, **kwargs)

    # def get(self, request, *args, **kwargs):
    #     print(request.GET)
    #     response = {'status': True, 'data': ['V-2.0', 'V-3.0']}
    #     return JsonResponse(response)

    @method_decorator(auth.alg_api_auth)
    def post(self, request, *args, **kwargs):
        """
        Receive updated algorithm information pushed by the algorithm API.

        The body is a JSON document; it is currently only logged, and an
        acknowledgement is returned.

        :param request: request whose body is UTF-8 JSON
        :return: JSON acknowledgement
        """
        alg_info = json.loads(request.body.decode('utf-8'))
        print(alg_info)
        result = {'status': True, 'message': None}
        return JsonResponse(result)
def pager(request, data_obj):
    """
    Slice *data_obj* down to the page requested by the client.

    :param request: POST with 'limit' (items per page) and 'curr_page'
                    (1-based page number)
    :param data_obj: the sequence to paginate
    :return: {'status': True, 'data_list': <items for the requested page>}
    """
    limit = int(request.POST.get('limit'))        # items per page
    curr_page = int(request.POST.get('curr_page'))
    # (curr_page - 1) * limit is already 0 for page 1, so one formula
    # covers every page; the original special-cased page 1 redundantly.
    start_range = (curr_page - 1) * limit
    end_range = curr_page * limit
    return {'status': True, 'data_list': data_obj[start_range: end_range]}
def page_404(request):
    """Render the custom 404 error page.

    :param request: the incoming request
    :return: rendered thickness/404.html
    """
    template_name = 'thickness/404.html'
    return render(request, template_name)
def clear_repeat_imgs():
    """
    Delete image files on disk that are no longer referenced by any tag.

    Collects every 'img_path' file name referenced by DataTag.tag_content,
    then removes each file under Base_img_path that is not in that set.
    Scheduled periodically via the module-level scheduler.
    """
    db_img_set = set()
    tag_content_obj = models.DataTag.objects.values('tag_content').all()
    for item in tag_content_obj:
        if item['tag_content']:
            # HACK: tag_content holds a python-literal string; eval() on DB
            # content is unsafe if the field is ever user-controlled --
            # consider ast.literal_eval instead (flagged, not changed here).
            img_path = eval(item['tag_content'])['img_path']
            if img_path != '':
                img_name = os.path.split(img_path)[1]
                db_img_set.add(img_name)
    local_img_set = set(os.listdir(Base_img_path))
    # Files present locally but not referenced in the DB are stale.
    repeat_imgs = local_img_set - db_img_set
    for repeat_img in repeat_imgs:
        os.remove(Base_img_path + repeat_img)
def export_result(num):
    """
    Truncate a number to one decimal place without rounding.

    Works on the decimal string representation, so 2.39 -> 2.3 (not 2.4)
    and -1.25 -> -1.2 (truncation toward zero).  Values whose str() has no
    decimal point (ints, or floats shown in scientific notation such as
    1e-07) are returned as float unchanged instead of raising ValueError
    as the original did.

    :param num: the number to truncate
    :return: float truncated to one decimal place
    """
    int_part, sep, frac_part = str(num).partition('.')
    if not sep:
        # No decimal point in the representation: nothing to truncate.
        return float(num)
    return float(int_part + '.' + frac_part[:1])
def list_to_str_tuple(id_list):
    """
    Convert an id list into the string form of a tuple, e.g. '(1, 2)'.

    A single-element list is duplicated first so the result never looks
    like '(1,)' -- the downstream raw SQL "where XX in (...)" cannot take
    a one-element tuple literal.

    :param id_list: list of ids
    :return: string representation of a tuple of ids
    """
    if len(id_list) == 1:
        id_list = id_list * 2
    return str(tuple(id_list))
def get_most_true_thickness(file_id):
    """
    Return the hand-measured thickness that represents a file.

    A file's rows normally share one measured thickness, but individual
    rows may differ, so the most frequent value within a random sample of
    up to 20 rows is used as the representative value.

    :param file_id: file id
    :return: the most common true_thickness in the sample
    """
    true_thickness_obj = models.DataFile.objects.values('true_thickness').filter(file_name_id=file_id)
    # BUG FIX: the original called random.shuffle(list(queryset)), which
    # shuffled a throwaway copy -- the "sample" was always just the first
    # 20 rows.  Materialize the values once, then shuffle that list.
    values = [item['true_thickness'] for item in true_thickness_obj]
    random.shuffle(values)
    sample_list = values[:20]
    # Pick the value that occurs most often in the sample.
    true_thickness = showmax(sample_list)
    return true_thickness
def showmax(sample_list):
    """
    Return the most frequent value in *sample_list*.

    Replaces the original O(n^2) pairwise scan with Counter (O(n)).
    Ties are still broken by first occurrence: Counter.most_common orders
    equal counts in first-encountered order, matching the original's
    strict-greater-than comparison.

    :param sample_list: values to examine (must be non-empty, hashable)
    :return: the value with the highest occurrence count
    :raises IndexError: if sample_list is empty (same as before)
    """
    from collections import Counter  # local import: file header not visible here
    return Counter(sample_list).most_common(1)[0][0]
def select_version():
    """
    Return the most recently added version string, or the literal string
    'None' when the Version table is empty.

    :return: version string or 'None'
    """
    latest = models.Version.objects.values('version').last()
    return latest['version'] if latest else 'None'
# Module-level scheduled-job registration (runs at import time).
try:
    """定时初始化"""
    # NOTE(review): with APScheduler's cron trigger day_of_week '0,2,4'
    # presumably means Mon/Wed/Fri at 10:00 -- confirm the scheduler type.
    scheduler.add_job(clear_repeat_imgs, 'cron', day_of_week='0,2,4', hour='10', id='cron_time')
    scheduler.start()
except Exception as e:
    # Typically fires when the job id already exists (module imported twice);
    # shut the scheduler down so a half-started instance is not left running.
    print('err:', e)
    scheduler.shutdown()
@csrf_exempt
def test(request):
    """Scratch/debug view: times an (currently empty) code section and renders test.html."""
    t1 = time.time()
    t2 = time.time()
    print(t2 - t1)  # elapsed time of the section between t1 and t2 (nothing yet)
    return render(request, 'test.html')
|
from ED6ScenarioHelper import *
def main():
    """Scenario setup for map T4205 (Grancel): declares the scena file
    header, the NPC name table, the map entry point, character sprite
    chip/pattern resources, ten NPC records, and the script function
    table.  Auto-generated decompiler output -- data only, no logic."""
    # Grancel
    CreateScenaFile(
        FileName = 'T4205 ._SN',
        MapName = 'Grancel',
        Location = 'T4205.x',
        MapIndex = 1,
        MapDefaultBGM = "ed60017",
        Flags = 0,
        EntryFunctionIndex = 0xFFFF,
        Reserved = 0,
        IncludedScenario = [
            '',
            '',
            '',
            '',
            '',
            '',
            '',
            ''
        ],
    )

    # NPC display-name table (indices 8-18).
    BuildStringList(
        '@FileName', # 8
        '艾莉茜雅女王', # 9
        '尤莉亚中尉', # 10
        '约修亚', # 11
        '奥利维尔', # 12
        '金', # 13
        '洛伦斯少尉', # 14
        '洛伦斯残像', # 15
        '洛伦斯残像', # 16
        '洛伦斯残像', # 17
        '洛伦斯残像', # 18
    )

    DeclEntryPoint(
        Unknown_00 = 0,
        Unknown_04 = 0,
        Unknown_08 = 6000,
        Unknown_0C = 4,
        Unknown_0E = 0,
        Unknown_10 = 0,
        Unknown_14 = 9500,
        Unknown_18 = -10000,
        Unknown_1C = 0,
        Unknown_20 = 0,
        Unknown_24 = 0,
        Unknown_28 = 2800,
        Unknown_2C = 262,
        Unknown_30 = 45,
        Unknown_32 = 0,
        Unknown_34 = 360,
        Unknown_36 = 0,
        Unknown_38 = 0,
        Unknown_3A = 0,
        InitScenaIndex = 0,
        InitFunctionIndex = 0,
        EntryScenaIndex = 0,
        EntryFunctionIndex = 1,
    )

    # Character sprite chip resources.
    AddCharChip(
        'ED6_DT07/CH02010 ._CH', # 00
        'ED6_DT07/CH02090 ._CH', # 01
        'ED6_DT07/CH00010 ._CH', # 02
        'ED6_DT07/CH00030 ._CH', # 03
        'ED6_DT07/CH00070 ._CH', # 04
        'ED6_DT07/CH00260 ._CH', # 05
        'ED6_DT07/CH00262 ._CH', # 06
        'ED6_DT07/CH00100 ._CH', # 07
        'ED6_DT07/CH00101 ._CH', # 08
        'ED6_DT07/CH00120 ._CH', # 09
        'ED6_DT07/CH00121 ._CH', # 0A
        'ED6_DT07/CH00140 ._CH', # 0B
        'ED6_DT07/CH00141 ._CH', # 0C
        'ED6_DT07/CH02200 ._CH', # 0D
        'ED6_DT07/CH00264 ._CH', # 0E
        'ED6_DT07/CH00104 ._CH', # 0F
        'ED6_DT07/CH00124 ._CH', # 10
        'ED6_DT07/CH00144 ._CH', # 11
        'ED6_DT07/CH02460 ._CH', # 12
        'ED6_DT07/CH02230 ._CH', # 13
        'ED6_DT07/CH02240 ._CH', # 14
    )

    # Matching chip pattern resources (same index order as AddCharChip).
    AddCharChipPat(
        'ED6_DT07/CH02010P._CP', # 00
        'ED6_DT07/CH02090P._CP', # 01
        'ED6_DT07/CH00010P._CP', # 02
        'ED6_DT07/CH00030P._CP', # 03
        'ED6_DT07/CH00070P._CP', # 04
        'ED6_DT07/CH00260P._CP', # 05
        'ED6_DT07/CH00262P._CP', # 06
        'ED6_DT07/CH00100P._CP', # 07
        'ED6_DT07/CH00101P._CP', # 08
        'ED6_DT07/CH00120P._CP', # 09
        'ED6_DT07/CH00121P._CP', # 0A
        'ED6_DT07/CH00140P._CP', # 0B
        'ED6_DT07/CH00141P._CP', # 0C
        'ED6_DT07/CH02200P._CP', # 0D
        'ED6_DT07/CH00264P._CP', # 0E
        'ED6_DT07/CH00104P._CP', # 0F
        'ED6_DT07/CH00124P._CP', # 10
        'ED6_DT07/CH00144P._CP', # 11
        'ED6_DT07/CH02460P._CP', # 12
        'ED6_DT07/CH02230P._CP', # 13
        'ED6_DT07/CH02240P._CP', # 14
    )

    # Ten NPC records; order presumably matches string-table entries 9-18
    # (confirm against the engine's convention).  The first NPC is the only
    # one with init/talk script functions wired up.
    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 0,
        ChipIndex = 0x0,
        NpcIndex = 0x181,
        InitFunctionIndex = 0,
        InitScenaIndex = 2,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 3,
    )

    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 1,
        ChipIndex = 0x1,
        NpcIndex = 0x181,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 2,
        ChipIndex = 0x2,
        NpcIndex = 0x181,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 3,
        ChipIndex = 0x3,
        NpcIndex = 0x181,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 4,
        ChipIndex = 0x4,
        NpcIndex = 0x181,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 13,
        ChipIndex = 0xD,
        NpcIndex = 0x181,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 6,
        ChipIndex = 0x6,
        NpcIndex = 0x181,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 6,
        ChipIndex = 0x6,
        NpcIndex = 0x181,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 6,
        ChipIndex = 0x6,
        NpcIndex = 0x181,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 6,
        ChipIndex = 0x6,
        NpcIndex = 0x181,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    # Script function table: name strings encode byte offsets.
    ScpFunction(
        "Function_0_292", # 00, 0
        "Function_1_301", # 01, 1
        "Function_2_32E", # 02, 2
        "Function_3_344", # 03, 3
        "Function_4_576", # 04, 4
        "Function_5_3151", # 05, 5
        "Function_6_31F3", # 06, 6
    )
# Scena function 0: map init -- fires the intro event once, then sets up
# character chips/positions based on scenario flags.
def Function_0_292(): pass  # decompiler marker for bytecode offset 0x292
label("Function_0_292")
# Dispatch on scena work value 0; only value 100 has a handler here.
Switch(
    (scpexpr(EXPR_PUSH_VALUE_INDEX, 0x0), scpexpr(EXPR_END)),
    (100, "loc_29E"),
    (SWITCH_DEFAULT, "loc_2B4"),
)

label("loc_29E")
# If flag (0xCC,6) is clear and (0xCC,5) is set: set (0xCC,6) and run event 4 once.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCC, 6)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCC, 5)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_2B1")
OP_A2(0x666)
Event(0, 4)

label("loc_2B1")
Jump("loc_2B4")

label("loc_2B4")
# Flag (0xC8,2) set and (0xC8,6) clear: give party members chips 18-20.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 2)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 6)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_2DE")
SetChrChipByIndex(0x0, 18)
SetChrChipByIndex(0x1, 19)
SetChrChipByIndex(0x138, 20)
SetChrFlags(0x0, 0x1000)
SetChrFlags(0x1, 0x1000)
SetChrFlags(0x138, 0x1000)

label("loc_2DE")
# Flag (0xC8,4) set and (0xCD,3) clear: show NPC 0x8 and place it.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 4)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCD, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_300")
ClearChrFlags(0x8, 0x80)
SetChrPos(0x8, 2170, 12000, 62700, 0)

label("loc_300")
Return()

# Function_0_292 end
# Scena function 1: map entry -- sets a map flag / BGM-related value
# depending on scenario flags, possibly triggering event 6.
def Function_1_301(): pass  # decompiler marker for bytecode offset 0x301
label("Function_1_301")
# Flag (0xCE,0) set: enable map flag 0x2000000, set value 0x4A, run event 6.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCE, 0)), scpexpr(EXPR_END)), "loc_31D")
SetMapFlags(0x2000000)
OP_4F(0x1, (scpexpr(EXPR_PUSH_LONG, 0x4A), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Event(0, 6)
Jump("loc_32D")

label("loc_31D")
# Otherwise, flag (0xC8,4) set: set value 0x54 instead.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 4)), scpexpr(EXPR_END)), "loc_32D")
OP_4F(0x1, (scpexpr(EXPR_PUSH_LONG, 0x54), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))

label("loc_32D")
Return()

# Function_1_301 end
# Scena function 2: the Jc condition pushes constant 1, so this loops
# forever re-running OP_99 -- presumably an idle animation loop for the
# NPC it is attached to (NPC 1's InitScenaIndex is 2).
def Function_2_32E(): pass  # decompiler marker for bytecode offset 0x32E
label("Function_2_32E")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_343")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("Function_2_32E")

label("loc_343")
Return()

# Function_2_32E end
# Scena function 3: talk handler for NPC 0x8 (TalkScenaIndex 3 of the
# first NPC record).  Chooses between a short repeat line and a longer
# first-time conversation based on scenario flag (0xDF,0).
def Function_3_344(): pass  # decompiler marker for bytecode offset 0x344
label("Function_3_344")
TalkBegin(0xFE)
# Flag (0xDF,0) already set: short repeat dialogue.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xDF, 0)), scpexpr(EXPR_END)), "loc_3CC")
TurnDirection(0x8, 0x102, 0)
ChrTalk(
    0x8,
    (
        "#090F艾丝蒂尔、约修亚,\x01",
        "我好久没有像今天这样开心了。\x02\x03",
        "可以的话,以后一定要再来\x01",
        "和我一起喝喝茶、谈谈心哦。\x02",
    )
)
CloseMessageWindow()
Jump("loc_572")

label("loc_3CC")
# First-time branch: set the flag (OP_A2), face char 0x138, emote, talk.
OP_A2(0x6F8)
TurnDirection(0x8, 0x138, 0)
OP_62(0x8, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
Sleep(1000)
ChrTalk(
    0x8,
    (
        "#097F咦……\x02\x03",
        "#090F约修亚刚才竟然也能来到这里,\x01",
        "我原本觉得很不可思议……\x02\x03",
        "不过现在看来,我算是明白了。\x02\x03",
        "#091F呵呵,特务兵们\x01",
        "会被骗过去也是很正常的呢。\x02\x03",
        "#090F……艾丝蒂尔、约修亚,\x01",
        "我好久没有像今天这样开心了。\x02\x03",
        "可以的话,以后一定要再来\x01",
        "和我一起喝喝茶、谈谈心哦。\x02\x03",
        "下次也把科洛蒂娅和卡西乌斯上校\x01",
        "他们一起叫来聊聊天吧。\x02",
    )
)
CloseMessageWindow()

label("loc_572")
TalkEnd(0xFE)
Return()

# Function_3_344 end
def Function_4_576(): pass
label("Function_4_576")
EventBegin(0x0)
OP_28(0x4E, 0x1, 0x8)
OP_6D(2240, 12000, 50930, 0)
OP_67(0, 11000, -10000, 0)
OP_6B(3000, 0)
OP_6C(135000, 0)
OP_6E(255, 0)
ClearChrFlags(0x8, 0x80)
ClearChrFlags(0xD, 0x80)
SetChrPos(0x8, 2170, 12000, 62700, 0)
SetChrPos(0xD, 4080, 12000, 64099, 180)
SetChrPos(0x101, 1870, 12000, 45230, 0)
SetChrPos(0x105, 1870, 12000, 45230, 0)
SetChrPos(0x103, 1870, 12000, 45230, 0)
SetChrChipByIndex(0x101, 7)
SetChrChipByIndex(0x103, 9)
SetChrChipByIndex(0x105, 11)
OP_9F(0x105, 0xFF, 0xFF, 0xFF, 0x0, 0x0)
OP_9F(0x101, 0xFF, 0xFF, 0xFF, 0x0, 0x0)
OP_9F(0x103, 0xFF, 0xFF, 0xFF, 0x0, 0x0)
def lambda_650():
OP_6D(1570, 12000, 55660, 3000)
ExitThread()
QueueWorkItem(0x101, 3, lambda_650)
def lambda_668():
OP_9F(0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F4)
ExitThread()
QueueWorkItem(0x105, 2, lambda_668)
def lambda_67A():
OP_8E(0xFE, 0x730, 0x2EE0, 0xD566, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x105, 1, lambda_67A)
Sleep(500)
def lambda_69A():
OP_9F(0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F4)
ExitThread()
QueueWorkItem(0x101, 2, lambda_69A)
def lambda_6AC():
OP_8E(0xFE, 0xBCC, 0x2EE0, 0xD0AC, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x101, 1, lambda_6AC)
Sleep(500)
def lambda_6CC():
OP_9F(0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F4)
ExitThread()
QueueWorkItem(0x103, 2, lambda_6CC)
def lambda_6DE():
OP_8E(0xFE, 0x26C, 0x2EE0, 0xD19C, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x103, 1, lambda_6DE)
WaitChrThread(0x101, 0x3)
ChrTalk(
0x105,
"#040F祖母大人,您没事吧?\x02",
)
CloseMessageWindow()
ChrTalk(
0x101,
"#000F我们来救您了,女王陛下!\x02",
)
CloseMessageWindow()
TurnDirection(0x8, 0x105, 400)
ChrTalk(
0x8,
(
"#090F科洛蒂娅……\x01",
"还有艾丝蒂尔……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xD,
(
"总算来了……\x01",
"我已经等的有些不耐烦了。\x02",
)
)
CloseMessageWindow()
def lambda_7CC():
OP_8E(0xD, 0xAD2, 0x2EE0, 0xF3E8, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0xD, 2, lambda_7CC)
def lambda_7E7():
label("loc_7E7")
TurnDirection(0xD, 0x105, 0)
OP_48()
Jump("loc_7E7")
QueueWorkItem2(0xD, 1, lambda_7E7)
def lambda_7F8():
OP_6D(2180, 13000, 59350, 3000)
ExitThread()
QueueWorkItem(0x101, 2, lambda_7F8)
def lambda_810():
OP_6C(45000, 3000)
ExitThread()
QueueWorkItem(0x101, 3, lambda_810)
def lambda_820():
OP_6E(321, 3000)
ExitThread()
QueueWorkItem(0x105, 2, lambda_820)
def lambda_830():
OP_67(0, 6360, -10000, 3000)
ExitThread()
QueueWorkItem(0x105, 3, lambda_830)
Sleep(800)
def lambda_84D():
OP_8F(0xFE, 0x88E, 0x2EE0, 0xF744, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0x8, 2, lambda_84D)
Sleep(200)
def lambda_86D():
OP_8E(0xFE, 0x83E, 0x2EE0, 0xDDC2, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x105, 1, lambda_86D)
Sleep(110)
def lambda_88D():
OP_8E(0xFE, 0xD20, 0x2EE0, 0xDFB6, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x101, 1, lambda_88D)
Sleep(100)
def lambda_8AD():
OP_8E(0xFE, 0x280, 0x2EE0, 0xE074, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x103, 1, lambda_8AD)
WaitChrThread(0x101, 0x2)
ChrTalk(
0x101,
(
"#000F洛、洛伦斯少尉!\x01",
"你怎么会在这里……\x02",
)
)
CloseMessageWindow()
OP_44(0xD, 0xFF)
ChrTalk(
0xD,
(
"#280F呵呵……\x01",
"我的任务是保护女王陛下。\x02\x03",
"出现在这里也没什么不可思议的吧?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F别开玩笑了!\x02\x03",
"不管你的实力有多强,\x01",
"我们这边可是有三个人的!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x103,
(
"#020F怎么会,这家伙……\x01",
"好强的压迫感。\x02\x03",
"到底是谁?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F他是情报部——特务部队队长,\x01",
"洛伦斯·博格少尉!\x02\x03",
"过去是猎兵出身,\x01",
"后来被上校招入麾下!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xD,
(
"#280F哦,竟然调查到这样的程度了。\x02\x03",
"不愧是S级游击士\x01",
"卡西乌斯·布莱特的女儿。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#000F!!!\x02",
)
CloseMessageWindow()
ChrTalk(
0x103,
(
"#020F居然连从未向外界公布过的\x01",
"老师的级别也知道……\x02\x03",
"这家伙,不是个等闲之辈啊。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xD,
(
"#280F呵呵……\x01",
"你的事情我也很清楚。\x02\x03",
"级别C、外号『银闪』的\x01",
"雪拉扎德·哈维。\x02\x03",
"近日似乎就要升成级别B了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x103,
"#020F……………………………\x02",
)
CloseMessageWindow()
ChrTalk(
0x105,
(
"#040F对、对不起……\x01",
"请把祖母交还给我好吗。\x02\x03",
"如果你只是被\x01",
"上校所雇佣的话,\x01",
"现在已经没有必要再战斗了啊。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xD,
(
"#280F呵呵,驱动着这个世界的,\x01",
"并非只有眼睛能够看得到的东西。\x02\x03",
"就像只看结晶回路盘是无法\x01",
"知晓齿轮的运动一样……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x105,
"#040F咦……\x02",
)
CloseMessageWindow()
ChrTalk(
0xD,
(
"#280F注意听好了,科洛蒂娅公主。\x02\x03",
"所谓国家,与巨大而复杂\x01",
"的导力器是相似的。\x02\x03",
"将人的力量像结晶回路一样充分调动起来的,\x01",
"就是所谓组织、制度这样的齿轮……\x02\x03",
"而将其包裹着的就是国土这样的框架。\x02\x03",
"对于这些知识,如果不能掌握,\x01",
"那你就没有作为女王的资格。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x105,
"#040F!?\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#090F非常有趣的比喻啊。\x02\x03",
"而且……\x01",
"的确有可能如你所说的那般。\x02\x03",
"真没有想到在这样的地方\x01",
"竟然能听到国家论……\x02",
)
)
CloseMessageWindow()
TurnDirection(0xD, 0x8, 400)
ChrTalk(
0xD,
(
"#280F呵呵……刚才失礼了。\x01",
"这些说法对于陛下您来说是没有必要的。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F虽然不太明白是怎么回事儿……\x02\x03",
"不过看起来,你好像\x01",
"没有释放女王陛下的打算了?\x02",
)
)
CloseMessageWindow()
TurnDirection(0xD, 0x101, 400)
ChrTalk(
0xD,
"#280F就算如此……你们想要怎样?\x02",
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F那还用说……\x01",
"拼尽全力也要救回女王陛下!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x103,
(
"#020F是啊……\x01",
"既然已经到了这里,就没有理由后退了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x105,
(
"#040F……虽然从你身上\x01",
"感觉不到什么敌意……\x02\x03",
"但是为了将祖母大人救回来,\x01",
"我会向你挥起手中的剑的!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xD,
"#280F哼哼,不错啊……\x02",
)
CloseMessageWindow()
TurnDirection(0xD, 0x8, 400)
OP_8E(0xD, 0x9E2, 0x2EE0, 0xF4E2, 0x7D0, 0x0)
def lambda_121E():
OP_90(0xFE, 0x0, 0x0, 0x7D0, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0xD, 1, lambda_121E)
def lambda_1239():
OP_91(0xFE, 0x0, 0x0, 0x7D0, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x8, 1, lambda_1239)
def lambda_1254():
OP_67(0, 5500, -10000, 3000)
ExitThread()
QueueWorkItem(0x101, 3, lambda_1254)
def lambda_126C():
OP_6E(295, 3000)
ExitThread()
QueueWorkItem(0x105, 2, lambda_126C)
WaitChrThread(0x8, 0x1)
SetChrFlags(0xD, 0x20)
OP_51(0xD, 0x8, (scpexpr(EXPR_PUSH_LONG, 0xB), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0xD, 6)
TurnDirection(0xD, 0x105, 400)
def lambda_129D():
OP_99(0xFE, 0x0, 0xB, 0xBB8)
ExitThread()
QueueWorkItem(0xD, 1, lambda_129D)
OP_96(0xD, 0x8AC, 0x2EE0, 0xEEFC, 0x190, 0x1B58)
WaitChrThread(0xD, 0x1)
WaitChrThread(0x101, 0x3)
ChrTalk(
0xD,
"#280F来……我当你们的对手。\x02",
)
CloseMessageWindow()
Sleep(500)
Battle(0x39A, 0x0, 0x0, 0x0, 0xFF)
EventBegin(0x0)
SetChrPos(0x101, 3360, 12000, 57270, 0)
SetChrPos(0x105, 2110, 12000, 56770, 0)
SetChrPos(0x103, 640, 12000, 57460, 0)
Switch(
(scpexpr(EXPR_PUSH_VALUE_INDEX, 0x3), scpexpr(EXPR_END)),
(0, "loc_1348"),
(1, "loc_1785"),
(SWITCH_DEFAULT, "loc_18D2"),
)
label("loc_1348")
OP_28(0x4E, 0x1, 0x10)
OP_51(0xD, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0xD, 14)
SetChrPos(0xD, 2220, 12000, 61180, 180)
OP_2B(0x4D, 0x3)
ChrTalk(
0xD,
(
"#280F……真令人吃惊啊……\x01",
"没想到你们可以达到这种程度。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F呼呼……\x02\x03",
"你、你这家伙!\x01",
"当初决赛的时候没有尽全力吧!?\x02\x03",
"和那时相比强悍得判若两人!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x103,
(
"#020F竟、竟然可以\x01",
"打败这样的怪物……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x105,
"#040F的、的确难以置信啊……\x02",
)
CloseMessageWindow()
ChrTalk(
0xD,
(
"#280F艾丝蒂尔·布莱特……\x01",
"刚才对你太过轻视,在下深表歉意。\x02\x03",
"你如果能继续这么走下去……\x01",
"达到你父亲那种境界也未尝不可。 \x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#000F哎……?\x02",
)
CloseMessageWindow()
ChrTalk(
0xD,
"#280F不过……现在还有一定差距。\x02",
)
CloseMessageWindow()
def lambda_1554():
OP_6D(2400, 12000, 57540, 2000)
ExitThread()
QueueWorkItem(0x8, 2, lambda_1554)
OP_99(0xD, 0x3, 0x0, 0x7D0)
OP_51(0xD, 0x8, (scpexpr(EXPR_PUSH_LONG, 0xB), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0xD, 6)
OP_9F(0xE, 0xFF, 0xFF, 0xFF, 0xC8, 0x0)
OP_9F(0xF, 0xFF, 0xFF, 0xFF, 0x96, 0x0)
OP_9F(0x10, 0xFF, 0xFF, 0xFF, 0x64, 0x0)
OP_9F(0x11, 0xFF, 0xFF, 0xFF, 0x32, 0x0)
SetChrPos(0xE, 2220, 12000, 61180, 180)
SetChrPos(0xF, 2220, 12000, 61180, 180)
SetChrPos(0x10, 2220, 12000, 61180, 180)
SetChrPos(0x11, 2220, 12000, 61180, 180)
OP_43(0xD, 0x1, 0x0, 0x5)
Sleep(70)
OP_43(0xE, 0x1, 0x0, 0x5)
Sleep(70)
OP_43(0xF, 0x1, 0x0, 0x5)
Sleep(70)
OP_43(0x10, 0x1, 0x0, 0x5)
Sleep(70)
OP_43(0x11, 0x1, 0x0, 0x5)
OP_A6(0x0)
def lambda_162F():
OP_6C(24000, 1000)
ExitThread()
QueueWorkItem(0x8, 3, lambda_162F)
OP_A6(0x0)
def lambda_1642():
OP_67(0, 6730, -10000, 1000)
ExitThread()
QueueWorkItem(0x8, 1, lambda_1642)
OP_A6(0x2)
PlayEffect(0x8, 0xFF, 0xFF, 2360, 14000, 57260, 0, 0, 0, 2400, 2400, 2400, 0xFF, 0, 0, 0, 0)
OP_51(0x105, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0x105, 17)
TurnDirection(0x105, 0xD, 0)
def lambda_16A9():
OP_94(0x1, 0xFE, 0xB4, 0x3E8, 0x1B58, 0x0)
ExitThread()
QueueWorkItem(0x105, 2, lambda_16A9)
def lambda_16BF():
OP_99(0xFE, 0x0, 0x3, 0x7D0)
ExitThread()
QueueWorkItem(0x105, 1, lambda_16BF)
OP_51(0x103, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0x103, 16)
TurnDirection(0x103, 0xD, 0)
def lambda_16E6():
OP_94(0x1, 0xFE, 0xB4, 0x3E8, 0x1B58, 0x0)
ExitThread()
QueueWorkItem(0x103, 2, lambda_16E6)
def lambda_16FC():
OP_99(0xFE, 0x0, 0x3, 0x7D0)
ExitThread()
QueueWorkItem(0x103, 1, lambda_16FC)
OP_51(0x101, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0x101, 15)
TurnDirection(0x101, 0xD, 0)
def lambda_1723():
OP_94(0x1, 0xFE, 0xB4, 0x3E8, 0x1B58, 0x0)
ExitThread()
QueueWorkItem(0x101, 2, lambda_1723)
def lambda_1739():
OP_99(0xFE, 0x0, 0x3, 0x7D0)
ExitThread()
QueueWorkItem(0x101, 1, lambda_1739)
Sleep(500)
ChrTalk(
0x101,
"#000F啊啊!?\x02",
)
CloseMessageWindow()
ChrTalk(
0x103,
"#020F呜……!\x02",
)
CloseMessageWindow()
ChrTalk(
0x105,
"#040F呀啊……!\x02",
)
CloseMessageWindow()
Jump("loc_18D2")
label("loc_1785")
OP_28(0x4E, 0x1, 0x20)
SetChrChipByIndex(0x105, 17)
SetChrChipByIndex(0x103, 16)
SetChrChipByIndex(0x101, 15)
ChrTalk(
0xD,
(
"#280F……真让人失望。\x02\x03",
"原本还以为\x01",
"可以让我有些起劲呢……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F为、为什么……\x02\x03",
"和当初的决赛时完全不同……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x103,
(
"#020F……可能那时他\x01",
"还没有尽全力吧……\x02\x03",
"这样强大的力量……\x01",
"或许已经可以和老师匹敌了……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x105,
(
"#040F祖母大人……\x01",
"……对不起……\x02",
)
)
CloseMessageWindow()
Jump("loc_18D2")
label("loc_18D2")
ChrTalk(
0x8,
(
"#090F科洛蒂娅!\x01",
"艾丝蒂尔!\x02",
)
)
CloseMessageWindow()
def lambda_18F4():
OP_8E(0xFE, 0x6F4, 0x2EE0, 0xF1EA, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x8, 1, lambda_18F4)
Sleep(200)
SetChrFlags(0xE, 0x80)
SetChrFlags(0xF, 0x80)
SetChrFlags(0x10, 0x80)
SetChrFlags(0x11, 0x80)
TurnDirection(0xD, 0x8, 400)
def lambda_192F():
OP_6D(2260, 12000, 60490, 1000)
ExitThread()
QueueWorkItem(0x101, 1, lambda_192F)
def lambda_1947():
label("loc_1947")
TurnDirection(0xFE, 0x8, 0)
OP_48()
Jump("loc_1947")
QueueWorkItem2(0xD, 1, lambda_1947)
OP_96(0xD, 0xB0E, 0x2EE0, 0xF1FE, 0x3E8, 0xFA0)
TurnDirection(0xD, 0x8, 400)
ChrTalk(
0xD,
(
"#280F陛下,她们只是\x01",
"暂时不能动了而已。\x02\x03",
"并没有生命危险。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
"#090F……………………………\x02",
)
CloseMessageWindow()
ChrTalk(
0xD,
"#280F哼哼,不错啊……\x02",
)
CloseMessageWindow()
ClearChrFlags(0xD, 0x20)
OP_51(0xD, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0xD, 13)
OP_44(0xD, 0xFF)
TurnDirection(0xD, 0x105, 400)
ChrTalk(
0xD,
(
"#280F那么……\x01",
"就让我好好活动一下吧。\x02\x03",
"恕我失礼……\x01",
"让我摘下这东西。\x02",
)
)
CloseMessageWindow()
TurnDirection(0x8, 0xD, 400)
TurnDirection(0x105, 0xD, 0)
OP_99(0x105, 0x3, 0x0, 0x3E8)
SetChrChipByIndex(0x105, 11)
TurnDirection(0x101, 0xD, 0)
OP_99(0x101, 0x3, 0x0, 0x3E8)
SetChrChipByIndex(0x101, 7)
TurnDirection(0x103, 0xD, 0)
OP_99(0x103, 0x3, 0x0, 0x3E8)
SetChrChipByIndex(0x103, 9)
ChrTalk(
0x101,
"#000F银、银发……\x02",
)
CloseMessageWindow()
ChrTalk(
0x103,
(
"#020F不……\x01",
"是苍金色的……\x02\x03",
"这家伙……\x01",
"看起来似乎是出生在北方的。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xD,
(
"#280F呵呵……\x01",
"的确是北方没错。\x02\x03",
"不过离这里\x01",
"也不算很遥远。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x105,
"#040F咦……\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#090F那双眼眸……\x01",
"为何会有那么深邃的颜色呢。\x02\x03",
"明明还这么年轻……\x01",
"可是却好像经历了巨大的苦难啊。\x02",
)
)
CloseMessageWindow()
TurnDirection(0xD, 0x8, 400)
ChrTalk(
0xD,
(
"#280F……………………………\x02\x03",
"女王啊,您是没有\x01",
"怜悯我的资格的。\x02\x03",
"对于知道『哈梅尔』\x01",
"这个名字的您来说……\x02",
)
)
CloseMessageWindow()
OP_62(0x8, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(1000)
ChrTalk(
0x8,
"#090F哎……\x02",
)
CloseMessageWindow()
TurnDirection(0xD, 0x105, 400)
ChrTalk(
0xD,
(
"#280F好了,差不多是时候了。\x02\x03",
"如你们所愿,\x01",
"我将女王予以归还。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#000F啊……!?\x02",
)
CloseMessageWindow()
ChrTalk(
0xD,
(
"#280F如果想要阻止上校,\x01",
"就赶快前去地下吧。\x02\x03",
"虽然可能已经来不及了……\x02\x03",
"不过还可以抑制住\x01",
"不必要的伤亡。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#090F地下……\x02\x03",
"难道是说从那个地方\x01",
"降到地下的吗?\x02",
)
)
CloseMessageWindow()
TurnDirection(0xD, 0x8, 400)
ChrTalk(
0xD,
(
"#280F哼哼……现在的您对其中的含义,\x01",
"应该清楚的不能再清楚了吧。\x02\x03",
"为他们指引前进的道路吧。\x02\x03",
"……那么,再会了。\x02",
)
)
CloseMessageWindow()
def lambda_1EC1():
label("loc_1EC1")
TurnDirection(0xFE, 0xD, 0)
OP_48()
Jump("loc_1EC1")
QueueWorkItem2(0x8, 1, lambda_1EC1)
def lambda_1ED2():
OP_6D(1670, 12000, 63950, 2000)
ExitThread()
QueueWorkItem(0x101, 3, lambda_1ED2)
def lambda_1EEA():
OP_6E(347, 2000)
ExitThread()
QueueWorkItem(0x101, 2, lambda_1EEA)
def lambda_1EFA():
OP_67(0, 6160, -10000, 2000)
ExitThread()
QueueWorkItem(0xD, 3, lambda_1EFA)
OP_8E(0xD, 0xBFE, 0x2EE0, 0xFC26, 0x1B58, 0x0)
OP_8E(0xD, 0x906, 0x2EE0, 0x1054A, 0x1B58, 0x0)
OP_96(0xD, 0x5E6, 0x3138, 0x108C4, 0x320, 0x1B58)
OP_96(0xD, 0x51E, 0xFFFFD120, 0x12066, 0x3E8, 0x1B58)
WaitChrThread(0xD, 0x2)
OP_62(0x101, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(200)
OP_62(0x103, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(200)
OP_62(0x105, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(200)
OP_62(0x8, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(200)
ChrTalk(
0x101,
"#000F怎么!?\x02",
)
CloseMessageWindow()
OP_51(0x101, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0x101, 65535)
def lambda_1FFD():
OP_8E(0xFE, 0xEEC, 0x2EE0, 0x1064E, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x101, 1, lambda_1FFD)
ChrTalk(
0x103,
"#020F来、来真的!?\x02",
)
CloseMessageWindow()
def lambda_2031():
OP_6D(2270, 12000, 66180, 1000)
ExitThread()
QueueWorkItem(0x101, 3, lambda_2031)
def lambda_2049():
OP_6C(129000, 2000)
ExitThread()
QueueWorkItem(0xD, 2, lambda_2049)
Sleep(200)
OP_51(0x103, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0x103, 65535)
def lambda_206E():
OP_8E(0xFE, 0x2EE, 0x2EE0, 0x106DA, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x103, 1, lambda_206E)
Sleep(500)
OP_51(0x105, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrChipByIndex(0x105, 65535)
def lambda_209E():
OP_8E(0xFE, 0x6EA, 0x2EE0, 0xED4E, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0x105, 1, lambda_209E)
WaitChrThread(0x105, 0x1)
def lambda_20BE():
TurnDirection(0xFE, 0x105, 400)
ExitThread()
QueueWorkItem(0x8, 1, lambda_20BE)
WaitChrThread(0x101, 0x1)
ChrTalk(
0x101,
(
"#000F不、不见了……\x02\x03",
"落到湖里去了……?\x02",
)
)
CloseMessageWindow()
WaitChrThread(0x103, 0x1)
ChrTalk(
0x103,
(
"#020F这么说来……\x01",
"可是湖面并没有波痕……\x02\x03",
"那个男的,究竟……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x105,
(
"#040F祖母大人……\x01",
"您没有受伤吧!?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#090F没有呢,科洛蒂娅。\x01",
"他并没有伤害我。\x02\x03",
"话说回来……\x02",
)
)
CloseMessageWindow()
ClearChrFlags(0xB, 0x80)
ClearChrFlags(0xA, 0x80)
ClearChrFlags(0xC, 0x80)
ClearChrFlags(0x9, 0x80)
SetChrFlags(0xB, 0x40)
SetChrFlags(0xA, 0x40)
SetChrFlags(0xC, 0x40)
SetChrFlags(0x9, 0x40)
SetChrPos(0xB, 1870, 12000, 45230, 0)
SetChrPos(0xA, 1870, 12000, 45230, 0)
SetChrPos(0xC, 1870, 12000, 45230, 0)
SetChrPos(0x9, 1870, 12000, 45230, 0)
OP_9F(0xA, 0xFF, 0xFF, 0xFF, 0x0, 0x0)
OP_9F(0xB, 0xFF, 0xFF, 0xFF, 0x0, 0x0)
OP_9F(0xC, 0xFF, 0xFF, 0xFF, 0x0, 0x0)
OP_9F(0x9, 0xFF, 0xFF, 0xFF, 0x0, 0x0)
ChrTalk(
0xA,
"艾丝蒂尔!\x02",
)
CloseMessageWindow()
def lambda_2271():
TurnDirection(0xFE, 0xA, 400)
ExitThread()
QueueWorkItem(0x101, 1, lambda_2271)
def lambda_227F():
TurnDirection(0xFE, 0xA, 400)
ExitThread()
QueueWorkItem(0x103, 1, lambda_227F)
def lambda_228D():
TurnDirection(0xFE, 0xA, 400)
ExitThread()
QueueWorkItem(0x105, 1, lambda_228D)
def lambda_229B():
OP_6D(2540, 12000, 61390, 2000)
ExitThread()
QueueWorkItem(0x101, 2, lambda_229B)
def lambda_22B3():
OP_9F(0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F4)
ExitThread()
QueueWorkItem(0xA, 2, lambda_22B3)
def lambda_22C5():
OP_8E(0xFE, 0xD2A, 0x2EE0, 0xE394, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_22C5)
Sleep(500)
def lambda_22E5():
OP_9F(0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F4)
ExitThread()
QueueWorkItem(0x9, 2, lambda_22E5)
def lambda_22F7():
OP_8E(0xFE, 0x7D0, 0x2EE0, 0xE876, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x9, 1, lambda_22F7)
Sleep(500)
def lambda_2317():
label("loc_2317")
TurnDirection(0xFE, 0x101, 0)
OP_48()
Jump("loc_2317")
QueueWorkItem2(0xB, 1, lambda_2317)
def lambda_2328():
OP_9F(0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F4)
ExitThread()
QueueWorkItem(0xB, 2, lambda_2328)
def lambda_233A():
OP_8E(0xFE, 0x816, 0x2EE0, 0xE204, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0xB, 3, lambda_233A)
Sleep(500)
def lambda_235A():
label("loc_235A")
TurnDirection(0xFE, 0x101, 0)
OP_48()
Jump("loc_235A")
QueueWorkItem2(0xC, 1, lambda_235A)
def lambda_236B():
OP_9F(0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F4)
ExitThread()
QueueWorkItem(0xC, 2, lambda_236B)
def lambda_237D():
OP_8E(0xFE, 0x2C6, 0x2EE0, 0xE448, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0xC, 3, lambda_237D)
def lambda_2398():
OP_8E(0xFE, 0xDF2, 0x2EE0, 0xE826, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x101, 1, lambda_2398)
Sleep(500)
def lambda_23B8():
OP_8E(0xFE, 0x96, 0x2EE0, 0xEBB4, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x103, 1, lambda_23B8)
def lambda_23D3():
OP_6D(2140, 12000, 59300, 2000)
ExitThread()
QueueWorkItem(0x101, 2, lambda_23D3)
def lambda_23EB():
OP_6E(307, 2000)
ExitThread()
QueueWorkItem(0x101, 3, lambda_23EB)
SetChrFlags(0x105, 0x40)
OP_8E(0x105, 0xB22, 0x2EE0, 0xF000, 0x7D0, 0x0)
TurnDirection(0x105, 0x9, 400)
WaitChrThread(0x103, 0x1)
def lambda_2420():
TurnDirection(0xFE, 0xA, 400)
ExitThread()
QueueWorkItem(0x103, 1, lambda_2420)
WaitChrThread(0x101, 0x1)
ChrTalk(
0x101,
(
"#000F约修亚!?\x02\x03",
"太好了,你平安无事!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#010F艾丝蒂尔你那边才是……\x02\x03",
"因为理查德上校和洛伦斯少尉\x01",
"都没有在城内,我很是有些担心呢。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F那个戴红色头盔的,\x01",
"倒是刚才还在这里……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
"#010F咦……!?\x02",
)
CloseMessageWindow()
ChrTalk(
0x103,
(
"#020F他撞破窗户\x01",
"一跃而下,逃走了。\x02\x03",
"真是一个超越常理的怪物啊……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#010F哦,原来是这样啊……\x02\x03",
"真是太好了……\x01",
"你能够平安无事……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#000F约、约修亚……\x02",
)
CloseMessageWindow()
OP_8E(0x9, 0x730, 0x2EE0, 0xEBD2, 0x7D0, 0x0)
ChrTalk(
0x9,
"#170F陛下……幸好您没事……\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#090F尤莉亚中尉……\x01",
"能再见到你我真的很愉快呢。\x02\x03",
"而且,大家……\x01",
"我对你们的感激也是一言难尽啊。\x02",
)
)
CloseMessageWindow()
def lambda_2672():
TurnDirection(0xFE, 0x8, 400)
ExitThread()
QueueWorkItem(0x101, 1, lambda_2672)
def lambda_2680():
TurnDirection(0xFE, 0x8, 400)
ExitThread()
QueueWorkItem(0x103, 1, lambda_2680)
def lambda_268E():
TurnDirection(0xFE, 0x8, 400)
ExitThread()
QueueWorkItem(0x105, 1, lambda_268E)
def lambda_269C():
TurnDirection(0xFE, 0x8, 400)
ExitThread()
QueueWorkItem(0xA, 1, lambda_269C)
def lambda_26AA():
TurnDirection(0xFE, 0x8, 400)
ExitThread()
QueueWorkItem(0xB, 1, lambda_26AA)
def lambda_26B8():
TurnDirection(0xFE, 0x8, 400)
ExitThread()
QueueWorkItem(0xC, 1, lambda_26B8)
ChrTalk(
0xB,
(
"#030F呵呵,女王陛下。\x01",
"您过奖了,我感到非常荣幸。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xC,
(
"#070F能够为您服务,我已深感荣幸了。\x02\x03",
"不过现在还\x01",
"没有到结束的时候。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"#170F虽然已镇压住了城内的特务兵,\x01",
"但又传来了不利的消息。\x02\x03",
"各地的正规军部队\x01",
"正朝着王都方向攻来……\x02\x03",
"很可能是被\x01",
"情报部给操控了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
"#090F是这样的啊……\x02",
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"#170F虽然很抱歉,不过已经没时间了。\x02\x03",
"务必请您乘坐\x01",
"飞行艇从这儿离开吧。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#090F不行……我办不到。\x02\x03",
"话说回来……\x01",
"可怕的事情就要发生了。\x02\x03",
"无论如何也要\x01",
"阻止理查德上校。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
"#170F怎、怎么回事呢?\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#090F昨夜,我和上校谈话之后, \x01",
"总算明白了其真正的目的。\x02\x03",
"他企图将『辉之环』\x01",
"据为己有。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F『辉之环』……\x02\x03",
"好、好像在哪里\x01",
"好像在哪儿听到过……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#010F……女神赐予古代人的\x01",
"『七至宝』之一……\x02\x03",
"可以支配世间一切的\x01",
"传说中的古代遗迹。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F哦,是亚鲁瓦教授说过的……\x02\x03",
"可是那只是教会\x01",
"流传下来的故事啊?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
"#090F………………………………\x02",
)
CloseMessageWindow()
ChrTalk(
0x101,
"#000F哎……?\x02",
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"#030F嗯,应该存在吧?\x02\x03",
"在这个利贝尔王国的某处。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#090F古老的王家传承如下。\x02\x03",
"『辉之环,总有一天会带来灾难,\x01",
"将人类之子的灵魂与炼狱相接。』\x02\x03",
"『我等,为了作为人而存在,\x01",
"在昏暗的狭间将其封印……』\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xC,
(
"#070F『将人类之子的灵魂与炼狱相接』……\x02\x03",
"实在是……令人感到可怕的说法啊。 \x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#090F这些箴言,作为戒律\x01",
"从一代代的国王那里相传至今。\x02\x03",
"也许那个被称作『辉之环』的\x01",
"东西具备这样的危险性,因此\x01",
"王家的祖先才将其封印起来了。\x02\x03",
"而且,在王都的地下\x01",
"还检测出了巨大的导力反应……\x02\x03",
"如果把这两者结合起来考虑的话……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#010F王都的地下\x01",
"封印着『辉之环』……\x02\x03",
"这么想是理所当然的。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#090F是啊……\x01",
"上校肯定也是这么推断的。\x02\x03",
"『辉之环』究竟是什么样的东西,\x01",
"这一点并没有被传承下来……\x02\x03",
"一旦其被人唤醒,\x01",
"说不定会发生十分可怕的事情。\x02\x03",
"甚至有可能和过去所发生的,\x01",
"传说中的『大崩坏』匹敌……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x105,
"#040F怎么……怎么会这样……\x02",
)
CloseMessageWindow()
ChrTalk(
0x103,
"#020F没有想到,竟然会……\x02",
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F女王陛下!\x02\x03",
"洛伦斯少尉曾说过\x01",
"『到地下去吧』这样的话……\x02\x03",
"那是代表什么意思呢?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#090F在这个格兰赛尔城里\x01",
"有一间奇怪的屋子……\x02\x03",
"是一个什么东西也不保管,\x01",
"而且从很久以前就禁止入内的地方……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x105,
"#040F啊……\x02",
)
CloseMessageWindow()
ChrTalk(
0x9,
"#170F是宝物库吧!?\x02",
)
CloseMessageWindow()
OP_28(0x4E, 0x1, 0x40)
OP_28(0x4E, 0x1, 0x80)
OP_28(0x4E, 0x1, 0x100)
OP_28(0x4E, 0x1, 0x200)
OP_28(0x4E, 0x1, 0x400)
OP_28(0x4F, 0x4, 0x2)
OP_28(0x4F, 0x4, 0x4)
OP_A2(0x3FB)
NewScene("ED6_DT01/T4240 ._SN", 100, 0, 0)
IdleLoop()
Return()
# Function_4_576 end
def Function_5_3151(): pass
label("Function_5_3151")

# Decompiled event-script routine: moves character 0xFE through a scripted
# three-stage motion while queueing animation work items on thread slot 3.
# OP_* opcode semantics are engine-defined and not visible in this chunk.
SetChrFlags(0xFE, 0x40)
SetChrFlags(0xFE, 0x20)
SetChrFlags(0xFE, 0x4)
ClearChrFlags(0xFE, 0x80)
OP_96(0xFE, 0x83E, 0x2EE0, 0xEB28, 0x190, 0x1770)
SetChrChipByIndex(0xFE, 6)

def lambda_3187():
    OP_99(0xFE, 0x0, 0x1, 0x7D0)
    ExitThread()

QueueWorkItem(0xFE, 3, lambda_3187)
OP_96(0xFE, 0x79E, 0x3A98, 0xE6AA, 0xBB8, 0x1F40)
OP_A2(0x0)
Sleep(300)
OP_A2(0x1)

def lambda_31B9():
    OP_99(0xFE, 0x2, 0x5, 0x7D0)
    ExitThread()

QueueWorkItem(0xFE, 3, lambda_31B9)
OP_96(0xFE, 0x744, 0x2EE0, 0xE204, 0x0, 0x3A98)
OP_A2(0x2)
Sleep(1000)

def lambda_31E8():
    OP_99(0xFE, 0x3, 0xB, 0xFA0)
    ExitThread()

QueueWorkItem(0xFE, 3, lambda_31E8)
Return()
# Function_5_3151 end
def Function_6_31F3(): pass
label("Function_6_31F3")

# Minimal decompiled routine: one engine opcode call, then return.
OP_1F(0x50, 0xC8)
Return()
# Function_6_31F3 end

SaveToFile()

# Generated-script entry point: execute 'main' under the VM's Try wrapper.
Try(main)
|
# SCons build script: clone the imported construction environment and build
# the constraints extension module as a shared library under #lib/.
Import("env_rovinj_numopt_tut_ext")
env = env_rovinj_numopt_tut_ext.Clone()
env.SharedLibrary(target='#lib/rovinj_numopt_tut_constraints_ext',
                  source=["constraints_ext.cpp"])
|
import numpy as np
import math
import cv2
from PIL import Image
class ImageProcessor:
    """Per-pixel image utilities: channel histograms and RGB<->HSL conversion.

    Images are indexed as image[row][col][channel]. The conversion code
    treats channel order as R, G, B, although process() loads via
    cv2.imread, which returns BGR -- NOTE(review): confirm intended order.
    """

    def __init__(self):
        pass

    def process(self, filename):
        """Load *filename* with OpenCV and remember its pixel count.

        Side effect: sets self.num_pixels, used by normalize_histogram().
        """
        image = cv2.imread(filename)
        self.num_pixels = image.shape[0] * image.shape[1]
        return image

    def histogram(self, image, bins=256):
        """Return a (bins, num_channels) array of per-channel intensity counts.

        Only the first three channels are accumulated (columns 0..2).
        """
        num_channels = image.shape[2]
        hist = np.zeros((bins, num_channels))
        for row in range(len(image)):
            for col in range(len(image[row])):
                r = image[row][col][0]
                g = image[row][col][1]
                b = image[row][col][2]
                hist[r][0] += 1
                hist[g][1] += 1
                hist[b][2] += 1
        return hist

    def normalize_histogram(self, histogram):
        """Scale each channel's total count by 255 / num_pixels.

        Requires process() (or a manual num_pixels assignment) to have run.
        """
        colors = histogram.shape[1]
        normalized = np.zeros((1, colors))
        for col in range(colors):
            normalized[0][col] = ((255 / self.num_pixels)
                                  * np.sum(histogram[:, col]))
        return normalized

    def RGB_to_HSL(self, image):
        """Convert an RGB image (0-255) to HSL (H in degrees, S and L in 0..1)."""
        hsl = np.zeros(image.shape, 'float64')
        for row in range(len(image)):
            for col in range(len(image[row])):
                R = image[row][col][0] / 255
                G = image[row][col][1] / 255
                B = image[row][col][2] / 255
                MIN = min(R, G, B)
                MAX = max(R, G, B)
                L = (MAX + MIN) / 2
                # Denominator 1 - |2L-1| is only zero when MAX == MIN,
                # which the guard already handles.
                if (MAX - MIN) == 0:
                    S = 0
                else:
                    S = (MAX - MIN) / (1 - abs(2 * L - 1))
                # Hue by dominant channel, in degrees [0, 360).
                if MAX == MIN:
                    H = 0
                elif MAX == R and G >= B:
                    H = 60 * ((G - B) / (MAX - MIN))
                elif MAX == R and G < B:
                    H = 60 * ((G - B) / (MAX - MIN)) + 360
                elif MAX == G:
                    H = 60 * ((B - R) / (MAX - MIN)) + 120
                elif MAX == B:
                    H = 60 * ((R - G) / (MAX - MIN)) + 240
                hsl[row][col] = np.array([H, S, L])
        return hsl

    def HSL_to_RGB(self, image):
        """Convert an HSL image back to uint8 RGB.

        Fixes vs. the original:
        * chroma was computed as 1 - |2L-1|*S instead of (1 - |2L-1|) * S,
          breaking the round trip for any saturated color;
        * the output buffer was hard-coded to (600, 800, 3) instead of
          matching the input's shape;
        * the second hue branch tested H >= 0 where H >= 60 was meant
          (harmless under elif, fixed for clarity).
        """
        rgb = np.zeros(image.shape, 'uint8')
        for row in range(len(image)):
            for col in range(len(image[row])):
                H = image[row][col][0]
                S = image[row][col][1]
                L = image[row][col][2]
                C = (1 - abs(2 * L - 1)) * S
                X = C * (1 - abs((H / 60) % 2 - 1))
                m = L - (C / 2)
                Rp, Gp, Bp = 0, 0, 0
                if 0 <= H < 60:
                    Rp, Gp, Bp = C, X, 0
                elif 60 <= H < 120:
                    Rp, Gp, Bp = X, C, 0
                elif 120 <= H < 180:
                    Rp, Gp, Bp = 0, C, X
                elif 180 <= H < 240:
                    Rp, Gp, Bp = 0, X, C
                elif 240 <= H < 300:
                    Rp, Gp, Bp = X, 0, C
                elif 300 <= H < 360:
                    Rp, Gp, Bp = C, 0, X
                R = (Rp + m) * 255
                G = (Gp + m) * 255
                B = (Bp + m) * 255
                rgb[row, col] = np.array([R, G, B])
        return rgb

    def contrast_adjustment(self):
        # Not yet implemented.
        pass

    def noise_reduction(self):
        # Not yet implemented.
        pass

    def color_correction(self):
        # Not yet implemented.
        pass
|
#!/usr/bin/env python
# Author:tjy
# Read the username/password file and print it: first the raw list of lines,
# then each line individually.
# Fixes: use a context manager so the file is always closed (the original
# never closed it), don't shadow the 'file' builtin, and repair the shebang
# (it was missing the '!').
with open("username_passwd.txt", 'r') as f:
    lines = f.readlines()
print(lines)
for line in lines:
    print(line)
|
#!/usr/bin/env python2
"""Parse and download images from an Imgur gallery.
The creation of this script was inspired by Methos25's rice [1]. I loved the
wallpaper he was using, and looking on the Imgur gallery [2] he told us he got
the image, I had the idea to create a script to download all images from there
and randomly pick one to set as wallpaper.
Well, this script does the first part, following the KISS philosophy. The second
part will be done later on.
Thanks to:
- Baba_Puh, for pointing me out the correct spell of Imgur :p
- ecterovachor, for suggesting using the python version on the bang line
[1] https://www.reddit.com/r/unixporn/comments/4mtww2/i3gaps_a_little_materialistic/ # noqa
[2] https://imgur.com/gallery/SwhjO
"""
import os
import re
import sys
from urllib2 import urlopen
from HTMLParser import HTMLParser
from argparse import ArgumentParser
__author__ = "Rodrigo Oliveira"
__version__ = "1.0.0"
__maintainer__ = "Rodrigo Oliveira"
__email__ = "rodrigo@deadbodyoutline.com"
class Arguments():
    """Parsed command-line options: url, path, limit, log."""
    url = None
    path = None

    def __init__(self):
        self._parse_args()

    def _parse_args(self):
        # Declare the CLI surface, then copy the parsed values onto self.
        arg_parser = ArgumentParser()
        arg_parser.add_argument("url",
                                type=str,
                                metavar='N',
                                nargs=1,
                                help="Imgur gallery URL")
        arg_parser.add_argument("--path",
                                "-p",
                                type=str,
                                default=os.getcwd(),
                                help="Path to save images (default: current)")
        arg_parser.add_argument("--limit",
                                "-l",
                                type=int,
                                default=0,
                                help="Limit the number of images to download\
(default: 0, all images)")
        arg_parser.add_argument("--print-to-file",
                                "-f",
                                action="store_true",
                                default=False,
                                help="Log messages to '~/.imgurme.log'. Note that\
this file will be replaced at each run\
(default: false)")
        parsed = arg_parser.parse_args()
        self.url = parsed.url[0].strip()
        self.path = parsed.path.strip()
        self.limit = parsed.limit
        self.log = parsed.print_to_file
class ImgurParser(HTMLParser):
    """HTML parser that collects image links from an Imgur gallery page."""

    def __init__(self):
        # disclaimer: HTMLParser is old style class on Python 2, do not
        # support 'super' initialization style :/
        HTMLParser.__init__(self)
        self._images = []
        self._scheme = None

    def _validate_url(self, url):
        """Return a match object when *url* looks like an Imgur address.

        Fixes: the original used a ur'' literal (a syntax error on
        Python 3 -- plain r'' works on both 2 and 3) and left the dot in
        'imgur.com' unescaped, so it matched any character.
        """
        regex = r'http[s]?://(?:w{3}\.)?imgur\.com/(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))*'
        return re.match(regex, url)

    def _validate_image(self, image):
        """Check if parsed image name has one of the supported extensions

        Currently supporting JPG and PNG. Do Imgur support any other?
        :param image: the image string to validate
        """
        # Dots escaped: the original '.png' matched e.g. 'xpng' as well.
        regex = r'.*(?:\.png|\.jpg)$'
        return re.match(regex, image)

    def parse(self, url):
        """Fetch *url*, unescape it and feed it to the tag handler."""
        p = PrintOut.instance()
        if not self._validate_url(url):
            p.out("URL '%s' doesn't seems to be a valid Imgur address" % url)
            return
        self._scheme = url.split(":")[0]
        p.out("Parsing %s" % url)
        try:
            f = urlopen(url)
            html = f.read()
            f.close()
        except Exception as e:
            p.out(">> Error; is it a valid URL? (%s)" % e)
            return
        try:
            # NOTE(review): HTMLParser.unescape was removed in Python 3.9;
            # this call only works on older interpreters.
            html = self.unescape(html)
        except Exception as e:
            p.out(">> Error: %s" % e)
            return
        try:
            self.feed(html)
        except Exception as e:
            p.out(">> Error: %s" % e)
            return
        p.out("Found %s images" % len(self._images), 4)

    def handle_starttag(self, tag, attrs):
        # Collect every <a href=...> that points at a supported image.
        if tag == "a":
            for attr, value in attrs:
                if attr == "href" and self._validate_image(value):
                    self._images.append("%s:%s" % (self._scheme, value))

    def images(self):
        """Return the list of image URLs collected so far."""
        return self._images
class Downloader():
    """Downloads a list of image URLs into <path>/<name>/."""

    def __init__(self, path, name):
        self._path = path if path != str() else "./"
        self._name = name
        # BUG FIX: was os.path.join(path, dir_name) -- 'dir_name' is a module
        # global that only exists when the script runs as __main__; the
        # constructor parameter is 'name'. Also join on the normalized
        # self._path so an empty path still lands under "./".
        self._download_path = os.path.join(self._path, name)

    def download(self, images=[], limit=0):
        """Download *images* (optionally at most *limit*) into the target dir.

        Skips files that already exist; logs progress via PrintOut.
        """
        p = PrintOut.instance()
        if not images:
            return
        if not os.path.exists(self._path):
            p.out("> Path '%s' doesn't exist" % self._path)
            return
        if not os.path.exists(self._download_path):
            p.out("Creating %s/ on %s" % (self._name, self._path))
            try:
                os.mkdir(self._download_path)
            except Exception as e:
                p.out(">> Error: %s" % e)
                return
        p.out("Downloading images to %s/" % self._download_path)
        if limit > 0:
            images = images[:limit]
            p.out("(limiting download to %s images)" % limit, 4)
        num_images = len(images)
        for index, image in enumerate(images):
            image_file = image.split('/')[-1]
            p.out("(%s/%s) %s..." % (index + 1, num_images, image_file), 4)
            image_file = os.path.join(self._download_path, image_file)
            if os.path.isfile(image_file):
                p.out("File already exists, skipping...", 8)
                continue
            try:
                f = open(image_file, 'wb')
                f.write(urlopen(image).read())
                f.close()
            except Exception as e:
                p.out(">> Error: %s" % e)
                return
class PrintOut(object):
    """Singleton writer that prints either to stdout or to a log file."""
    _instance = None
    _where = sys.stdout

    def __del__(self):
        # Close the log file if we opened one (never close the real stdout).
        # Replaces the Python-2-only 'type(...) is file' check.
        if self._where is not sys.stdout:
            try:
                self._where.close()
            except Exception:
                pass

    @classmethod
    def instance(cls):
        """Return the process-wide singleton, creating it on first use."""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def set_output(self, file=None):
        """Redirect output to *file*, or back to stdout when file is None."""
        if file is None:
            # BUG FIX: was 'self._where == sys.stdout', a comparison with no
            # effect; an assignment is clearly what was intended.
            self._where = sys.stdout
            return
        self._where = open(file, 'w+')

    def out(self, str, indent=0):
        """Write *str* right-padded by *indent* spaces, then a newline.

        Uses write() instead of the Python-2-only 'print >>' statement.
        (The parameter name shadows the builtin; kept for interface
        compatibility.)
        """
        self._where.write(str.rjust(len(str) + indent) + "\n")
        if self._where is not sys.stdout:
            self._where.flush()
if __name__ == '__main__':
    # Parse CLI options, optionally redirect logging to a file, then parse
    # the gallery page and download every image found.
    arguments = Arguments()
    # NOTE(review): the --print-to-file help text says '~/.imgurme.log' but
    # this writes 'imgurme.log' (no leading dot) -- doc/code mismatch.
    log_file = os.path.join(os.path.expanduser('~'), 'imgurme.log')
    p = PrintOut.instance()
    p.set_output(log_file if arguments.log else None)
    url = arguments.url
    parser = ImgurParser()
    parser.parse(url)
    # The last path component of the gallery URL names the download dir.
    dir_name = url.split('/')[-1]
    downloader = Downloader(arguments.path, dir_name)
    downloader.download(parser.images(), arguments.limit)
|
# Minimal Tk demo: three buttons packed with different anchor positions.
import tkinter as tk

root = tk.Tk()
captions = ("Hello", "Python", "Language")
placements = (tk.E, tk.W, tk.S)
for anchor, caption in zip(placements, captions):
    tk.Button(root, text=caption).pack(anchor=anchor)
root.mainloop()
|
def wrapper(f):
    """Decorator: reformat every number in the list to '+91 XXXXX XXXXX'
    (last ten digits) before handing the list to the wrapped function."""
    def fun(l):
        formatted = []
        for number in l:
            formatted.append('+91 ' + number[-10:-5] + ' ' + number[-5:])
        f(formatted)
    return fun
|
# @Title: 验证栈序列 (Validate Stack Sequences)
# @Author: 2464512446@qq.com
# @Date: 2020-06-18 17:11:12
# @Runtime: 112 ms
# @Memory: 13.4 MB


class Solution:
    def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
        """Return True if *popped* could be the pop order of *pushed* on one stack.

        Greedy simulation: push each value, then pop while the stack top
        equals the next expected popped value. All values popped == valid.
        """
        stack = []
        pop_idx = 0
        for value in pushed:
            stack.append(value)
            while stack and stack[-1] == popped[pop_idx]:
                stack.pop()
                pop_idx += 1
        return len(stack) == 0
|
from flask import Response
from tests.test_client import flask_app
from pypi_org.views import home_views
def test_int_homepage(client):
    """Integration: GET / through the test client serves the landing page."""
    resp: Response = client.get('/')
    assert resp.status_code == 200
    assert b'Find, install and publish Python packages' in resp.data
def test_v_homepage_directly():
    """View-level: invoke the index view inside a faked '/' request context."""
    with flask_app.test_request_context(path='/'):
        resp: Response = home_views.index()
        assert resp.status_code == 200
        # noinspection PyUnresolvedReferences
        assert len(resp.model.get('releases')) > 0
|
def num_teachers(value):
    """Return the number of teachers (top-level keys) in *value*.

    Fix: the original printed the count and returned None, unlike every
    sibling helper here (num_courses, courses, most_courses), which all
    return their result; made consistent. Dead debug comments removed.
    """
    return len(value)
def num_courses(value):
    """Return the total number of courses across all teachers."""
    return sum(len(course_list) for course_list in value.values())
def courses(value):
    """Return a flat list of every course from every teacher, in dict order."""
    all_courses = []
    for teacher_courses in value.values():
        all_courses += teacher_courses
    return all_courses
def most_courses(value):
    """Return the teacher with the most courses ('' for an empty dict).

    Ties keep the first teacher encountered (strictly-greater comparison).
    """
    best_teacher = ''
    best_count = 0
    for teacher, course_list in value.items():
        if len(course_list) > best_count:
            best_count = len(course_list)
            best_teacher = teacher
    return best_teacher
def stats(value):
    """Print a list of [teacher, course_count] pairs, in dict order."""
    summary = [[teacher, len(course_list)]
               for teacher, course_list in value.items()]
    print(summary)
stats({'Andrew Chalkley': ['jQuery Basics', 'Node.js Basics'], 'Kenneth Love': ['Python Basics', 'Python Collections']}) |
import argparse
import pickle
import collections
import logging
import math,copy
import os,sys,time
import errno
import random
from sys import maxsize
import pickle
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.autograd as autograd
import torch.optim as optim
import torch.nn.functional as F
import fastNLP
from fastNLP.modules.encoder.transformer import TransformerEncoder
from fastNLP.modules.decoder.crf import ConditionalRandomField
from fastNLP import Const
from fastNLP import DataSetIter
def clones(module, N):
    """Return a ModuleList holding N independent deep copies of *module*."""
    copies = [copy.deepcopy(module) for _ in range(N)]
    return nn.ModuleList(copies)
def subsequent_mask(size):
    """Boolean (1, size, size) mask, True at and below the diagonal."""
    upper_triangle = np.triu(np.ones((1, size, size)), k=1).astype('uint8')
    return torch.from_numpy(upper_triangle) == 0
def attention(query, key, value, mask=None, dropout=None):
    """Scaled dot-product attention; returns (output, attention weights)."""
    d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        # Positions where mask == 0 are pushed to -inf before the softmax.
        scores = scores.masked_fill(mask == 0, -1e9)
    weights = F.softmax(scores, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    return torch.matmul(weights, value), weights
class MultiHeadedAttention(nn.Module):
    """Multi-head attention: h parallel scaled-dot-product heads over d_model."""

    def __init__(self, h, d_model, dropout=0.1):
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        # d_v is assumed to equal d_k.
        self.d_k = d_model // h
        self.h = h
        # Three input projections (q, k, v) plus one output projection.
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attn = None
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        if mask is not None:
            # Broadcast the same mask across every head.
            mask = mask.unsqueeze(1)
        batch = query.size(0)

        def project(linear, x):
            # (batch, seq, d_model) -> (batch, h, seq, d_k)
            return linear(x).view(batch, -1, self.h, self.d_k).transpose(1, 2)

        q = project(self.linears[0], query)
        k = project(self.linears[1], key)
        v = project(self.linears[2], value)

        # Attend in every head at once, remembering the weights.
        x, self.attn = attention(q, k, v, mask=mask, dropout=self.dropout)

        # Merge heads back to (batch, seq, d_model) and apply the output proj.
        merged = x.transpose(1, 2).contiguous().view(batch, -1, self.h * self.d_k)
        return self.linears[-1](merged)
class LayerNorm(nn.Module):
    """Layer normalization over the last dim with learnable gain and bias."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return self.a_2 * normalized + self.b_2
class PositionwiseFeedForward(nn.Module):
    """Position-wise FFN: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = F.relu(self.w_1(x))
        return self.w_2(self.dropout(hidden))
class SublayerConnection(nn.Module):
    """Pre-norm residual wrapper: x + dropout(sublayer(norm(x))).

    Note the norm is applied first, not last, for code simplicity.
    """

    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        """Apply a residual connection to any same-sized sublayer."""
        residual = self.dropout(sublayer(self.norm(x)))
        return x + residual
class EncoderLayer(nn.Module):
    """One encoder block: self-attention then FFN, each residual-wrapped."""

    def __init__(self, size, self_attn, feed_forward, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.sublayer = clones(SublayerConnection(size, dropout), 2)
        self.size = size

    def forward(self, x, mask):
        def attend(y):
            return self.self_attn(y, y, y, mask)
        x = self.sublayer[0](x, attend)
        return self.sublayer[1](x, self.feed_forward)
class Encoder(nn.Module):
    """A stack of N identical encoder layers followed by a final LayerNorm."""

    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, mask):
        """Pass the input (and mask) through each layer in turn."""
        # Reshape the (batch, len) mask to (batch, 1, len) for attention.
        mask = mask.byte().unsqueeze(-2)
        for layer in self.layers:
            x = layer(x, mask)
        return self.norm(x)
def make_encoder(N=6, d_model=512, d_ff=2048, h=8, dropout=0.1):
    """Build an N-layer transformer encoder with fresh attention/FFN copies."""
    attn = MultiHeadedAttention(h, d_model)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    layer = EncoderLayer(d_model, copy.deepcopy(attn), copy.deepcopy(ff), dropout)
    return Encoder(layer, N)
class NoamOpt:
    """Optimizer wrapper implementing the Noam learning-rate schedule."""

    def __init__(self, model_size, factor, warmup, optimizer):
        self.optimizer = optimizer
        self._step = 0
        self.warmup = warmup
        self.factor = factor
        self.model_size = model_size
        self._rate = 0

    def step(self):
        """Advance one step: refresh the LR on every param group, then step."""
        self._step += 1
        current = self.rate()
        for group in self.optimizer.param_groups:
            group['lr'] = current
        self._rate = current
        self.optimizer.step()

    def rate(self, step=None):
        """Noam LR: factor * d_model^-0.5 * min(step^-0.5, step*warmup^-1.5)."""
        if step is None:
            step = self._step
        warm = min(step ** (-0.5), step * self.warmup ** (-1.5))
        return self.factor * (self.model_size ** (-0.5) * warm)
def get_std_opt(model):
    """Standard NoamOpt: factor 2, 4000 warmup steps, Adam over trainable params."""
    trainable = filter(lambda p: p.requires_grad, model.parameters())
    adam = torch.optim.Adam(trainable, lr=0, betas=(0.9, 0.98), eps=1e-9)
    return NoamOpt(model.src_embed[0].d_model, 2, 4000, adam)
def make_sure_path_exists(path):
    """Create *path* (including parents) if missing; an empty path is a no-op.

    Raises OSError for failures other than the directory already existing.
    Uses exist_ok=True instead of the manual EEXIST check; unlike the old
    code, this still raises if *path* exists but is not a directory, which
    is the safer behavior.
    """
    if len(path) == 0:
        return
    os.makedirs(path, exist_ok=True)
def is_dataset_tag(word):
    """True for dataset markers like '<pku>': angle-bracketed, length > 2."""
    return len(word) > 2 and word.startswith('<') and word.endswith('>')
def to_tag_strings(i2ts, tag_mapping, pos_separate_col=True):
    """Map each tag id in *tag_mapping* to its string via *i2ts*.

    *pos_separate_col* is accepted for interface compatibility but unused.
    """
    return [i2ts[tag_id] for tag_id in tag_mapping]
def bmes_to_words(chars, tags):
    """Join characters into words according to BMES-style tags.

    A 'B' or 'S' tag (case-insensitive) starts a new word; every other tag
    extends the current one.
    """
    words = []
    if not chars:
        return words
    current = chars[0]
    for ch, tag in zip(chars[1:], tags[1:]):
        if tag.upper() in ('B', 'S'):
            words.append(current)
            current = ''
        current += ch
    if current:
        words.append(current)
    return words
class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal position encodings, then apply dropout."""

    def __init__(self, d_model, dropout, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Precompute the encodings once; frequencies derived in log space
        # for numerical stability.
        positions = torch.arange(0, max_len).unsqueeze(1).float()
        freqs = torch.exp(torch.arange(0, d_model, 2).float()
                          * -(math.log(10000.0) / d_model))
        pe = torch.zeros(max_len, d_model).float()
        pe[:, 0::2] = torch.sin(positions * freqs)
        pe[:, 1::2] = torch.cos(positions * freqs)
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)
        return self.dropout(x)
class Embedding(nn.Module):
    """Task + unigram (+ optional bigram) embeddings projected to d_model.

    forward() emits [task embedding] + [character embeddings], scaled by
    sqrt(d_model) as in the transformer.
    """

    def __init__(self, task_size, d_model, word_embedding=None, bi_embedding=None,
                 word_size=None, freeze=True):
        super(Embedding, self).__init__()
        self.task_size = task_size
        self.embed_dim = 0
        self.task_embed = nn.Embedding(task_size, d_model)
        if word_embedding is not None:
            self.uni_embed = nn.Embedding.from_pretrained(
                torch.FloatTensor(word_embedding), freeze=freeze)
            self.embed_dim += word_embedding.shape[1]
        else:
            # BUG FIX: this previously tested the module-level global
            # 'bigram_embedding' instead of the 'bi_embedding' parameter --
            # it only worked by accident when run as the training script.
            if bi_embedding is not None:
                self.embed_dim += bi_embedding.shape[1]
            else:
                self.embed_dim = d_model
            assert word_size is not None
            self.uni_embed = nn.Embedding(word_size, self.embed_dim)
        if bi_embedding is not None:
            self.bi_embed = nn.Embedding.from_pretrained(
                torch.FloatTensor(bi_embedding), freeze=freeze)
            # Each position concatenates two bigram vectors (left and right).
            self.embed_dim += bi_embedding.shape[1] * 2
        print("Trans Freeze", freeze, self.embed_dim)
        if d_model != self.embed_dim:
            self.F = nn.Linear(self.embed_dim, d_model)
        else:
            self.F = None
        self.d_model = d_model

    def forward(self, task, uni, bi1=None, bi2=None):
        """Embed a batch: task ids at position 0, character ids from 1 on."""
        y_task = self.task_embed(task[:, 0:1])
        y = self.uni_embed(uni[:, 1:])
        if bi1 is not None:
            assert self.bi_embed is not None
            y = torch.cat([y, self.bi_embed(bi1), self.bi_embed(bi2)], dim=-1)
        if self.F is not None:
            y = self.F(y)
        y = torch.cat([y_task, y], dim=1)
        return y * math.sqrt(self.d_model)
def seq_len_to_mask(seq_len, max_len=None):
    """Convert 1-D lengths to a (batch, max_len) boolean padding mask.

    Accepts a 1-D numpy array or torch tensor; position j is True when
    j < seq_len[i]. max_len defaults to the largest length.
    """
    if isinstance(seq_len, np.ndarray):
        assert len(np.shape(seq_len)) == 1, f"seq_len can only have one dimension, got {len(np.shape(seq_len))}."
        if max_len is None:
            max_len = int(seq_len.max())
        positions = np.tile(np.arange(max_len), (len(seq_len), 1))
        return positions < seq_len.reshape(-1, 1)
    if isinstance(seq_len, torch.Tensor):
        assert seq_len.dim() == 1, f"seq_len can only have one dimension, got {seq_len.dim() == 1}."
        if max_len is None:
            max_len = seq_len.max().long()
        positions = torch.arange(max_len).expand(seq_len.size(0), -1).to(seq_len)
        return positions.lt(seq_len.unsqueeze(1))
    raise TypeError("Only support 1-d numpy.ndarray or 1-d torch.Tensor.")
class CWSModel(nn.Module):
    """Chinese word segmentation model: embeddings -> positional encoding ->
    transformer encoder -> linear tag projection, decoded either with a CRF
    or with per-position cross-entropy + argmax.

    *crf*, when given, is an id->tag dict used to build the allowed BMES
    transition table for fastNLP's ConditionalRandomField.
    """

    def __init__(self, encoder, src_embed, position, d_model, tag_size, crf=None):
        super(CWSModel, self).__init__()
        self.encoder = encoder
        self.src_embed = src_embed
        self.pos = copy.deepcopy(position)
        self.proj = nn.Linear(d_model, tag_size)
        self.tag_size = tag_size
        if crf is None:
            self.crf = None
            # NOTE(review): size_average=False is deprecated in modern
            # torch (use reduction='sum'); kept byte-identical here.
            self.loss_f = nn.CrossEntropyLoss(size_average=False)
        else:
            print("crf")
            trans = fastNLP.modules.decoder.crf.allowed_transitions(crf, encoding_type='bmes')
            self.crf = ConditionalRandomField(tag_size, allowed_transitions=trans)

    def forward(self, task, uni, seq_len, bi1=None, bi2=None, tags=None):
        """Return {"loss": ...} when *tags* are given, else {"pred": ...}.

        *seq_len* masks padding; bi1/bi2 are optional bigram feature ids.
        """
        mask = seq_len_to_mask(seq_len, uni.size(1))
        out = self.src_embed(task, uni, bi1, bi2)
        out = self.pos(out)
        out = self.proj(self.encoder(out, mask.float()))
        if self.crf is not None:
            if tags is not None:
                out = self.crf(out, tags, mask)
                return {"loss": out}
            else:
                out, _ = self.crf.viterbi_decode(out, mask)
                return {"pred": out}
        else:
            if tags is not None:
                num = out.size(0)
                # Select only unmasked positions before the cross-entropy;
                # loss is averaged over the batch dimension.
                loss = self.loss_f(torch.masked_select(out, mask.unsqueeze(-1).expand_as(out)).contiguous().view(-1, self.tag_size), torch.masked_select(tags, mask))
                return {"loss": loss / num}
            else:
                out = torch.argmax(out, dim=-1)
                return {"pred": out}
def make_CWS(N=6, d_model=256, d_ff=1024, h=4, dropout=0.2, tag_size=4, task_size=8,
             bigram_embedding=None, word_embedding=None, word_size=None, crf=None,
             freeze=True):
    """Assemble the full CWS model: embeddings + positional encoding +
    transformer encoder (+ optional CRF head), then Xavier-initialize every
    trainable parameter with more than one dimension.
    """
    encoder = make_encoder(N=N, d_model=d_model, h=h, dropout=dropout, d_ff=d_ff)
    position = PositionalEncoding(d_model, dropout)
    embed = Embedding(task_size, d_model, word_embedding, bigram_embedding, word_size, freeze)
    model = CWSModel(encoder, embed, position, d_model, tag_size, crf=crf)
    for name, p in model.named_parameters():
        if p.dim() > 1 and p.requires_grad:
            # Fix: nn.init.xavier_uniform is deprecated; use the in-place
            # variant xavier_uniform_ (same initialization).
            nn.init.xavier_uniform_(p)
    return model
# Special-tag constants used by the vocab / tagging pipeline.
NONE_TAG = "<NONE>"
START_TAG = "<sos>"
END_TAG = "<eos>"
DEFAULT_WORD_EMBEDDING_SIZE = 100

# ===-----------------------------------------------------------------------===
# Argument parsing
# ===-----------------------------------------------------------------------===
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", required=True, dest="dataset", help=".pkl file to use")
parser.add_argument("--word-embeddings", dest="word_embeddings", help="File from which to read in pretrained embeds")
parser.add_argument("--bigram-embeddings", dest="bigram_embeddings", help="File from which to read in pretrained embeds")
parser.add_argument("--crf", dest="crf", action="store_true", help="whether to use CRF")
parser.add_argument("--devi", default="0", dest="devi", help="gpu to use")
parser.add_argument("--step", default=0, dest="step", type=int, help="step")
parser.add_argument("--num-epochs", default=80, dest="num_epochs", type=int,
                    help="Number of epochs through training set")
parser.add_argument("--flex", default=-1, dest="flex", type=int,
                    help="Number of epochs through training set after freezing the pretrained embeddings")
parser.add_argument("--batch-size", default=256, dest="batch_size", type=int,
                    help="Minibatch size of training set")
parser.add_argument("--d_model", default=256, dest="d_model", type=int, help="d_model of transformer encoder")
parser.add_argument("--d_ff", default=1024, dest="d_ff", type=int, help="d_ff for FFN")
parser.add_argument("--N", default=6, dest="N", type=int, help="Number of layers")
parser.add_argument("--h", default=4, dest="h", type=int, help="Number of head")
parser.add_argument("--factor", default=2, dest="factor", type=float, help="factor for learning rate")
parser.add_argument("--dropout", default=0.2, dest="dropout", type=float,
                    help="Amount of dropout(not keep rate, but drop rate) to apply to embeddings part of graph")
parser.add_argument("--log-dir", default="result", dest="log_dir",
                    help="Directory where to write logs / saved models")
parser.add_argument("--task-name", default=time.strftime("%Y-%m-%d-%H-%M-%S"), dest="task_name",
                    help="Name for this task, use a comprehensive one")
parser.add_argument("--no-model", dest="no_model", action="store_true", help="Don't save model")
parser.add_argument("--always-model", dest="always_model", action="store_true",
                    help="Always save the model after every epoch")
parser.add_argument("--old-model", dest="old_model", help="Path to old model for incremental training")
parser.add_argument("--freeze", dest="freeze", action="store_true", help="freeze pretrained embeddings")
parser.add_argument("--python-seed", dest="python_seed", type=int, default=random.randrange(maxsize),
                    help="Random seed of Python and NumPy")
parser.add_argument("--test", dest="test", action="store_true", help="Test mode")
options = parser.parse_args()
task_name = options.task_name
# All logs and saved artifacts for this run live under <log_dir>/<task_name>.
root_dir = "{}/{}".format(options.log_dir, task_name)
make_sure_path_exists(root_dir)

# NOTE(review): each character of --devi is parsed as one GPU id, so
# multi-digit device ids (e.g. "10") are not representable here.
devices = [int(x) for x in options.devi]
device = torch.device("cuda:{}".format(devices[0]))
def init_logger():
    """Configure the root logger to write to <root_dir>/info.log and stderr.

    Relies on the module-level *root_dir*; creates it if missing.
    """
    if not os.path.exists(root_dir):
        os.mkdir(root_dir)
    formatter = logging.Formatter("%(message)s")
    logger = logging.getLogger()
    # File handler first (truncating any previous log), then the console.
    for handler in (logging.FileHandler("{0}/info.log".format(root_dir), mode='w'),
                    logging.StreamHandler()):
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger
# ===-----------------------------------------------------------------------===
# Set up logging
# ===-----------------------------------------------------------------------===
logger = init_logger()

# Log some stuff about this run
logger.info(' '.join(sys.argv))
logger.info('')
logger.info(options)

# Seed Python and NumPy for reproducibility (NumPy seeds must fit 32 bits).
random.seed(options.python_seed)
np.random.seed(options.python_seed % (2 ** 32 - 1))
logger.info('Python random seed: {}'.format(options.python_seed))

# ===-----------------------------------------------------------------------===
# Read in dataset
# ===-----------------------------------------------------------------------===
# The pickle is expected to hold the test split plus unigram/bigram vocabs.
dataset = pickle.load(open(options.dataset, "rb"))
test_set = dataset["test_set"]
uni_vocab = dataset["uni_vocab"]
bi_vocab = dataset["bi_vocab"]
print(len(test_set))
# ===-----------------------------------------------------------------------===
# Build model and trainer
# ===-----------------------------------------------------------------------===
if options.word_embeddings is None:
    init_embedding = None
else:
    print("Load:", options.word_embeddings)
    init_embedding = fastNLP.io.embed_loader.EmbedLoader.load_with_vocab(options.word_embeddings, uni_vocab, normalize=False)

bigram_embedding = None
if options.bigram_embeddings:
    if options.bigram_embeddings == 'merged':
        # Synthesize bigram vectors as the mean of the two unigram vectors;
        # unknown unigrams fall back to the <pad> vector. Tokens carrying a
        # dataset tag on either side split around the tag.
        logging.info('calculate bigram embeddings from unigram embeddings')
        bigram_embedding = np.random.randn(len(bi_vocab), init_embedding.shape[-1]).astype('float32')
        for token, i in bi_vocab:
            if token.startswith('<') and token.endswith('>'): continue
            if token.endswith('>'):
                x, y = uni_vocab[token[0]], uni_vocab[token[1:]]
            else:
                x, y = uni_vocab[token[:-1]], uni_vocab[token[-1]]
            if x == uni_vocab['<unk>']:
                x = uni_vocab['<pad>']
            if y == uni_vocab['<unk>']:
                y = uni_vocab['<pad>']
            bigram_embedding[i] = (init_embedding[x] + init_embedding[y]) / 2
    else:
        print("Load:", options.bigram_embeddings)
        bigram_embedding = fastNLP.io.embed_loader.EmbedLoader.load_with_vocab(options.bigram_embeddings, bi_vocab, normalize=False)

# build model and optimizer
i2t = {0: 's', 1: 'b', 2: 'e', 3: 'm'}
# NOTE(review): crf=i2t is passed to make_CWS unconditionally, so the CRF
# head is built regardless of the --crf flag (which only controls this print).
if options.crf:
    print("use crf:", i2t)
freeze = True if options.freeze else False
model = make_CWS(d_model=options.d_model, N=options.N, h=options.h, d_ff=options.d_ff, dropout=options.dropout, word_embedding=init_embedding, bigram_embedding=bigram_embedding, tag_size=4, task_size=8, crf=i2t, freeze=freeze)

if True:
    print("multi:", devices)
    model = nn.DataParallel(model, device_ids=devices)

model = model.to(device)
optimizer = NoamOpt(options.d_model, options.factor, 4000,
                    torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
# Resume the schedule at the requested step for incremental training.
optimizer._step = options.step

# i2t is rebound here from dict to list; indexing works the same way.
i2t = ['s', 'b', 'e', 'm']
i2task = ['<as>', '<msr>', '<pku>', '<ncc>', '<cityu>', '<ckip>', '<ctb>', '<sxu>']
test_set.set_input("ori_words")
word_dic = pickle.load(open("dict.pkl", "rb"))
def tester(model,test_batch,write_out=True):
    """Run the segmenter over `test_batch`.

    Returns (res, split_or_not, context_list):
      res          -- space-joined segmented sentences (filled when write_out)
      split_or_not -- per sentence, 0/1 flags between adjacent characters
                      (1 = word boundary after that character)
      context_list -- the raw character sequences (dataset tag stripped)
    """
    res=[]
    split_or_not=[]
    context_list=[]
    model.eval()
    for batch_x in test_batch:
        batch_x=batch_x[0]
        with torch.no_grad():
            if bigram_embedding is not None:
                out=model(batch_x["task"],batch_x["uni"],batch_x["seq_len"],batch_x["bi1"],batch_x["bi2"])
            else:
                out = model(batch_x["task"],batch_x["uni"],batch_x["seq_len"])
        out=out["pred"]
        num=out.size(0)
        out=out.detach().cpu().numpy()
        for i in range(num):
            length=int(batch_x["seq_len"][i])
            # position 0 is the dataset/task token, so tags start at 1
            out_tags=out[i,1:length].tolist()
            sentence = batch_x["ori_words"][i]
            dataset_name = sentence[0]
            sentence=sentence[1:]
            context_list.append(sentence)
            assert is_dataset_tag(dataset_name)
            assert len(out_tags)==len(sentence)
            if write_out==True:
                obs_strings = to_tag_strings(i2t, out_tags)
                word_list = bmes_to_words(sentence, obs_strings)
                # boundary flags: len(word)-1 zeros inside a word, then a 1
                s_list=[]
                for i in word_list:
                    s_list.extend((len(i)-1)*[0])
                    s_list.append(1)
                split_or_not.append(s_list[:-1])
                raw_string=' '.join(word_list)
                res.append(raw_string)
    return res,split_or_not,context_list
# Restore the trained multi-criteria model (weights saved against cuda:0).
model.load_state_dict(torch.load(options.old_model,map_location="cuda:0"))
# Dump the task-embedding matrix for later inspection.
for name,para in model.named_parameters():
    if name.find("task_embed")!=-1:
        tm=para.detach().cpu().numpy()
        print(tm.shape)
        np.save("{}/task.npy".format(root_dir),tm)
        break
test_batch=DataSetIter(test_set,options.batch_size)
res,split_or_not,context_list=tester(model,test_batch,True)
to_remain = []  # candidates judged to be real words
to_del = []     # candidates rejected by the split statistics
# candidate word -> list of indices of sentences containing it
candis=np.load('sentenceNum.npy',allow_pickle=True).item()
split_info = {}
for candi in candis.keys():
    sen_nums = candis[candi]
    # Count how often the segmenter placed a boundary at the candidate's
    # left edge, inside it, and at its right edge across its sentences.
    count_left, count_mid, count_right = 0,0,0
    count_total=len(sen_nums)
    for sen_num in sen_nums:
        context = context_list[sen_num]
        split_value = split_or_not[sen_num]
        index=context.find(candi)
        assert index != -1
        count_left += 1 if index==0 else split_value[index-1]
        count_right += 1 if index+len(candi)==len(context) else split_value[index-1+len(candi)]
        # NOTE(review): internal window uses len(candi)-2 — check off-by-one.
        count_mid += 1 if 1 in split_value[index:index+len(candi)-2] else 0
    split_left = count_left / count_total
    split_mid = count_mid / count_total
    split_right = count_right / count_total
    split_info[candi]=(count_left, count_mid, count_right, count_total, split_left, split_mid, split_right)
    ### Here the filter law: keep a candidate that is usually segmented as a
    ### unit (boundaries on both edges, rarely split internally).
    if split_left>0.8 and split_right>0.8 and split_mid<=0.5:
        to_remain.append(candi)
    else:
        to_del.append(candi)
# write split_info to a json file
j_word_split = json.dumps(split_info)
j_word_File = open('word_split.json','w',encoding='utf-8')
j_word_File.write(j_word_split)
j_word_File.close()
# to_remain = []
# to_del = []
# context_list = np.load('context.npy').tolist()
# candis=np.load('sentenceNum.npy',allow_pickle=True).item()
# for candi in candis.keys():
# start = candis[candi][0]
# end = candis[candi][1]
# contexts = context_list[start:end]
# split_values = split_or_not[start:end]
# count_left, count_mid, count_right = 0,0,0
# count_total=end-start
# for i in range(count_total):
# index=contexts[i].find(candi)
# assert index != -1
# count_left += 1 if index==0 else split_values[i][index-1]
# count_right += 1 if index+len(candi)==len(contexts[i]) else split_values[i][index-1+len(candi)]
# count_mid += 1 if 1 in split_values[index:index+len(candi)-2] else 0
# split_left = count_left / count_total
# split_mid = count_mid / count_total
# split_right = count_right / count_total
# if split_left>0.8 and split_right>0.8 and split_mid<=0.5:
# to_remain.append(candi)
# else:
# to_del.append(candi)
# Persist the kept / discarded candidate words.  Use context managers so the
# handles are closed and flushed even if a write fails — the original left
# Cremain and Cdel open for the remainder of the process.
with open('Cremain.txt', 'a', encoding='utf-8') as cremain_file:
    for w in to_remain:
        cremain_file.write(w + '\n')
logger.info('len(to_remain): {}'.format(len(to_remain)))
with open('Cdel.txt', 'a', encoding='utf-8') as cdel_file:
    for w in to_del:
        cdel_file.write(w + '\n')
logger.info('len(to_del): {}'.format(len(to_del)))
# Segmented test output, one sentence per line (utf-16 kept for downstream tools).
with open("{}/testout.txt".format(root_dir), 'w', encoding="utf-16") as raw_writer:
    for sent in res:
        raw_writer.write(sent)
        raw_writer.write('\n')
|
import msgpackrpc
import sys
import time
import logging
from functools import wraps
# Total attempts (initial call + retries) and the pause between attempts.
RETRY_COUNT = 3
RETRY_WAIT_TIME = 10
logging.basicConfig(level=logging.DEBUG)

def retry_call(preretry_method):
    """Decorator factory: retry the wrapped call up to RETRY_COUNT times.

    Between attempts it sleeps RETRY_WAIT_TIME seconds and invokes
    `preretry_method` (e.g. to rebuild a broken RPC client).  The final
    attempt runs outside the loop so its exception propagates to the caller.
    """
    def _retry_call(func):
        @wraps(func)
        def __retry_call(*args, **kwargs):
            cnt = RETRY_COUNT - 1
            while cnt > 0:
                try:
                    return func(*args, **kwargs)
                except Exception:
                    # Was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit; catch Exception only.
                    cnt -= 1
                    logging.debug("retry waiting...")
                    time.sleep(RETRY_WAIT_TIME)
                    logging.debug("do preretry_method")
                    preretry_method()
            # Last attempt: let any failure reach the caller.
            return func(*args, **kwargs)
        return __retry_call
    return _retry_call
def pre_retry_call():
    """Rebuild the global RPC client before a retry (connection may be dead)."""
    global client
    client = msgpackrpc.Client(msgpackrpc.Address("localhost", 18800))
@retry_call(pre_retry_call)
def proc():
    # Remote 'sum' call; retried with a fresh client on failure.
    # Python 2 print statement.
    result = client.call('sum', 1, 2, clientname)  # => 3
    print result
clientname="noname"
if len(sys.argv) > 1:
clientname=sys.argv[1]
client = msgpackrpc.Client(msgpackrpc.Address("localhost", 18800))
for i in xrange(100000):
proc()
|
import time
from multiprocessing import Process
from main.info import config
#from main.data_structure import vector as vector1
from main.data_structure import sparseVector as vector
# Cache of each user's mean rating, keyed by str(userid).
user_mean_matrix = {}

def init_user_mean_matrix(dao):
    """Fill user_mean_matrix for every user id 1..maxuserid via the DAO."""
    maxuserid = config.Config().configdict['dataset']['maxuserid']
    for i in range(1, maxuserid + 1):
        m = dao.get_user_rating_mean(i)
        user_mean_matrix[str(i)] = m
def to_sparse_vector(vec, vector_len):
    """Convert a list of (index, value) pairs into a SparseVector.

    Raises ValueError when the last (assumed largest) index does not fit in
    vector_len.  An empty `vec` yields an empty sparse vector — the original
    unconditionally indexed index_list[-1] and crashed (IndexError) on
    users/items with no ratings.
    """
    index_list = [i for (i, v) in vec]
    data_list = [v for (i, v) in vec]
    if index_list and index_list[-1] >= vector_len:
        raise ValueError("too short length:" + \
            str(vector_len) + " for index :" + str(index_list[-1]))
    return vector.SparseVector(index_list, data_list, vector_len)
#receive two list as vector
def my_sparse_vector_similarity(vectora,vectorb,vec_len):
    """Similarity between two vectors given as [(index, value), ...] lists.

    Uses the globally configured `vec_sim` (bound in set_config) and applies
    significance weighting: fewer than `significance_weight` common items
    scales the similarity down proportionally.
    """
    sparseveca = to_sparse_vector(vectora,vec_len)
    sparsevecb = to_sparse_vector(vectorb,vec_len)
    startfromone = not startfromzero
    sim = abs(vec_sim(sparseveca,sparsevecb,startfromone))
    if significance_weight:
        # number of items present in both vectors
        i = len(set([i for i,v in vectora]) & set([i for i,v in vectorb]))
        if i < significance_weight:
            sim *= float(i) / float(significance_weight)
    return sim
def item_based_centralized_vector_similarity(vectora,vectorb,vec_len):
    """Adjusted-cosine style similarity: each rating is centered on its user's mean.

    Entries are (userid, rating) pairs; user_mean_matrix must be filled by
    init_user_mean_matrix first.  Note: unlike my_sparse_vector_similarity,
    the result is NOT passed through abs().
    """
    def centralie_vector(v):
        # subtract the rating user's mean from each rating
        ret = []
        for i in range(len(v)):
            user_mean = user_mean_matrix[str(v[i][0])]
            ret.append((v[i][0],v[i][1] - user_mean))
        return ret
    vectora = centralie_vector(vectora)
    vectorb = centralie_vector(vectorb)
    sparseveca = to_sparse_vector(vectora,vec_len)
    sparsevecb = to_sparse_vector(vectorb,vec_len)
    startfromone = not startfromzero
    sim = vec_sim(sparseveca,sparsevecb,startfromone)
    if significance_weight:
        i = len(set([i for i,v in vectora]) & set([i for i,v in vectorb]))
        if i < significance_weight:
            sim *= float(i) / float(significance_weight)
    return sim
def get_k_nearest_users(userid, dao, k = 200):
    '''
    Return the k most similar users for `userid`, searched over the whole
    user domain and sorted by similarity in descending order.
    ex get_k_nearest_users(55) -> [(22,0.99),(657,0.96)...]
    '''
    nearestlist = []
    start = 0 if startfromzero else 1  # whether id start from zero
    user = dao.get_item_list_by_user(userid)
    for i in range(start,maxuserid + 1):
        if i == userid:
            continue
        user_i = dao.get_item_list_by_user(i)
        nearestlist.append((i,similarity_func(user,user_i,maxitemid + 1)))
    nearestlist.sort(key = lambda nearestlist:nearestlist[1], reverse = True)
    return nearestlist[:k]
def all_user_similarity():
'''
calculate all user similarity and put to the datasore through DAO
'''
print "Calculating similarity for each user."
t1 = time.time()
init_user_mean_matrix(new_DAO_interface())
devide_number = multithread
start = 0 if startfromzero else 1 #whether id start from zero
total = maxuserid + 1 - start
proclist = []
for i in range(0,devide_number):
end = start + total / 8
if end > maxuserid or i == devide_number - 1:
end = maxuserid + 1
p = Process(target = user_similarity_in_range,args = (start,end))
proclist.append(p)
p.start()
start = end
for p in proclist:
p.join()
t2 = time.time()
print "All user's similarities have been claculated in %ds"%(t2 - t1)
def user_similarity_in_range(start,end):
    """Worker: compute and store neighbor similarities for userids in [start, end)."""
    dao = new_DAO_interface()
    for userid in range(start,end):
        simlist = get_k_nearest_users(userid, dao)
        for otheruserid,sim in simlist:
            dao.put_user_sim(userid,otheruserid,sim)
    # print "User:" + str(userid) + " neighborhood similarity calculated."
def clean_all_user_sim():
    """Delete every previously stored user-user similarity via the DAO."""
    print "Clearning previous calculated similarity."
    dao = new_DAO_interface()
    start = 0 if startfromzero else 1  # whether id start from zero
    for userid in range(start,maxuserid + 1):
        dao.del_user_sim(userid)
    print "All cleared."
def all_item_similarity():
'''
calculate all python similarity and put to the datasore through DAO
'''
print "Calculating similarity for each item."
t1 = time.time()
init_user_mean_matrix(new_DAO_interface())
devide_number = multithread
start = 0 if startfromzero else 1 #whether id start from zero
total = maxitemid + 1 - start
proclist = []
for i in range(0,devide_number):
end = start + total / 8
if end > maxitemid or i == devide_number - 1:
end = maxitemid + 1
p = Process(target = item_similarity_in_range,args = (start,end))
proclist.append(p)
p.start()
start = end
for p in proclist:
p.join()
t2 = time.time()
print "All item similarities have been claculated in %ds"%(t2 - t1)
def item_similarity_in_range(start,end):
    """Worker: compute and store similarities for itemids in [start, end)."""
    dao = new_DAO_interface()
    for itemid in range(start,end):
        simlist = get_other_item_sim(itemid, dao)
        for otheritemid,sim in simlist:
            dao.put_item_sim(itemid,otheritemid,sim)
    # print "Item:" + str(itemid) + " neighborhood similarity calculated."
def get_other_item_sim(itemid, dao):
    '''
    Return (otheritemid, similarity) pairs between `itemid` and every other
    rated item, sorted by similarity descending.  Zero similarities and
    items with no ratings are skipped.
    '''
    nearestlist = []
    start = 0 if startfromzero else 1  # whether id start from zero
    item = dao.get_user_list_by_item(itemid)
    if item == []:
        print "No record for %d when calculating item similarity." % (itemid)
        return []  # return a empty list if there are no record for item
    for i in range(start,maxitemid + 1):
        if i == itemid:
            continue
        item_i = dao.get_user_list_by_item(i)
        if item_i == []:
            continue  # continue if there are no record for i
        sim = similarity_func(item,item_i,maxuserid + 1)
        if sim != 0.:
            nearestlist.append((i,sim))
    nearestlist.sort(key = lambda nearestlist:nearestlist[1], reverse = True)
    return nearestlist
def clean_all_item_sim():
    """Delete every previously stored item-item similarity via the DAO."""
    print "Clearning previous calculated similarity."
    dao = new_DAO_interface()
    start = 0 if startfromzero else 1  # whether id start from zero
    for itemid in range(start,maxitemid + 1):
        dao.del_item_sim(itemid)
    print "All cleared."
def new_DAO_interface():
    """Instantiate the configured DAO class (bound in set_config)."""
    return DAOtype()
def set_config():
    """(Re)read the configuration and rebind the module-level globals.

    Registered with config below so it runs again on config reload.
    """
    global storetype, similaritytype, maxitemid
    global maxuserid, startfromzero, multithread
    global DAOtype, similarity_func, vec_sim, significance_weight, pmodel, all_simlarity
    storetype = config.Config().configdict['global']['storage']
    similaritytype = config.Config().configdict['user_item_CF']['similarity']
    significance_weight= config.Config().configdict['user_item_CF']['significance_weight']
    maxitemid = config.Config().configdict['dataset']['maxitemid']
    maxuserid = config.Config().configdict['dataset']['maxuserid']
    startfromzero = config.Config().configdict['dataset']['startfromzero']
    multithread = config.Config().configdict['global']['multithread']
    pmodel = config.Config().configdict['user_item_CF']['model']
    # item-based or user-based
    if pmodel == "user-based":
        all_simlarity = all_user_similarity
    elif pmodel == "item-based":
        all_simlarity = all_item_similarity
    else:
        raise Exception("You should never get here, badly configed.")
    # get the DAO interface
    if storetype == 'redis':
        from main.DAO import redisDAO
        DAOtype = redisDAO.redisDAO
    # default similarity; 'adjusted_cos' below overrides it
    similarity_func = my_sparse_vector_similarity
    # get the similarity method
    if similaritytype == 'pearson':
        # similarity func must receivce two lists representing vector as [(index,value),...] and a vector length
        vec_sim = vector.pearsonr
    elif similaritytype == 'pearson_intersect':
        vec_sim = vector.pearsonr_hasvalue_both
    elif similaritytype == 'pearson_default':
        vec_sim = vector.pearsonr_default_rate
    elif similaritytype == 'cos':
        vec_sim = vector.cosine
    elif similaritytype == 'spearman':
        vec_sim = vector.spearman
    elif similaritytype == 'adjusted_cos':
        vec_sim = vector.cosine
        similarity_func = item_based_centralized_vector_similarity
    else :
        raise Exception("You should never goes into here! Baddly configed.")
set_config()
# re-run set_config whenever the configuration reloads
config.Config().register_function(set_config)
if __name__ == "__main__":
    # recompute all user similarities from scratch (Python 2 script entry)
    clean_all_user_sim()
    dao = new_DAO_interface()
    t1 = time.time()
    all_user_similarity()
    t2 = time.time()
    print "Finished in time :",t2 - t1,"s"
|
import pandas as pd
class polutantSlicer :
    """Split an input frame into one table per pollutant (PM2.5, PM10, O3, NO2).

    Non-pollutant, non-wind indicator columns are appended to each table.
    NOTE(review): `keep_wind` is stored but transform() always drops wind
    columns for now (see comment below).
    Changes vs original: removed dead commented-out label-index code that was
    kept in bare triple-quoted strings (evaluated and discarded at runtime),
    and fit() now returns self so calls can be chained sklearn-style.
    """
    def __init__(self,keep_wind) :
        # Column-name lists per pollutant, filled by fit().
        self.index_PM2 =[]
        self.index_PM10 =[]
        self.index_O3 =[]
        self.index_NO2 =[]
        # User option : keep or remove the wind indicators (currently unused)
        self.wind = keep_wind

    def fit(self,X):
        """Record which columns of X belong to each pollutant; returns self."""
        self.index_PM2 = [c for c in X.columns if "PM2" in c]
        self.index_PM10 =[c for c in X.columns if "PM10" in c]
        self.index_O3 = [c for c in X.columns if "O3" in c]
        self.index_NO2 = [c for c in X.columns if "NO2" in c]
        return self

    def transform(self,X):
        """Return four frames (PM2, PM10, O3, NO2): each pollutant's columns
        plus the shared non-pollutant indicator columns."""
        PM2 = X[self.index_PM2]
        PM10 = X[self.index_PM10]
        O3 = X[self.index_O3]
        NO2 = X[self.index_NO2]
        # Columns that are neither pollutant measures nor wind indicators.
        cols = [c for c in X.columns if "PM2" not in c and "wind" not in c and "PM10" not in c and "O3" not in c and "NO2" not in c]
        other_indicators = X[cols]
        PM2 = pd.concat([PM2,other_indicators], axis=1)
        PM10 = pd.concat([PM10,other_indicators], axis=1)
        O3 = pd.concat([O3,other_indicators], axis=1)
        NO2 = pd.concat([NO2,other_indicators], axis=1)
        # !!!! FOR NOW THE WIND IS NOT TAKEN INTO ACCOUNT
        return PM2,PM10,O3,NO2
def add_averaged_wind_features(data,drop=False):
    """Append cross-station hourly means of the wind measures to `data`.

    Wind columns come in 49-hour windows (hours -24..+24); every 49th wind
    column belongs to the same hour across stations, hence the i::49 stride.
    The raw per-station wind columns are removed from the result.
    NOTE(review): the `drop` option is documented but never used in the body.
    NOTE(review): assigning into .iloc of a freshly created, row-less
    DataFrame is pandas-version sensitive — verify the frames get filled.
    """
    windspeed_cols = data.filter(regex="windSpeed")
    windcos_cols = data.filter(regex="windBearingCos")
    windsin_cols = data.filter(regex="windBearingSin")
    # Let's generate 3 empty data frames with the columns names we want
    speed_col_names = []
    windsin_col_names = []
    windcos_col_names = []
    for i in range(-24,25):
        speed_name = "windSpeed_"+str(i)
        sin_name = "windBearingSin_"+str(i)
        cos_name = "windBearingCos_"+str(i)
        speed_col_names.append(speed_name)
        windsin_col_names.append(sin_name)
        windcos_col_names.append(cos_name)
    windspeed_processed = pd.DataFrame(columns=speed_col_names)
    windcos_processed = pd.DataFrame(columns=windcos_col_names)
    windsin_processed = pd.DataFrame(columns=windsin_col_names)
    # Fill the 3 data frames with the average values of the speed measures on the stations hour per hour (49h per wind measure)
    for i in range(0,49):
        windspeed_processed.iloc[:,i] = windspeed_cols.iloc[:,i::49].mean(axis=1)
        windcos_processed.iloc[:,i] = windcos_cols.iloc[:,i::49].mean(axis=1)
        windsin_processed.iloc[:,i] = windsin_cols.iloc[:,i::49].mean(axis=1)
    data_processed = pd.concat([windspeed_processed,windcos_processed,windsin_processed],axis=1)
    cols_to_keep = [c for c in data.columns if c.lower()[:4] != 'wind']  # Withdraw columns containing wind measures
    data=data[cols_to_keep]
    data_processed = pd.concat([data,data_processed],axis=1)  # complete data with processed wind measures
    return data_processed
def add_averaged_polutants_features(data):
    """Append cross-station hourly means of each pollutant to `data`.

    Pollutant columns come in 25-hour windows (hours -24..0); every 25th
    column belongs to the same hour across stations, hence the i::25 stride.
    NOTE(review): as with the wind variant, assigning into .iloc of an empty
    DataFrame is pandas-version sensitive — verify the frames get filled.
    """
    PM2_cols = data.filter(regex="PM2")
    PM10_cols = data.filter(regex="PM10")
    O3_cols = data.filter(regex="O3")
    NO2_cols = data.filter(regex="NO2")
    # Let's generate 4 empty data frames with the columns names we want
    PM2_col_names = []
    PM10_col_names = []
    O3_col_names = []
    NO2_col_names = []
    for i in range(-24,1):
        PM2_name = "Av_PM2_"+str(i)
        PM10_name = "Av_PM10_"+str(i)
        O3_name = "Av_O3_"+str(i)
        NO2_name = "Av_NO2_"+str(i)
        PM2_col_names.append(PM2_name)
        PM10_col_names.append(PM10_name)
        O3_col_names.append(O3_name)
        NO2_col_names.append(NO2_name)
    PM2_processed = pd.DataFrame(columns=PM2_col_names)
    PM10_processed = pd.DataFrame(columns=PM10_col_names)
    O3_processed = pd.DataFrame(columns=O3_col_names)
    NO2_processed = pd.DataFrame(columns=NO2_col_names)
    # Fill the 4 data frames with the average values of the measures on the stations hour per hour (25h per polutant measure)
    for i in range(0,25):
        PM2_processed.iloc[:,i] = PM2_cols.iloc[:,i::25].mean(axis=1)
        PM10_processed.iloc[:,i] = PM10_cols.iloc[:,i::25].mean(axis=1)
        O3_processed.iloc[:,i] = O3_cols.iloc[:,i::25].mean(axis=1)
        NO2_processed.iloc[:,i] = NO2_cols.iloc[:,i::25].mean(axis=1)
    data_processed = pd.concat([PM2_processed,PM10_processed,O3_processed,NO2_processed],axis=1)
    data_processed = pd.concat([data,data_processed],axis=1)  # complete data with processed measures
    return data_processed
def add_averaged_polutants_features2(data):
    """Append per-hour, cross-station mean columns for each pollutant.

    For every hour offset i in -24..0, columns matching e.g.
    ``PM2_5_<station>_i`` are averaged across stations into a single
    ``PM2_5_mean_i`` column.  Offsets with no matching columns yield a NaN
    column.  Returns a new frame: `data` plus the mean columns.

    (Removed a stray dead assignment ``i=-8`` that the loop immediately
    overwrote.)
    """
    PM2_mean = pd.DataFrame()
    PM10_mean = pd.DataFrame()
    O3_mean = pd.DataFrame()
    NO2_mean = pd.DataFrame()
    for i in range(-24,1):
        # \b anchors the offset so e.g. "_0" does not match "_-10".
        PM2 = "PM2_5_(.+?)_" + str(i) + "\\b"
        PM2_name = "PM2_5_mean_"+str(i)
        PM10 = "PM10_(.+?)_"+str(i) + "\\b"
        PM10_name = "PM10_mean_"+str(i)
        NO2 = "NO2_(.+?)_"+str(i) + "\\b"
        NO2_name = "NO2_mean_"+str(i)
        O3 = "O3_(.+?)_"+str(i) + "\\b"
        O3_name = "O3_mean_"+str(i)
        PM2_value=data.filter(regex=PM2)
        PM2_mean[PM2_name]=PM2_value.mean(axis=1)
        PM10_mean[PM10_name]=data.filter(regex=PM10).mean(axis=1)
        O3_mean[O3_name]=data.filter(regex=O3).mean(axis=1)
        NO2_mean[NO2_name]=data.filter(regex=NO2).mean(axis=1)
    data_processed = pd.concat([data, PM2_mean,PM10_mean,O3_mean,NO2_mean],axis=1)
    return data_processed
from random import randint, seed, choice
# 🚨 Don't change the code below 👇
test_seed = int(input("Create a seed number: "))
seed(test_seed)
# Split string method
namesAsCSV = input("Give me everybody's names, seperated by a comma.\n")
names = namesAsCSV.split(", ")
# 🚨 Don't change the code above 👆
# Write your code below this line 👇
# Solution 1: pick a random index by hand.
random_person = randint(0, len(names) -1)
person_paying = names[random_person]
print(f"{person_paying} is the person paying the bill.")
print("")
# Solution 2: let random.choice pick directly (more idiomatic).
paying_person = choice(names)
print(f"{paying_person} is the person paying the bill.")
|
from PyQt5.QtWidgets import *
from src.UI.item_widget import ItemWidget
from src.item import Item
class TodayTasks(QWidget):
    """Widget showing today's tasks from `userList`, with Done/Edit buttons."""

    def __init__(self, userList):
        QWidget.__init__(self)
        self.userList = userList
        # maps str(listItem.text) -> data role index; see NOTE in addItems
        self.itemIndex = {}
        self.index = 0
        self.initUI()

    def initUI(self):
        """Build the layout: title label, task list, button row."""
        layout = QVBoxLayout()
        # adding title
        todayTitle = QLabel("Today's Tasks")
        layout.addWidget(todayTitle)
        # adding lists
        self.todayList = QListWidget()
        self.addItems(self.todayList)
        layout.addWidget(self.todayList)
        # adding buttons
        buttons = QFrame()
        buttonsLayout = QHBoxLayout()
        self.addButtons(buttonsLayout)
        buttons.setLayout(buttonsLayout)
        layout.addWidget(buttons)
        self.setLayout(layout)

    def addItems(self, listWidget):
        """Populate `listWidget` with one ItemWidget row per sorted task."""
        for item in self.userList.getSorted():
            newItem = ItemWidget(item)
            listItem = QListWidgetItem(listWidget)
            listWidget.addItem(listItem)
            listItem.setSizeHint(newItem.sizeHint())
            listWidget.setItemWidget(listItem, newItem)
            # NOTE(review): self.index is used as the Qt data *role*, so each
            # row stores its payload under a different role — confirm intended.
            listItem.setData(self.index, item)
            # NOTE(review): `listItem.text` is a bound method, not the text;
            # str() of it embeds the object address.  Probably meant
            # `listItem.text()` — verify.
            self.itemIndex[str(listItem.text)] = self.index
            self.index += 1

    def addButtons(self, layout):
        # finish selected task
        finishTask = QPushButton("Done")
        finishTask.clicked.connect(self.removeItem)
        layout.addWidget(finishTask)
        # edit task (no handler connected yet)
        editTask = QPushButton("Edit")
        layout.addWidget(editTask)

    def removeItem(self):
        """Remove the selected rows from the widget and from userList."""
        selected = self.todayList.selectedItems()
        for item in selected:
            # NOTE(review): prints the bound method object, not the text.
            print(item.text)
            self.todayList.takeItem(self.todayList.row(item))
            self.userList.remove(item.data(self.itemIndex[str(item.text)]))
|
from app import db
class Assets(db.Model):
    """Hardware asset record, optionally assigned to a user."""
    id = db.Column(db.Integer, primary_key=True)
    # unique asset identifier (e.g. hostname / asset tag)
    name = db.Column(db.String(255), unique=True)
    model = db.Column(db.String(255))
    osversion = db.Column(db.String(255))
    # id of the user the asset is assigned to
    assignee = db.Column(db.Integer, db.ForeignKey('users.id'))

    def __init__(self, name, model, osversion, assignee):
        self.name = name
        self.model = model
        self.osversion = osversion
        self.assignee = assignee
"""
You are given a binary tree. You need to write a function that can determine if
it is a valid binary search tree.
The rules for a valid binary search tree are:
- The node's left subtree only contains nodes with values less than the node's
value.
- The node's right subtree only contains nodes with values greater than the
node's value.
- Both the left and right subtrees must also be valid binary search trees.
Example 1:
Input:
5
/ \
3 7
Output: True
Example 2:
Input:
10
/ \
2 8
/ \
6 12
Output: False
Explanation: The root node's value is 10 but its right child's value is 8.
"""
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, value=0, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right


def is_valid_BST(root, lower=float("-inf"), upper=float("inf")):
    """Return True if the tree rooted at `root` is a valid binary search tree.

    `lower`/`upper` are the exclusive bounds every value in the current
    subtree must satisfy; callers just pass the root (defaults are +/-inf).

    Fix: the original only compared each node with its direct children, so a
    deep violation such as 5 -> left 3 -> right 7 (7 is in the left subtree
    of 5 but 7 > 5) was wrongly accepted, and a None root crashed.  BST
    validity requires bounds inherited from ALL ancestors.
    """
    if root is None:
        # An empty (sub)tree is valid.
        return True
    if not (lower < root.value < upper):
        return False
    # Left subtree must stay below root.value, right subtree above it.
    return (is_valid_BST(root.left, lower, root.value)
            and is_valid_BST(root.right, root.value, upper))
# --- quick manual checks -------------------------------------------------
rootValidTree = TreeNode(5)
rootValidTree.left = TreeNode(3)
rootValidTree.right = TreeNode(7)
rootInvalidTree = TreeNode(10)
rootInvalidTree.left = TreeNode(2)
rootInvalidTree.right = TreeNode(8)  # 8 < 10: violates the right-subtree rule
rootInvalidTree.right.left = TreeNode(6)
rootInvalidTree.right.right = TreeNode(12)
invalidTree2 = TreeNode(20)
invalidTree2.left = TreeNode(10)
invalidTree2.left.right = TreeNode(5)  # 5 < 10 as a right child: invalid
invalidTree2.right = TreeNode(30)
print(f'1st tree is: {is_valid_BST(rootValidTree)}')
print(f'2nd tree is: {is_valid_BST(rootInvalidTree)}')
print(f'3rd tree is: {is_valid_BST(invalidTree2)}')
|
import unittest
class GridBoxAreaTests(unittest.TestCase):
    """Placeholder test case for grid-box area computation."""
    def test_gridboxarea(self):
        # TODO: add real assertions once the grid-box area code exists.
        pass
# Project Euler #1: sum of all multiples of 3 or 5 below 1000.
# The original computed the sum as a bare expression and discarded it;
# bind it to a name so the result is actually usable.
euler1_answer = sum(x for x in range(0, 1000) if (x % 3 == 0 or x % 5 == 0))
|
# coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
"""
Load test methods from each file in the same directory as this file,
that start with 'test_', and have 'Test' in their class name
"""
import os
import importlib
import inspect
# Scan this package directory for test_*.py files and hoist every class whose
# name contains 'Test' into this module's namespace (Python 2 `exec`).
for testfile in os.listdir(os.path.dirname(__file__)):
    if testfile.startswith('test_'):
        try:
            module_name, _ = os.path.splitext(testfile)
            module = 'bakery_lint.tests.upstream.%s' % module_name
            module = importlib.import_module(module)
            for name, obj in inspect.getmembers(module):
                if 'Test' in name:
                    # re-import the symbol under its own name at module level
                    exec 'from bakery_lint.tests.upstream.%s import %s' % (module_name, name)
        except (ImportError, AttributeError, IndexError) as ex:
            # a broken test module is skipped silently
            pass
|
from django.contrib.auth.decorators import permission_required
def researcher_permission_required():
    """
    Custom decorator for researcher views, with the login url set for this
    application.  Usage: @researcher_permission_required() on a view.
    """
    return permission_required('research.is_researcher', login_url="/login/")
|
import datetime

from flask import Blueprint, make_response, jsonify, request
from sqlalchemy.sql import label, func

from extensions import db
from follow.routes import get_followed_users
from models import User, Activity, Action, Book, Review, Achievement, Comment
activity = Blueprint('activity', __name__)
@activity.route("/api/v1.0/activity/<string:user_id>", methods=["GET"])
def get_all_activity_by_user(user_id):
data_to_return = []
activities = db.session.query(Action.description, Activity.date_created, Activity.target_id, Activity.action_id, Activity.object_id, User.user_id, User.full_name).join(User, Activity.user_id==User.user_id).join(Action, Activity.action_id==Action.id).filter(Activity.user_id==user_id)
for activity in activities:
if activity.action_id == 1:
target = db.session.query(Book.title, Book.author, Book.ISBN, Review.id, Review.rating, Review.text).join(Review, Book.book_id==Review.book_id).filter(Book.book_id==activity.target_id).first()
elif activity.action_id in(2, 3, 4):
target = db.session.query(Book.title, Book.author, Book.ISBN).filter(Book.book_id==activity.target_id).first()
elif activity.action_id == 5:
target = db.session.query(Book.title, Book.author, Book.ISBN, Review.id, Review.rating, Review.text, User.user_id, User.full_name).join(Book, Review.book_id==Book.book_id).join(User, User.user_id==Review.reviewer_id).filter(Review.id==activity.target_id).first()
elif activity.action_id == 6:
target = db.session.query(User.user_id, User.full_name).filter(User.user_id==activity.target_id).first()
elif activity.action_id == 7:
target = db.session.query(Achievement.id, Achievement.name, Achievement.description, Achievement.badge).filter(Achievement.id==activity.target_id).first()
act = {"user_id" : activity.user_id, "user" : activity.full_name, "action" : activity.description, "target" : target, "date_created" : activity.date_created}
data_to_return.append(act)
if data_to_return:
return make_response(jsonify(data_to_return), 200)
else:
return make_response(jsonify({"error" : "No activities found"}), 404)
@activity.route("/api/v1.0/activity/followedby/<string:user_id>", methods=["GET"])
def get_activity_followed_users(user_id):
data_to_return = []
followed = get_followed_users(user_id)
activities = db.session.query(Action.description, Activity.id, Activity.date_created, Activity.target_id, Activity.action_id, Activity.object_id, User.user_id, User.full_name, User.image).join(User, Activity.user_id==User.user_id).join(Action, Activity.action_id==Action.id).filter(Activity.user_id.in_([(f['user_id']) for f in followed])).order_by(Activity.date_created.desc()).all()
for activity in activities:
likes = get_like_count(7, activity.id)
target = ""
if activity.action_id == 1:
target = db.session.query(Book.title, Book.author, Book.ISBN, Book.image_link, Review.id, Review.rating, Review.text).join(Review, Book.book_id==Review.book_id).filter(Book.book_id==activity.target_id).first()
elif activity.action_id in(2, 3, 4):
target = db.session.query(Book.title, Book.author, Book.ISBN, Book.image_link).filter(Book.book_id==activity.target_id).first()
elif activity.action_id == 5:
if activity.object_id == 2:
target = db.session.query(Book.title, Book.author, Book.ISBN, Book.image_link, Review.id, Review.rating, Review.text, User.user_id, User.full_name).join(Book, Review.book_id==Book.book_id).join(User, User.user_id==Review.reviewer_id).filter(Review.id==activity.target_id).first()
if activity.object_id == 3:
target = db.session.query(Comment.comment_id, Comment.commenter_id, Comment.text, Comment.time_submitted, User.user_id, User.full_name).join(User, Comment.commenter_id==User.user_id).filter(Comment.comment_id==activity.target_id).first()
if activity.object_id == 4:
target = db.session.query(Achievement.id, Achievement.name, Achievement.description, Achievement.badge).filter(Achievement.id==activity.target_id).first()
# if activity.object_id == 7:
# activity_user = db.session.query(Activity.user_id).filter(Activity.id==activity.target_id).first()
# target = db.session.query(User.full_name, User.user_id, User.image).filter(User.user_id==activity_user).first()
elif activity.action_id == 6:
target = db.session.query(User.user_id, User.full_name, User.image).filter(User.user_id==activity.target_id).first()
elif activity.action_id == 7:
target = db.session.query(Achievement.id, Achievement.name, Achievement.description, Achievement.badge).filter(Achievement.id==activity.target_id).first()
# return all updates with exception of 'User X liked User Y's update'
# target not None ensures liking now deleted reviews does not appear on feed
if not activity.object_id == 7 and not target == None:
act = {"activity_id" : activity.id, "user_id" : activity.user_id, "user" : activity.full_name, "user_image" : activity.image, "action" : activity.description, "object_id" : activity.object_id, "target" : target, "date_created" : activity.date_created, "likes" : likes}
data_to_return.append(act)
# act = {"activity_id" : activity.id, "user_id" : activity.user_id, "user" : activity.full_name, "user_image" : activity.image, "action" : activity.description, "object_id" : activity.object_id, "target" : target, "date_created" : activity.date_created, "likes" : likes}
# data_to_return.append(act)
if data_to_return:
return make_response(jsonify(data_to_return), 200)
else:
return make_response(jsonify({"error" : "No activities found"}), 404)
@activity.route("/api/v1.0/<string:user_id>/likes", methods=["POST"])
def add_like(user_id):
if request.args.get('objectID') and request.args.get('targetID'):
object_id = request.args.get('objectID')
target_id = request.args.get('targetID')
# add constants file and have LIKE_ID as 5 instead of hard coding like this
liked = db.session.query(Activity).filter(Activity.user_id==user_id, Activity.action_id==5, Activity.target_id==target_id).scalar() is not None
if not liked:
db.session.add(Activity(user_id=user_id, action_id=5, object_id=object_id, date_created=datetime.datetime.now(), target_id=target_id))
db.session.commit()
return make_response(jsonify({"success" : "Added like"}), 201)
else:
return make_response(jsonify({"error" : "Failed to add like"}), 404)
else:
return make_response(jsonify({"error" : "Failed to add like"}), 404)
@activity.route("/api/v1.0/likes", methods=["GET"])
def get_like_count():
if request.args.get('objectID') and request.args.get('targetID'):
object_id = request.args.get('objectID')
target_id = request.args.get('targetID')
num_likes = db.session.query(label('num_likes', func.count(Activity.id))).filter(Activity.action_id==5, Activity.object_id==object_id,Activity.target_id==target_id).all()
return make_response(jsonify(num_likes), 200)
else:
return make_response(jsonify({"error" : "Failed to return like count"}), 404)
def get_like_count(object_id, target_id):
    """Helper: like-count row for (object_id, target_id).

    NOTE(review): shadows the route function of the same name above.
    """
    likes = db.session.query(label('count', func.count(Activity.id))).filter(Activity.action_id==5, Activity.object_id==object_id,Activity.target_id==target_id).all()
    return likes[0]
|
# Prompt until the user enters a valid integer.
# The original never terminated: it caught NameError (an artifact of
# Python 2's evaluating input()) and looped forever even after valid input.
while True:
    try:
        number = int(input("Enter a number: "))
    except ValueError:
        # not a number — ask again
        continue
    break
|
from django.contrib import admin
from .models import *
# Register your models here.
# Expose the org/project/team models in the Django admin with default options.
admin.site.register(Organization)
admin.site.register(Project)
admin.site.register(Team)
admin.site.register(ProjectTeam)
admin.site.register(Person)
admin.site.register(TeamMember)
admin.site.register(OrganizationalRole)
admin.site.register(TeamMembership)
admin.site.register(TeamPurpose)
|
from datetime import datetime
import os
import sqlite3
import pandas as pd
import boto3
# S3 client used by upload_csv below.
s3 = boto3.client("s3")
# Day zero used to convert integer model times into calendar dates.
COVID_BASE_DATE = datetime(2019, 12, 31)
# Raw string: the previous non-raw literal relied on "\D", "\@", "\C", "\o"
# being passed through as unrecognised escapes — a DeprecationWarning today
# and a SyntaxError in future Python versions. The value is unchanged.
DATA_PATH = r"M:\Documents\@Projects\Covid_consolidate\output"
os.chdir(DATA_PATH)
list_of_files = os.listdir(DATA_PATH)
# Outputs requested for every country unless overridden below.
STANDARD_COL = [
    "incidence",
    "notifications",
    "hospital_occupancy",
    "icu_occupancy",
    "accum_deaths",
    "infection_deaths",
]
# 5-year age bands: 0, 5, ..., 75.
AGE_GROUPS = list(range(0, 80, 5))
phl = {
    "region": [
        "calabarzon",
        "central-visayas",
        "davao-city",
        "davao-region",
        "manila",
        "philippines",
    ],
    "columns": STANDARD_COL,
}
mys = {
    "region": ["selangor", "penang", "malaysia", "kuala-lumpur", "johor"],
    "columns": STANDARD_COL,
}
lka = {"region": ["sri_lanka"], "columns": STANDARD_COL}
VIC_CLUSTERS = [
    "BARWON_SOUTH_WEST",
    "GIPPSLAND",
    "GRAMPIANS",
    "HUME",
    "LODDON_MALLEE",
    "NORTH_METRO",
    "SOUTH_EAST_METRO",
    "SOUTH_METRO",
    "WEST_METRO",
]
VIC_OUTPUT = [
    "hospital_occupancy",
    "icu_occupancy",
    "hospital_admissions",
    "icu_admissions",
    "infection_deaths",
    "notifications",
]
# Per-cluster column names, e.g. "notifications_for_cluster_hume".
VIC_REQUEST = [
    f"{output}_for_cluster_{cluster.lower()}" for output in VIC_OUTPUT for cluster in VIC_CLUSTERS
]
vic = {
    "region": ["victoria"],
    "columns": VIC_REQUEST
    + [
        "notifications",
        "hospital_occupancy",
        "icu_occupancy",
        "hospital_admissions",
        "icu_admissions",
        "infection_deaths",
    ],
}
# Nepal additionally requests age-stratified incidence.
npl_incidence_col = [f"incidenceXagegroup_{each_age}" for each_age in AGE_GROUPS]
npl = {"region": ["nepal"], "columns": STANDARD_COL + npl_incidence_col}
def upload_csv(country_list):
    """Upload each country's CSV to the autumn-files S3 bucket (public-read),
    then delete the local copy."""
    for name in country_list:
        csv_name = f"{name}_data.csv"
        s3.upload_file(
            csv_name, "autumn-files", csv_name, ExtraArgs={"ACL": "public-read"}
        )
        os.remove(csv_name)
def get_files(country):
    """Return {region: absolute path} for each output file whose name
    contains the region string.

    When several files match one region, the last one listed wins — the
    same behaviour as the original dict comprehension.
    """
    matches = {}
    for region in country["region"]:
        for fname in list_of_files:
            if region in fname:
                matches[region] = os.path.join(DATA_PATH, fname)
    return matches
# Resolve each country's region names to the on-disk database files.
phl["region"] = get_files(phl)
mys["region"] = get_files(mys)
lka["region"] = get_files(lka)
# npl["region"] = get_files(npl)
# vic["region"] = get_files(vic)
country = {
    "lka": lka,
    "phl": phl,
    "mys": mys,
} # "npl": npl, "vic": vic}
# For each country: read MLE derived outputs and uncertainty quantiles from
# every regional SQLite file, merge them, and write one CSV per country.
for ctry in country:
    df_mle = pd.DataFrame()
    df_un = pd.DataFrame()
    # NOTE(review): the column list is built from a SET comprehension, so the
    # SELECT column order is arbitrary; harmless here because pandas addresses
    # columns by name, but worth knowing when diffing generated SQL.
    query_do = (
        "SELECT scenario, times, "
        + "".join({each + ", " for each in country[ctry]["columns"]})[:-2]
        + " FROM derived_outputs;"
    )
    query_un = (
        "SELECT scenario,time,type,quantile, value FROM uncertainty WHERE type in ("
        + "".join({"'" + each + "', " for each in country[ctry]["columns"]})[:-2]
        + ");"
    )
    for app_name in country[ctry]["region"]:
        reg_file = country[ctry]["region"][app_name]
        conn = sqlite3.connect(reg_file)
        # The first region initialises each frame; later regions are appended.
        if df_mle.empty:
            df_mle = pd.read_sql_query(query_do, conn)
            df_mle["Region"] = app_name
        else:
            df_temp = pd.read_sql_query(query_do, conn)
            df_temp["Region"] = app_name
            df_mle = df_mle.append(df_temp)
        if df_un.empty:
            df_un = pd.read_sql_query(query_un, conn)
            df_un["Region"] = app_name
        else:
            df_temp = pd.read_sql_query(query_un, conn)
            df_temp["Region"] = app_name
            df_un = df_un.append(df_temp)
    # Widen the uncertainty table: one column per output/quantile pair,
    # e.g. "notifications_P0.5".
    df_un["type"] = df_un["type"] + "_P" + df_un["quantile"].astype(str)
    df_un = pd.pivot_table(
        df_un, values="value", index=["Region", "time", "scenario"], columns=["type"]
    )
    df_un.reset_index(inplace=True)
    df = df_mle.merge(
        df_un,
        how="outer",
        left_on=["Region", "scenario", "times"],
        right_on=["Region", "scenario", "time"],
        suffixes=("_mle", "_un"),
    )
    # Convert integer model day numbers to calendar dates.
    df["Date"] = pd.to_timedelta(df.times, unit="days") + (COVID_BASE_DATE)
    df.rename(
        columns={
            "hospital_occupancy": "hospital_occupancy_mle",
            "infection_deaths": "infection_deaths_mle",
        },
        inplace=True,
    )
    # Fixed identifier columns first (the duplicate "time" column is dropped
    # by the [:-1] below), remaining columns sorted alphabetically.
    col_set1 = ["Region", "scenario", "Date", "times", "time"]
    col_set2 = [col for col in list(df.columns) if col not in col_set1]
    col_set2.sort()
    col_set1 = col_set1[:-1]
    df = df[col_set1 + col_set2]
    df.to_csv(f"{ctry}_data.csv")
# Push the generated CSVs to S3 and remove the local copies.
upload_csv(["lka", "phl", "mys"])
|
# implementação do método iterativo SOR para o EP3.
import matplotlib.pyplot as plt # Para plotar gráficos
import math # Para trabalhar com funções matemáticas
import numpy as np # Para trabalhar com matrizes
import random # Para gerar números aleatórios
def matriz_temperatura(n, t0):
    """Build the (n+1)x(n+1) initial temperature matrix.

    The left and top borders are held at -3; the bottom and right borders
    ramp linearly from -3 to 3. The interior is filled row by row from the
    initial-guess vector t0 of length (n-1)^2.
    """
    T = np.zeros((n+1, n+1))
    # Constant borders.
    T[0, :] = -3
    T[:, 0] = -3
    # Linear ramp borders (corners coincide with the constant borders).
    ramp = np.array([6 * (k / n) - 3 for k in range(n + 1)])
    T[n, :] = ramp
    T[:, n] = ramp
    # Interior: row i takes slice (i-1) of the flattened guess vector.
    for i in range(1, n):
        T[i, 1:n] = t0[(n-1)*(i-1) : (n-1)*(i-1) + n-1]
    return T
def iteração_SOR(w, n, t0):
    """Perform one SOR sweep over all interior grid points.

    Rebuilds the temperature grid from the current guess t0, applies the
    relaxed update with parameter w, and returns the updated interior
    values flattened row by row.
    """
    T = matriz_temperatura(n, t0)  # grid shaped so the sweep can be done in place
    updated = []
    for i in range(1, n):
        for j in range(1, n):
            # Relaxed Gauss-Seidel update (equation 1 of the assignment):
            # blend the old value with the 4-neighbour average, weighted by w.
            T[i, j] = (1 - w) * T[i, j] + (w / 4) * (T[i-1, j] + T[i, j-1] + T[i, j+1] + T[i+1, j])
            updated.append(T[i, j])
    return updated
# Perceba que as iterações do SOR podem ser otimizadas pelo formato da matriz T, por isso que a criei
def erro_infi(v_a, v_b):
    """Return the infinity norm of the difference between two vectors."""
    diff = np.asarray(v_a) - np.asarray(v_b)
    return np.abs(diff).max()
def método_SOR(w, t0 , n, tol , max_iter = 600):
    """Run SOR sweeps until the relative infinity-norm criterion drops
    below *tol* or *max_iter* sweeps have been performed.

    Returns (number of sweeps performed, last approximate solution).
    """
    previous = t0
    performed = 0
    current = None
    while performed < max_iter:
        current = iteração_SOR(w, n, previous)
        performed += 1
        # Stopping test: change in this sweep relative to the total
        # displacement from the initial guess.
        if erro_infi(current, previous) / erro_infi(current, t0) < tol:
            break
        previous = current
    return performed, current
def comp_omega(n, t0):
    """For omega = 1.00, 1.01, ..., 2.00 run the SOR method (tol = 0.001)
    and return the list of sweep counts, one per omega."""
    counts = []
    for step in range(101):
        iterations, _ = método_SOR(1 + step/100, t0, n, tol=0.001)
        counts.append(iterations)
    return counts
def elemento_minimo(lista):
    """Return (index, value) of the first occurrence of the minimum element.

    Returns None for an empty list, preserving the original behaviour.
    The original recomputed min(lista) on every loop pass (O(n^2));
    computing it once gives the same result in O(n).
    """
    if not lista:
        return None
    menor = min(lista)
    return lista.index(menor), menor
# This part of the script runs the omega comparison for n = 512.
#t0 = np.zeros((511**2)) # zero initial guess of dimension (n-1)^2
# or
t0 = [] # random initial guess of dimension (n-1)^2
for i in range(511**2):
    t0.append(random.random())
lista_iterações = comp_omega(512, t0) # sweep counts for each omega in (1, 2)
print(lista_iterações) # print the list of iteration counts
print(elemento_minimo(lista_iterações)) # print the position "i" of the omega with the fewest sweeps, and that count
# The omega itself is recovered as 1 + i/100
# This part plots omega against the number of sweeps needed
lista_omegas = [] # list of omega values
for i in range(101):
    lista_omegas.append(1 + i/100)
plt.plot(lista_omegas, lista_iterações) # omega vs. sweeps needed to reach the tolerance
plt.title('Para n = 512, chute inicial aleatório, tol = 0.001') # chart labels (user-facing text kept verbatim)
plt.xlabel('valores de omega')
plt.ylabel('qtd de iterações')
plt.show()
from lxml import etree
import vobject
# Sample hCard fixtures.
# NOTE(review): neither string is referenced below — the script parses
# ./hcard.html from disk instead; presumably these are kept as test data.
hcard = """<div class="vcard">
<a class="fn org url" href="http://www.commerce.net/">CommerceNet</a>
<div class="adr">
<span class="type">Work</span>:
<div class="street-address">169 University Avenue</div>
<span class="locality">Palo Alto</span>,
<abbr class="region" title="California">CA</abbr>
<span class="postal-code">94301</span>
<div class="country-name">USA</div>
</div>
<div class="tel">
<span class="type">Work</span> +1-650-289-4040
</div>
<div class="tel">
<span class="type">Fax</span> +1-650-289-4041
</div>
<div>Email:
<span class="email">info@commerce.net</span>
</div>
</div>"""
hcard2 = """<td class="thumb vcard author">
<a href="http://twitter.com/aconbere" class="url"><img alt="Anders" class="photo fn" id="profile-image" src="http://s3.amazonaws.com/twitter_production/profile_images/19791522/anders_normal.jpg" /></a>
</td>"""
html = etree.HTML('./hcard.html')
xsl_doc = etree.parse('./xhtml2vcard.xsl')
transform = etree.XSLT(xsl_doc)
vcard = transform(html)
v = vobject.readOne(str(vcard))
print v
|
import selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import pandas as pd
import xlrd
def namesAndEmails():
    """Read the alumni spreadsheet (sheet index 4) and return
    [first name, last name, email] for every row with a non-empty email
    in column 2.

    The previous version iterated a hard-coded range(1, 5746); using
    sheet.nrows covers the same rows and keeps working as the sheet grows.
    """
    loc = "pathToAlumniListCSV"
    wb = xlrd.open_workbook(loc)
    sheet = wb.sheet_by_index(4)
    master = []
    # Start at 1 to skip the header row.
    for i in range(1, sheet.nrows):
        if sheet.cell_value(i, 2) != "":
            master.append([sheet.cell_value(i, 0), sheet.cell_value(i, 1), sheet.cell_value(i, 2)])
    return master
# Initialize the browser. The original named the options object "driver"
# (misleading) and passed "- incognito" with a space, which Chrome silently
# ignores — the real flag is "--incognito".
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--incognito")
browser = webdriver.Chrome(executable_path='C:/bin/chromedriver.exe', options=chrome_options)
# Build one Bing query per alumnus, e.g.
# 'site:linkedin.com/in/ AND "york university" AND "Tuan Dau"'
nameList = namesAndEmails()
person = []
for entry in nameList:
    person.append('site:linkedin.com/in/ AND "york university" AND "' + entry[0] + " " + entry[1] + '"')
linkedin_url_master = []
# Search Bing for each query and keep the first cited URL, if any.
for query in person:
    browser.get("https://www.bing.com/")
    browser.find_element_by_xpath('//*[@id="sb_form_q"]').send_keys(query)
    time.sleep(0.001)
    browser.find_element_by_xpath('//*[@id="sb_form_q"]').send_keys(Keys.RETURN)
    time.sleep(0.001)
    linkedin_urls = browser.find_elements_by_tag_name('cite')
    linkedin_urls = [url.text for url in linkedin_urls]
    if linkedin_urls != []:
        linkedin_url_master.append(linkedin_urls[0])
        print(linkedin_urls[0])
df_urls = pd.DataFrame(linkedin_url_master)
df_urls.to_csv(r'savePathtoCSV')
|
def solution(tickets):
    """Iterative depth-first traversal of the flight graph starting at ICN.

    Builds an adjacency map from the (origin, destination) ticket pairs and
    returns the vertices in the order they were visited. Leftover debug
    prints from the original have been removed; the traversal is unchanged.

    NOTE(review): neighbours are stored in a set, so when a vertex has
    several unvisited destinations the visiting order follows set iteration
    order, not alphabetical order.
    """
    vertexList = set()
    adjacencyList = {}
    visitedVertex = []
    stack = []
    # Collect every airport; seed the stack once per ticket leaving ICN.
    for vertex in tickets:
        for i in range(2):
            vertexList.add(vertex[i])
        if vertex[0] == 'ICN':
            stack.append('ICN')
    edgeList = tickets[:]
    for vertex in vertexList:
        adjacencyList[vertex] = set()
    for edge in edgeList:
        adjacencyList[edge[0]].add(edge[1])
    # DFS: pop, push unvisited neighbours, record the visit.
    while stack:
        current = stack.pop()
        for neighbor in adjacencyList[current]:
            if neighbor not in visitedVertex:
                stack.append(neighbor)
        visitedVertex.append(current)
    return visitedVertex
#tickets = [['ICN', 'JFK'], ['HND', 'IAD'], ['JFK', 'HND']]
# Smoke test: traversal over a cyclic ticket set starting at ICN.
tickets = [['ICN', 'SFO'], ['ICN', 'ATL'], ['SFO', 'ATL'], ['ATL', 'ICN'], ['ATL','SFO']]
print(solution(tickets))
from __future__ import print_function
import time
from pyinstrument import Profiler
# Utilities #
def do_nothing():
    """No-op body spun by busy_wait; kept as a named frame for profiles."""
    return None
def busy_wait(duration):
    """Burn CPU (without sleeping) for roughly *duration* seconds."""
    deadline = time.time() + duration
    while time.time() < deadline:
        do_nothing()
def long_function_a():
    """Sleep 0.25 s — a recognisable frame duration asserted by the tests below."""
    time.sleep(0.25)
def long_function_b():
    """Sleep 0.5 s — a recognisable frame duration asserted by the tests below."""
    time.sleep(0.5)
# Tests #
def test_aggregator_collapses_multiple_calls():
    """With the default (aggregating) recorder, the two calls to each long
    function should collapse into one child frame apiece — 2 children,
    not 4."""
    profiler = Profiler()
    profiler.start()
    long_function_a()
    long_function_b()
    long_function_a()
    long_function_b()
    profiler.stop()
    print(profiler.output_text())
    frame = profiler.first_interesting_frame()
    assert frame.function == 'test_aggregator_collapses_multiple_calls'
    assert len(frame.children) == 2
def test_timeline_retains_multiple_calls():
    """With the timeline recorder, repeated calls stay distinct — all four
    calls appear as separate child frames."""
    profiler = Profiler(recorder='timeline')
    profiler.start()
    long_function_a()
    long_function_b()
    long_function_a()
    long_function_b()
    profiler.stop()
    print(profiler.output_text())
    frame = profiler.first_interesting_frame()
    assert frame.function == 'test_timeline_retains_multiple_calls'
    assert len(frame.children) == 4
def test_two_functions():
    """Check both timing and ordering of two profiled calls.

    NOTE(review): children are unpacked as (frame_b, frame_a) —
    presumably the profiler orders children by time descending, so the
    longer long_function_b comes first; confirm against pyinstrument docs.
    """
    profiler = Profiler()
    profiler.start()
    long_function_a()
    long_function_b()
    profiler.stop()
    print(profiler.output_text())
    frame = profiler.first_interesting_frame()
    assert frame.function == 'test_two_functions'
    assert len(frame.children) == 2
    frame_b, frame_a = frame.children
    assert frame_a.function == 'long_function_a'
    assert frame_b.function == 'long_function_b'
    # Sampled times: allow a tolerance band around the nominal sleeps.
    assert 0.2 < frame_a.time() < 0.3
    assert 0.45 < frame_b.time() < 0.55
def test_context_manager():
    """Profiler used as a context manager should capture the enclosed calls."""
    with Profiler() as profiler:
        long_function_a()
        long_function_b()
    frame = profiler.first_interesting_frame()
    assert frame.function == 'test_context_manager'
    assert len(frame.children) == 2
|
import re
import threading
import BasicLogger
import numpy as np
import Configs
from Searcher import Search
def get_pattern(c, matrix):
    """Build a word-boundary regex from the matrix cells at coordinates *c*.

    Unknown cells — space (ord 32) or '$' (ord 36) — become a single-letter
    wildcard; known cells contribute their literal character.
    """
    parts = [r'\b']
    for row, col in c:
        cell = matrix[row][col]
        if cell in (' ', '$'):
            parts.append(r'[a-zA-Z]')
        else:
            parts.append(cell)
    parts.append(r'\b')
    return ''.join(parts)
def compare_matrix(size, a, b):
    """Return True when the top-left size x size regions of a and b match
    element-wise."""
    return all(
        a[i][j] == b[i][j]
        for i in range(size)
        for j in range(size)
    )
class Evaluator:
    """Recursive solver for a 5x5 crossword puzzle.

    Candidate words for each across/down slot come from caller-supplied
    hints and/or internet-search worker threads; try_across/try_down then
    fill the grid recursively, keeping the best-covered candidate grids.

    In the grid, '$' marks a cell words are not written into and space
    marks an unsolved cell; both count as wildcards when building regexes.

    NOTE(review): the attributes below are CLASS attributes shared by all
    instances — state leaks between Evaluator objects; acceptable only
    when a single puzzle is solved per process.
    """
    across_freq = [dict() for x in range(5)]  # per-row {word: frequency}
    down_freq = [dict() for x in range(5)]    # per-column {word: frequency}
    feasible_matrices = []                    # best grids found so far
    max_sum = 0                               # letters filled in the best grids
    answer_limit = 2                          # stop once this many answers collected
    def __init__(self, puzzle):
        """Copy the puzzle's character-code grid into a char matrix."""
        self.puzzle = puzzle
        self.matrix = np.array([['$', '$', '$', '$', '$'],
                                ['$', '$', '$', '$', '$'],
                                ['$', '$', '$', '$', '$'],
                                ['$', '$', '$', '$', '$'],
                                ['$', '$', '$', '$', '$']])
        for i in range(5):
            for j in range(5):
                self.matrix[i][j] = chr(self.puzzle.puzzleMatrix[i][j])
    def search(self, across_hints=None, down_hints=None):
        """Collect candidate words for every slot.

        Hints (per-slot {word: frequency} dicts, or None) take precedence;
        slots without a hint are farmed out to basic_search_thread workers
        when the 'internet' method is enabled. Results are sorted by
        frequency, highest first.

        The original mutable-default argument lists were replaced with
        None sentinels; behaviour is unchanged since the defaults were
        never mutated.
        """
        if across_hints is None:
            across_hints = [None for x in range(5)]
        if down_hints is None:
            down_hints = [None for x in range(5)]
        print(across_hints, "\n---", down_hints)
        for i in range(5):
            if across_hints[i] is not None:
                self.across_freq[i] = across_hints[i]
                if len(self.across_freq[i]) == 1:
                    c_arr = self.puzzle.get_coordinates(True, i)
                    for freq in self.across_freq[i]:
                        # Put word to row (only the single candidate: note the break)
                        for char_index in range(len(freq)):
                            if self.matrix[c_arr[char_index][0]][c_arr[char_index][1]] != '$':
                                self.matrix[c_arr[char_index][0]][c_arr[char_index][1]] = freq[char_index]
                        break
            if down_hints[i] is not None:
                self.down_freq[i] = down_hints[i]
                if len(self.down_freq[i]) == 1:
                    c_arr = self.puzzle.get_coordinates(False, i)
                    for freq in self.down_freq[i]:
                        # Put word to column (only the single candidate)
                        for char_index in range(len(freq)):
                            if self.matrix[c_arr[char_index][0]][c_arr[char_index][1]] != '$':
                                self.matrix[c_arr[char_index][0]][c_arr[char_index][1]] = freq[char_index]
                        break
        threads = []
        for m in range(5):
            if Configs.get_setting('METHOD', 'internet') == '1' and across_hints[m] is None:
                t = threading.Thread(target=self.basic_search_thread, args=(True, m,))
                t.start()
                threads.append(t)
            elif Configs.get_setting('METHOD', 'hint') == '1':
                self.across_freq[m] = across_hints[m]
        for m in range(5):
            if Configs.get_setting('METHOD', 'internet') == '1' and down_hints[m] is None:
                t = threading.Thread(target=self.basic_search_thread, args=(False, m,))
                t.start()
                threads.append(t)
            elif Configs.get_setting('METHOD', 'hint') == '1':
                self.down_freq[m] = down_hints[m]
        for t in threads:
            t.join()
        #################
        # SORT candidate dicts by frequency, highest first.
        for i in range(5):
            temp_freq = dict()
            from collections import OrderedDict
            for key_i, value_i in OrderedDict(
                    sorted(self.across_freq[i].items(), key=lambda t: t[1], reverse=True)).items():
                temp_freq[key_i] = value_i
            self.across_freq[i] = temp_freq
            temp_freq = dict()
            from collections import OrderedDict
            for key_i, value_i in OrderedDict(
                    sorted(self.down_freq[i].items(), key=lambda t: t[1], reverse=True)).items():
                temp_freq[key_i] = value_i
            self.down_freq[i] = temp_freq
        if Configs.get_setting('DEBUG', 'print_freq') == '1':
            BasicLogger.log("Threads finished")
            print("Threads finished")
    def try_freq(self):
        """Write single-candidate slots into the grid, then start the
        recursive fill from the first slot whose clue contains '_'
        (fill-in-the-blank), falling back to across slot 0."""
        for i in range(5):
            if len(self.across_freq[i]) == 1:
                c_arr = self.puzzle.get_coordinates(True, i)
                for freq in self.across_freq[i]:
                    # Put word to row
                    for char_index in range(len(freq)):
                        if self.matrix[c_arr[char_index][0]][c_arr[char_index][1]] != '$':
                            self.matrix[c_arr[char_index][0]][c_arr[char_index][1]] = freq[char_index]
                    break
            if len(self.down_freq[i]) == 1:
                c_arr = self.puzzle.get_coordinates(False, i)
                for freq in self.down_freq[i]:
                    # Put word to column
                    for char_index in range(len(freq)):
                        if self.matrix[c_arr[char_index][0]][c_arr[char_index][1]] != '$':
                            self.matrix[c_arr[char_index][0]][c_arr[char_index][1]] = freq[char_index]
                    break
        temp_sum = 0  # (computed but unused — kept for parity with the original)
        for x in range(5):
            for y in range(5):
                if ord(self.matrix[x][y]) != 32 and ord(self.matrix[x][y]) != 36:
                    temp_sum += 1
        for i in range(5):
            if '_' in self.puzzle.acrossQuestions[i]['clue']:
                return self.try_across(self.matrix, i)
            # FIX: this previously re-tested acrossQuestions (a duplicate of
            # the condition above), which made the down branch dead code.
            if '_' in self.puzzle.downQuestions[i]['clue']:
                return self.try_down(self.matrix, i)
        return self.try_across(self.matrix, 0)
    def try_across(self, matrix, i):
        """Try every candidate for across slot *i* that matches the current
        grid, recursing into crossing slots. Returns True once more than
        answer_limit complete grids have been collected."""
        c_arr = self.puzzle.get_coordinates(True, i)
        # Create pattern from the current row contents.
        temp_str = r'\b'
        for c in c_arr:
            if ord(matrix[c[0]][c[1]]) == 32 or ord(matrix[c[0]][c[1]]) == 36:
                temp_str += r'[a-zA-Z]'
            else:
                temp_str += "[" + matrix[c[0]][c[1]] + "]"
        temp_str += r'\b'
        pattern = re.compile(temp_str)
        # Try all words eventually.
        for freq_array in self.across_freq[i]:
            if pattern.match(freq_array): # Check pattern
                if Configs.get_setting('TRY', 'try_show_words') == '1':
                    BasicLogger.log(str(i) + " across: freq_array " + freq_array)
                    print(str(i) + " across: freq_array " + freq_array)
                # Put word to row
                for char_index in range(len(freq_array)):
                    if matrix[c_arr[char_index][0]][c_arr[char_index][1]] != '$':
                        matrix[c_arr[char_index][0]][c_arr[char_index][1]] = freq_array[char_index]
                # Count filled letters to rank this partial solution.
                temp_sum = 0
                for x in range(5):
                    for y in range(5):
                        if ord(matrix[x][y]) != 32 and ord(matrix[x][y]) != 36:
                            temp_sum += 1
                if self.max_sum < temp_sum:
                    self.max_sum = temp_sum
                    self.feasible_matrices = []
                if self.max_sum == temp_sum:
                    # Keep the grid only if it is not a duplicate.
                    ck = True
                    for a in self.feasible_matrices:
                        if compare_matrix(5, a, matrix):
                            ck = False
                            break
                    if ck:
                        self.feasible_matrices.append(matrix)
                        if Configs.get_setting('TRY', 'try_show_feasible_matrix') == '1':
                            BasicLogger.log(str(i) + " across feasible " + str(temp_sum) + ":\n" + str(matrix))
                            print(i, "across feasible", temp_sum, ":\n", matrix)
                if temp_sum == self.puzzle.char_count:
                    if self.answer_limit < len(self.feasible_matrices):
                        return True
                if Configs.get_setting('TRY', 'try_show_matrices') == '1':
                    BasicLogger.log(str(i) + " across matrix:\n" + str(matrix))
                    print(i, "across matrix:\n", matrix)
                # Continue recursively, preferring columns whose down clue
                # is a fill-in-the-blank.
                sorted_c_arr = []
                for c in c_arr:
                    index = 0
                    for j in range(len(sorted_c_arr)):
                        if '_' in self.puzzle.downQuestions[c[1]]['clue']:
                            index = j
                            break
                    sorted_c_arr.insert(index, c)
                check = True
                for j in sorted_c_arr:
                    for k in range(5):
                        if ord(matrix[k][j[1]]) == 32:
                            check = False
                            if self.try_down(matrix.copy(), j[1]):
                                return True
                            elif self.try_across(matrix.copy(), k):
                                return True
                if check:
                    if self.answer_limit < len(self.feasible_matrices):
                        return True
        return False
    def try_down(self, matrix, j):
        """Down-slot counterpart of try_across: try candidates for column
        *j* and recurse into crossing across slots."""
        c_arr = self.puzzle.get_coordinates(False, j)
        # Create pattern from the current column contents.
        temp_str = r'\b'
        for c in c_arr:
            if ord(matrix[c[0]][j]) == 32 or ord(matrix[c[0]][j]) == 36:
                temp_str += r'[a-zA-Z]'
            else:
                temp_str += "[" + matrix[c[0]][c[1]] + "]"
        temp_str += r'\b'
        pattern = re.compile(temp_str)
        # Try all words eventually.
        for freq_array in self.down_freq[j]:
            if pattern.match(freq_array): # Check pattern
                if Configs.get_setting('TRY', 'try_show_words') == '1':
                    BasicLogger.log(str(j) + " down: freq_array " + freq_array)
                    print(j, "down: freq_array", freq_array)
                # Put word to col
                for char_index in range(len(freq_array)):
                    if matrix[c_arr[char_index][0]][c_arr[char_index][1]] != '$':
                        matrix[c_arr[char_index][0]][c_arr[char_index][1]] = freq_array[char_index]
                temp_sum = 0
                for x in range(5):
                    for y in range(5):
                        if ord(matrix[x][y]) != 32 and ord(matrix[x][y]) != 36:
                            temp_sum += 1
                if self.max_sum < temp_sum:
                    self.max_sum = temp_sum
                    self.feasible_matrices = []
                if self.max_sum == temp_sum:
                    ck = True
                    for a in self.feasible_matrices:
                        if compare_matrix(5, a, matrix):
                            ck = False
                            break
                    if ck:
                        self.feasible_matrices.append(matrix)
                        if Configs.get_setting('TRY', 'try_show_feasible_matrix') == '1':
                            BasicLogger.log(str(j) + " down feasible " + str(temp_sum) + ":\n" + str(matrix))
                            print(j, "down feasible", temp_sum, ":\n", matrix)
                if temp_sum == self.puzzle.char_count:
                    if self.answer_limit < len(self.feasible_matrices):
                        return True
                if Configs.get_setting('TRY', 'try_show_matrices') == '1':
                    BasicLogger.log(str(j) + " down: matrix\n" + str(matrix))
                    print(j, "down: matrix\n", matrix)
                # Continue recursively, preferring rows whose across clue
                # is a fill-in-the-blank.
                sorted_c_arr = []
                index = 0
                for c in c_arr:
                    # NOTE(review): the assignment below clobbers the
                    # parameter j; the slot index shown by later debug logs
                    # in this loop becomes wrong, but the fill logic does
                    # not read j again. Kept as-is to preserve behaviour.
                    j = 0
                    for j in range(len(sorted_c_arr)):
                        if '_' in self.puzzle.acrossQuestions[c[0]]['clue']:
                            index = j
                            break
                    sorted_c_arr.insert(index, c)
                check = True
                for i in sorted_c_arr:
                    for k in range(5):
                        if ord(matrix[i[0]][k]) == 32:
                            check = False
                            if self.try_across(matrix.copy(), i[0]):
                                return True
                            elif self.try_down(matrix.copy(), k):
                                return True
                if check:
                    if self.answer_limit < len(self.feasible_matrices):
                        return True
        return False
    def basic_search_thread(self, is_across, index):
        """Worker thread: build a regex from the clue and current grid, run
        an internet search, and merge above-average word frequencies into
        across_freq/down_freq for this slot."""
        if Configs.get_setting('DEBUG', 'print_freq') == '1':
            BasicLogger.log("Thread started " + str(is_across) + " " + str(index))
            print("Thread started", is_across, index)
        if is_across:
            clue_text = self.puzzle.acrossQuestions[index]["clue"].strip()
        else:
            clue_text = self.puzzle.downQuestions[index]["clue"].strip()
        if '_' in clue_text:
            # Fill-in-the-blank clue: anchor the unknown word between the
            # text before and after the blank.
            space_index = clue_text.index('_')
            clue_text = re.sub('"', '', clue_text)
            clue_text = re.sub('_', '', clue_text)
            # Raw string: the original non-raw literal relied on '\(' being
            # passed through as an unrecognised escape. Same regex.
            temp = re.sub(r'\(.*?\)', '', clue_text)
            # clue_text = re.sub(r'\W+', ' ', clue_text)
            c_arr = self.puzzle.get_coordinates(is_across, index)
            pattern = r''
            for c in c_arr:
                if self.matrix[c[0]][c[1]] == chr(32) \
                        or self.matrix[c[0]][c[1]] == chr(36):
                    pattern += r'[a-zA-Z]'
                else:
                    pattern += "[" + self.matrix[c[0]][c[1]] + "]"
            header = temp[:space_index].strip()
            if 0 < len(header):
                header = header + " "
            footer = temp[space_index:].strip()
            if 0 < len(footer):
                footer = " " + footer
            pattern = r'\b' + header + "(" + pattern + ")" + footer + r'\b'
        else:
            c_arr = self.puzzle.get_coordinates(is_across, index)
            pattern = r'\b('
            for c in c_arr:
                if self.matrix[c[0]][c[1]] == chr(32) \
                        or self.matrix[c[0]][c[1]] == chr(36):
                    pattern += r'[a-zA-Z]'
                else:
                    pattern += "[" + self.matrix[c[0]][c[1]] + "]"
            pattern += r')[\.]?\b'
        # pattern = r'\b('
        # for i in range(char_count):
        #     pattern += r'[a-zA-Z]'
        # pattern += r')\b'
        s = Search(clue_text, pattern=pattern)
        for urls in s.googleSearch():
            try:
                s.searchPage(urls)
            except Exception as ssl:
                print(ssl)
        from collections import OrderedDict
        # Average frequency of words seen more than once; count starts at 1
        # to avoid division by zero when nothing repeats.
        count = 1
        sumfreq = 0
        for key in s.word_freq:
            if 1 < s.word_freq[key]:
                count += 1
                sumfreq += s.word_freq[key]
        avg = sumfreq / count
        if is_across:
            # Sort by frequency, keep above-average repeated words only.
            for key_i, value_i in OrderedDict(
                    sorted(s.word_freq.items(), key=lambda t: t[1], reverse=True)).items():
                if 1 < value_i and avg <= value_i:
                    self.across_freq[index][key_i] = value_i
            # self.across_freq[index].update(temp_freq)
        else:
            # Sort by frequency, keep above-average repeated words only.
            for key_i, value_i in OrderedDict(
                    sorted(s.word_freq.items(), key=lambda t: t[1], reverse=True)).items():
                if 1 < value_i and avg <= value_i:
                    self.down_freq[index][key_i] = value_i
            # self.down_freq[index].update(temp_freq)
        if Configs.get_setting('DEBUG', 'print_freq') == '1':
            if is_across:
                temp_str = str(avg) + "across " + str(self.puzzle.acrossQuestions[index]['index']) + "result: \n" + str(
                    self.across_freq[index])
            else:
                temp_str = str(avg) + "down " + str(self.puzzle.downQuestions[index]['index']) + "result: \n" + str(
                    self.down_freq[index])
            BasicLogger.log(str(temp_str))
            print(str(temp_str))
|
import pickle
import numpy as np
import PIL.Image
for i in range (5,13):
f = open('/run/media/rvolpi/data/renvision_data/P38_06_03_14_ret1/experiments/t0_modSmall_single_pca_cond_' + str(i) +'/crbm_tmp.pkl')
data = pickle.load(f)
W = data.params[0].get_value(borrow=True)
A = data.params[1].get_value(borrow=True)
B = data.params[2].get_value(borrow=True)
hbias = data.params[3].get_value(borrow=True)
vbias = data.params[4].get_value(borrow=True)
##data, seqlen, data_mean, data_std = load_data('motion.mat')
##data = data.get_value(borrow=True)
delay = data.delay
data = data.d
data_idx = np.array(range(delay,data.shape[0])).T
hist_idx = np.array([data_idx - n for n in xrange(1, delay + 1)]).T
history = data[hist_idx].reshape(data[hist_idx].shape[0],data[hist_idx].shape[1]*data[hist_idx].shape[2])
data = data[data_idx]
from scipy.stats import logistic
hiddenVals = logistic.cdf(np.dot(data,W) + np.dot(history,B) + hbias)
hiddenActivations = hiddenVals > 0.5
image = PIL.Image.fromarray(np.uint8(hiddenActivations[1:1000000,:]*255.))
image.save('/home/rvolpi/Scrivania/renvision/hiddenActivations_' + str(i) +'.png')
# Saving the objects:
with open('crbm_data_and_params.pickle', 'w') as f:
pickle.dump([data, hiddenVals, hiddenActivations], f)
##
### Getting back the objects:
##with open('objs.pickle') as f:
## obj0, obj1, obj2 = pickle.load(f)
print 'Finished.'
|
import time
a,b,c = 4,5,6
# If all three values are equal: print 'False False False'
# If any two of the three values are equal: print False
# If any one of them equals zero: print True
bosh_vaqti = time.time()
# Timing experiment: run the comparison 10,000 times and report elapsed time.
for i in range(10000):
    if a > 0:
        if a == b == c :
            print('False False False')
        elif a==b or b==c or a==c:
            print(False)
        # NOTE(review): with a > 0 already established, this branch can only
        # trigger through b or c being zero.
        elif a==0 or b==0 or c==0:
            print(True)
        else:
            print(True,True,True)
    else:
        if a==0:
            print('a soni nolga teng')  # "a is zero"
        else:
            print('a soni manfiy')  # "a is negative"
oxir_vaqti = time.time()
print(oxir_vaqti - bosh_vaqti)
from .models import Post, Reply
import datetime
'''
This function can be used to insert a main post in the forum.
It takes in three parameters, user, title and content.
user is an entry to a user (can be either a pet owner, a vet or a clinic) of the app.
No check of the user inside the function is provided so the validity of user has to be checked in advance.
title is the title of the post. Each post must have a title of length ranging from 1 to 128 characters.
If the title is too long or too short, a ValueError would be raised.
content is the post content, which is a string not longer than 4096 characters.
If the content is too long, a ValueError would be raised.
'''
def insert_post(user, title, content):
    """Create a forum post for *user* (identified by phone number).

    Raises ValueError when the title is empty or longer than 128 characters,
    or when the content exceeds 4096 characters. All validation happens
    before any database access.
    """
    now = datetime.datetime.now()
    if not title:
        raise ValueError('Title cannot be empty')
    if len(title) > 128:
        raise ValueError('Title is too long')
    if len(content) > 4096:
        raise ValueError('The content is too long')
    Post.objects.create(user = user.phone_number, title = title, time = now, content = content)
'''
This function takes in a title of the post and returns a list of Post objects having the exactly same title.
If there is no such post, a false would be returned.
If multiple posts are found, they would be sorted in the order of posting time. (The most recent would appear first)
'''
def query_post_title(title):
    """Return every post whose title matches exactly, newest first,
    or False when there are none."""
    posts = list(Post.objects.filter(title = title).order_by('-time'))
    return posts if posts else False
'''
This function takes in a user object (can be of either type) and returns a list of Post objects posted by him or her.
If there is no such post, a false would be returned.
If multiple posts are found, they would be sorted in the order of posting time. (The most recent would appear first)
'''
def query_post_user(user):
    """Return every post by *user* (matched by phone number), newest first,
    or False when there are none."""
    posts = list(Post.objects.filter(user = user.phone_number).order_by('-time'))
    return posts if posts else False
'''
This function is used to support the upvote or like function.
Two parameters, post and user are taken in, which are entries to Post and User objects respectively.
If this particular user has already upvoted this post, a PermissionError would be raised.
Nothing would be returned if the process is done successfully.
'''
def like_post(post, user):
    """Record an upvote on *post*; raise PermissionError if this user has
    already liked it."""
    duplicate = post.likes.filter(pk = user.phone_number.pk).count() > 0
    if duplicate:
        raise PermissionError('Already liked')
    post.likes.add(user.phone_number)
    post.save()
'''
This function is used to support the report function.
Two parameters, post and user are taken in, which are entries to Post and User objects respectively.
If this particular user has already reported this post, a PermissionError would be raised.
If the total number of reports of the particular post reaches 5 or above, including the just created one,
the function would return a tuple of the current time and the entry to the post in order to notify the administrator.
A false would be returned otherwise.
'''
def report_post(post, user):
    """Record a report on *post*.

    Raises PermissionError for a duplicate report. Returns (now, post)
    once the total report count reaches 5 — so admins can be notified —
    otherwise False.
    """
    reported_before = post.reports.filter(pk = user.phone_number.pk).count() > 0
    if reported_before:
        raise PermissionError('Already reported')
    post.reports.add(user.phone_number)
    post.save()
    total = post.reports.all().count()
    return (datetime.datetime.now(), post) if total >= 5 else False
'''
This function can be used to insert a reply of an existing post or reply in the forum.
It takes in four parameters, user, target, thread, and content.
user is an entry to a user (can be either a pet owner, a vet or a clinic) of the app.
No check of the user inside the function is provided so the validity of user has to be checked in advance.
target is a boolean indicating whether the reply is made to a post or a reply.
If target is True, the reply is made to a post, otherwise, it is made to a reply.
thread is an entry to an existing post or a reply.
No check of the post or reply inside the function is provided so the validity of thread has to be checked in advance.
content is the reply content, which is a string not longer than 2048 characters.
If the content is too long, a ValueError would be raised.
'''
def insert_reply(user, target, thread, content):
    """Create a reply to a post (target=True) or to another reply (target=False).

    Raises ValueError when the content exceeds 2048 characters; validation
    runs before any database access.
    """
    if len(content) > 2048:
        raise ValueError('The reply is too long')
    now = datetime.datetime.now()
    if target:
        Reply.objects.create(user = user.phone_number, thread = thread, time = now, content = content)
    else:
        Reply.objects.create(user = user.phone_number, dependency = thread, time = now, content = content)
'''
This function takes two parameters and returns tuple of a list of reply objects and a list of booleans.
The first parameter is an entry of the queried thread and the second one is a boolean, indicating whether the thread is
a post or a reply.
If there is no such reply, a false would be returned.
If multiple replies are found, they would be sorted in the order of posting time. (The most recent would appear first)
The second returned list should have the same number of elements as the first one. The respective member indicates
whether the reply has replies.
'''
def query_reply(thread, target):
    """Return (replies newest-first, has-children flags) for *thread*.

    target=True queries replies to a post, target=False replies to a reply.
    Returns False when there are no replies. The second list marks, per
    reply, whether it has replies of its own.
    """
    if target:
        found = Reply.objects.filter(thread = thread).order_by('-time')
    else:
        found = Reply.objects.filter(dependency = thread).order_by('-time')
    replies = list(found)
    if not replies:
        return False
    has_children = [len(Reply.objects.filter(dependency = entry)) > 0 for entry in replies]
    return replies, has_children
'''
This function is used to support the upvote or like function.
Two parameters, reply and user are taken in, which are entries to Reply and User objects respectively.
If this particular user has already upvoted this reply, a PermissionError would be raised.
Nothing would be returned if the process is done successfully.
'''
def like_reply(reply, user):
    """Record an upvote on *reply*; raise PermissionError if this user has
    already liked it."""
    duplicate = reply.likes.filter(pk = user.phone_number.pk).count() > 0
    if duplicate:
        raise PermissionError('Already liked')
    reply.likes.add(user.phone_number)
    reply.save()
'''
This function is used to support the report function.
Two parameters, reply and user are taken in, which are entries to Reply and User objects respectively.
If this particular user has already reported this reply, a PermissionError would be raised.
If the total number of reports of the particular reply reaches 5 or above, including the just created one,
the function would return a tuple of the current time and the entry to the reply in order to notify the administrator.
A false would be returned otherwise.
'''
def report_reply(reply, user):
    """Record a report by *user* against *reply*.

    Raises PermissionError on a duplicate report.  Once the reply has
    accumulated 5 or more reports, returns (current time, reply) so an
    administrator can be notified; otherwise returns False.
    """
    if reply.reports.filter(pk = user.pk).exists():
        raise PermissionError('Already reported')
    reply.reports.add(user)
    reply.save()
    if reply.reports.all().count() < 5:
        return False
    return datetime.datetime.now(), reply
'''
This function is used to delete a particular reply and all the replies made towards it.
Two parameters, reply and user are taken in, which are entries to Reply and User objects respectively.
Notice that the user could also be integer zero, which indicates the deletion is made by the system or administrators.
If the user is neither zero nor the entry of the user who posted the reply, a PermissionError would be raised.
The function works in a recursive manner.
'''
def delete_reply(reply, user):
    """Recursively delete *reply* and every reply depending on it.

    user == 0 marks a system/administrator operation, which bypasses the
    ownership check; otherwise only the author may delete.
    """
    is_system = (user == 0)
    if not is_system and user.phone_number != reply.user:
        raise PermissionError('Action defied')
    children = list(Reply.objects.filter(dependency = reply))
    for child in children:
        delete_reply(child, 0)
    reply.delete()
'''
This function is used to delete a particular post.
Two parameters, post and user are taken in, which are entries to Post and User objects respectively.
Notice that the user could also be integer zero, which indicates the deletion is made by the system or administrators.
If the user is neither zero nor the entry of the user who posted the reply, a PermissionError would be raised.
The function would also delete all the replies to the post if it works properly.
'''
def delete_post(post, user):
    """Delete *post* together with all replies attached to it.

    user == 0 marks a system/administrator operation, which bypasses the
    ownership check; otherwise only the author may delete.
    """
    is_system = (user == 0)
    if not is_system and user.phone_number != post.user:
        raise PermissionError('Action defied')
    for child in list(Reply.objects.filter(thread = post)):
        delete_reply(child, 0)
    post.delete()
|
from collections import Counter

# HackerRank "Collections.Counter": first line is the shoe count (unused
# beyond the input format), second line the available sizes, third the number
# of customers; each customer line is "<size> <price>".
X = int(input())
shoe_sizes = Counter(map(int, input().split()))
N = int(input())
collected_money = 0
for _ in range(N):
    desired_shoe, price = map(int, input().split())
    # A Counter returns 0 for missing keys, so the previous explicit
    # `in shoe_sizes.keys()` membership test was redundant.
    if shoe_sizes[desired_shoe] > 0:
        collected_money += price
        shoe_sizes[desired_shoe] -= 1
print(collected_money)
|
# -*- coding: utf-8 -*-
import urllib
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
IMG_TAG = """\
<img alt="{alt}" src="http://maps.googleapis.com/maps/api/staticmap?{query}">\
"""
IFRAME_TAG = """\
<iframe
width="{width}"
height="{height}"
frameborder="0" style="border:0"
src="https://www.google.com/maps/embed/v1/{mode}?{query}">
</iframe>\
"""
class googlemaps(nodes.General, nodes.Element):
    """Docutils placeholder node; rendered to HTML by visit_googlemaps_node."""
    pass
class GoogleMapsDirective(Directive):
    """Directive for embedding google-maps.

    The optional argument becomes the image alt text (static-map mode) or the
    search query ``q`` (embed mode); every recognised option is copied onto
    the googlemaps node untouched.
    """
    has_content = False
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = True
    # Every supported option is passed through verbatim.
    option_spec = {
        name: directives.unchanged
        for name in (
            "width", "height", "key", "scale", "format", "markers", "path",
            "visible", "style", "sensor", "q", "mode", "origin",
            "destination", "avoid", "center", "zoom", "maptype",
            "language", "region",
        )
    }
    def run(self):
        node = googlemaps()
        if self.arguments:
            # In embed mode the argument is the search query, otherwise it
            # is used as the <img> alt text.
            arg_key = "q" if self.options.get("mode") else "alt"
            node[arg_key] = " ".join(self.arguments)
        for opt_name, opt_value in self.options.items():
            node[opt_name] = opt_value
        return [node]
def make_visit_googlemaps_node(app):
    """Build the HTML visitor for googlemaps nodes, closed over *app* config.

    NOTE(review): this module uses ``urllib.urlencode`` (and imports
    ``sphinx.util.compat``), i.e. it targets Python 2-era Sphinx — confirm
    before running under Python 3.
    """
    def visit_googlemaps_node(self, node):
        options = {key: node[key] for key, _unused in node.attlist()}
        width = options.pop("width", 600)
        height = options.pop("height", 450)
        mode = options.pop("mode", None)
        api_key = options.pop("key", app.config.google_api_key)
        if api_key not in (None, ""):
            options["key"] = api_key
        if mode is not None:
            # Embed API: render an <iframe>.
            html = IFRAME_TAG.format(width=width, height=height, mode=mode,
                                     query=urllib.urlencode(options))
        else:
            # Static Maps API: render a plain <img>.
            options["size"] = "{0}x{1}".format(width, height)
            options["sensor"] = "false"
            html = IMG_TAG.format(alt=options.pop("alt", ""),
                                  query=urllib.urlencode(options))
        self.body.append(html)
    return visit_googlemaps_node
def depart_googlemaps_node(self, node):
    """No closing markup needed; the visitor emits a complete tag."""
    pass
def setup(app):
    """Sphinx entry point: register config, node and the google-maps directive."""
    app.add_config_value("google_api_key", None, "env")
    html_handlers = (make_visit_googlemaps_node(app), depart_googlemaps_node)
    app.add_node(googlemaps, html=html_handlers)
    app.add_directive("google-maps", GoogleMapsDirective)
|
# Generated by Django 3.0.8 on 2020-11-28 11:54
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: renames Queries.reply to Queries.replay."""
    # NOTE(review): 'replay' looks like a typo for 'reply' — confirm intent.
    # Do not edit this migration once applied; a correction needs a new one.
    dependencies = [
        ('LandingPage', '0006_auto_20201120_1645'),
    ]
    operations = [
        migrations.RenameField(
            model_name='queries',
            old_name='reply',
            new_name='replay',
        ),
    ]
|
import os
import sys
main_dir = os.path.split(os.getcwd())[0]
result_dir = main_dir + '/results'
sys.path.append(main_dir)
from data import fmri_data_cv as fmril
from data import fmri_data_cv_rh as fmrir
from data import meg_data_cv as meg
from model import procedure_function as fucs
from sklearn.externals import joblib
import scipy.stats as stats
import numpy as np
import time
import ot
print(time.strftime('%Y-%m-%d %A %X %Z', time.localtime(time.time())))
def main():
    """Run a Sinkhorn L1L2 optimal-transport domain adaptation sweep.

    Transports subject *source_id*'s data onto subject *target_id*'s for one
    experiment (fMRI left/right hemisphere or MEG), over a grid of
    entropic (reg) and class (eta) regularisation strengths, saving every
    transported source matrix to <results>/<experiment>/different_xst.npy.
    CLI parsing is commented out; parameters are currently hard-coded.
    """
    main_dir = os.path.split(os.getcwd())[0]
    result_dir = main_dir + '/results'
    # args = sys.argv[1:]
    # experiment = args[0]
    # nor_method = args[1]
    # clf_method = args[2]
    # source_id = int(args[3])
    # target_id = int(args[4])
    # op_function = args[5] # 'lpl1' 'l1l2'
    metric_xst = 'null'
    experiment = 'fmril'
    nor_method = 'no'
    clf_method = 'logis'
    source_id = 1
    target_id = 2
    op_function = 'l1l2' # 'lpl1' 'l1l2'
    # Pick the dataset module for the chosen experiment.
    if experiment == 'fmril':
        source = fmril
    elif experiment == 'fmrir':
        source = fmrir
    else:
        source = meg
    result_dir = result_dir + '/{}'.format(experiment)
    y_target = source.y_target
    subjects = np.array(source.subjects)
    # MEG exposes power features under *_pow; fMRI uses the plain arrays.
    x_data = source.x_data_pow if experiment == 'meg' else source.x_data
    x_indi = source.x_indi_pow if experiment == 'meg' else source.x_indi
    x = x_indi.copy() if nor_method == 'indi' else x_data.copy()
    indices_sub = fucs.split_subjects(subjects)
    nb_subs = len(indices_sub)
    i = source_id
    j = target_id
    # Source (xs, ys) and target (xt, yt) subject slices.
    xs = x[indices_sub[i]]
    ys = y_target[indices_sub[i]]
    xt = x[indices_sub[j]]
    # np.savez(result_dir +'/1s_2t_{}_{}_{}.npz'.format(experiment,nor_method,clf_method),xs=xs,xt=xt)
    yt = y_target[indices_sub[j]]
    note = '{}s_{}t_{}_{}_{}_sinkhorn_{}_{}'.format(i, j, experiment,
                                                    nor_method, clf_method, op_function,metric_xst)
    print(note)
    # NOTE(review): these two assignments are immediately shadowed by the
    # grid-search loop variables below.
    reg = 0.1
    eta = 0.1
    print('reg,eta',reg,eta)
    xsts = []
    gammas = []
    # Grid search over entropic (reg) and class-based (eta) regularisation.
    for reg in [1e-2, 1e-1, 1, 10]:
        for eta in [1e-3,1e-2, 1e-1, 1,10]:
            transport = ot.da.SinkhornL1l2Transport
            trans_fuc = transport(reg_e=reg, reg_cl=eta, norm='max')
            trans_fuc.fit(Xs=xs, Xt=xt, ys=ys)
            xst = trans_fuc.transform(Xs=xs)
            xsts.append(xst)
            # Saved inside the loop: the file is rewritten after every
            # setting, acting as a checkpoint of all results so far.
            np.save(result_dir +'/different_xst.npy',xsts)
    # print('xst',xst,np.max(xst), np.min(xst),np.mean(xst))
    # print('xs',xs,np.max(xs), np.min(xs),np.mean(xs))
    # print('xt',xt,np.max(xt), np.min(xt),np.mean(xt))
    # xst_accs = fucs.h_divergence(xst, xt, clf_method=clf_method)
    # print(xst_accs)
    # print(np.mean(xst_accs))
    # y_xs = np.ones(xs.shape[0], dtype=int)
    # y_xt = np.zeros(xt.shape[0], dtype=int)
    # x = np.vstack((xs, xt))
    # print(x.shape)
    # y = np.concatenate((y_xs, y_xt))
    # print(y.shape)
    # # xst has the same labels as xt
    # # if acc is low, it means that xst isn't similar to xt.
    # # the higher the acc is, the more similar the xst is to xt.
    # # xst with the highest acc should be chosen
    # xst_accs = fucs.get_accuracy(x, y, xst, y_xt, clf_method=clf_method)
    # print('************************************************************************')
    # xs_accs1 = fucs.get_accuracy(x, y, xs, y_xs, clf_method=clf_method)
    # print('************************************************************************')
    # xs_accs2 = fucs.get_accuracy(x, y, xs, y_xt, clf_method=clf_method)
    # print('************************************************************************')
    # xt_accs = fucs.get_accuracy(x, y, xt, y_xt, clf_method=clf_method)
    # print('************************************************************************')
    # print('xst_accs',np.mean(xst_accs),xst_accs)
    # print('xs_accs1', np.mean(xs_accs1),xs_accs1)
    # print('xs_accs2', np.mean(xs_accs2), xs_accs2)
    # print('xt_accs', np.mean(xt_accs),xt_accs)
    #
    #
    # x_base = x_indi.copy()
    # xs_base = x_base[indices_sub[i]]
    # ys_base = y_target[indices_sub[i]]
    # xt_base = x_base[indices_sub[j]]
    # yt_base = y_target[indices_sub[j]]
    # base_accs = fucs.get_accuracy(xs_base, ys_base, xt_base, yt_base, clf_method=clf_method)
    # print('base_accs',np.mean(base_accs), base_accs)
    # xst_accs = fucs.get_accuracy(xst, ys, xt, yt, clf_method=clf_method)
    # print('xst_accs', np.mean(xst_accs), xst_accs)
    # print('reg,eta', reg, eta)
if __name__ == '__main__':
    main()
    # Print the finish timestamp (the matching start timestamp is printed at import time).
    print(time.strftime('%Y-%m-%d %A %X %Z', time.localtime(time.time())))
|
import cv2 as cv
import sys
# Adaptive thresholding: divide the image into small regions and apply a
# different threshold value to each region (translated from Korean).
img_color = cv.imread("../sample/copy_paper.png", cv.IMREAD_COLOR)
if img_color is None:
    # Runtime message (Korean): "The image file could not be read."
    print("이미지 파일을 읽어올 수 없습니다.")
    sys.exit(1)
img_gray = cv.cvtColor(img_color, cv.COLOR_BGR2GRAY)
# blockSize=5 (5x5 neighbourhood mean), C=4 subtracted from that mean.
img_binary = cv.adaptiveThreshold(img_gray, 255, cv.ADAPTIVE_THRESH_MEAN_C,
    cv.THRESH_BINARY, 5,4)
# Stack grayscale input and binarized output vertically for comparison.
img_result = cv.vconcat([img_gray, img_binary])
cv.imshow('result', img_result)
cv.waitKey(0)
|
import logging
import argparse
from dynaconf import settings
from app.commands import available_commands
def main():
    """Parse --rds / --ec2 arguments and dispatch the selected command.

    Only the first action named after a flag is executed; when neither flag
    is given an error is logged.
    """
    commands = available_commands()
    parser = argparse.ArgumentParser(description=settings('application_description'))
    # Both services take the same argument shape, so register them in a loop.
    for service in ('rds', 'ec2'):
        parser.add_argument('--' + service,
                            help='Desired action',
                            nargs='+',
                            choices=commands[service].keys(),
                            required=False)
    args = parser.parse_args()
    if args.rds:
        commands['rds'][args.rds[0]]()
    elif args.ec2:
        commands['ec2'][args.ec2[0]]()
    else:
        logging.error("args not found")
if __name__ == "__main__":
    # Script entry point.
    main()
|
# Find the correlation between the annual salary and the length of the service period of a Lyft driver.
import pandas as pd
# Start writing code
# NOTE(review): `lyft_drivers` is supplied by the hosting platform (expected
# columns: start_date, end_date, yearly_salary) — not defined in this snippet.
# Drivers still employed (NaT end_date) are treated as employed until "now".
lyft_drivers.end_date.fillna(pd.Timestamp.now(), inplace=True)
# Service length in whole days.
lyft_drivers['service'] = (lyft_drivers.end_date - lyft_drivers.start_date)\
.dt.days
# Pearson correlation between service length and salary (scalar result).
lyft_drivers[['service', 'yearly_salary']].corr().values[0,1]
|
import socket
import numpy as np
import os
import sys
import threading
import time
import signal
import termios
import queue
import weakref
from datetime import datetime
import cv2
import uuid
from controllers import Controller
import traceback
# Socket server configuration
SERVER_IP = "0.0.0.0"
SERVER_PORT = 953
MAX_NUM_CONNECTIONS = 20
# image
IMAGE_HEIGHT = 480
IMAGE_WIDTH = 640
COLOR_PIXEL = 3 # RGB
IMAGE_SIZE = IMAGE_WIDTH * IMAGE_HEIGHT * COLOR_PIXEL
class StreamEngine(threading.Thread):
    """Stream Engine.

    Receives raw RGB frames from a single TCP client, forwards each decoded
    frame to the registered controllers, and optionally records frames to an
    AVI file.  Behaviour is a two-state machine (idle <-> run) driven by
    commands posted through post_command().
    """
    def __init__(self):
        threading.Thread.__init__(self)
        self._controllers = []                 # weakref proxies to controllers
        self._command_queue = queue.Queue(10)  # bounded; oldest dropped on overflow
        self.session_id = str(uuid.uuid4())    # tags recording file names
        self.recording = False
        self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
        self.record_dir = "../record_data/"
        self._running = True
    def __del__(self):
        # Unplug any registered controllers
        for c in self._controllers:
            try:
                c._set_engine(None)
            except ReferenceError:
                pass
    # ========================================================================================
    # Interface used by associated controllers
    def register_controller(self, controller):
        """Attach *controller* and point it back at this engine."""
        # Use weakref proxies between engine and controllers to avoid circular reference leading to undead objects
        p = weakref.proxy(controller)
        if p not in self._controllers:
            self._controllers.append(p)
        controller._set_engine(self)
    def deregister_controller(self, controller):
        """Detach *controller* if it is currently registered."""
        p = weakref.proxy(controller)
        if p in self._controllers:
            self._controllers.remove(p)
    def post_command(self, command, args=None):
        """Queue a command for the state machine, dropping the oldest when full."""
        # If queue full discard oldest command
        if self._command_queue.full():
            self._command_queue.get(block=False)
        self._command_queue.put({'cmd': command, 'args': args})
    def generateRecord(self):
        """Open a new timestamped cv2.VideoWriter in self.record_dir."""
        print("start generateRecord")
        now = datetime.now() # current date and time
        date_time = now.strftime("%d_%b_%H_%M_%S")
        # BUGFIX: previously printed the `datetime` class object instead of
        # the formatted timestamp string.
        print("date_time", date_time)
        # Define the codec and create VideoWriter object
        record_name_ = self.record_dir + self.session_id + "_video_" + date_time + ".avi"
        print("video name {}".format(record_name_))
        self.out = cv2.VideoWriter(record_name_, self.fourcc, 20.0, (640, 480)) # 20 frame/per second
        print("generated video")
    # ========================================================================================
    # In each of the following notify methods iterate over copy of <_controllers> so in case
    # of encountering expired reference can remove it without invalidating iterator.
    def _notify_controllers_of_start(self):
        for c in self._controllers[:]:
            try:
                c.notify_start_controller_threads()
            except ReferenceError:
                # Shouldn't happen as controllers deregister themselves upon destruction
                self._controllers.remove(c)
    def _notify_controllers_of_update_framedata(self, image):
        for c in self._controllers[:]:
            try:
                c.notify_frame_data(image)
            except ReferenceError:
                # Shouldn't happen as controllers deregister themselves upon destruction
                self._controllers.remove(c)
    def _notify_controllers_of_shutdown(self):
        for c in self._controllers[:]:
            try:
                c.notify_shutdown()
            except ReferenceError:
                # Shouldn't happen as controllers deregister themselves upon destruction
                self._controllers.remove(c)
    # ==================================================================================================================
    # State machine state functions
    def state_func__idle(self):
        """ Ready for streaming images, but not running capture
            Checking input to start from either terminal(keyboard) or gui
        """
        print("stream engine state -> idle")
        while True:
            if not self._running:
                return None
            while not self._command_queue.empty():
                cmd_obj = self._command_queue.get(block=False)
                cmd = cmd_obj['cmd']
                if cmd == Controller.CMD_START_STREAM:
                    print("get cmd: start_stream")
                    return self.state_func__run
                elif cmd == Controller.CMD_SHUTDOWN:
                    print("get cmd: shutdown")
                    # todo: ensure the video is complete
                    self._running = False
                    self._notify_controllers_of_shutdown()
            time.sleep(0.2)
    def state_func__run(self):
        """Accept one TCP client, stream frames to controllers (and recorder)."""
        print("stream engine state -> run")
        socket_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        socket_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        socket_server.bind((SERVER_IP, SERVER_PORT))
        socket_server.listen(MAX_NUM_CONNECTIONS)
        # currently, just focus on one client with one tcp connection
        (conn, (ip, port)) = socket_server.accept()
        self.rfile = conn.makefile('rb')
        # start the threads of controller here
        # you can actually change the start places as you like
        self._notify_controllers_of_start()
        image_num = 0
        stream_bytes = b''
        try:
            while self._running:
                stream_bytes += self.rfile.read(1024)
                if len(stream_bytes) < IMAGE_SIZE:
                    continue # keep receiving until receive a whole image
                image = np.frombuffer(stream_bytes[:IMAGE_SIZE], dtype="B")
                stream_bytes = stream_bytes[IMAGE_SIZE:] # todo: the rest should be moved to another engine
                image_num += 1
                print(image_num, image.shape, len(stream_bytes))
                frame = np.frombuffer(
                    image, dtype=np.uint8).reshape(IMAGE_HEIGHT, IMAGE_WIDTH, 3)
                self._notify_controllers_of_update_framedata(frame)
                # write the frame to video
                if self.recording:
                    self.out.write(frame) # todo: should be moved to another thread
                    print("recording")
                while not self._command_queue.empty():
                    cmd_obj = self._command_queue.get(block=False)
                    cmd = cmd_obj['cmd']
                    if cmd == Controller.CMD_START_STREAM:
                        print("get cmd: start_stream")
                        print("Stream engine already started")
                    elif cmd == Controller.CMD_STOP_STREAM:
                        print("get cmd: stop_stream")
                        socket_server.close()
                        return self.state_func__idle
                    elif cmd == Controller.CMD_START_RECORD:
                        print("get cmd: start_record")
                        self.generateRecord()
                        self.recording = True
                    elif cmd == Controller.CMD_STOP_RECORD:
                        print("get cmd: stop_record")
                        self.recording = False
                    elif cmd == Controller.CMD_SHUTDOWN:
                        print("get cmd: shutdown")
                        # todo: ensure the video is complete
                        self._running = False
                        self._notify_controllers_of_shutdown()
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt; ordinary error behaviour is unchanged.
            print("Unexpected error:", sys.exc_info()[0])
            self.exit_code = 1
            return None
    def run(self):
        """Thread entry point: drive the state machine until it returns None."""
        try:
            # Run state machine.
            state_func = self.state_func__idle()
            self.exit_code = 0
            while state_func is not None:
                state_func = state_func()
        except Exception as e:
            print("ImageCaptureEngine.run() - unexpected exception \n %s \n %s" % (str(e), traceback.format_exc()))
            self.exit_code = 1
        except:
            # Deliberate last-resort catch (BaseException) so the thread
            # always reaches the shutdown notification below.
            print("ImageCaptureEngine.run() - unexpected exception \n %s" % traceback.format_exc())
            self.exit_code = 1
        finally:
            self._notify_controllers_of_shutdown()
            # os.kill(os.getpid(), signal.SIGINT)
            print("shutting down")
|
from __future__ import annotations
from .mypy_helpers import MypyAssert
def test_model_init(assert_mypy_output: MypyAssert) -> None:
    """Model __init__ args are type-checked against declared attributes."""
    assert_mypy_output(
        """
from pynamodb.attributes import NumberAttribute
from pynamodb.models import Model
class MyModel(Model):
    my_hash_key = NumberAttribute(hash_key=True)
    my_range_key = NumberAttribute(range_key=True)
    my_attr = NumberAttribute()
class MyDerivedModel(MyModel):
    my_derived_attr = NumberAttribute()
MyModel(my_attr=5.5)
MyModel(5.5, my_attr=5.5)
MyModel(5.5, 5.5, my_attr=5.5)
MyModel(hash_key=5.5, range_key=5.5, my_attr=5.5)
MyModel(hash_key='hello', range_key='world', my_attr=5.5)  # E: Argument "hash_key" to "MyModel" has incompatible type "str"; expected "float"  [arg-type]
# E: Argument "range_key" to "MyModel" has incompatible type "str"; expected "float"  [arg-type]
MyModel(foobar=5.5)  # E: Unexpected keyword argument "foobar" for "MyModel"  [call-arg]
# test with derived model
MyDerivedModel(my_attr=5.5, my_derived_attr=42)
MyDerivedModel(foobar=5.5)  # E: Unexpected keyword argument "foobar" for "MyDerivedModel"  [call-arg]
        """
    )
def test_model_init__no_attributes(assert_mypy_output: MypyAssert) -> None:
    """With no declared keys, hash/range args are typed as None."""
    assert_mypy_output(
        """
from pynamodb.attributes import NumberAttribute
from pynamodb.models import Model
class MyModel(Model):
    pass
MyModel('foo', 'bar')  # E: Argument 1 to "MyModel" has incompatible type "str"; expected "None"  [arg-type]
# E: Argument 2 to "MyModel" has incompatible type "str"; expected "None"  [arg-type]
MyModel(hash_key='foo', range_key='bar', spam='ham')  # E: Unexpected keyword argument "spam" for "MyModel"  [call-arg]
# E: Argument "hash_key" to "MyModel" has incompatible type "str"; expected "None"  [arg-type]
# E: Argument "range_key" to "MyModel" has incompatible type "str"; expected "None"  [arg-type]
        """
    )
def test_model_init__custom_empty(assert_mypy_output: MypyAssert) -> None:
    """A custom zero-arg __init__ triggers the unexpected-signature warning."""
    assert_mypy_output(
        """
from pynamodb.attributes import NumberAttribute
from pynamodb.models import Model
class MyModel(Model):
    my_hash_key = NumberAttribute(hash_key=True)
    my_range_key = NumberAttribute(range_key=True)
    my_attr = NumberAttribute()
    def __init__(self) -> None:
        ...
MyModel('foo', 'bar')  # E: Unexpected signature 'def () -> __main__.MyModel' for a PynamoDB model initializer: expecting 'hash_key', 'range_key' and a keywords argument  [misc]
# E: Too many arguments for "MyModel"  [call-arg]
        """
    )
def test_model_init__custom_all_args(assert_mypy_output: MypyAssert) -> None:
    """A *args/**kwargs __init__ also triggers the unexpected-signature warning."""
    assert_mypy_output(
        """
from typing import Any
from pynamodb.attributes import NumberAttribute
from pynamodb.models import Model
class MyModel(Model):
    my_hash_key = NumberAttribute(hash_key=True)
    my_range_key = NumberAttribute(range_key=True)
    my_attr = NumberAttribute()
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        ...
MyModel(unknown=42)  # E: Unexpected signature 'def (*args: Any, **kwargs: Any) -> __main__.MyModel' for a PynamoDB model initializer: expecting 'hash_key', 'range_key' and a keywords argument  [misc]
        """
    )
def test_number_attribute(assert_mypy_output: MypyAssert) -> None:
    """NumberAttribute resolves to float on instances, Optional[float] when null=True."""
    assert_mypy_output(
        """
from typing import Optional
from typing_extensions import assert_type
from pynamodb.attributes import NumberAttribute
from pynamodb.models import Model
class MyModel(Model):
    my_attr = NumberAttribute()
    my_nullable_attr = NumberAttribute(null=True)
    my_not_nullable_attr = NumberAttribute(null=False)
assert_type(MyModel.my_attr, NumberAttribute)
assert_type(MyModel().my_attr, float)
assert_type(MyModel().my_nullable_attr, Optional[float])
assert_type(MyModel().my_not_nullable_attr, float)
MyModel(my_attr=5.5)
MyModel(my_attr='5.5')  # E: Argument "my_attr" to "MyModel" has incompatible type "str"; expected "float"  [arg-type]
MyModel(my_attr=None)  # E: Argument "my_attr" to "MyModel" has incompatible type "None"; expected "float"  [arg-type]
MyModel(my_nullable_attr=5.5)
MyModel(my_nullable_attr='5.5')  # E: Argument "my_nullable_attr" to "MyModel" has incompatible type "str"; expected "Optional[float]"  [arg-type]
MyModel(my_nullable_attr=None)
        """
    )
def test_unicode_attribute(assert_mypy_output: MypyAssert) -> None:
    """UnicodeAttribute resolves to str / Optional[str] on instances."""
    assert_mypy_output(
        """
from typing import Optional
from typing_extensions import assert_type
from pynamodb.attributes import UnicodeAttribute
from pynamodb.models import Model
class MyModel(Model):
    my_attr = UnicodeAttribute()
    my_nullable_attr = UnicodeAttribute(null=True)
assert_type(MyModel.my_attr, UnicodeAttribute)
assert_type(MyModel.my_nullable_attr, UnicodeAttribute)
assert_type(MyModel().my_attr, str)
assert_type(MyModel().my_nullable_attr, Optional[str])
MyModel().my_attr.lower()
MyModel().my_nullable_attr.lower()  # E: Item "None" of "Optional[str]" has no attribute "lower"  [union-attr]
        """
    )
def test_custom_attribute(assert_mypy_output: MypyAssert) -> None:
    """User-defined Attribute subclasses keep methods on the class, unwrap on instances."""
    assert_mypy_output(
        """
from typing import Optional
from typing_extensions import assert_type
import pynamodb.expressions.condition
from pynamodb.attributes import Attribute
from pynamodb.models import Model
class BinaryAttribute(Attribute[bytes]):
    def do_something(self) -> None: ...
class MyModel(Model):
    my_attr = BinaryAttribute()
    my_nullable_attr = BinaryAttribute(null=True)
assert_type(MyModel.my_attr, BinaryAttribute)
assert_type(MyModel.my_attr.exists(), pynamodb.expressions.condition.Exists)
assert_type(MyModel.my_attr.do_something(), None)
assert_type(MyModel().my_attr, bytes)
assert_type(MyModel.my_nullable_attr, BinaryAttribute)
assert_type(MyModel.my_nullable_attr.exists(), pynamodb.expressions.condition.Exists)
assert_type(MyModel.my_nullable_attr.do_something(), None)
assert_type(MyModel().my_nullable_attr, Optional[bytes])
        """
    )
def test_map_attribute(assert_mypy_output: MypyAssert) -> None:
    """MapAttribute subclasses keep their own type on both class and instance."""
    assert_mypy_output(
        """
from typing import Optional
from typing_extensions import assert_type
from pynamodb.attributes import MapAttribute, UnicodeAttribute
from pynamodb.models import Model
class MyMapAttribute(MapAttribute):
    my_sub_attr = UnicodeAttribute()
class MyModel(Model):
    my_attr = MyMapAttribute()
    my_nullable_attr = MyMapAttribute(null=True)
assert_type(MyModel.my_attr, MyMapAttribute)
assert_type(MyModel.my_nullable_attr, MyMapAttribute)
assert_type(MyModel().my_attr, MyMapAttribute)
assert_type(MyModel().my_nullable_attr, Optional[MyMapAttribute])
        """
    )
def test_unexpected_value_of_null(assert_mypy_output: MypyAssert) -> None:
    """A non-literal null= argument is flagged; the plugin falls back to non-null."""
    assert_mypy_output(
        """
from typing import Optional
from typing_extensions import assert_type
from pynamodb.attributes import NumberAttribute
from pynamodb.models import Model
class MyModel(Model):
    my_attr = NumberAttribute(null=bool(5))  # E: 'null' argument is not constant False or True  [misc]
assert_type(MyModel().my_attr, float)
        """
    )
def test_attribute_assigned_out_of_class_scope(assert_mypy_output: MypyAssert) -> None:
    """Module-level attribute assignment outside a Model is accepted silently."""
    assert_mypy_output(
        """
from pynamodb.models import Model
from pynamodb.attributes import NumberAttribute
num = NumberAttribute()
        """
    )
def test_attribute_not_assigned_to_class_var(assert_mypy_output: MypyAssert) -> None:
    """An attribute instantiated but never assigned inside a Model is flagged."""
    assert_mypy_output(
        """
from pynamodb.models import Model
from pynamodb.attributes import NumberAttribute
class MyModel(Model):
    NumberAttribute()  # E: PynamoDB attribute not assigned to a class variable  [misc]
        """
    )
def test_attribute_hook_fallback(assert_mypy_output: MypyAssert) -> None:
    """The plugin's attribute hook leaves non-pynamodb classes untouched."""
    assert_mypy_output(
        """
class C:
    def __init__(self) -> None:
        self.d = 42
_ = C().d
        """
    )
def test_function_hook_fallback(assert_mypy_output: MypyAssert) -> None:
    """The plugin's function hook leaves plain functions untouched."""
    assert_mypy_output(
        """
def foo():
    pass
foo()
        """
    )
|
#!/usr/bin/env python3
import json
import requests
import random
import sys
from io import BytesIO
from optparse import OptionParser
from pathlib import Path
from PIL import Image, ImageChops
def cutImage(im, width, height, resample = Image.BILINEAR):
    """Scale *im* up (never down) to cover width x height, then center-crop.

    The crop keeps the horizontal center; vertically it keeps the upper
    third bias (1/3 above, 2/3 below), which favours faces near the top.
    Returns a new PIL image with aspect ratio width/height.
    """
    ratio = width / height
    if (im.width < width):
        # Scale up so the width reaches the target, keeping aspect ratio.
        im = im.resize((width, int(im.height * width / im.width) + 1), resample = resample)
    if (im.height < height):
        # BUGFIX: the new width must be scaled by height/im.height (mirroring
        # the branch above); it previously used `width` as the scale factor,
        # distorting the intermediate image before the crop.
        im = im.resize((int(im.width * height / im.height) + 1, height), resample = resample)
    if ratio < im.width / im.height:
        # Too wide: crop equally from left and right.
        return im.crop((
            (im.width - im.height * ratio) / 2,
            0,
            (im.width + im.height * ratio) / 2,
            im.height
        ))
    else:
        # Too tall: crop with a bias toward keeping the top of the image.
        return im.crop((
            0,
            (im.height - im.width / ratio) / 3,
            im.width,
            (im.height + 2 * im.width / ratio) / 3
        ))
def fetchImagesData(char, tags, count = 100, site = "safebooru"):
    """Query a booru API for posts tagged *char* (+ extra *tags*).

    Returns a list of dicts with width, height, ratio and url keys; an empty
    list when the API returns no body.  Raises NotImplementedError for
    danbooru and NameError for unknown sites.
    """
    if tags != "":
        tags = '+' + tags
    if site == "safebooru":
        url = "https://safebooru.org/index.php?page=dapi&s=post&q=index&tags=" + char + tags + "&limit=" + str(count) + "&json=1"
    elif site == "gelbooru":
        url = "https://gelbooru.com/index.php?page=dapi&s=post&q=index&tags=" + char + tags + "&limit=" + str(count) + "&json=1"
    elif site == "danbooru":
        raise NotImplementedError("As of now, Danbooru support is not impemented yet.")
    else:
        raise NameError("Site " + site + " not supported.")
    r = requests.get(url).text
    if not r:
        return []
    jsonData = json.loads(r)
    imageData = []
    for img in jsonData:
        imgDict = {}
        for attribute in ("width", "height"):
            imgDict[attribute] = img[attribute]
        imgDict["ratio"] = imgDict["width"] / imgDict["height"]
        # BUGFIX: was `if site == "safebooru" or "gelbooru":`, which is always
        # true ("gelbooru" is a truthy literal).  Only these two sites can
        # reach this point, so behaviour for valid input is unchanged.
        if site in ("safebooru", "gelbooru"):
            # NOTE(review): gelbooru serves from gelbooru.com, so the
            # hard-coded ".org" likely yields broken gelbooru image URLs —
            # confirm before relying on site="gelbooru".
            imgDict["url"] = "https://" + site + ".org/images/" + img["directory"] + "/" + img["image"] + "?" + str(img["id"]);
        imageData.append(imgDict)
    return imageData
def fetchImage(url):
    """Download an image; *url* is a URL string or a dict with a "url" key."""
    if type(url) is dict:
        target = url["url"]
    elif type(url) is str:
        target = url
    else:
        raise TypeError
    response = requests.get(target)
    return Image.open(BytesIO(response.content))
# each hole is a tuple in the format (left bound, upper bound, width, height)
def fillTemplate(holes, images, template, resample = Image.BILINEAR):
    """Paste one image behind each transparent hole of *template*.

    NOTE(review): sorts *holes* and *images* in place (the caller's lists are
    reordered) and hard-codes a swap of indices 2 and 9, so it assumes the
    11-hole layout of the module-level `holes` list — confirm before reuse.
    """
    if (len(holes) != len(images)):
        raise Exception("Arrays of holes and images must be the same length.")
    # sort both lists by w/h ratio (ascending)
    holes.sort(key = lambda x: x[2] / x[3])
    images.sort(key = lambda x: x.width / x.height)
    # swap two images that don't overlap correctly
    holes[2], holes[9] = holes[9], holes[2]
    images[2], images[9] = images[9], images[2]
    for i in range(len(holes)):
        x, y, w, h = holes[i]
        # Crop/scale each image to exactly fill its hole.
        cutout = cutImage(images[i], w, h).resize((w, h), resample = resample)
        # Paste the cutout on an empty layer and composite the template on
        # top so it shows through only where the template is transparent.
        bottomLayer = Image.new("RGBA", (template.width, template.height), (0, 0, 0, 0))
        bottomLayer.paste(cutout, (x, y, x+w, y+h))
        template = Image.alpha_composite(bottomLayer, template)
    return template
# Hole geometry for template.png: one (left, top, width, height) tuple per
# transparent hole, listed left-to-right / top-to-bottom.
holes = [
    [12, 681, 163, 167],
    [210, 433, 346, 246],
    [212, 26, 319, 172],
    [379, 282, 86, 105],
    [455, 226, 62, 68],
    [483, 408, 217, 136],
    [555, 547, 462, 441],
    [895, 229, 230, 138], # swapping; index after sorting: 9
    [1068, 262, 284, 336], # swapping; index after sorting: 2
    [1099, 10, 290, 188],
    [1151, 685, 157, 232],
]
def doItForHer(char, tags = "solo", imgDir = "."):
    """Fetch images of *char*, fill the template, show the collage and save it.

    The file is saved to imgDir as <char>_NNN.png using the first unused
    three-digit suffix below 1000.
    """
    imgData = fetchImagesData(char, tags = tags, count = 100)
    # One randomly-chosen image per template hole.
    imgs = [fetchImage(random.choice(imgData)) for _hole in holes]
    tmpl = Image.open("template.png")
    result = fillTemplate(holes, imgs, tmpl)
    result.show()
    for filenum in range(1000):
        path = imgDir + "/" + char + "_{:03d}".format(filenum) + ".png"
        if not Path(path).is_file():
            result.save(path)
            break
def main(argv):
    """CLI entry point: parse options and build a collage for one character."""
    parser = OptionParser(usage = "usage: %prog [options] -c character")
    parser.add_option("-c", "--character", action = "store", type = "string",
                      help = "character to use in the collage, see README for naming conventions")
    parser.add_option("-d", "--directory", action = "store", type = "string", default = ".",
                      help = "directory for image output (working directory by default)")
    parser.add_option("-t", "--tags", action = "store", type = "string", default = "solo",
                      help = "tags separated by '+' (by default solo), see readme")
    options, argv = parser.parse_args()
    if options.character is None:
        parser.error("provide a character name, see -h for help")
    doItForHer(options.character, tags = options.tags, imgDir = options.directory)
if __name__ == "__main__":
    # Pass the raw command line through to the option parser.
    main(sys.argv)
|
# Demonstrates basic list operations (Python 2 print-statement syntax).
List = [ 12, 23, "Hello", 60.6, "Chennai" ]
List1 = [ 21, 32, 60 ]
# Slice: elements at indices 1 and 2.
a = List [ 1:3 ]
# Repetition: the list concatenated with itself.
b = List * 2
# Concatenation of the two lists.
c = List + List1
print List
print "\n", a
print "\n", b
print "\n", c
|
class Queue(object):
    """FIFO queue backed by the list handed to the constructor.

    enqueue mutates the backing list in place (append); dequeue rebinds
    ``alist`` to a slice, leaving the caller's original list untouched.
    """
    def __init__(self, alist):
        self.alist = alist
    def enqueue(self, param):
        """Append *param* at the tail and return it."""
        self.alist.append(param)
        return param
    def dequeue(self):
        """Remove and return the head element (IndexError when empty)."""
        front, self.alist = self.alist[0], self.alist[1:]
        return front
# Quick demonstration of the Queue class.
alist = [5, 4, 8, 7]
queue = Queue(alist)
print(queue.enqueue(10))  # 10; appended in place, so `alist` also grows
print(queue.alist)
print(queue.dequeue())  # 5, the front of the queue
print(queue.alist)  # note: dequeue rebinds queue.alist; `alist` still holds the 5
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
import os
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import relpath
from os.path import splitext
from setuptools import Extension
from setuptools import find_packages
from setuptools import setup
from setuptools.command.build_ext import build_ext
try:
    # Allow installing package without any Cython available. This
    # assumes you are going to include the .c files in your sdist.
    import Cython
except ImportError:
    # Cython is optional; None signals "not available" to later checks.
    Cython = None
def read(*names, **kwargs):
    """Read a file located relative to this setup.py and return its text.

    *names* are path components joined onto this file's directory; the
    ``encoding`` keyword may override the default "utf8".
    """
    # Use a context manager so the handle is closed deterministically; the
    # original left the file object to the garbage collector.
    with io.open(
        join(dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")
    ) as fobj:
        return fobj.read()
class CustomBuildExtCommand(build_ext):
    """build_ext command for use when numpy headers are needed."""
    def run(self):
        # Import numpy here, only when headers are needed, so metadata-only
        # setup.py commands work even when numpy is not installed yet.
        import numpy
        # Add numpy headers to include_dirs
        self.include_dirs.append(numpy.get_include())
        # Call original build_ext command
        build_ext.run(self)
# Enable code coverage for C code: we can't use CFLAGS=-coverage in tox.ini, since that may mess with compiling
# dependencies (e.g. numpy). Therefore we set SETUPPY_CFLAGS=-coverage in tox.ini and copy it to CFLAGS here (after
# deps have been safely installed).
if "TOXENV" in os.environ and "SETUPPY_CFLAGS" in os.environ:
    os.environ["CFLAGS"] = os.environ["SETUPPY_CFLAGS"]
setup(
    name="pysight",
    version="0.12.2",
    license="Free for non-commercial use",
    description="Create images and volumes from photon lists generated by a multiscaler",
    # NOTE(review): adjacent string literals are concatenated WITHOUT spaces,
    # producing e.g. "imagesfrom" in the rendered description — confirm and
    # add trailing spaces if unintended.
    long_description=(
        "PySight is an application aimed at generating multidimensional images"
        "from photon lists. The main use case is to parse ``.lst`` files which"
        "were generated by FAST ComTec's multiscaler, but other photon lists"
        "can also be parsed.\n\nPySight was featured in"
        "`this <https://www.osapublishing.org/optica/abstract.cfm?uri=optica-5-9-1104>`_"
        " *Optica* article, and was created in Pablo Blinder's Lab at Tel Aviv University."
    ),
    author="Hagai Har-Gil",
    author_email="hagaihargil@protonmail.com",
    url=r"https://github.com/PBLab/python-pysight/",
    packages=find_packages("src"),
    package_dir={"": "src"},
    py_modules=[splitext(basename(path))[0] for path in glob("src/*.py")],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: Free for non-commercial use",
        "Operating System :: Unix",
        "Operating System :: POSIX",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: Implementation :: CPython",
    ],
    keywords=["multiscaler", "photon counting", "imaging"],
    # Inject numpy include dirs at build time (see CustomBuildExtCommand).
    cmdclass={"build_ext": CustomBuildExtCommand},
    install_requires=[
        "numpy >= 1.18",
        "matplotlib >= 3.2",
        "pandas == 1.1.2",
        "attrs == 19.3",
        "cython >= 0.29",
        "scipy >= 1.4",
        "scikit-learn >= 0.20",
        "zarr >= 2.3",
        "tqdm >= 4.29",
        "numba >= 0.49",
        "ansimarkup >= 1.4",
        "psutil >= 5.6.6",
        "toml >= 0.9",
        "appdirs >= 1.4",
    ],
    extras_require={
        "dev": [
            "pytest",
            "sphinx",
            "bumpversion",
            "twine",
            "black",
            "mypy",
            "flake8",
            "typed-ast",
            "pylint",
        ]
    },
    setup_requires=["cython", "numpy"] if Cython else ["numpy"],
    # Compile every .pyx under src/ (or the pre-generated .c when Cython
    # is unavailable — see the import guard above).
    ext_modules=[
        Extension(
            splitext(relpath(path, "src").replace(os.sep, "."))[0],
            sources=[path],
            include_dirs=[dirname(path)],
        )
        for root, _, _ in os.walk("src")
        for path in glob(join(root, "*.pyx" if Cython else "*.c"))
    ],
    # NOTE(review): data_files expects (target_dir, [files]) tuples; this is
    # a bare string in a list (the parentheses do NOT make a tuple), so the
    # config file is likely not installed as intended — confirm.
    data_files=[("src/pysight/configs/default.toml")],
)
|
import os
from Services.RetinaFaceLocatorService import RetinaFacesLocatorService
from Services.Img2PoseLocatorService import Img2PoseLocatorService
from Services.SaveFacesJson import SaveFacesJson
from Services.SaveFacesJpg import SaveFacesJpg
from Utils.Heuristics.FaceHeuristic import FaceHeuristic
from Utils.Heuristics.HeuristicCreator import HeuristicCreator
from Utils.fileUtils import isImage, createFolder
class ReaderFilesController:
    """Locates faces in an image database with the chosen detector model,
    filters them through a heuristic, and saves the results (json metadata
    and, via the jpg service, cropped faces).

    BUG FIX: __init__ previously called ``createFolder(self.gallery)``
    twice and never created ``self.facesFolder``.
    """

    def _createNameFolder(self, database, model, heuristic):
        """Output folder name encoding database, model and heuristic."""
        return "%s_%s_%s" %(database, model, heuristic)

    def __init__(self, heuristic: str = 'none', model: str = 'retinaface'):
        # Pick the face-detection backend; unknown names fail fast.
        if model == 'retinaface':
            self.faceLocatorService: RetinaFacesLocatorService = RetinaFacesLocatorService()
        elif model == 'img2pose':
            self.faceLocatorService: Img2PoseLocatorService = Img2PoseLocatorService()
        else:
            msg = "The model %s does not exist" %model
            raise Exception(msg)
        self.heuristicCreator: HeuristicCreator = HeuristicCreator()
        self.databaseJson = os.path.join("data", "TGC2020v0.3_json")
        self.databaseFaces = os.path.join("data", "TGC2020v0.3_face")
        self.heuristic = heuristic
        self.gallery = self._createNameFolder(self.databaseJson, model, heuristic)
        self.facesFolder = self._createNameFolder(self.databaseFaces, model, heuristic)
        createFolder(self.gallery)
        # Fixed: was a duplicate createFolder(self.gallery).
        createFolder(self.facesFolder)
        self.saveServiceJson: SaveFacesJson = SaveFacesJson()
        self.saveServiceJpg: SaveFacesJpg = SaveFacesJpg(self.facesFolder)

    def run(self, folder: str) -> None:
        """Process every file in *folder*, writing filtered face data to a
        per-folder subdirectory of the gallery."""
        faceHeuristic: FaceHeuristic = self.heuristicCreator.getHeuristic(self.heuristic)
        db = os.path.join(self.gallery, os.path.basename(folder))
        os.mkdir(db)
        print(folder)
        # NOTE(review): files are joined with `folder`, not `dirpath`, so
        # nested subdirectories would resolve wrong paths — confirm the
        # databases are flat.
        for dirpath, dirnames, filenames in os.walk(folder):
            for filename in filenames:
                if faceHeuristic.type == "none":
                    # No heuristic: detect faces directly from the image.
                    if isImage(filename):
                        facesCollection = faceHeuristic.filterFaces(
                            self.faceLocatorService.locate(os.path.join(folder,filename))
                        )
                        self.saveServiceJson.saveFaces(db, filename, facesCollection)
                else:
                    # Heuristic set: re-filter previously saved face json.
                    facesCollection = faceHeuristic.filterFaces(
                        self.saveServiceJson.loadFaces(os.path.join(folder,filename))
                    )
                    self.saveServiceJson.saveFaces(db, filename, facesCollection)
from gym_RTStrade.envs.rtsTrade_env import rtsTrade_env |
# Generated by Django 2.1.2 on 2019-02-16 17:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a mandatory `checksum` char field to the Experiment model.
    dependencies = [
        ('experiments', '0002_auto_20190108_1450'),
    ]
    operations = [
        migrations.AddField(
            model_name='experiment',
            name='checksum',
            # The placeholder default only backfills existing rows during the
            # migration; preserve_default=False removes it from the final
            # field definition.
            field=models.CharField(default='asdasd', max_length=64),
            preserve_default=False,
        ),
    ]
|
# -*- coding: utf-8 -*-
import os
import irc3
from irc3 import asyncio
from irc3.plugins.command import Commands
__doc__ = '''
=================================================
:mod:`irc3.plugins.shell_command` Shell commands
=================================================
Allow to quickly add commands map to a shell command.
The bot will print stdout/stderr
..
>>> import os
>>> import shutil
>>> from irc3.testing import IrcBot
>>> from irc3.testing import ini2config
>>> try:
... shutil.rmtree('/tmp/myscripts')
... except:
... pass
>>> script = '/tmp/myscripts/demo'
>>> os.makedirs('/tmp/myscripts')
>>> with open(script, 'w') as fd:
... _ = fd.write('echo $IRC3_COMMAND_ARGS')
>>> os.chmod(script, 0o0744)
Usage::
>>> config = ini2config("""
... [bot]
... includes =
... irc3.plugins.shell_command
... [irc3.plugins.shell_command]
... myscript = /tmp/myscript
... # optional command configuration
... myscript.permission = runmyscrypt
... myscript.public = false
... # register a directory
... myscripts = /tmp/myscripts
... # optional commands configuration for the directory
... myscripts.permission = runmyscrypts
... """)
>>> bot = IrcBot(**config)
Then the uname command will be available::
>>> bot.test(':gawel!user@host PRIVMSG irc3 :!help myscript')
PRIVMSG gawel :Run $ /tmp/myscript
PRIVMSG gawel :!myscript [<args>...]
>>> bot.test(':gawel!user@host PRIVMSG #chan :!myscript')
PRIVMSG gawel :You can only use the 'myscript' command in private.
>>> bot.test(':gawel!user@host PRIVMSG irc3 :!myscript')
If the user provides some arguments then those will be available as an
environment var (to avoid shell injection) names ``IRC3_COMMAND_ARGS``
...
>>> bot.get_plugin(Commands)['demo'][0]
{'permission': 'runmyscrypts'}
'''
@irc3.plugin
class Shell:
    """irc3 plugin mapping bot commands to shell commands/scripts.

    Each config entry either names a single executable (command name ->
    path) or a directory, in which case every executable file found in it
    is registered under its basename (without extension).
    """
    requires = [Commands.__module__]
    def __init__(self, context):
        self.log = context.log
        self.context = context
        self.config = self.context.config[__name__]
        for k, v in self.config.items():
            # Skip commented-out values and dotted per-command option keys
            # (e.g. "myscript.permission") — they are read later.
            if (isinstance(v, str) and v.startswith('#')) or '.' in k:
                continue
            dirname = os.path.abspath(v)
            if os.path.isdir(dirname):
                # Directory entry: register every executable file beneath it.
                self.log.debug('Scanning for scripts in %s', dirname)
                for root, dirs, filenames in os.walk(dirname):
                    for filename in filenames:
                        binary = os.path.join(root, filename)
                        if os.access(binary, os.X_OK):
                            name = os.path.splitext(filename)[0]
                            self.register_command(name, binary, skey=k)
            else:
                # Single-command entry: key is the command name.
                self.register_command(k, v)
    def register_command(self, k, v, skey=None):
        """Register command *k* to run executable *v*; per-command options
        are looked up under *skey* (the directory key) or *k* itself."""
        async def meth(*args, **kwargs):
            return await self.shell_command(v, *args, **kwargs)
        meth.__name__ = k
        meth.__doc__ = '''Run $ %s
        %%%%%s [<args>...]
        ''' % (v, k)
        # Default: admin-only; config may override permission/public.
        p = {'permission': 'admin'}
        for opt in ('permission', 'public'):
            opt_key = '%s.%s' % (skey or k, opt)
            if opt_key in self.config:
                p[opt] = self.config[opt_key]
        self.log.debug('Register command %s: $ %s', k, v)
        commands = self.context.get_plugin(Commands)
        commands[k] = (p, meth)
    async def shell_command(self, command, mask, target, args, **kwargs):
        """Run *command* in a subprocess shell and return its combined
        stdout/stderr split into lines.  User arguments are passed via the
        IRC3_COMMAND_ARGS environment variable to avoid shell injection."""
        env = os.environ.copy()
        env['IRC3_COMMAND_ARGS'] = ' '.join(args['<args>'])
        self.log.debug('Running command: $ %s' % command)
        # NOTE(review): recent asyncio rejects an explicit shell=True kwarg
        # to create_subprocess_shell — confirm against the supported Python
        # versions.
        proc = await asyncio.create_subprocess_shell(
            command, shell=True, env=env,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.STDOUT)
        await proc.wait()
        lines = await proc.stdout.read()
        if not isinstance(lines, str):
            lines = lines.decode('utf8')
        return lines.split('\n')
|
# -*- coding: utf-8 -*-
# Crawl streamer images from Douyu's live "beauty" category and save them locally.
# Demonstrates use of scrapy's ImagesPipeline.
import scrapy
import json
from douyu_yz.items import DouyuYzItem
class DouyuspiderSpider(scrapy.Spider):
    """Pages through Douyu's vertical-room JSON API, yielding one item
    (nickname + portrait image URL) per streamer."""
    name = 'douyuSpider'
    allowed_domains = ['douyucdn.cn']
    offset = 0  # paging offset, advanced 20 rooms per request
    url = 'http://capi.douyucdn.cn/api/v1/getVerticalRoom?limit=20&offset='
    start_urls = ['http://capi.douyucdn.cn/api/v1/getVerticalRoom?limit=20&offset=' + str(offset)]
    def parse(self, response):
        # print(response.url)
        data = json.loads(response.text)["data"]
        for each in data:
            item = DouyuYzItem()
            item["name"] = each["nickname"]
            item["imagesUrls"] = each["vertical_src"]
            yield item
        if self.offset <= 20*100:  # cap the number of pages crawled
            self.offset += 20
            # print(self.offset)
            yield scrapy.Request(self.url + str(self.offset))
'''
Author: Guanghan Ning
E-mail: guanghan.ning@jd.com
April 23rd, 2019
LightTrack: A Generic Framework for Online Top-Down Human Pose Tracking
Demo on videos using YOLOv3 detector and Mobilenetv1-Deconv.
'''
import time
import argparse
# import vision essentials
import cv2
# import Network
from network_mobile_deconv import Network
# pose estimation utils
from HPE.dataset import Preprocessing
from HPE.config import cfg
from lib.tfflat.base import Tester
from lib.nms.gpu_nms import gpu_nms
# import my own utils
import sys, os, time
from visualizer import *
sys.path.append(os.path.abspath("./graph/"))
from utils_json import *
from utils_io_file import *
from utils_io_folder import *
# Module-wide switches for the demo.
flag_visualize = True
flag_nms = False #Default is False, unless you know what you are doing
# Pin all GPU work to device 0.
os.environ["CUDA_VISIBLE_DEVICES"]="0"
def initialize_parameters():
    """Populate the module-level tracking/detection knobs and zero the
    global timing/counting statistics used by light_track()."""
    global video_name, img_id
    global nms_method, nms_thresh, min_scores, min_box_size
    nms_method = 'nms'
    nms_thresh = 1.
    min_scores = 1e-10
    min_box_size = 0.
    global keyframe_interval, enlarge_scale, pose_matching_threshold
    keyframe_interval = 10 # choice examples: [2, 3, 5, 8, 10]
    enlarge_scale = 0.2  # bbox enlargement ratio used throughout
    pose_matching_threshold = 0
    global flag_flip
    flag_flip = True  # average heatmaps with a horizontally flipped pass
    global total_time_POSE, total_time_DET, total_time_ALL, total_num_FRAMES, total_num_PERSONS
    # Running totals accumulated by light_track() and reported in __main__.
    total_time_POSE = 0
    total_time_DET = 0
    total_time_ALL = 0
    total_num_FRAMES = 0
    total_num_PERSONS = 0
    return
def light_track(pose_estimator,detector,
                image_folder, output_json_path,
                visualize_folder, detect_folder,output_video_path):
    """Run person detection + pose estimation over every frame in
    *image_folder*, export openSVAI-format json, and optionally render a
    visualization video.

    In this demo every frame is effectively a keyframe: the detector runs
    on each image and pose is estimated for every detected person.
    """
    global total_time_POSE, total_time_DET, total_time_ALL, total_num_FRAMES, total_num_PERSONS
    ''' 1. statistics: get total time for lighttrack processing'''
    st_time_total = time.time()
    # process the frames sequentially
    frame_prev = -1
    bbox_dets_list_list = []
    keypoints_list_list = []
    flag_mandatory_keyframe = False
    img_id = -1
    img_paths = get_immediate_childfile_paths(image_folder)
    num_imgs = len(img_paths)
    total_num_FRAMES = num_imgs
    # Lazily import and build the chosen detector backend.
    if detector == 'Centernet':
        from detector.CenterNet import CtdetDetector
        from detector.config.opts import opts
        opt = opts().init()
        centernet = CtdetDetector(opt)
    elif detector == 'yolo':
        from detector.detector_yolov3 import inference_yolov3
    while img_id < num_imgs-1:
        img_id += 1
        img_path = img_paths[img_id]
        print("Current tracking: [image_id:{}]".format(img_id))
        frame_cur = img_id
        # NOTE(review): frame_cur always equals img_id here, so this branch
        # never fires; looks like a leftover from the full tracker — confirm.
        if (frame_cur == frame_prev):
            frame_prev -= 1
        ''' KEYFRAME: loading results from other modules '''
        # if is_keyframe(img_id, keyframe_interval) or flag_mandatory_keyframe:
        bbox_dets_list = [] # keyframe: start from empty
        keypoints_list = [] # keyframe: start from empty
        # perform detection at keyframes
        st_time_detection = time.time()
        image = cv2.imread(img_path)
        if detector == 'Centernet':
            human_candidates = centernet.run(image)
        elif detector == 'yolo':
            human_candidates=inference_yolov3(img_path)
        end_time_detection = time.time()
        total_time_DET += (end_time_detection - st_time_detection)
        num_dets = len(human_candidates)
        print("Keyframe: {} detections".format(num_dets))
        # if nothing detected at keyframe, regard next frame as keyframe because there is nothing to track
        if num_dets <= 0:
            # add empty result: the [0, 0, 2, 2] bbox is the "null" marker
            bbox_det_dict = {"img_id":img_id,
                             "det_id": 0,
                             "imgpath": img_path,
                             "bbox": [0, 0, 2, 2]}
            bbox_dets_list.append(bbox_det_dict)
            keypoints_dict = {"img_id":img_id,
                              "det_id": 0,
                              "imgpath": img_path,
                              "keypoints": []}
            keypoints_list.append(keypoints_dict)
            bbox_dets_list_list.append(bbox_dets_list)
            keypoints_list_list.append(keypoints_list)
            continue
        ''' 2. statistics: get total number of detected persons '''
        total_num_PERSONS += num_dets
        for det_id in range(num_dets):
            # obtain bbox position
            bbox_gt = human_candidates[det_id]
            # enlarge bbox by 20% with same center position
            bbox_x1y1x2y2 = xywh_to_x1y1x2y2(bbox_gt)
            bbox_in_xywh = enlarge_bbox(bbox_x1y1x2y2, enlarge_scale)
            bbox_gt = x1y1x2y2_to_xywh(bbox_in_xywh)
            # Keyframe: use provided bbox
            bbox_det = bbox_gt
            # Skip degenerate or absurdly large boxes.
            if bbox_det[2] <= 0 or bbox_det[3] <= 0 or bbox_det[2] > 2000 or bbox_det[3] > 2000:
                bbox_det = [0, 0, 2, 2]
                track_id = None # this id means null
                continue
            # update current frame bbox
            bbox_det_dict = {"img_id":img_id,
                             "det_id":det_id,
                             "imgpath": img_path,
                             "bbox":bbox_det}
            # obtain keypoints for each bbox position in the keyframe
            st_time_pose = time.time()
            keypoints = inference_keypoints(pose_estimator, bbox_det_dict)[0]["keypoints"]
            end_time_pose = time.time()
            total_time_POSE += (end_time_pose - st_time_pose)
            bbox_det_dict = {"img_id":img_id,
                             "det_id":det_id,
                             "imgpath": img_path,
                             "bbox":bbox_det}
            bbox_dets_list.append(bbox_det_dict)
            # update current frame keypoints
            keypoints_dict = {"img_id":img_id,
                              "det_id":det_id,
                              "imgpath": img_path,
                              "keypoints":keypoints}
            keypoints_list.append(keypoints_dict)
        # update frame
        bbox_dets_list_list.append(bbox_dets_list)
        keypoints_list_list.append(keypoints_list)
        frame_prev = frame_cur
    # ''' 1. statistics: get total time for lighttrack processing'''
    end_time_total = time.time()
    total_time_ALL += (end_time_total - st_time_total)
    # convert results into openSVAI format
    print("Exporting Results in openSVAI Standard Json Format...")
    poses_standard = pose_to_standard_mot(keypoints_list_list, bbox_dets_list_list)
    # output json file
    pose_json_folder, _ = get_parent_folder_from_path(output_json_path)
    create_folder(pose_json_folder)
    write_json_to_file(poses_standard, output_json_path)
    print("Json Export Finished!")
    # visualization: render per-frame overlays, then stitch them into a video
    if flag_visualize is True:
        print("Visualizing Pose Tracking Results...")
        create_folder(visualize_folder)
        show_all_from_standard_json(output_json_path, classes, joint_pairs, joint_names, image_folder, visualize_folder, flag_track = True)
        print("Visualization Finished!")
        img_paths = get_immediate_childfile_paths(visualize_folder)
        avg_fps = total_num_FRAMES / total_time_ALL
        make_video_from_images(img_paths, output_video_path, fps=avg_fps, size=None, is_color=True, format="XVID")
def iou(boxA, boxB):
    """Intersection-over-union of two boxes given as (x1, y1, x2, y2).

    Coordinates are treated as inclusive pixel indices, hence the +1 when
    computing widths and heights.
    """
    # Corners of the overlap rectangle.
    left = max(boxA[0], boxB[0])
    top = max(boxA[1], boxB[1])
    right = min(boxA[2], boxB[2])
    bottom = min(boxA[3], boxB[3])
    # Overlap area; clamped to zero when the boxes are disjoint.
    overlap = max(0, right - left + 1) * max(0, bottom - top + 1)
    # Individual box areas, same inclusive convention.
    areaA = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    areaB = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    # Union = sum of areas minus the double-counted overlap.
    return overlap / float(areaA + areaB - overlap)
def get_bbox_from_keypoints(keypoints_python_data):
    """Derive an enlarged xywh bounding box from a flat keypoint list.

    *keypoints_python_data* is [x0, y0, vis0, x1, y1, vis1, ...].  Keypoints
    with visibility 0 or 3 are ignored.  Returns the placeholder box
    [0, 0, 2, 2] when there is no usable keypoint.
    """
    if keypoints_python_data == [] or keypoints_python_data == 45*[0]:
        return [0, 0, 2, 2]
    num_keypoints = len(keypoints_python_data)
    x_list = []
    y_list = []
    for keypoint_id in range(int(num_keypoints / 3)):
        x = keypoints_python_data[3 * keypoint_id]
        y = keypoints_python_data[3 * keypoint_id + 1]
        vis = keypoints_python_data[3 * keypoint_id + 2]
        if vis != 0 and vis != 3:
            x_list.append(x)
            y_list.append(y)
    # BUG FIX: this guard must run BEFORE min()/max(); previously an
    # all-invisible keypoint list raised ValueError on min([]).
    if not x_list or not y_list:
        return [0, 0, 2, 2]
    min_x = min(x_list)
    min_y = min(y_list)
    max_x = max(x_list)
    max_y = max(y_list)
    scale = enlarge_scale  # enlarge bbox by 20% with same center position
    bbox = enlarge_bbox([min_x, min_y, max_x, max_y], scale)
    bbox_in_xywh = x1y1x2y2_to_xywh(bbox)
    return bbox_in_xywh
def enlarge_bbox(bbox, scale):
    """Grow *bbox* = [min_x, min_y, max_x, max_y] symmetrically about its
    center by *scale* (e.g. 0.2 adds 10% on each side).  Degenerate or
    oversized results collapse to the placeholder box [0, 0, 2, 2]."""
    assert(scale > 0)
    x0, y0, x1, y1 = bbox
    # Half of the scaled extent on each axis; an inverted box yields a
    # negative margin, which is clamped to a small positive value.
    dx = int(0.5 * scale * (x1 - x0))
    dy = int(0.5 * scale * (y1 - y0))
    if dx < 0:
        dx = 2
    if dy < 0:
        dy = 2
    x0, x1 = x0 - dx, x1 + dx
    y0, y1 = y0 - dy, y1 + dy
    w = x1 - x0
    h = y1 - y0
    # Reject boxes that end up inverted, off-canvas or absurdly large.
    if y1 < 0 or x1 < 0 or w <= 0 or h <= 0 or w > 2000 or h > 2000:
        x0, y0, x1, y1 = 0, 0, 2, 2
    return [x0, y0, x1, y1]
def inference_keypoints(pose_estimator, test_data):
    """Estimate keypoints for one detection dict (*test_data* holds
    "bbox"/"imgpath"/...); returns a list of result dicts, or -1 when NMS
    removes everything."""
    cls_dets = test_data["bbox"]
    # nms on the bboxes
    if flag_nms is True:
        # NOTE(review): this branch indexes a dict converted via np.asarray;
        # it looks untested (flag_nms defaults to False) — confirm before
        # enabling.
        cls_dets, keep = apply_nms(cls_dets, nms_method, nms_thresh)
        test_data = np.asarray(test_data)[keep]
        if len(keep) == 0:
            return -1
    else:
        test_data = [test_data]
    # crop and detect pose
    pose_heatmaps, details, cls_skeleton, crops, start_id, end_id = get_pose_from_bbox(pose_estimator, test_data, cfg)
    # get keypoint positions from pose
    keypoints = get_keypoints_from_pose(pose_heatmaps, details, cls_skeleton, crops, start_id, end_id)
    # dump results
    results = prepare_results(test_data[0], keypoints, cls_dets)
    return results
def apply_nms(cls_dets, nms_method, nms_thresh):
    """Filter detections (rows [x1, y1, x2, y2, score]) by score/size, then
    apply GPU NMS ('nms') or soft-NMS ('soft').  Returns (dets, keep)."""
    # nms and filter: drop low-score and tiny boxes first
    keep = np.where((cls_dets[:, 4] >= min_scores) &
                    ((cls_dets[:, 3] - cls_dets[:, 1]) * (cls_dets[:, 2] - cls_dets[:, 0]) >= min_box_size))[0]
    cls_dets = cls_dets[keep]
    if len(cls_dets) > 0:
        if nms_method == 'nms':
            keep = gpu_nms(cls_dets, nms_thresh)
        elif nms_method == 'soft':
            # NOTE(review): cpu_soft_nms is not imported anywhere in this
            # file — this branch would raise NameError; confirm the intended
            # import.
            keep = cpu_soft_nms(np.ascontiguousarray(cls_dets, dtype=np.float32), method=2)
        else:
            assert False
        cls_dets = cls_dets[keep]
    return cls_dets, keep
def get_pose_from_bbox(pose_estimator, test_data, cfg):
    """Crop each detection, run the pose network (optionally averaging with
    a horizontally flipped pass), and return the raw heatmaps plus the
    bookkeeping arrays used by get_keypoints_from_pose()."""
    cls_skeleton = np.zeros((len(test_data), cfg.nr_skeleton, 3))
    crops = np.zeros((len(test_data), 4))
    batch_size = 1
    start_id = 0
    end_id = min(len(test_data), batch_size)
    test_imgs = []
    details = []
    for i in range(start_id, end_id):
        # Preprocessing crops/normalizes the image and records the crop box.
        test_img, detail = Preprocessing(test_data[i], stage='test')
        test_imgs.append(test_img)
        details.append(detail)
    details = np.asarray(details)
    feed = test_imgs
    if flag_flip == True:
        # Append a mirrored copy of each crop so both passes run in one batch.
        for i in range(end_id - start_id):
            ori_img = test_imgs[i][0].transpose(1, 2, 0)
            flip_img = cv2.flip(ori_img, 1)
            feed.append(flip_img.transpose(2, 0, 1)[np.newaxis, ...])
    feed = np.vstack(feed)
    res = pose_estimator.predict_one([feed.transpose(0, 2, 3, 1).astype(np.float32)])[0]
    res = res.transpose(0, 3, 1, 2)
    if flag_flip == True:
        # Un-flip the mirrored heatmaps, swap symmetric joints, and average
        # them into the original-orientation predictions.
        for i in range(end_id - start_id):
            fmp = res[end_id - start_id + i].transpose((1, 2, 0))
            fmp = cv2.flip(fmp, 1)
            fmp = list(fmp.transpose((2, 0, 1)))
            for (q, w) in cfg.symmetry:
                fmp[q], fmp[w] = fmp[w], fmp[q]
            fmp = np.array(fmp)
            res[i] += fmp
            res[i] /= 2
    pose_heatmaps = res
    return pose_heatmaps, details, cls_skeleton, crops, start_id, end_id
def get_keypoints_from_pose(pose_heatmaps, details, cls_skeleton, crops, start_id, end_id):
    """Decode heatmaps into (x, y, confidence) per joint, refining each peak
    toward the second-highest response and mapping coordinates back to the
    original image via the recorded crop boxes."""
    res = pose_heatmaps
    for test_image_id in range(start_id, end_id):
        # r0 keeps an unnormalized copy used later to read confidences.
        r0 = res[test_image_id - start_id].copy()
        r0 /= 255.
        r0 += 0.5
        for w in range(cfg.nr_skeleton):
            res[test_image_id - start_id, w] /= np.amax(res[test_image_id - start_id, w])
        # Pad with a border so blurring/argmax near edges behaves.
        border = 10
        dr = np.zeros((cfg.nr_skeleton, cfg.output_shape[0] + 2 * border, cfg.output_shape[1] + 2 * border))
        dr[:, border:-border, border:-border] = res[test_image_id - start_id][:cfg.nr_skeleton].copy()
        for w in range(cfg.nr_skeleton):
            dr[w] = cv2.GaussianBlur(dr[w], (21, 21), 0)
        for w in range(cfg.nr_skeleton):
            # Primary peak, then second peak after zeroing the first.
            lb = dr[w].argmax()
            y, x = np.unravel_index(lb, dr[w].shape)
            dr[w, y, x] = 0
            lb = dr[w].argmax()
            py, px = np.unravel_index(lb, dr[w].shape)
            y -= border
            x -= border
            py -= border + y
            px -= border + x
            ln = (px ** 2 + py ** 2) ** 0.5
            delta = 0.25
            # Nudge the estimate a quarter pixel toward the second peak.
            if ln > 1e-3:
                x += delta * px / ln
                y += delta * py / ln
            x = max(0, min(x, cfg.output_shape[1] - 1))
            y = max(0, min(y, cfg.output_shape[0] - 1))
            # *4 maps heatmap resolution back to crop resolution.
            cls_skeleton[test_image_id, w, :2] = (x * 4 + 2, y * 4 + 2)
            cls_skeleton[test_image_id, w, 2] = r0[w, int(round(y) + 1e-10), int(round(x) + 1e-10)]
        # map back to original images
        crops[test_image_id, :] = details[test_image_id - start_id, :]
        for w in range(cfg.nr_skeleton):
            cls_skeleton[test_image_id, w, 0] = cls_skeleton[test_image_id, w, 0] / cfg.data_shape[1] * (crops[test_image_id][2] - crops[test_image_id][0]) + crops[test_image_id][0]
            cls_skeleton[test_image_id, w, 1] = cls_skeleton[test_image_id, w, 1] / cfg.data_shape[0] * (crops[test_image_id][3] - crops[test_image_id][1]) + crops[test_image_id][1]
    return cls_skeleton
def prepare_results(test_data, cls_skeleton, cls_dets):
    """Flatten decoded skeletons into COCO-style result dicts, appending the
    mean per-joint confidence as the instance score."""
    cls_partsco = cls_skeleton[:, :, 2].copy().reshape(-1, cfg.nr_skeleton)
    cls_scores = 1
    dump_results = []
    # Append each instance's mean joint confidence as a trailing score column.
    cls_skeleton = np.concatenate(
        [cls_skeleton.reshape(-1, cfg.nr_skeleton * 3), (cls_scores * cls_partsco.mean(axis=1))[:, np.newaxis]],
        axis=1)
    for i in range(len(cls_skeleton)):
        result = dict(image_id=test_data['img_id'],
                      category_id=1,
                      score=float(round(cls_skeleton[i][-1], 4)),
                      keypoints=cls_skeleton[i][:-1].round(3).tolist())
        dump_results.append(result)
    return dump_results
def is_keyframe(img_id, interval=10):
    """Return True when *img_id* falls on the keyframe grid, i.e. every
    *interval*-th frame (frame 0 included)."""
    return img_id % interval == 0
def pose_to_standard_mot(keypoints_list_list, dets_list_list):
    """Convert per-frame detection/keypoint lists into the openSVAI json
    structure (one {"image", "candidates"} entry per frame)."""
    openSVAI_python_data_list = []
    num_keypoints_list = len(keypoints_list_list)
    num_dets_list = len(dets_list_list)
    assert(num_keypoints_list == num_dets_list)
    for i in range(num_dets_list):
        dets_list = dets_list_list[i]
        keypoints_list = keypoints_list_list[i]
        if dets_list == []:
            continue
        img_path = dets_list[0]["imgpath"]
        img_folder_path = os.path.dirname(img_path)
        img_name = os.path.basename(img_path)
        img_info = {"folder": img_folder_path,
                    "name": img_name,
                    "id": [int(i)]}
        openSVAI_python_data = {"image":[], "candidates":[]}
        openSVAI_python_data["image"] = img_info
        num_dets = len(dets_list)
        num_keypoints = len(keypoints_list) #number of persons, not number of keypoints for each person
        candidate_list = []
        for j in range(num_dets):
            keypoints_dict = keypoints_list[j]
            dets_dict = dets_list[j]
            img_id = keypoints_dict["img_id"]
            det_id = keypoints_dict["det_id"]
            img_path = keypoints_dict["imgpath"]
            bbox_dets_data = dets_list[det_id]
            det = dets_dict["bbox"]
            if det == [0, 0, 2, 2]:
                # do not provide keypoints for the null-detection placeholder
                candidate = {"det_bbox": [0, 0, 2, 2],
                             "det_score": 0}
            else:
                bbox_in_xywh = det[0:4]
                keypoints = keypoints_dict["keypoints"]
                # NOTE(review): this divides the confidence sum by
                # 3 * len(keypoints), not by the number of joints
                # (len(keypoints) / 3) — confirm the intended normalization.
                track_score = sum(keypoints[2::3])/len(keypoints)/3.0
                candidate = {"det_bbox": bbox_in_xywh,
                             "det_score": 1,
                             "track_score": track_score,
                             "pose_keypoints_2d": keypoints}
            candidate_list.append(candidate)
        openSVAI_python_data["candidates"] = candidate_list
        openSVAI_python_data_list.append(openSVAI_python_data)
    return openSVAI_python_data_list
def x1y1x2y2_to_xywh(det):
    """Convert corner form [x1, y1, x2, y2] to [x1, y1, w, h].

    Width/height are computed from integer-truncated corners, matching the
    original behavior for float inputs.
    """
    left, top, right, bottom = det
    return [left, top, int(right) - int(left), int(bottom) - int(top)]
def xywh_to_x1y1x2y2(det):
    """Convert [x1, y1, w, h] to corner form [x1, y1, x2, y2]."""
    left, top, width, height = det
    return [left, top, left + width, top + height]
def bbox_invalid(bbox):
    """A bbox equal to the placeholder [0, 0, 2, 2] marks a null detection."""
    return bbox == [0, 0, 2, 2]
if __name__ == '__main__':
    # CLI entry point: split a video into frames, run light_track over them,
    # then report timing statistics.
    # global args
    parser = argparse.ArgumentParser()
    parser.add_argument('--video_path', '-v', type=str, dest='video_path', default="data/demo/video.mp4")
    parser.add_argument('--model', '-m', type=str, dest='test_model', default="weights/mobile-deconv/snapshot_296.ckpt")
    parser.add_argument('--detector','-d',type=str,dest='human_detector',default='yolo')
    args = parser.parse_args()
    print("Human Detector:{}".format(args.human_detector))
    args.bbox_thresh = 0.4
    detector=args.human_detector
    # initialize pose estimator
    initialize_parameters()
    pose_estimator = Tester(Network(), cfg)
    pose_estimator.load_weights(args.test_model)
    # Derive all output locations from the video's base name.
    video_path = args.video_path
    visualize_folder = "data/demo/visualize"
    output_video_folder = "data/demo/videos"
    output_json_folder = "data/demo/jsons"
    video_name = os.path.basename(video_path)
    video_name = os.path.splitext(video_name)[0]
    image_folder = os.path.join("data/demo", video_name)
    visualize_folder = os.path.join(visualize_folder, video_name)
    output_json_path = os.path.join(output_json_folder, video_name+".json")
    output_video_path = os.path.join(output_video_folder, video_name+"_out_yolo.mp4")
    detect_folder=os.path.join("data/demo",'detect')
    if is_video(video_path):
        # Explode the video into frames, prepare output folders, and track.
        video_to_images(video_path)
        create_folder(visualize_folder)
        create_folder(output_video_folder)
        create_folder(output_json_folder)
        light_track(pose_estimator,detector,
                    image_folder, output_json_path,
                    visualize_folder, detect_folder,output_video_path)
        print("Finished video {}".format(output_video_path))
        ''' Display statistics '''
        print("total_time_ALL: {:.2f}s".format(total_time_ALL))
        print("total_time_DET: {:.2f}s".format(total_time_DET))
        print("total_time_POSE: {:.2f}s".format(total_time_POSE))
        print("total_num_FRAMES: {:d}".format(total_num_FRAMES))
        print("total_num_PERSONS: {:d}\n".format(total_num_PERSONS))
        print("Average FPS: {:.2f}fps".format(total_num_FRAMES / total_time_ALL))
        print("Average FPS excluding Pose Estimation: {:.2f}fps".format(total_num_FRAMES / (total_time_ALL - total_time_POSE)))
        print("Average FPS excluding Detection: {:.2f}fps".format(total_num_FRAMES / (total_time_ALL - total_time_DET)))
    else:
        print("Video does not exist.")
|
from __future__ import unicode_literals
from django.core.exceptions import PermissionDenied
class RateLimited(PermissionDenied):
    """Raised when a client exceeds its allowed request rate.

    Subclasses PermissionDenied, presumably so Django's standard
    permission-denied handling applies — verify against the middleware
    that catches it.
    """
    pass
|
# Configuration constants for a question-generation model (Zhidao data).
# --- model/output locations ---
checkpoint_folder = './checkpoint'
log_folder = './log'
# --- network dimensions ---
embedding_dim = 300
encoder_hidden_dim = 150
decoder_hidden_dim = 150
dot_attention_dim = 150
max_question_len = 20
# --- special vocabulary tokens and their fixed ids ---
NULL = '--NULL--'
OOV = '--OOV--'
SOS = '--SOS--'
EOS = '--EOS--'
NULL_ID = 0
OOV_ID = 1
SOS_ID = 2
EOS_ID = 3
keep_prob = 0.7  # dropout keep probability
# --- raw dataset files and generated intermediate files ---
raw_train_file = './data/zhidao.train.json'
train_file = './generate/train.txt'
raw_dev_file = './data/zhidao.dev.json'
dev_file = './generate/dev.txt'
raw_test_file = './data/zhidao.test.json'
test_file = './generate/test.json'
question_vocab_file = './generate/vocab.question.txt'
answer_vocab_file = './generate/vocab.answer.txt'
stopwords_file = './data/stopwords.txt'
# --- data limits ---
answer_limit = 400
answer_vocab_size = 50000
question_vocab_size = 10000
# Write a function that will sum up all the elements in a list up to but not including the first even number.
def testEqual(s1,s2):
print(s1, " | ", s2, " : ", s1==s2)
def sum_of_initial_odds(nums):
    """Sum the leading run of odd elements of *nums*, stopping at (and
    excluding) the first even number.

    Works for any iterable of ints; negative odds are included because
    Python's ``%`` gives -7 % 2 == 1.
    """
    # Renamed the accumulator: the original shadowed the builtin `sum`.
    total = 0
    for value in nums:
        if value % 2 == 0:
            break
        total += value
    return total
# Spot-check sum_of_initial_odds against known answers (printed, not asserted).
testEqual(sum_of_initial_odds([1,3,1,4,3,8]), 5)
testEqual(sum_of_initial_odds([6,1,3,5,7]), 0)
testEqual(sum_of_initial_odds([1, -7, 10, 23]), -6)
testEqual(sum_of_initial_odds(range(1,555,2)), 76729)
from datetime import datetime
from typing import Optional
from pydantic import BaseModel
import pydantic
from models.location import BeerPlace
class ReviewSubmittal(BaseModel):
    """Incoming review payload: a place, a free-text description, and a
    0-100 rating."""
    beerplace: BeerPlace
    description: str
    rating: int

    @pydantic.validator('rating')
    @classmethod
    def rating_boud(cls, rating):
        """Reject ratings outside 0-100.

        BUG FIX: raises ValueError instead of a bare Exception — pydantic
        only converts ValueError/TypeError/AssertionError into a
        ValidationError, so the original error escaped validation.
        """
        if rating < 0 or rating > 100:
            raise ValueError('Rating must be between 0-100.')
        return rating
class Review(ReviewSubmittal):
    # Server-assigned fields layered on top of the submitted payload.
    id: str
    created_at: datetime
import numpy as np
import sys
import math
import operator
import csv
import glob,os
import xlrd
import cv2
import pandas as pd
import os
import glob
import matplotlib.pyplot as plt
from reordering import readinput
from random import randint
from augmentation import *
def augment_crop(image, style, row=224, col=224, cutSize=4):
    """Crop *cutSize* pixels off one side or corner of *image* (selected by
    *style*, 1-8) and resize the result back to (col, row).

    Styles: 1 top, 2 bottom, 3 right, 4 left, 5 top-left, 6 top-right,
    7 bottom-right, 8 bottom-left.

    BUG FIX: an unknown style previously fell through all branches and
    silently returned None; it now raises ValueError.
    """
    # Map each style to the (row-slice, col-slice) region to keep.
    crops = {
        1: (slice(cutSize, row), slice(None)),                  # cut from top
        2: (slice(0, row - cutSize), slice(None)),              # cut from bottom
        3: (slice(None), slice(0, col - cutSize)),              # cut from right
        4: (slice(None), slice(cutSize, col)),                  # cut from left
        5: (slice(cutSize, row), slice(cutSize, col)),          # topleft
        6: (slice(cutSize, row), slice(0, col - cutSize)),      # topright
        7: (slice(0, row - cutSize), slice(0, col - cutSize)),  # bottomright
        8: (slice(0, row - cutSize), slice(cutSize, col)),      # bottomleft
    }
    try:
        rows, cols = crops[style]
    except KeyError:
        raise ValueError("unknown crop style: %r" % (style,))
    return cv2.resize(image[rows, cols], (col, row))
def augment_image(img, cropBool,cropStyle, flipBool, rotBool, rotAngle):
    """Apply the enabled augmentations to *img*.

    Only cropping is currently active; flip and rotation are disabled
    (commented out) but their flags are kept for interface stability.
    """
    if cropBool == 1:
        img = augment_crop(img, cropStyle)
    #if flipBool == 1: img,_ = flip(img)
    #if rotBool == 1: img = rotation(rotAngle, img)
    return img
def augmentation_casme(db_images, outputDir, numSamples, table, resizedFlag, r, w):
    """Balance the CASME dataset per emotion class by re-writing each
    original clip and then augmenting randomly sampled clips until
    *numSamples* clips exist per emotion.

    Frames are optionally resized to (w, r); augmented copies get a random
    crop style (flip/rotation are currently disabled in augment_image).
    """
    for emotion in ['positive', 'negative', 'surprise', 'others']:
        # Rows of the label table belonging to this emotion.
        table_emotion = pd.DataFrame(data=table[0:,0:],columns=['sub','id','emotion'])
        table_emotion = table_emotion[table_emotion['emotion']==emotion]
        for i in range(numSamples):
            print(emotion+"_"+str(i))
            # first we ensure that every original video is processed, then we start sampling randomly until we have enough
            if i <= (table_emotion.shape[0]-1):
                random_pick = table_emotion.iloc[[i]] # not so random
                print("processing original video #" + str(i) + " emotion=" + emotion)
            else:
                random_pick = table_emotion.sample(n=1) # very random
                print("processing augmented video #" + str(i) + "emotion" + emotion)
            path = db_images+"sub"+str(random_pick['sub'].iloc[0])+"/"+str(random_pick['id'].iloc[0])+"/"
            imgList = readinput(path)
            numFrame = len(imgList)
            if resizedFlag == 1:
                col = w
                row = r
            else:
                # Keep native resolution, read from the first frame.
                img = cv2.imread(imgList[0])
                [row,col,_l] = img.shape
            # One random augmentation recipe per clip so all its frames match.
            cropBool = 1#randint(0,1)
            cropStyle = randint(1,8)
            flipBool = randint(0,1)
            rotBool = 0#randint(0,1)
            rotAngle = 0#randint(-8,8)
            for var in range(numFrame):
                img = cv2.imread(imgList[var])
                [_,_,dim] = img.shape
                if resizedFlag == 1:
                    img = cv2.resize(img, (col,row))
                # Only augment the extra (sampled) clips, not the originals.
                if i > (table_emotion.shape[0]-1):
                    img = augment_image(img,cropBool,cropStyle,flipBool,rotBool, rotAngle)
                writeFolder = outputDir+"sub"+str(random_pick['sub'].iloc[0])+"/"+str(random_pick['id'].iloc[0])+"."+str(i)+"/"
                outputPath = writeFolder + imgList[var].split('/')[-1]
                if not os.path.exists(writeFolder):
                    os.makedirs(writeFolder)
                cv2.imwrite(outputPath, img)
|
import asyncio
from aiohttp import web
async def handle_health(request):
    """Health-check endpoint: always answers 200 with a tiny HTML page."""
    body = "<html><head><title>Dragonbot</title></head><body>Healthy</body></html>"
    return web.Response(
        status=200,
        content_type='text/html',
        text=body,
    )
print('Starting web server')
# Serve the health-check route at the site root on port 10000 (blocking).
app = web.Application()
app.add_routes([web.get('/', handle_health)])
web.run_app(app, port=10000)
|
import unittest
from libs.models.math import MainMatrix, Nan
class TestMainMatrix(unittest.TestCase):
    """Class to test basic methods of the
    MainMatrix() class
    """
    @classmethod
    def setUpClass(cls):
        """Prepares 5 different matrices and
        their `nan`s coordinates (i, j) as global
        attributes
        """
        # 3 rows x 2 cols, no nans.
        cls.matrix_1 = MainMatrix(data_in=[
            [1, 2],
            [3, 4],
            [5, 6]
        ])
        cls.m1_nans = []
        # 3x3 with one nan per row, on the anti-diagonal.
        cls.matrix_2 = MainMatrix(data_in=[
            [1, 2, float('nan')],
            [3, float('nan'), 4],
            [float('nan'), 6, 7]
        ])
        cls.m2_nans = [(0, 2), (1, 1), (2, 0)]
        # Single-row matrix, nan in the last column.
        cls.matrix_3 = MainMatrix([
            [1, 2, float('nan')],
        ])
        cls.m3_nans = [(0, 2)]
        # Single-column matrix, nan in the last row.
        cls.matrix_4 = MainMatrix(data_in=[
            [1, ],
            [2, ],
            [float('nan')]
        ])
        cls.m4_nans = [(2, 0)]
        # 2x3 matrix made entirely of nans.
        cls.matrix_5 = MainMatrix([
            [float('nan'), float('nan'), float('nan')],
            [float('nan'), float('nan'), float('nan')]
        ])
        cls.m5_nans = [
            (0, 0), (0, 1), (0, 2),
            (1, 0), (1, 1), (1, 2)
        ]
    def test_find_nans(self):
        """Tests the MainMatrix find_nans method
        using 5 different input matrices
        """
        # matrix 1 is (3x2) and has no nans
        nans = self.matrix_1.find_nans()
        self.assertEqual(len(nans), 0)
        self.assertEqual(self.matrix_1.max_r, 3)
        self.assertEqual(self.matrix_1.max_c, 2)
        # matrix 2 is (3x3) and has 3 nans
        nans = self.matrix_2.find_nans()
        self.assertEqual(len(nans), 3)
        self.assertEqual(self.matrix_2.max_r, 3)
        self.assertEqual(self.matrix_2.max_c, 3)
        coords = []
        for n in nans:
            coords.append((n.i, n.j))
        self.assertEqual(coords, self.m2_nans)
        # matrix 3 is (1x3) and has 1 nan
        nans = self.matrix_3.find_nans()
        self.assertEqual(len(nans), 1)
        self.assertEqual(self.matrix_3.max_r, 1)
        self.assertEqual(self.matrix_3.max_c, 3)
        coords = []
        for n in nans:
            coords.append((n.i, n.j))
        self.assertEqual(coords, self.m3_nans)
        # matrix 4 is (3x1) and has 1 nan
        nans = self.matrix_4.find_nans()
        self.assertEqual(len(nans), 1)
        self.assertEqual(self.matrix_4.max_r, 3)
        self.assertEqual(self.matrix_4.max_c, 1)
        coords = []
        for n in nans:
            coords.append((n.i, n.j))
        self.assertEqual(coords, self.m4_nans)
        # matrix 5 is (2x3) and has 6 nans
        nans = self.matrix_5.find_nans()
        self.assertEqual(len(nans), 6)
        self.assertEqual(self.matrix_5.max_r, 2)
        self.assertEqual(self.matrix_5.max_c, 3)
        coords = []
        for n in nans:
            coords.append((n.i, n.j))
        self.assertEqual(coords, self.m5_nans)
class TestNan(unittest.TestCase):
    """Unit tests for the Nan class' nearest-neighbour pairing."""

    @classmethod
    def setUpClass(cls):
        """Define five nan locations and the expected neighbour coordinates."""
        cls.nan_locations = [
            (0, 0, 2, 2), (0, 3, 4, 4),
            (3, 0, 4, 4), (3, 3, 4, 4),
            (1, 1, 3, 3)
        ]
        cls.expected_nn = [
            [(1, 0), (0, 1)], [(0, 2), (1, 3)],
            [(2, 0), (3, 1)], [(2, 3), (3, 2)],
            [(0, 1), (1, 2), (1, 0), (1, 2)]
        ]

    def test_nn_pairs(self):
        """Each nan's non-empty pairs must match the expected coordinates."""
        nans = [Nan(*location) for location in self.nan_locations]
        for idx, nan in enumerate(nans):
            expected_pairs = self.expected_nn[idx]
            non_empty = [pair for pair in nan.pairs if pair != ()]
            # Count of real (non-empty) neighbours must match.
            self.assertEqual(len(expected_pairs), len(non_empty))
            # Every expected coordinate must be present.
            for pair in expected_pairs:
                self.assertIn(pair, nan.pairs)
|
import os
from day_2 import main

test_input_condition_list = []
test_input_password_list = []

# Load the 'condition:password' fixture rows stored next to this test file.
current_dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(current_dir, 'test_input.txt')
with open(input_file, "r") as openfile:
    for raw_line in openfile:
        condition, password = raw_line.strip().split(":")
        test_input_condition_list.append(condition)
        test_input_password_list.append(password)
def _validate(index):
    """Run part-one validation on the fixture row at *index*."""
    return main.password_validation_part_one(
        test_input_condition_list[index],
        test_input_password_list[index],
    )


def test_valid_1():
    """First fixture row is a valid password."""
    assert _validate(0) is True


def test_invalid():
    """Second fixture row is an invalid password."""
    assert _validate(1) is False


def test_valid_2():
    """Third fixture row is a valid password."""
    assert _validate(2) is True
|
from decimal import Decimal
from django.test import TestCase
from main import models


class TestModel(TestCase):
    def test_active_manager_work(self):
        """The custom manager's active() queryset excludes inactive products."""
        # NOTE(review): 'object' (singular) is unusual -- Django's default
        # manager is 'objects'. Presumably the Product model declares a custom
        # manager named 'object'; verify against main/models.py.
        models.Product.object.create(name="The cathedral and the bazaar", price=Decimal("10.00"))
        models.Product.object.create(name="Pride and Prejudice", price=Decimal("2.00"))
        models.Product.object.create(name="A Tale of Two Cities", price=Decimal("2.00"), active=False)
        # Two of the three products created above default to active=True.
        self.assertEqual(len(models.Product.object.active()), 2)
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
def count_depth(node):
    """Return the depth of the left spine: -1 for an empty tree, 0 for a leaf.

    In a complete tree the leftmost path is always the longest, so this is
    the tree's depth.
    """
    return -1 if node is None else count_depth(node.left) + 1


def find_leaf(node, n, shift):
    """Return True if leaf number *n* exists at the bottom level.

    The bits of *n*, read from bit *shift* down to bit 0, encode the path
    from the root (1 = right, 0 = left).
    """
    if shift == -1:
        return node is not None
    way = (n >> shift) & 1
    if way == 1:
        return find_leaf(node.right, n, shift - 1)
    return find_leaf(node.left, n, shift - 1)


def bin_search(a, b, root, sh):
    """Binary-search the index of the last existing leaf in [a, b).

    Invariant: leaf *a* exists and leaf *b* does not (requires a < b).
    """
    if b - a < 2:
        return a
    # Integer midpoint: '/' would produce a float on Python 3 and break the
    # bit shifting inside find_leaf.
    mid = (a + b) // 2
    if find_leaf(root, mid, sh):
        return bin_search(mid, b, root, sh)
    return bin_search(a, mid, root, sh)


class Solution(object):
    def countNodes(self, root):
        """Count the nodes of a complete binary tree in O(log^2 n).

        :type root: TreeNode
        :rtype: int
        """
        if root is None:
            return 0
        depth = count_depth(root)
        max_leaf_len = 2 ** depth  # number of leaf slots on the last level
        if find_leaf(root, max_leaf_len - 1, depth - 1):
            # The last level is completely full.
            leaf_count = max_leaf_len
        else:
            # Find the last present leaf; leaf 0 always exists in a complete tree.
            leaf_count = bin_search(0, max_leaf_len - 1, root, depth - 1) + 1
        # Every level above the last is full: 2**depth - 1 internal nodes.
        non_leaf_count = (2 ** depth) - 1
        return non_leaf_count + leaf_count
|
# Read the integer whose factorial will be computed from stdin.
number = int(input("Enter a number: "))
def fact(number):
    """Return number! (the factorial of a non-negative integer).

    Iterative rather than recursive, so large inputs do not hit the
    interpreter's recursion limit.

    Args:
        number: non-negative integer.

    Returns:
        number! as an int (1 for 0 and 1).

    Raises:
        ValueError: if number is negative (the original recursion would
            never terminate for negative input).
    """
    if number < 0:
        raise ValueError("factorial is undefined for negative numbers")
    result = 1
    for i in range(2, number + 1):
        result *= i
    return result
# Compute and display the result.
factorial=fact(number)
print("Factorial is {}".format(factorial))
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 6 19:52:33 2019
@author: BaX Cruiser
"""
def birthdayCakeCandles(ar):
    """Return how many candles share the maximum height in *ar*."""
    tallest = max(ar)
    return sum(1 for height in ar if height == tallest)
if __name__ == '__main__':
    # Read the candle count (unused) and the heights list from stdin,
    # HackerRank style.
    n= int(input())
    ar=list(map(int, input().rstrip().split()))
    print(birthdayCakeCandles(ar))
|
"""Utilities for interacting with Pandas objects
"""
import re
from typing import List
import pandas as pd
import numpy as np
def _pdfilt(df, fstr):
m = re.match("(\S*)\s*(<=|>=|==|>|<)\s*(.*)", fstr)
column, op, value = m.groups()
op_table = {"<": "lt", "<=": "ge", ">": "gt", ">=": "ge", "==": "eq"}
try:
# Assume most things are floating point values; fall back to string otherwise
value = float(value)
except:
pass
return df[df[column].__getattribute__(op_table[op])(value)]
def pdfilt(df: pd.DataFrame, filters: List[str]) -> pd.DataFrame:
    """Apply one or more 'column op value' filter strings to a DataFrame.

    E.g. 'mycolumn > 21.5' or 'valuetype == notifications'.

    Args:
        df (pd.DataFrame): DataFrame to filter
        filters (list): List of filter strings (or a single string)

    Returns:
        [pd.DataFrame]: Filtered DataFrame
    """
    filter_list = [filters] if isinstance(filters, str) else filters
    result = df
    for expression in filter_list:
        result = _pdfilt(result, expression)
    return result
def increment_last_period(
    recency: int,
    values: pd.Series,
) -> pd.Series:
    """
    Compute how much a monotonically increasing series grew over the
    preceding *recency* interval, at every index.

    Args:
        recency: How far to look back to find the increment
        values: The series of values to look back into

    Returns:
        Series of increases over the preceding period
    """
    # Both the values and their index must be non-decreasing for the
    # interpolation below to make sense.
    assert values.is_monotonic_increasing
    assert values.index.is_monotonic_increasing
    lookback_points = values.index - recency
    earlier_values = np.interp(lookback_points, values.index, values)
    return values - pd.Series(earlier_values, index=values.index)
|
## input data
# Network share holding the PEST synthetic-data model run (Upper Yaquina
# dissolved-oxygen TMDL work).
str_path = '//deqhq1/tmdl/TMDL_WR/MidCoast/Models/Dissolved Oxygen/PEST-Synthetic-data/Upper_Yaquina_PEST/python'
str_file_in = str_path + '/' + 'UY_do.out'  # QUAL2Kw model output to read
str_ins_in = str_path + '/' + 'model_ins.txt'  # instruction info file
str_model_out = str_path + '/' + 'model.out'  # file written for PEST
## functions
## function to read the model ins info file
def read_ins_info(str_file_ins):
    """Parse the instruction info file into a dictionary keyed by name.

    Each data line is 'flag,name,lines,columns'; the first line is a header
    and is skipped. Returns {name: [flag, lines, columns]} with all fields
    stripped of surrounding whitespace.
    """
    dic_ins = {}
    with open(str_file_ins) as f:
        f.readline()  # skip the header line
        for line in f:
            parts = str(line).split(',')
            dic_ins[str(parts[1]).strip()] = [
                str(parts[0]).strip(),
                str(parts[2]).strip(),
                str(parts[3]).strip('\n').strip(),
            ]
        # The redundant f.close() inside the 'with' block was removed:
        # the context manager already closes the file.
    return dic_ins
## function to read outfile and return dictionary
def read_out(str_file_in):
    """Read a text file into {1-based line number: raw line text}.

    Line numbers start at 1 to match the instruction file's line references.
    """
    dic_local = {}
    with open(str_file_in) as f:
        # enumerate replaces the manual 'ii = ii + 1' counter; the redundant
        # f.close() inside the 'with' block was removed.
        for ii, line in enumerate(f, start=1):
            dic_local[ii] = line
    return dic_local
## function to get integer part of number
def get_int(str_num):
    """Return the integer part of a numeric string, as text.

    E.g. ' 12.7 ' -> '12', '3' -> '3'.
    """
    normalized = str(float(str_num.strip()))
    return normalized.split('.')[0]
## function to get indices for specific hour from 0 to 23
def get_indices(str_hour, str_time):
    """Return every position in *str_time* whose value equals *str_hour*."""
    return [idx for idx, value in enumerate(str_time) if value == str_hour]
## function to get diel time
def get_diel_time(int_ln_st, int_ln_ed, int_col_t_st, int_col_t_ed, dic_out):
    """Extract integer hour strings from the time columns of lines [st, ed)."""
    return [
        get_int(str(dic_out[line_no][int_col_t_st:int_col_t_ed]).strip())
        for line_no in range(int_ln_st, int_ln_ed)
    ]
## function to get diel data
def get_diel_data(int_ln_st, int_ln_ed, int_col_d_st, int_col_d_ed, dic_out):
    """Extract float values from the data columns of lines [st, ed)."""
    return [
        float(str(dic_out[line_no][int_col_d_st:int_col_d_ed]).strip())
        for line_no in range(int_ln_st, int_ln_ed)
    ]
## function calculate statistic
def calc_stat(str_name, lst_vals):
    """Compute the statistic named inside *str_name* over *lst_vals*.

    The name must contain 'ave' (mean), 'min' or 'max'. If several keywords
    appear, the last of ave/min/max in that order wins (matches the original
    sequential-if behaviour).

    Raises:
        ValueError: if the name contains none of the keywords. (The original
            used 'find(...) > 0', which missed a keyword at position 0 and
            crashed with UnboundLocalError when nothing matched.)
    """
    stat = None
    if 'ave' in str(str_name):
        stat = sum(lst_vals) / len(lst_vals)
    if 'min' in str(str_name):
        stat = min(lst_vals)
    if 'max' in str(str_name):
        stat = max(lst_vals)
    if stat is None:
        raise ValueError("unknown statistic in name: %s" % str_name)
    return stat
## get the instructions
dic_ins = read_ins_info(str_ins_in)
## read QUAL2kw output file
dic_out = read_out(str_file_in)
## loop through instructions and get values
# Each instruction maps an observation name to [diel_flag, line spec, column spec].
dic_model_out = {}
for jj in list(dic_ins.keys()):
    if dic_ins[jj][0] == 'no':
        ## get individual values
        int_ln = int(dic_ins[jj][1]) # line in file
        int_st = int(str(dic_ins[jj][2]).split('-')[0]) # starting column
        int_ed = int(str(dic_ins[jj][2]).split('-')[1]) # ending column
        dic_model_out[jj] = float(str(dic_out[int_ln])[int_st:int_ed].strip()) # add value to dictionary
        del(int_ln, int_st, int_ed) # clean up
    if dic_ins[jj][0] == 'yes':
        ## diel (hourly) observation: compute a statistic over one hour of the day
        # NOTE(review): columns 49-73 are assumed to hold the time stamp in the
        # QUAL2Kw .out layout -- confirm against an actual output file.
        int_col_t_st = 49 # starting column for time
        int_col_t_ed = 73 # ending column for time
        int_ln_st = int(str(dic_ins[jj][1]).split('-')[0]) # starting line
        int_ln_ed = int(str(dic_ins[jj][1]).split('-')[1]) # ending line
        lst_time = get_diel_time(int_ln_st, int_ln_ed,int_col_t_st, int_col_t_ed, dic_out) # get times for data
        # The last two characters of the observation name encode the hour (00-23).
        lst_indices = get_indices(str(int(jj[-2:])), lst_time)
        int_col_d_st = int(str(dic_ins[jj][2]).split('-')[0]) # starting column for data
        int_col_d_ed = int(str(dic_ins[jj][2]).split('-')[1]) # ending column for data
        lst_data = get_diel_data(int_ln_st, int_ln_ed,int_col_d_st, int_col_d_ed, dic_out) # get data
        # NOTE(review): the slice below excludes lst_indices[-1] itself -- confirm
        # the half-open range is intended.
        dic_model_out[jj] = calc_stat(jj, lst_data[lst_indices[0]:lst_indices[-1]]) # calc stat and add value to dictionary
        del(int_col_t_st, int_col_t_ed, int_ln_st, int_ln_ed, lst_time, lst_indices, int_col_d_st, int_col_d_ed, lst_data) # clean up
## loop through and write output for PEST
# Fixed-width records: name left-justified to 20 chars, value in 14.7E notation.
with open(str_model_out, 'w') as out_file:
    for kk in list(dic_ins.keys()):
        str_out = str(format(kk, '<20s') + format(dic_model_out[kk], '14.7E') + '\n')
        junk = out_file.write(str_out)
        del(str_out, junk)
|
import logging
import os
from flask import Flask
from werkzeug.utils import import_string
from . import config, db, io
from flasgger import Swagger
from flask_cors import CORS, cross_origin
import csv
import traceback
from flask_rest_api.problems.models import Problem
logger = logging.getLogger(__name__)
def load_data(csv_filepath):
    """Read the problems CSV and return a list of Problem model instances.

    The first cell of each row packs question|answer|distraction1 separated
    by DELIM1; any further cells hold additional distractions. The first CSV
    row is treated as a header and skipped.
    """
    rows_for_insert = []
    DELIM1='|'
    DELIM2=','  # NOTE(review): unused -- confirm it can be removed
    # The CSV path is resolved relative to the parent of this package.
    dir = os.path.dirname(__file__)
    csv_filepath = os.path.join(dir, '../'+csv_filepath)
    with open(csv_filepath) as csvDataFile:
        csvReader = csv.reader(csvDataFile)
        for index,row in enumerate(csvReader):
            # Template with empty defaults for every field of a Problem row.
            row_to_insert = {
                'question':'',
                'answer':'',
                'distraction1':'',
                'distraction2':'',
                'distraction3':'',
                'distraction4':'',
                'distraction5':''
            }
            # if index==11:
            # break
            #Skip header
            if index==0:
                continue
            try:
                row_to_insert['question']=row[0].split(DELIM1)[0]
                row_to_insert['answer']=row[0].split(DELIM1)[1]
                row_to_insert['distraction1']=row[0].split(DELIM1)[2]
                # Remaining cells become distraction2..distractionN.
                for i,d in enumerate(row[1:]):
                    row_to_insert['distraction%s'%(i+2)]=d
            except:
                print ('Failed to parse row: '+str(row))
                print (traceback.format_exc())
            # print(row_to_insert)
            # NOTE(review): rows that failed to parse are still appended here
            # with partially-empty fields -- confirm this is intended.
            rows_for_insert.append(Problem(**row_to_insert))
    return rows_for_insert
# i = problems.insert()
# for row in rows_for_insert:
# i.execute(**row)
def create_app(environment):
    """Creates a new Flask application and initialize application.

    Args:
        environment: one of 'development', 'testing' or 'production'
            (case-insensitive); selects the config object.

    Returns:
        The configured Flask app with DB created and seeded from the CSV.
    """
    # Map the environment name to its config object.
    config_map = {
        'development': config.Development(),
        'testing': config.Testing(),
        'production': config.Production(),
    }
    config_obj = config_map[environment.lower()]
    app = Flask(__name__, static_url_path='/static')
    app.config.from_object(config_obj)
    app.url_map.strict_slashes = False
    app.add_url_rule('/', '/static/index.html', home)
    register_blueprints(app)
    db.init_app(app)
    io.init_app(app)
    CORS(app)
    with app.app_context():
        # Create the schema and seed it from the CSV.
        # NOTE(review): this seeding runs on every startup; an existing,
        # already-seeded database would get duplicate rows -- confirm.
        db.create_all()
        rows_for_insert=load_data(config_obj.CSV_REL_FILE_PATH)
        for row in rows_for_insert:
            db.session.add(row)
        db.session.commit()
    return app
# def create_app(environment):
# config_map = {
# 'development': config.Development(),
# 'testing': config.Testing(),
# 'production': config.Production(),
# }
#
# config_obj = config_map[environment.lower()]
#
# app = Flask(__name__)
# app.config.from_object(config_obj)
#
# app.config['SWAGGER'] = {
# 'title': '2 Colors API',
# 'uiversion': 2
# }
#
# db.init_app(app)
#
# Swagger(app)
#
# return app
# NOTE(review): 'app' is not defined at module scope -- create_app builds it
# locally -- so this decorator raises NameError at import time. This route
# likely belongs on a Blueprint or inside create_app; verify before use.
# The docstring below is a flasgger/OpenAPI spec parsed at runtime, so it is
# left untouched.
@app.route('/colors/<palette>/')
def colors(palette):
    """Example endpoint return a list of colors by palette
    This is using docstring for specifications
    ---
    tags:
      - colors
    parameters:
      - name: palette
        in: path
        type: string
        enum: ['all', 'rgb', 'cmyk']
        required: true
        default: all
        description: Which palette to filter?
    operationId: get_colors
    consumes:
      - application/json
    produces:
      - application/json
    security:
      colors_auth:
        - 'write:colors'
        - 'read:colors'
    schemes: ['http', 'https']
    deprecated: false
    externalDocs:
      description: Project repository
      url: http://github.com/rochacbruno/flasgger
    definitions:
      Palette:
        type: object
        properties:
          palette_name:
            type: array
            items:
              $ref: '#/definitions/Color'
      Color:
        type: string
    responses:
      200:
        description: A list of colors (may be filtered by palette)
        schema:
          $ref: '#/definitions/Palette'
        examples:
          rgb: ['red', 'green', 'blue']
    """
    all_colors = {
        'cmyk': ['cian', 'magenta', 'yellow', 'black'],
        'rgb': ['red', 'green', 'blue']
    }
    if palette == 'all':
        result = all_colors
    else:
        # Unknown palettes yield {palette: None}.
        result = {palette: all_colors.get(palette)}
    return result
def home():
    """Static landing payload for the root URL."""
    return {'name': 'Flask REST API'}
def register_blueprints(app):
    """Discover and register every '<pkg>/views.py' blueprint in this package.

    Each views module is expected to expose its blueprint as a module-level
    'app' attribute; modules without one are skipped.
    """
    # Derive the package root from this file instead of the original
    # hard-coded, machine-specific absolute path ('/Users/dennch3/...'),
    # which broke on any other machine.
    root_folder = os.path.dirname(os.path.abspath(__file__))
    for dir_name in os.listdir(root_folder):
        module_name = 'flask_rest_api' + '.' + dir_name + '.views'
        module_path = os.path.join(root_folder, dir_name, 'views.py')
        if os.path.exists(module_path):
            module = import_string(module_name)
            obj = getattr(module, 'app', None)
            if obj:
                app.register_blueprint(obj)
|
# Convert a CIF crystal-structure file into a 3-D tensor and print its shape.
import numpy as np
import matplotlib.pyplot as plt
from CIFtoTensor import CIFtoTensor

# NOTE(review): get_cif_file presumably locates or prompts for a CIF file,
# and get_pymat_struct builds a pymatgen Structure -- confirm against the
# CIFtoTensor module.
cif_file = CIFtoTensor.get_cif_file()
struc = CIFtoTensor.get_pymat_struct(cif_file)
mol_tensor = CIFtoTensor.to3DTensor(struc)
print(mol_tensor.shape)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.