content stringlengths 5 1.05M |
|---|
# Looks at the sample def and determines which feature-counter calls can be disabled
__MRO__ = """
stage DISABLE_FEATURE_STAGES(
in map[] sample_def,
out bool disable_crispr,
out bool disable_antibody,
src py "stages/common/disable_feature_stages",
)
"""
def main(args, outs):
    """Inspect the sample definition and decide which feature-counter
    stages can be skipped (no matching library type present)."""
    library_types = [
        entry.get('library_type')
        for entry in args.sample_def
        if entry.get('library_type') is not None
    ]
    crispr_types = ('CRISPR Guide Capture',
                    'Gene Expression and CRISPR Guide Capture')
    antibody_types = ('Antibody Capture',
                      'Gene Expression and Antibody Capture')
    found_crispr = any(t in library_types for t in crispr_types)
    found_antibody = any(t in library_types for t in antibody_types)
    outs.disable_crispr = not found_crispr
    outs.disable_antibody = not found_antibody
|
"""
x = int(input())
y = int(input())
if x > 0:
if y > 0:
print(1)
else:
print(4)
else:
if y > 0:
print(2)
else:
print(3)
"""
# Map the small integers 1-3 to their English names; anything else is 'Other'.
x = int(input())
print({1: 'One', 2: 'Two', 3: 'Three'}.get(x, 'Other'))
|
from confvar import *
from webkit import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import PyQt5.QtWebEngineWidgets
from PyQt5.QtWebEngineWidgets import *
from PyQt5.QtPrintSupport import *
from PyQt5.QtWebEngineCore import *
import json
import os
import re
import time
extension_data = {} # Holds the loaded extensions
preload_data = {} # Holds the preloaded code of all extensions
permissions = {} # Holds the permissions of each extension
script_list = {}  # Maps content-script filename -> owning extension name
# Load the Qt WebChannel JS API from the Qt resource system; pageLoad()
# injects it into every page before any extension script runs.
apiFile = QFile(":/qtwebchannel/qwebchannel.js");
if not apiFile.open(QIODevice.ReadOnly):
    # NOTE(review): on failure execution still falls through to readAll()
    # below, which yields empty data on an unopened QFile — confirm this
    # best-effort behavior is intended.
    print("Could not open API file")
apiScript = apiFile.readAll().data().decode()
apiFile.close();
def readExtension(extension_file):
    """Load one extension manifest (JSON) and register its content script.

    Populates the module-level ``extension_data`` (host -> script files),
    ``permissions`` (extension name -> permission list) and ``script_list``
    (script file -> extension name) dictionaries.

    Returns:
        False if the manifest has no 'extension' section, otherwise None.
    """
    global extension_data, permissions, script_list
    with open(BASE_PATH + "extensions/" + extension_file) as f:
        data = json.load(f)
    if 'extension' not in data.keys():
        return False
    if 'permissions' in data.keys():
        permissions[data['name']] = data['permissions']
    # FIX: the disabled check does not depend on the host, so hoist it out
    # of the per-host loop (it used to run on every loop iteration).
    if data['enabled'] == False:
        return
    for host in data['extension']['host']:
        host_key = host.replace("www.", "")
        if host_key not in extension_data:
            extension_data[host_key] = []
        # TypeScript sources are transpiled on demand to a .js sibling; the
        # manifest entry is rewritten so later iterations skip this branch.
        if data['extension']['js'][-3:] == ".ts":
            new_name = data['extension']['js'].replace(".ts", ".js")
            if BROWSER_TS_DISABLED:
                return
            if not os.path.isfile(BASE_PATH + "extensions/" + new_name):
                print("Transpiling TS code")
                os.system("npx tsc " + BASE_PATH + "extensions/" + data['extension']['js'])
            data['extension']['js'] = new_name
        script_list[data['extension']['js']] = data['name']
        extension_data[host_key].append(data['extension']['js'])
def readExtensions():
    """Scan the extensions directory and load every JSON manifest found."""
    for filename in os.listdir(BASE_PATH + "extensions/"):
        if filename.endswith(".json"):
            readExtension(filename)
    print(extension_data, permissions)
def javascriptLoad(path):
    """Return the JS source at *path*, reading it from disk only once."""
    global preload_data
    if path not in preload_data:
        with open(path) as source:
            preload_data[path] = source.read()
    return preload_data[path]
def execute(load_scripts, browser):
    """Run one extension content script in *browser*, elevating privileges
    for the extension while its script executes."""
    js_code = javascriptLoad(BASE_PATH + "extensions/" + load_scripts)
    # Grant the extension's declared permissions before injecting its code.
    if script_list[load_scripts] in list(permissions.keys()):
        setPrivileges(permissions[script_list[load_scripts]])
    browser.page().runJavaScript(js_code, 0)
    # Privileges are dropped on a timer because runJavaScript is async.
    QTimer.singleShot(500, setPrivileges) # FIXME: This should wait for the JS to execute
def pageLoad(browser):
    """Inject the WebChannel API, then any extension scripts registered for
    the page's host (plus wildcard "*" scripts).

    Returns 1 when the host had registered scripts, otherwise 0.
    """
    global apiScript
    browser.page().runJavaScript(apiScript)
    time.sleep(0.01)
    host = browser.page().url().host().replace("www.", "")
    if host in extension_data:
        for script_name in extension_data[host]:
            execute(script_name, browser)
        if "*" in extension_data:
            for script_name in extension_data["*"]:
                execute(script_name, browser)
        return 1
    return 0
|
import imp
import os
# all base settings for Django
from .base import *
from .installed import *
# NOTE(review): 'imp' appears unused in this file and was removed from the
# stdlib in Python 3.12 — confirm before deleting the import.
# Local development settings: accept any host, use the local Postgres
# instance configured via LOCAL_DATABASE_* environment variables.
ALLOWED_HOSTS = ['*']
HOME_PAGE_MSG = "Hello World, This is local"
print("Using local")
# Database
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': BASE_DIR / 'db.sqlite3',
#     }
# }
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': os.environ.get('LOCAL_DATABASE_NAME'),
        'USER': os.environ.get('LOCAL_DATABASE_USER'),
        'PASSWORD':os.environ.get('LOCAL_DATABASE_PWD'),
        'HOST':"127.0.0.1",
        'PORT':5432
    }
}
|
from datetime import datetime
def get_the_datetime() -> datetime:
    """Return the current local date and time as a ``datetime`` instance."""
    current_moment = datetime.now()
    return current_moment
|
from random import choice
import nmap
"""This class allow us to scan all ips address and hosts
from the current machine """
class Network:
    """Ping-scan a network range with nmap and report host up/down state."""
    def __init__(self, choice):
        # CIDR range to scan, e.g. "192.168.1.1/24".
        self.choice = choice
    def networkscanner(self):
        """Ping-scan the configured range and return (host, state) tuples.

        BUG FIX: the range passed to the constructor was previously ignored
        and a hard-coded "127.0.1.1/24" was always scanned.  The old default
        is kept as a fallback when no range was supplied.
        """
        network = self.choice or "127.0.1.1/24"
        nm = nmap.PortScanner()
        nm.scan(hosts=network, arguments="-sn")
        host_list = [(x, nm[x]["status"]["state"]) for x in nm.all_hosts()]
        return host_list
if __name__ == "__main__":
    # Demo invocation: scan the common home /24 and discard the result.
    D = Network("192.168.1.1/24")
    D.networkscanner()
|
#!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
    n = int(input())
    # Odd numbers are always 'Weird'; even numbers depend on their range.
    # (Each elif below is only reached when n is even, so the original
    # redundant n%2 == 0 re-checks are dropped.)
    if n % 2:
        print('Weird')
    elif 2 <= n <= 5:
        print('Not Weird')
    elif 6 <= n <= 20:
        print('Weird')
    elif n > 20:
        print('Not Weird')
|
import pandas as pd
import plotly.plotly as py
import plotly
# Plot stores on a US map: one scattergeo trace per cluster id (0-8), with
# marker area proportional to promotion depth.
df2 = pd.read_csv('input.csv', header=0)
# BUG FIX: Series.astype() returns a NEW series; the result was previously
# discarded, leaving the column with its original dtype.
df2['promo_dep15'] = df2['promo_dep15'].astype(float)
# One colour per cluster id.
color = pd.Series(['rgb(100,100,100)', 'rgb(38,17,235)', 'rgb(17,93,235)', 'rgb(17,235,220)',
                   'rgb(49,235,17)', 'rgb(188,235,17)', 'rgb(235,202,17)', 'rgb(235,115,17)', 'rgb(255,0,0)'])
# (A commented-out variant that sized markers by cluster id was removed.)
promo = []
for i in range(9):
    df_sub = df2[df2['cat_tot'] == i]
    city = dict(
        type='scattergeo',
        locationmode='USA-states',
        lon=df_sub['long'],
        lat=df_sub['lat'],
        text=df_sub['store_nbr'],
        marker=dict(
            size=(df_sub['promo_dep15'] * 100),
            color=color[df_sub['cat_tot']],
            line=dict(width=0.5, color='rgb(40,40,40)'),
            sizemode='area'
        ),
        name='Cluster %d' % i)
    promo.append(city)
layout = dict(
    title='Promotion depth cluster',
    showlegend=True,
    geo=dict(
        scope='usa',
        projection=dict(type='albers usa'),
        showland=True,
        landcolor='rgb(217, 217, 217)',
        subunitwidth=1,
        countrywidth=1,
        subunitcolor="rgb(255, 255, 255)",
        countrycolor="rgb(255, 255, 255)"
    ),
)
fig = dict(data=promo, layout=layout)
# py.plot( fig, validate=False, filename='store_cluster' )  # online variant
plotly.offline.plot(fig, validate=False, filename='store_cluster_promo')
# Copyright 2017 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains methods required for configuring QoS.
"""
from ucscsdk.ucscexception import UcscOperationError
def qos_policy_add(handle, name, descr=None, prio="best-effort", burst="10240",
                   rate="line-rate", host_control="none",
                   parent_dn="org-root", **kwargs):
    """
    Creates QoS Policy

    Args:
        handle (UcscHandle)
        name (string) : QoS Policy Name
        descr (string) : Description of the policy
        prio (string) : Qos class
                ["best-effort", "bronze", "fc", "gold", "platinum", "silver"]
        burst (uint): Bytes of burst
        rate (string) : ["line-rate"], ["8-40000000"] in Kbps
        host_control (string) : ["full", "none"]
        parent_dn (string) : Dn of the Org in which the policy should reside
        **kwargs: Any additional key-value pair of managed object(MO)'s
                  property and value, which are not part of regular args.
                  This should be used for future version compatibility.
    Returns:
        EpqosDefinition: Managed object
    Raises:
        UcscOperationError: If the parent org does not exist
    Example:
        mo = qos_policy_add(handle, "sample_qos", prio="platinum",
                            burst="10240", rate="line-rate",
                            host_control="full")
    """
    from ucscsdk.mometa.epqos.EpqosDefinition import EpqosDefinition
    from ucscsdk.mometa.epqos.EpqosEgress import EpqosEgress
    obj = handle.query_dn(parent_dn)
    if not obj:
        # NOTE(review): the error is reported under "qos_policy_create",
        # not this function's name — confirm before changing the string,
        # as callers/logs may match on it.
        raise UcscOperationError("qos_policy_create",
                                 "org '%s' does not exist" % parent_dn)
    # The policy definition with an egress child carrying the QoS settings.
    mo = EpqosDefinition(parent_mo_or_dn=obj,
                         name=name,
                         descr=descr)
    mo_1 = EpqosEgress(parent_mo_or_dn=mo,
                       rate=rate,
                       host_control=host_control,
                       name="",
                       prio=prio,
                       burst=burst)
    mo_1.set_prop_multiple(**kwargs)
    handle.add_mo(mo, modify_present=True)
    handle.commit()
    return mo
def qos_policy_get(handle, name, parent_dn="org-root"):
    """
    Gets the QoS policy with the given name under the given org, if present.

    (Docstring fixed: it previously claimed this function checks for an
    existing policy "with the same params"; it only looks the policy up
    by its distinguished name.)

    Args:
        handle (UcscHandle)
        name (string) : QoS Policy Name
        parent_dn (string) : Dn of the Org in which the policy resides

    Returns:
        EpqosDefinition: Managed object OR None

    Example:
        qos_policy_get(handle, "sample_qos")
    """
    dn = parent_dn + '/ep-qos-' + name
    return handle.query_dn(dn)
def qos_policy_exists(handle, name, parent_dn="org-root", **kwargs):
    """
    Checks if the given qos policy already exists with the same params.

    Args:
        handle (UcscHandle)
        name (string) : QoS Policy Name
        parent_dn (string) : Dn of the Org in which the policy resides
        **kwargs: key-value pair of managed object(MO) property and value, Use
                  'print(ucsccoreutils.get_meta_info(<classid>).config_props)'
                  to get all configurable properties of class

    Returns:
        (True/False, MO/None)

    Example:
        bool_var = qos_policy_exists(handle, "sample_qos", "platinum", 10240,
                                     "line-rate", "full")
    """
    mo = qos_policy_get(handle, name, parent_dn)
    if not mo:
        return (False, None)
    # 'descr' lives on the EpqosDefinition itself; every remaining kwarg is
    # checked against the child egress object.
    descr = kwargs.pop('descr', None)
    if not mo.check_prop_match(descr=descr):
        return (False, None)
    egress = handle.query_dn(mo.dn + '/egress')
    if not egress:
        raise UcscOperationError("qos_policy_exists",
                                 "Egress QoS policy does not exist")
    if not egress.check_prop_match(**kwargs):
        return (False, None)
    return (True, mo)
def qos_policy_remove(handle, name, parent_dn="org-root"):
    """
    Removes the specified qos policy.

    Args:
        handle (UcscHandle)
        name (string) : QoS Policy Name
        parent_dn (string) : Dn of the Org in which the policy should reside

    Returns:
        None

    Raises:
        UcscOperationError: If the policy is not found

    Example:
        qos_policy_remove(handle, "sample_qos", parent_dn="org-root")
        qos_policy_remove(handle, "demo_qos_policy",
                          parent_dn="org-root/org-demo")
    """
    policy_mo = qos_policy_get(handle, name, parent_dn)
    if not policy_mo:
        raise UcscOperationError("qos_policy_remove",
                                 "Qos Policy does not exist")
    handle.remove_mo(policy_mo)
    handle.commit()
|
class Solution:
    """Subsequence checks: is *s* a subsequence of *t*?"""

    def isSubsequence(self, s: str, t: str) -> bool:
        """Two-pointer scan of *t*, advancing through *s* on each match.

        BUG FIX: the original tested/returned the result inside the loop
        body, so it gave up after inspecting only the first character of
        *t* unless it matched (e.g. isSubsequence("bc", "abc") was False),
        and returned None for empty *t*.  The success test must happen
        only after the whole of *t* has been consumed.
        """
        i = 0
        for ch in t:
            if i < len(s) and s[i] == ch:
                i += 1
        return i == len(s)

    # Other way to do it
    def isSubsequence2(self, s: str, t: str) -> bool:
        """Lazy variant: each `c in t` advances the shared iterator, so
        matches are forced to occur in order."""
        t = iter(t)
        return all(c in t for c in s)
# Ad-hoc smoke test: "bb" is not a subsequence of 'abcde', so this prints False.
t = 'abcde'
s = "bb"
print(Solution().isSubsequence2(s, t))
|
from __future__ import division
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import argparse
import itertools
def get_breaks(model, N):
    """Return the precomputed piecewise-segment break indices for a layer
    of *N* gradient values in *model* (cf. find_breaks, which derives
    breaks from a sorted curve)."""
    if model == "resnet20_v2":
        table = {
            432: [0, 353, 432],
            2304: [0, 1847, 2229, 2304],
            4608: [0, 4073, 4544, 4608],
            9216: [0, 8164, 9012, 9216],
            18432: [0, 16094, 18060, 18432],
            36864: [0, 33742, 36595, 36864]}
    elif model == "vgg16":
        table = {
            1728: [0, 1443, 1663, 1728],
            36864: [0, 34097, 36467, 36815, 36864],
            73728: [0, 67595, 73032, 73630, 73728],
            147456: [0, 132193, 145286, 147125, 147456],
            294912: [0, 272485, 292623, 294580, 294844, 294912],
            589824: [0, 553577, 586620, 589431, 589764, 589824],
            1179648: [0, 1099105, 1172811, 1179005, 1179543, 1179648],
            2359296: [0, 2195844, 2343594, 2357633, 2359102, 2359296]}
    elif model == "resnet50":
        table = {
            4096: [0, 3656, 4018, 4096],
            9408: [0, 8476, 9165, 9408],
            16384: [0, 14406, 16145, 16327, 16384],
            36864: [0, 32238, 36292, 36726, 36864],
            131072: [0, 121069, 130381, 130989, 131072],
            32768: [0, 29429, 32320, 32692, 32768],
            147456: [0, 133258, 145944, 147255, 147456],
            65536: [0, 58690, 64507, 65371, 65536],
            524288: [0, 494762, 522078, 524067, 524238, 524288],
            589824: [0, 539407, 584654, 589214, 589738, 589824],
            262144: [0, 237433, 259437, 261782, 262062, 262144],
            2097152: [0, 1990620, 2088919, 2096322, 2097036, 2097152],
            2359296: [0, 2188168, 2341896, 2356580, 2358793, 2359296],
            1048576: [0, 981145, 1041707, 1047784, 1048461, 1048576],
            2050048: [0, 1980923, 2044274, 2049225, 2049929, 2050048]}
    return table[N]
def find_breaks(curve, num_of_segments=2):
    """Greedily split a (sorted) curve into segments.

    Each step finds the point of the remaining tail that deviates most
    from the straight chord between its endpoints and makes it the next
    break.  Returns [0, b1, ..., b_k, len(curve)].
    """
    breaks = [0]
    start = 0
    segment = curve
    for _ in range(num_of_segments):
        chord = np.linspace(segment[0], segment[-1], len(segment))
        deviation = np.abs(chord - segment)
        # np.argmax returns the FIRST index of the maximum, matching the
        # original list.index(max(...)) tie-breaking.
        start += int(np.argmax(deviation))
        breaks.append(start)
        segment = curve[start:]
    breaks.append(len(curve))
    return breaks
def get_num_of_segments(model, N):
    """Number of piecewise polynomial segments for a layer of size *N*.

    NOTE(review): resnet20_v2 has a get_breaks entry for 432 but no entry
    here, so that size raises KeyError — confirm whether 432 is intended
    to be unsupported (see the trailing "# 432" marker in the original).
    """
    if model == "resnet20_v2":
        table = {2304: 3, 4608: 3, 9216: 3, 18432: 3, 36864: 3}  # 432
    elif model == "vgg16":
        table = {1728: 3, 36864: 4, 73728: 4, 147456: 4, 294912: 5, 589824: 5, 1179648: 5, 2359296: 5}
    elif model == "resnet50":
        table = {4096: 3, 9408: 3, 16384: 4, 36864: 4, 131072: 4, 32768: 4, 147456: 4, 65536: 4, 524288: 5,
                 589824: 5, 262144: 5, 2097152: 5, 2359296: 5, 1048576: 5, 2050048: 5}
    return table[N]
def GetInputMatrix_Polynomial(xcol, x):
    """Vandermonde-style design matrix whose columns are x**0 .. x**(xcol-1).

    Returns an (len(x), xcol) array suitable for evaluating a polynomial
    of degree xcol-1 via a matrix product with its coefficient vector.
    """
    n_rows = len(x)
    # Keep np.ones as the first column (not x**0) to preserve the original
    # dtype-promotion behavior for integer inputs.
    columns = [np.ones(n_rows)] + [np.power(x, i) for i in range(1, xcol)]
    return np.transpose(np.vstack(columns))
polynomial_degree = 4
pd.set_option("display.precision", 40)
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default="./", help='')
parser.add_argument('--model', type=str, default="./", help='')
args = parser.parse_args()
path = args.path
# One value / one coefficient per line in the CSVs.
Y = pd.read_csv(path+'/values.csv', header=None, sep="\n")[0].values
coefficients = pd.read_csv(path+'/coefficients.csv', header=None, sep="\n")[0].values
#print(Y) ; print(coefficients)
N = Y.size
num_of_segments = get_num_of_segments(args.model, N)
# The fit was done on |Y| sorted ascending: 'mapping' remembers the sort
# permutation and 'mask' the original signs so the estimate can be
# un-sorted / un-absed afterwards.
y_abs = np.abs(Y)
mapping = np.argsort(y_abs, axis=0)
sorted_Y = y_abs[mapping]
# breaks = find_breaks(sorted_Y, num_of_segments-1)
breaks = get_breaks(args.model, N)
sizes = [breaks[i+1]-breaks[i] for i in range(num_of_segments)]
negative_indices = np.where(np.less(Y[mapping], 0))[0]
Nneg = negative_indices.size
mask = np.ones(N)
mask[negative_indices] = -np.ones(Nneg)
y_est_abs = []
x_segments_ = [] ; x_segments = [] ; y_segments = [] ; X_segments = []
# Evaluate each segment's polynomial (polynomial_degree coefficients per
# segment, laid out consecutively in 'coefficients').
for i in range(num_of_segments):
    x_segments_ += [np.arange(breaks[i], breaks[i + 1])]
    x_segments += [np.cast['float64'](np.arange(0, sizes[i]))]
    y_segments += [sorted_Y[breaks[i]: breaks[i + 1]]]
    X_segments += [GetInputMatrix_Polynomial(polynomial_degree, x_segments[i])]
    offset = i*polynomial_degree
    y_est_abs += [np.matmul(X_segments[i], coefficients[offset : offset+polynomial_degree])]
y_est_abs_np = np.concatenate(y_est_abs)
y = y_est_abs_np * mask
# Compute Root Mean Squared Error
rmse = np.sqrt(np.sum(np.power(sorted_Y-y_est_abs_np, 2))/N)
plt.rcParams["figure.figsize"] = [20, 10]
print(rmse)
colors = itertools.cycle(['yo', 'ro', 'go', 'yo', 'mo'])
with open(path+'/rmse.txt', 'w') as f:
    f.write(str(rmse) + "\n")
# Shift to 1-based x positions for plotting.
mapping = mapping+1
plt.plot(range(1, N+1), Y, 'mo', markersize=5, label="True")
plt.plot(mapping, y, 'c.', markersize=5, label="Estimated Values")
plt.plot(range(1, N+1), sorted_Y, 'bo', markersize=6, label="True Sorted")
for x, X, c, y_est, color in zip(x_segments_, X_segments, coefficients, y_est_abs, colors):
    plt.plot(x, y_est, color, markersize=2, label="Estimates")
plt.plot(breaks, np.zeros(len(breaks)), 'ko', markersize=6, label="Breaks")
plt.legend()
# plt.show()
plt.savefig(path + 'gradient.png')
#!/usr/bin/env python2.7
import pysam, argparse, sys
# CLI: -b indexed BAM and -f indexed FASTA are required; region and output
# behavior are controlled by the optional flags below.  (Python 2 script.)
parser = argparse.ArgumentParser(description='Helps with genome curation using linked paired-end read information from subsetted regions of a genome.', formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False)
#Required arguments
required = parser.add_argument_group('REQUIRED')
required.add_argument('-b', help= 'indexed bam file required for random read access', type=str, required=True)
#required.add_argument('-s', help= 'QNAME sorted SAM file required for mate pair lookups', type=str, required=True)
required.add_argument('-f', help= 'indexed fasta file required for random sequence access', type=str, required=True)
#Optional arguments
optional = parser.add_argument_group('OPTIONAL')
optional.add_argument('-h', '--help', action="help", help="show this help message and exit")
optional.add_argument('-c', help= 'contig/scaffold for read detection', type=str)
optional.add_argument('--min', help= 'minimum on scaffold', type=int, default=0)
optional.add_argument('--max', help= 'maximum on scaffold', type=int, default=sys.maxint)
optional.add_argument('--base', help= 'N-base for coordinate access', type=int, choices= [0,1], default=0)
# NOTE(review): store_false means connectivity output is ON by default and
# passing --connectivity turns it OFF — confirm the flag name matches intent.
optional.add_argument('--connectivity', help= 'outputs reads that should span multiple scaffolds and the regions they occur', action="store_false")
optional.add_argument('--threshold', help= 'minimum number of reads bridging scaffolds required to print', type=int, default=0)
args = parser.parse_args()
# Convert user-facing coordinates to 0-based internal coordinates.
args.min, args.max= args.min-args.base, args.max-args.base
bamfile = pysam.AlignmentFile(args.b, "rb")
fastafile= pysam.FastaFile(args.f)
sys.stderr.write("Reading BAM file into mate-based dictionary...\t")
# Key every alignment by qname plus read number ("-1" = read1, "-2" = read2)
# so a segment's mate can be fetched in O(1) later.
read_dict= {}
for read in bamfile:
    read_dict[read.query_name + ("-1" if read.is_read1 else "-2")]= read
sys.stderr.write("finished!\n")
sys.stderr.write("Piling BAM file...\t")
piled_columns = bamfile.pileup(args.c, args.min, (args.max if args.max != sys.maxint else fastafile.get_reference_length(args.c)))
sys.stderr.write("finished!\n")
if args.connectivity:
for piled_column in piled_columns:
if piled_column.reference_pos < args.min or piled_column.reference_pos > args.max:
continue
if piled_column.reference_pos % 1000 == 0:
sys.stderr.write("Processing column {0}\n".format(str(piled_column.reference_pos)))
if args.threshold == 0:
print "Scaffold: {0}\tPosition: {1}\tReference base: {2}\tDepth: {3}".format(
bamfile.getrname(piled_column.reference_id),
piled_column.reference_pos,
fastafile.fetch(bamfile.getrname(piled_column.reference_id), piled_column.reference_pos, piled_column.reference_pos+1),
piled_column.nsegments)
for piled_read in piled_column.pileups:
seg= piled_read.alignment
mate= read_dict[seg.query_name + ("-2" if read.is_read1 else "-1")]
print "\t".join([
seg.query_name,
seg.query_sequence[piled_read.query_position],
seg.cigarstring,
mate.query_name,
("unmapped" if mate.is_unmapped else bamfile.getrname(mate.reference_id)),
("NA" if mate.is_unmapped else str(mate.reference_start)),
("NA" if mate.is_unmapped else mate.cigarstring)
])
continue
bridge_count= 0
segs= []
mates= []
for piled_read in piled_column.pileups:
seg= piled_read.alignment
if seg.is_proper_pair:
continue
mate= read_dict[seg.query_name + ("-2" if read.is_read1 else "-1")]
if bamfile.getrname(mate.reference_id) != bamfile.getrname(seg.reference_id):
bridge_count+= 1
segs.append(seg), mates.append(mate)
if bridge_count >= args.threshold:
print "Scaffold: {0}\tPosition: {1}\tReference base: {2}\tDepth: {3}".format(
bamfile.getrname(piled_column.reference_id),
piled_column.reference_pos,
fastafile.fetch(bamfile.getrname(piled_column.reference_id), piled_column.reference_pos, piled_column.reference_pos+1),
piled_column.nsegments)
print "READ\tBASE_CALL\tMAPPED?"
for seg, mate in zip(segs, mates):
print "\t".join([
seg.query_name,
seg.query_sequence[piled_read.query_position],
seg.cigarstring,
mate.query_name,
("unmapped" if mate.is_unmapped else bamfile.getrname(mate.reference_id)),
("NA" if mate.is_unmapped else str(mate.reference_start)),
("NA" if mate.is_unmapped else mate.cigarstring)
])
"""
for piled_column in piled_columns:
if piled_column.reference_pos < args.min or piled_column.reference_pos > args.max:
continue
print "Scaffold: {0}\tPosition: {1}\tReference base: {2}\tDepth: {3}".format(
bamfile.getrname(piled_column.reference_id),
piled_column.reference_pos,
fastafile.fetch(bamfile.getrname(piled_column.reference_id), piled_column.reference_pos, piled_column.reference_pos+1),
piled_column.nsegments)
print "READ\tBASE_CALL\tMAPPED?"
for piled_read in piled_column.pileups:
seg= piled_read.alignment
pos = bamfile.tell()
try:
mate= bamfile.mate(seg)
except ValueError:
1
finally:
bamfile.seek(pos)
print "{0}\t{1}\t{2}".format(
seg.query_name,
seg.query_sequence[piled_read.query_position],
("unmapped" if seg.mate_is_unmapped else "{0} is on {1}".format(mate.query_name, bamfile.getrname(mate.reference_id))))
print "\n\n\n"
"""
|
#!/usr/bin/env python
"""An example of a tagging service using RESTful Open Annotation."""
import logging
import re
import sys
from cgi import FieldStorage
from http.server import BaseHTTPRequestHandler, HTTPServer
from json import dumps
from logging import info, warn
import requests
TAGGER_URI = 'http://tagger.jensenlab.org/OpenAnnotation'
DEFAULT_PORT = 47111
logging.basicConfig(level=logging.DEBUG)
def argparser():
    """Build the command-line parser for the tagger service."""
    import argparse
    ap = argparse.ArgumentParser(
        description='HTTP tagging service using RESTful Open Annotation')
    ap.add_argument('-p', '--port', type=int, default=DEFAULT_PORT,
                    help='run service on PORT (default %d)' % DEFAULT_PORT)
    return ap
def _apply_tagger(text):
    """POST *text* to the remote tagger service and return its JSON
    (Open Annotation) response."""
    r = requests.post(TAGGER_URI, data={'document': text})
    return r.json()
def _target_to_offset(target):
m = re.match(r'^.*?\#char=(\d+),(\d+)$', target)
start, end = m.groups()
return int(start), int(end)
def _split_ref(ref):
return ref.split(':', 1)
def _oa_to_ann(data, text):
    """Convert Open Annotation JSON-LD into brat-style annotations.

    Emits one textbound 'T<n>' per @graph entry and one normalization
    'N<m>' per body reference, numbered independently.
    """
    anns = {}
    norm_index = 1
    for ann_index, annotation in enumerate(data['@graph'], start=1):
        start, end = _target_to_offset(annotation['target'])
        tb_id = 'T%d' % ann_index
        # Textbound entity covering the annotated span.
        anns[tb_id] = {
            'type': 'Entity',
            'offsets': ((start, end), ),
            'texts': (text[start:end], ),
        }
        # A target may carry one body or a list of them.
        body = annotation['body']
        bodies = body if isinstance(body, list) else [body]
        for item in bodies:
            refdb, refid = _split_ref(item['@id'])
            anns['N%d' % norm_index] = {
                'type': 'Reference',
                'target': tb_id,
                'refdb': refdb,
                'refid': refid,
            }
            norm_index += 1
    return anns
class RestOATaggerHandler(BaseHTTPRequestHandler):
    """Handles POSTed text: tags it via the remote service and returns
    brat-style annotations as JSON."""

    def do_POST(self):
        fields = FieldStorage(headers=self.headers,
                              environ={
                                  'REQUEST_METHOD': 'POST',
                                  'CONTENT_TYPE': self.headers['Content-type'],
                              },
                              fp=self.rfile)
        try:
            # NOTE(review): FieldStorage.value may already be str for text
            # payloads, in which case .decode raises AttributeError (not the
            # KeyError caught here) — confirm the expected payload type.
            text = fields.value.decode('utf-8')
        except KeyError:
            warn('query did not contain text')
            text = ''
        data = _apply_tagger(text)
        info(data)
        anns = _oa_to_ann(data, text)
        # Write the response
        self.send_response(200)
        self.send_header('Content-type', 'application/json; charset=utf-8')
        self.end_headers()
        # BUG FIX: under http.server (py3) wfile is a binary stream; writing
        # the str from dumps() raised TypeError.  Encode explicitly.
        self.wfile.write(dumps(anns).encode('utf-8'))
        info('Generated %d annotations' % len(anns))
def main(argv):
    """Run the tagger HTTP service until interrupted.

    Returns 0 (process exit status).
    """
    args = argparser().parse_args(argv[1:])
    server = HTTPServer(('localhost', args.port), RestOATaggerHandler)
    info('REST-OA tagger service started')
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass
    info('REST-OA tagger service stopped')
    server.server_close()
    return 0
# Entry point: propagate main()'s return value as the process exit code.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
import logging, os, platform, posixpath, subprocess, shlex, string, sys
from docutils import nodes
from docutils.parsers.rst import directives, states
from docutils.parsers.rst.roles import set_classes
from sphinx.util.compat import Directive
from sphinx.util.osutil import ensuredir
from .phix import (PhixError,
program_files_32,
relfn2path,
temp_path)
log = logging.getLogger('phix.dia')
logging.basicConfig()
class dia(nodes.General, nodes.Element):
    '''A docutils node representing a Dia diagram'''
    def astext(self):
        '''
        Returns:
            The 'alt' text for the node as specified by the :alt: option on
            the dia directive.
        '''
        # Node attributes ('alt', 'uri', ...) are stored via the dict-like
        # interface inherited from docutils Element; default to ''.
        return self.get('alt', '')
class DiaDirective(Directive):
    '''The argouml directive.
    The implementation of directives is covered at
    http://docutils.sourceforge.net/docs/howto/rst-directives.html
    '''
    # Valid values for the :align: option; vertical alignments are only
    # legal inside substitution definitions.
    align_h_values = ('left', 'center', 'right')
    align_v_values = ('top', 'middle', 'bottom')
    align_values = align_v_values + align_h_values
    def align(argument):
        '''Convert and validate the :align: option.
        Args:
            argument: The argument passed to the :align: option.
        '''
        # This is not callable as self.align. We cannot make it a
        # staticmethod because we're saving an unbound method in
        # option_spec below.
        return directives.choice(argument, directives.images.Image.align_values)
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'postprocess' : directives.unchanged,
                   'new-window' : directives.flag,
                   'alt': directives.unchanged,
                   'height': directives.length_or_unitless,
                   'width': directives.length_or_percentage_or_unitless,
                   'scale': directives.percentage,
                   'align': align,
                   'border': directives.positive_int,
                   'class': directives.class_option}
    def run(self):
        '''Process the dia directive.
        Creates and returns an list of nodes, including a dia node.
        '''
        log.info('self.arguments[0] = {0}'.format(self.arguments[0]))
        messages = []
        # Get the one and only argument of the directive which contains the
        # name of the Dia file.
        reference = directives.uri(self.arguments[0])
        env = self.state.document.settings.env
        _, filename = relfn2path(env, reference)
        log.info('filename = {0}'.format(filename))
        # Validate the :align: option
        if 'align' in self.options:
            if isinstance(self.state, states.SubstitutionDef):
                # Check for align_v_values.
                if self.options['align'] not in self.align_v_values:
                    raise self.error(
                        'Error in "{0}" directive: "{1}" is not a valid value '
                        'for the "align" option within a substitution '
                        'definition. Valid values for "align" are: "{2}".'.format(
                        self.name,
                        self.options['align'],
                        '", "'.join(self.align_v_values)))
            elif self.options['align'] not in self.align_h_values:
                raise self.error(
                    'Error in "{0}" directive: "{1}" is not a valid value for '
                    'the "align" option. Valid values for "align" are: "{2}".'.format(
                    self.name,
                    self.options['align'],
                    '", "'.join(self.align_h_values)))
        set_classes(self.options)
        log.info("self.block_text = {0}".format(self.block_text))
        log.info("self.options = {0}".format(self.options))
        # All remaining options are attached to the dia node; explicit node
        # keys below supply the defaults the HTML writer relies on.
        dia_node = dia(self.block_text, **self.options)
        dia_node['uri'] = os.path.normpath(filename)
        dia_node['width'] = self.options['width'] if 'width' in self.options else '100%'
        dia_node['height'] = self.options['height'] if 'height' in self.options else '100%'
        dia_node['border'] = self.options['border'] if 'border' in self.options else 0
        dia_node['postprocess_command'] = self.options['postprocess'] if 'postprocess' in self.options else None
        dia_node['new_window_flag'] = 'new-window' in self.options
        log.info("dia_node['new_window_flag'] = {0}".format(
            dia_node['new_window_flag']))
        return messages + [dia_node]
def get_image_filename(self, uri):
    '''
    Get paths of output file.

    Args:
        uri: The URI of the source Dia file

    Returns:
        A 2-tuple (refer_path, render_path): a relative URI usable in the
        output HTML to refer to the produced image, and an absolute path
        to which the generated image should be rendered.
    '''
    _, source_name = os.path.split(uri)
    stem, _ = os.path.splitext(source_name)
    fname = '{0}.svg'.format(stem)
    log.info('fname = {0}'.format(fname))
    if hasattr(self.builder, 'imgpath'):
        # HTML builder: images live under _images, referenced relatively.
        refer_path = posixpath.join(self.builder.imgpath, fname)
        render_path = os.path.join(self.builder.outdir, '_images', fname)
    else:
        # LaTeX builder: flat output directory.
        refer_path = fname
        render_path = os.path.join(self.builder.outdir, fname)
    # Only create the target directory when the image is not already there.
    if not os.path.isfile(render_path):
        ensuredir(os.path.dirname(render_path))
    return refer_path, render_path
def create_graphics(self, dia_uri, render_path, postprocess_command=None):
    '''
    Use Dia in batch mode to render a diagram from a dia file into graphics of
    the specified format.
    Args:
        dia_uri: The path to the Dia file.
        render_path: The path to which the graphics output is to be rendered.
        postprocess_command: An optional command into which the Dia SVG
            output will be piped before it is placed in the output document.
            The command should accept SVG on stdin and produce SVG on stdout.
    Raises:
        PhixError: If the graphics could not be rendered.
    '''
    log.info("create_graphics()")
    log.info("dia_uri = {0}".format(dia_uri))
    log.info("render_path = {0}".format(render_path))
    # Without postprocessing Dia renders straight to render_path; otherwise
    # it renders to a temp file that is piped through the command below.
    output_path = render_path if postprocess_command is None else temp_path('.svg')
    log.info("output_path = {0}".format(output_path))
    # Launch Dia and instruct it to export the diagram as SVG
    args = [str(dia_uri),
            '-e', str(output_path)]
    command = dia_command() + args
    log.info("command = {0}".format(command))
    returncode = subprocess.call(command)
    log.info("returncode = {0}".format(returncode))
    if returncode != 0:
        raise PhixError("Could not launch Dia with command {0}".format(' '.join(command)))
    # If a postprocess command has been specified
    if postprocess_command is not None:
        log.info("postprocess_command = {0}".format(postprocess_command))
        # We use our own variable interpolation with the $VAR syntax rather than
        # relying on the underlying shell, so that we can support the same
        # variable syntax on both Windows and Linux.
        postprocess_command_template = string.Template(str(postprocess_command))
        interpolated_postprocess_command = postprocess_command_template.substitute(os.environ)
        log.info("interpolated_postprocess_command = {0}".format(
            interpolated_postprocess_command))
        postprocess_command_fragments = shlex.split(interpolated_postprocess_command, posix=False)
        log.info("postprocess_command_fragments = {0}".format(
            postprocess_command_fragments))
        # Pipe the intermediate SVG through the command into render_path.
        with open(output_path, 'rb') as intermediate_file:
            with open(render_path, 'wb') as render_file:
                returncode = subprocess.call(postprocess_command_fragments, stdin=intermediate_file, stdout=render_file)
        log.info("returncode = {0}".format(returncode))
        if returncode != 0:
            raise PhixError("Could not launch postprocess with command {0}".format(' '.join(postprocess_command)))
        # Clean up the intermediate temp file.
        if os.path.exists(output_path):
            log.info("Removing {0}".format(output_path))
            os.remove(output_path)
def dia_command():
    '''Get a command for launching Dia.

    The DIA_LAUNCH environment variable, when set, is split shell-style and
    used verbatim.  Otherwise a platform-appropriate default is returned.

    Returns:
        A list of command line arguments - the first of which is an
        executable name or alias - which when passed to a shell can be
        used to launch dia.
    '''
    launch = os.environ.get('DIA_LAUNCH')
    if launch is not None:
        return shlex.split(launch)
    if platform.system() == 'Windows':
        # This is the location used by the Dia installer for Windows
        return [os.path.join(program_files_32(), "Dia", "bin", "diaw.exe")]
    return ['dia']
def render_html(self, node):
    '''
    Render the supplied node as HTML.
    Note: This method *always* raises docutils.nodes.SkipNode to ensure that the
    child nodes are not visited.
    Args:
        node: An dia docutils node.
    Raises:
        SkipNode: Do not visit the current node's children, and do not call the
        current node's ``depart_...`` method.
    '''
    has_thumbnail = False
    try:
        refer_path, render_path = get_image_filename(self, node['uri'])
        log.info("refer_path = {0}".format(refer_path))
        log.info("render_path = {0}".format(render_path))
        log.info("node['uri'] = {0}".format(node['uri']))
        # The diagram is (re-)rendered on every build.
        # NOTE(review): create_graphics is passed node.get('postprocess'),
        # while run() stores the command under 'postprocess_command' — both
        # keys exist when the option was given, but confirm which is canonical.
        #if not os.path.isfile(render_path):
        create_graphics(self, node['uri'], render_path, node.get('postprocess'))
    except PhixError:
        exc = sys.exc_info()
        log.info('Could not render {0}'.format(node['uri']),
                 exc_info = exc)
        self.builder.warn('Could not render {0} because of {1}'.format(
            node['uri'],
            exc[1]))
        raise nodes.SkipNode
    # Emit the SVG wrapped in an <object> so it stays interactive.
    self.body.append(self.starttag(node, 'p', CLASS='dia'))
    objtag_format = '<object data="%s" width="%s" height="%s" border="%s" type="image/svg+xml" class="img">\n'
    self.body.append(objtag_format % (refer_path, node['width'], node['height'], node['border']))
    self.body.append('</object>')
    if node['new_window_flag']:
        self.body.append('<p align="right">\n')
        new_window_tag_format = '<a href="{0}" target="_blank">Open in new window</a>'
        self.body.append(new_window_tag_format.format(refer_path))
        self.body.append('</p>\n')
    self.body.append('</p>\n')
    raise nodes.SkipNode
def html_visit_dia(self, node):
    """Translator hook: delegate HTML rendering of a ``dia`` node."""
    render_html(self, node)
def latex_visit_dia(self, node):
    """Translator hook: delegate latex rendering of a ``dia`` node."""
    render_latex(self, node, node['code'], node['options'])
def setup(app):
    """Register the services of this plug-in with Sphinx.

    NOTE(review): only the HTML visitor is registered here even though
    ``latex_visit_dia`` exists above — confirm whether latex support was
    intentionally left unwired.
    """
    app.add_node(dia, html=(html_visit_dia, None))
    app.add_directive('dia', DiaDirective)
|
from . import data
from . import manager
from . import analyzer
|
from math import ceil, floor

import wx
from PIL import Image
from ZMatrix import ZMatrix
from svgelements import *
"""
Laser Render provides GUI relevant methods of displaying the given project.
"""
# Bit flags combined into a draw-mode mask consumed by LaserRender.render().
# For FILLS/STROKES a set bit suppresses that aspect; for PATH/IMAGE/TEXT a
# set bit filters those element types out of the render entirely.
DRAW_MODE_FILLS = 0x000001
DRAW_MODE_GUIDES = 0x000002
DRAW_MODE_GRID = 0x000004
DRAW_MODE_LASERPATH = 0x000008
DRAW_MODE_RETICLE = 0x000010
DRAW_MODE_SELECTION = 0x000020
DRAW_MODE_STROKES = 0x000040
DRAW_MODE_CACHE = 0x000080  # Set means do not cache.
DRAW_MODE_REFRESH = 0x000100
DRAW_MODE_ANIMATE = 0x000200
DRAW_MODE_PATH = 0x000400
DRAW_MODE_IMAGE = 0x000800
DRAW_MODE_TEXT = 0x001000
DRAW_MODE_BACKGROUND = 0x002000
# NOTE(review): the next two are 7-digit literals (0x40000/0x80000), skipping
# the 0x004000-0x020000 range — presumably reserved bits, but confirm.
DRAW_MODE_ICONS = 0x0040000
DRAW_MODE_TREE = 0x0080000
DRAW_MODE_INVERT = 0x400000
DRAW_MODE_FLIPXY = 0x800000
def swizzlecolor(c):
    """Return the colour as a wx-style 0xBBGGRR integer.

    ``None`` passes through; a plain int is first promoted to a Color.
    """
    if c is None:
        return None
    color = Color(c) if isinstance(c, int) else c
    return (color.blue << 16) | (color.green << 8) | color.red
class LaserRender:
    """Draws project elements (paths, images, text) onto wx graphics contexts.

    Per-element draw routines are attached lazily: the first time an element
    is drawn, a bound routine matching its type is stored on the element as
    ``element.draw`` and reused on later frames.
    """

    def __init__(self, device):
        self.device = device
        self.cache = None
        # Reusable wx drawing objects, mutated per element to avoid
        # re-allocating pens/brushes/colours on every draw call.
        self.pen = wx.Pen()
        self.brush = wx.Brush()
        self.color = wx.Colour()

    def render(self, elements, gc, draw_mode=None, zoomscale=1):
        """
        Render scene information.

        :param elements: iterable of elements to draw.
        :param gc: wx.GraphicsContext to draw onto.
        :param draw_mode: DRAW_MODE_* bitmask; defaults to the device's mode.
        :param zoomscale: current zoom factor, used to keep strokes visible.
        :return:
        """
        if draw_mode is None:
            draw_mode = self.device.draw_mode
        if draw_mode & (DRAW_MODE_TEXT | DRAW_MODE_IMAGE | DRAW_MODE_PATH) != 0:
            # At least one element kind is suppressed: keep only the kinds
            # whose suppression bit is clear.
            types = []
            if draw_mode & DRAW_MODE_PATH == 0:
                types.append(Path)
            if draw_mode & DRAW_MODE_IMAGE == 0:
                types.append(SVGImage)
            if draw_mode & DRAW_MODE_TEXT == 0:
                types.append(SVGText)
            elements = [e for e in elements if type(e) in types]
        for element in elements:
            try:
                element.draw(element, gc, draw_mode, zoomscale=zoomscale)
            except AttributeError:
                # First draw of this element: attach the routine for its type,
                # then draw it once. Unknown types are skipped.
                if isinstance(element, Path):
                    element.draw = self.draw_path
                elif isinstance(element, SVGImage):
                    element.draw = self.draw_image
                elif isinstance(element, SVGText):
                    element.draw = self.draw_text
                elif isinstance(element, Group):
                    element.draw = self.draw_group
                else:
                    continue
                element.draw(element, gc, draw_mode, zoomscale=zoomscale)

    def make_path(self, gc, path):
        """Convert a svgelements Path into a wx GraphicsPath."""
        p = gc.CreatePath()
        first_point = path.first_point
        if first_point is not None:
            p.MoveToPoint(first_point[0], first_point[1])
        for e in path:
            if isinstance(e, Move):
                p.MoveToPoint(e.end[0], e.end[1])
            elif isinstance(e, Line):
                p.AddLineToPoint(e.end[0], e.end[1])
            elif isinstance(e, Close):
                p.CloseSubpath()
            elif isinstance(e, QuadraticBezier):
                p.AddQuadCurveToPoint(e.control[0], e.control[1],
                                      e.end[0], e.end[1])
            elif isinstance(e, CubicBezier):
                p.AddCurveToPoint(e.control1[0], e.control1[1],
                                  e.control2[0], e.control2[1],
                                  e.end[0], e.end[1])
            elif isinstance(e, Arc):
                # wx has no native arc-through-parameterization; approximate
                # with cubic curves.
                for curve in e.as_cubic_curves():
                    p.AddCurveToPoint(curve.control1[0], curve.control1[1],
                                      curve.control2[0], curve.control2[1],
                                      curve.end[0], curve.end[1])
        return p

    def set_pen(self, gc, stroke, width=1.0):
        """Set the context pen from a stroke colour ('none'/None = no pen)."""
        if width < 1.0:
            width = 1.0
        c = stroke
        if c is not None and c != 'none':
            swizzle_color = swizzlecolor(c)
            self.color.SetRGBA(swizzle_color | c.alpha << 24)  # wx has BBGGRR
            self.pen.SetColour(self.color)
            self.pen.SetWidth(width)
            gc.SetPen(self.pen)
        else:
            gc.SetPen(wx.TRANSPARENT_PEN)

    def set_brush(self, gc, fill):
        """Set the context brush from a fill colour ('none'/None = no brush)."""
        c = fill
        if c is not None and c != 'none':
            swizzle_color = swizzlecolor(c)
            self.color.SetRGBA(swizzle_color | c.alpha << 24)  # wx has BBGGRR
            self.brush.SetColour(self.color)
            gc.SetBrush(self.brush)
        else:
            gc.SetBrush(wx.TRANSPARENT_BRUSH)

    def set_element_pen(self, gc, element, zoomscale=1.0):
        """Set the pen for an element, clamping width so strokes stay visible."""
        try:
            sw = Length(element.stroke_width).value(ppi=96.0)
            # if sw < 3.0:
            #     sw = 3.0
        except AttributeError:
            sw = 1.0
        if sw is None:
            sw = 1.0
        # Keep strokes at least ~1 screen pixel wide at the current zoom.
        limit = zoomscale**.5
        if sw < limit:
            sw = limit
        self.set_pen(gc, element.stroke, width=sw)

    def set_element_brush(self, gc, element):
        """Set the brush from the element's fill."""
        self.set_brush(gc, element.fill)

    def draw_path(self, element, gc, draw_mode, zoomscale=1.0):
        """Default draw routine for the laser path element."""
        try:
            matrix = element.transform
        except AttributeError:
            matrix = Matrix()
        # BUG FIX: this previously tested hasattr(element, 'cache') while the
        # built path was stored under 'wx_bitmap_image', so the cached path
        # was never reused (and the test itself could raise AttributeError).
        # Test the attribute that is actually set.
        if getattr(element, 'wx_bitmap_image', None) is None:
            element.wx_bitmap_image = self.make_path(gc, element)
        gc.PushState()
        gc.ConcatTransform(wx.GraphicsContext.CreateMatrix(gc, ZMatrix(matrix)))
        self.set_element_pen(gc, element, zoomscale=zoomscale)
        self.set_element_brush(gc, element)
        if draw_mode & DRAW_MODE_FILLS == 0 and element.fill is not None:
            gc.FillPath(element.wx_bitmap_image)
        if draw_mode & DRAW_MODE_STROKES == 0 and element.stroke is not None:
            gc.StrokePath(element.wx_bitmap_image)
        gc.PopState()

    def draw_text(self, element, gc, draw_mode, zoomscale=1.0):
        """Default draw routine for SVG text elements."""
        try:
            matrix = element.transform
        except AttributeError:
            matrix = Matrix()
        if hasattr(element, 'wxfont'):
            font = element.wxfont
        else:
            if element.font_size < 1:
                # Sub-pixel font sizes are not supported; bake the shrink into
                # the transform and draw at size 1 instead.
                if element.font_size > 0:
                    element.transform.pre_scale(element.font_size,
                                                element.font_size,
                                                element.x,
                                                element.y)
                element.font_size = 1  # No zero sized fonts.
            font = wx.Font(element.font_size, wx.SWISS, wx.NORMAL, wx.BOLD)
            # f = []
            # if element.font_family is not None:
            #     f.append(str(element.font_family))
            # if element.font_face is not None:
            #     f.append(str(element.font_face))
            # if element.font_weight is not None:
            #     f.append(str(element.font_weight))
            # f.append("%d" % element.font_size)
            # font.SetNativeFontInfoUserDesc(' '.join(f))
            element.wxfont = font
        gc.PushState()
        gc.ConcatTransform(wx.GraphicsContext.CreateMatrix(gc, ZMatrix(matrix)))
        self.set_element_pen(gc, element, zoomscale=zoomscale)
        self.set_element_brush(gc, element)
        if element.fill is None or element.fill == 'none':
            gc.SetFont(font, wx.BLACK)
        else:
            gc.SetFont(font, wx.Colour(swizzlecolor(element.fill)))
        text = element.text
        x = element.x
        y = element.y
        if text is not None:
            # Measure the text and record the size back on the element,
            # flagging it modified if its extents changed.
            w, h = element.width, element.height
            element.width, element.height = gc.GetTextExtent(element.text)
            if w != element.width and h != element.height:
                element.modified()
            # Adjust the draw origin for the SVG text anchor.
            if not hasattr(element, 'anchor') or element.anchor == 'start':
                y -= element.height
            elif element.anchor == 'middle':
                x -= (element.width / 2)
                y -= element.height
            elif element.anchor == 'end':
                x -= element.width
                y -= element.height
            gc.DrawText(text, x, y)
        gc.PopState()

    def draw_image(self, node, gc, draw_mode, zoomscale=1.0):
        """Default draw routine for SVG image elements."""
        try:
            matrix = node.transform
        except AttributeError:
            matrix = Matrix()
        gc.PushState()
        gc.ConcatTransform(wx.GraphicsContext.CreateMatrix(gc, ZMatrix(matrix)))
        if draw_mode & DRAW_MODE_CACHE == 0:
            # Caching enabled: build the thumbnail bitmap once and reuse it.
            cache = None
            try:
                cache = node.wx_bitmap_image
            except AttributeError:
                pass
            if cache is None:
                try:
                    max_allowed = node.max_allowed
                except AttributeError:
                    max_allowed = 2048
                node.c_width, node.c_height = node.image.size
                node.wx_bitmap_image = self.make_thumbnail(node.image, maximum=max_allowed)
            gc.DrawBitmap(node.wx_bitmap_image, 0, 0, node.c_width, node.c_height)
        else:
            # Caching disabled: rebuild the thumbnail every draw.
            node.c_width, node.c_height = node.image.size
            cache = self.make_thumbnail(node.image)
            gc.DrawBitmap(cache, 0, 0, node.c_width, node.c_height)
        gc.PopState()

    def draw_group(self, element, gc, draw_mode, zoomscale=1.0):
        """Groups draw nothing themselves; members are drawn individually."""
        pass

    def make_raster(self, elements, bounds, width=None, height=None, bitmap=False, step=1):
        """Rasterize elements within *bounds* to a PIL image (or wx.Bitmap).

        :param bounds: (xmin, ymin, xmax, ymax) region to render; None aborts.
        :param width/height: output size; defaults to the bounds size.
        :param bitmap: when True return the wx.Bitmap instead of a PIL image.
        :param step: downsampling factor applied to the output size.
        """
        if bounds is None:
            return None
        xmin, ymin, xmax, ymax = bounds
        xmax = ceil(xmax)
        ymax = ceil(ymax)
        xmin = floor(xmin)
        ymin = floor(ymin)
        image_width = int(xmax - xmin)
        if image_width == 0:
            image_width = 1
        image_height = int(ymax - ymin)
        if image_height == 0:
            image_height = 1
        if width is None:
            width = image_width
        if height is None:
            height = image_height
        width /= float(step)
        height /= float(step)
        width = int(width)
        height = int(height)
        bmp = wx.Bitmap(width, height, 32)
        dc = wx.MemoryDC()
        dc.SelectObject(bmp)
        dc.Clear()
        # Shift the bounds to the origin and scale uniformly to fit.
        matrix = Matrix()
        matrix.post_translate(-xmin, -ymin)
        scale_x = width / float(image_width)
        scale_y = height / float(image_height)
        scale = min(scale_x, scale_y)
        matrix.post_scale(scale)
        gc = wx.GraphicsContext.Create(dc)
        gc.PushState()
        gc.ConcatTransform(wx.GraphicsContext.CreateMatrix(gc, ZMatrix(matrix)))
        if not isinstance(elements, (list, tuple)):
            elements = [elements]
        # DRAW_MODE_CACHE set: do not pollute element caches during rasters.
        self.render(elements, gc, draw_mode=DRAW_MODE_CACHE)
        img = bmp.ConvertToImage()
        buf = img.GetData()
        image = Image.frombuffer("RGB", tuple(bmp.GetSize()), bytes(buf), "raw", "RGB", 0, 1)
        gc.Destroy()
        del dc
        if bitmap:
            return bmp
        return image

    def make_thumbnail(self, pil_data, maximum=None, width=None, height=None):
        """Resizes the given pil image into wx.Bitmap object that fits the constraints."""
        image_width, image_height = pil_data.size
        # Derive the missing dimension(s) preserving aspect ratio.
        if width is not None and height is None:
            height = width * image_height / float(image_width)
        if width is None and height is not None:
            width = height * image_width / float(image_height)
        if width is None and height is None:
            width = image_width
            height = image_height
        if maximum is not None and (width > maximum or height > maximum):
            scale_x = maximum / width
            scale_y = maximum / height
            scale = min(scale_x, scale_y)
            width = int(round(width * scale))
            height = int(round(height * scale))
        if image_width != width or image_height != height:
            pil_data = pil_data.copy().resize((width, height))
        else:
            pil_data = pil_data.copy()
        if pil_data.mode != "RGBA":
            pil_data = pil_data.convert('RGBA')
        pil_bytes = pil_data.tobytes()
        return wx.Bitmap.FromBufferRGBA(width, height, pil_bytes)
|
"""Defines a bidirectional LSTM-CNNs-CRF as described in https://arxiv.org/abs/1603.01354."""
import tensorflow as tf
import opennmt as onmt
def model():
    """Assemble the bidirectional LSTM-CNNs-CRF sequence tagger."""
    word_embedder = onmt.inputters.WordEmbedder(
        vocabulary_file_key="words_vocabulary",
        embedding_size=None,
        embedding_file_key="words_embedding",
        trainable=True)
    char_embedder = onmt.inputters.CharConvEmbedder(
        vocabulary_file_key="chars_vocabulary",
        embedding_size=30,
        num_outputs=30,
        kernel_size=3,
        stride=1,
        dropout=0.5)
    inputter = onmt.inputters.MixedInputter(
        [word_embedder, char_embedder],
        dropout=0.5)
    encoder = onmt.encoders.BidirectionalRNNEncoder(
        num_layers=1,
        num_units=400,
        reducer=onmt.layers.ConcatReducer(),
        cell_class=tf.contrib.rnn.LSTMCell,
        dropout=0.5,
        residual_connections=False)
    return onmt.models.SequenceTagger(
        inputter=inputter,
        encoder=encoder,
        labels_vocabulary_file_key="tags_vocabulary",
        crf_decoding=True)
|
import pytest
from pathlib import Path
import shutil
from spikeinterface import set_global_tmp_folder
from spikeinterface.core.testing_tools import generate_recording
from spikeinterface.toolkit.preprocessing import clip, blank_staturation
import numpy as np
# Use pytest's session-wide scratch folder when running under the suite;
# otherwise fall back to a local directory for direct execution.
if hasattr(pytest, "global_test_folder"):
    cache_folder = pytest.global_test_folder / "toolkit"
else:
    cache_folder = Path("cache_folder") / "toolkit"
set_global_tmp_folder(cache_folder)
def test_clip():
    """Clip a generated recording and check the saved traces obey the bounds."""
    rec = generate_recording()

    rec0 = clip(rec, a_min=-2, a_max=3.)
    rec0.save(verbose=False)

    rec1 = clip(rec, a_min=-1.5)
    rec1.save(verbose=False)

    traces0 = rec0.get_traces(segment_index=0, channel_ids=[1])
    assert traces0.shape[1] == 1
    # Check every sample, not just the first frame as before; also avoid
    # chained comparisons on arrays, which rely on a single-element truth
    # value and break for any larger slice.
    assert np.all((traces0 >= -2) & (traces0 <= 3))

    traces1 = rec1.get_traces(segment_index=0, channel_ids=[0, 1])
    assert traces1.shape[1] == 2
    assert np.all(traces1 >= -1.5)
def test_blank_staturation():
    """Saturation-blanking with an absolute and a quantile threshold."""
    recording = generate_recording()

    rec_abs = blank_staturation(recording, abs_threshold=3.)
    rec_abs.save(verbose=False)

    rec_quant = blank_staturation(recording, quantile_threshold=0.01, direction='both',
                                  chunk_size=10000)
    rec_quant.save(verbose=False)

    traces_abs = rec_abs.get_traces(segment_index=0, channel_ids=[1])
    assert traces_abs.shape[1] == 1
    assert np.all(traces_abs < 3.)

    traces_quant = rec_quant.get_traces(segment_index=0, channel_ids=[0])
    assert traces_quant.shape[1] == 1
    # use a smaller value to be sure
    lower_bound = rec_quant._recording_segments[0].a_min
    assert np.all(traces_quant >= lower_bound)
# Allow running this test module directly, outside of pytest.
if __name__ == '__main__':
    test_clip()
    test_blank_staturation()
|
# -*- coding: utf-8 -*-
"""
Base components for wrappers.
"""
from abc import ABC
import functools
import inspect
from types import FunctionType
from types import MethodType
from types import ModuleType
import typing as tp
Wrappable = tp.TypeVar('Wrappable', ModuleType, type, object)
class WrapperInjector(ABC):
    """
    Groups a module's functions into a single class with parameter
    injection.

    Parameters
    ----------
    *args : tuple, optional
        Arguments to partially-wrap each module function with.
    **kwargs : Mapping[str, Any], optional
        Keyword arguments to partially-wrap each module function with.
    """

    def __init__(self, *args, **kwargs) -> None:
        self._args = args
        self._kwargs = kwargs
        return

    @classmethod
    def __wrap_function(cls, func: tp.Callable) -> tp.Callable:
        """Creates a wrapper function which injects object parameters.

        This is used to wrap functions defined in other modules into
        this class with parameters stored in this instance injected into
        the wrapped `func` prior to any additional parameters. This is
        done in two parts:

        1. Create a new ``partial`` with this instance's ``self._args``
           and ``self._kwargs``.
        2. Run that new ``partial`` from the first step with any
           additional ``args`` or ``kwargs`` passed in.

        Parameters
        ----------
        func : Callable
            The function to wrap for calls from this class.

        Returns
        -------
        Callable
            The wrapped/adapted function to use.
        """
        @functools.wraps(func)
        def _function_wrapper(self, *args, **kwargs) -> tp.Any:
            p = functools.partial(func, *self._args, **self._kwargs)
            return p(*args, **kwargs)
        return _function_wrapper

    @classmethod
    def __wrap_method(cls, meth: tp.Callable) -> tp.Callable:
        """Creates a wrapper method which injects object parameters.

        This is used to wrap methods defined in other classes into
        this class with parameters stored in this instance injected into
        the wrapped `meth` prior to any additional parameters.

        Parameters
        ----------
        meth : Callable
            The method to wrap for calls from this class.

        Returns
        -------
        Callable
            The wrapped/adapted method to use.
        """
        @functools.wraps(meth)
        def _method_wrapper(self, *args, **kwargs) -> tp.Any:
            # BUG FIX: was ``functools.partialmethod`` — partialmethod objects
            # are descriptors and are not directly callable, so calling ``p``
            # raised TypeError. Also ``self.__kwargs`` name-mangled to
            # ``self._WrapperInjector__kwargs`` which is never set; the
            # constructor stores ``self._kwargs``.
            p = functools.partial(meth, *self._args, **self._kwargs)
            return p(*args, **kwargs)
        return _method_wrapper
def _make_decorator(
    obj: Wrappable,
    to_wrap: tp.Iterable[str]
) -> tp.Callable[[tp.Type[WrapperInjector]], tp.Type[WrapperInjector]]:
    """Makes the decorator function to use for wrapping.

    Parameters
    ----------
    obj : :obj:`ModuleType`, :obj:`type` or :obj:`object`
        The source object to wrap the `to_wrap` attributes of.
    to_wrap : Iterable[str]
        The names of the attributes of `obj` to wrap.

    Returns
    -------
    Callable[[Type[WrapperInjector]], Type[WrapperInjector]]
        The decorator to use for wrapping a new :obj:`WrapperInjector`
        class.
    """
    # BUG FIXES relative to the previous version:
    # - the return annotation used ``tp.type`` (AttributeError at def time);
    # - ``to_wrap`` was reassigned inside ``_wrapper`` before being read,
    #   raising UnboundLocalError (local shadowed the closure variable);
    # - ``cls.__wrap_function`` is a class-private attribute of
    #   WrapperInjector, so from module level it must be accessed through
    #   its mangled name.
    def _wrapper(cls: tp.Type[WrapperInjector]) -> tp.Type[WrapperInjector]:
        names = tuple(to_wrap)
        cls.__wrapped__ = names
        wrap_function = cls._WrapperInjector__wrap_function
        wrap_method = cls._WrapperInjector__wrap_method
        for name in names:
            attr = getattr(obj, name)
            if isinstance(attr, FunctionType):
                setattr(cls, name, wrap_function(attr))
            else:
                setattr(cls, name, wrap_method(attr))
        return cls
    return _wrapper
def _get_default_class_exports(cls: type) -> tp.List[str]:
    """Gets the wrappable function names of the given `cls`.

    Instance methods (callables whose first parameter is ``self``) are
    excluded since they cannot be called without an instance; bound
    classmethods and self-less callables are kept.
    """
    def _filter_instance_methods(func: tp.Callable) -> bool:
        if isinstance(func, MethodType):
            # Already bound (e.g. a classmethod accessed on the class).
            return True
        f_sig = inspect.signature(func)
        if len(f_sig.parameters) > 0:
            p_1 = next(iter(f_sig.parameters.values()))
            return p_1.name != 'self'
        return True

    # BUG FIX: _get_default_object_exports yields attribute *names*; the
    # previous version passed the bare strings into inspect.signature,
    # which raises TypeError. Resolve each name back to its attribute.
    return [
        name for name in _get_default_object_exports(cls)
        if _filter_instance_methods(getattr(cls, name))
    ]
def _get_default_module_exports(module: ModuleType) -> tp.List[str]:
    """Gets the wrappable function names of the given `module`.

    Honours an explicit ``__all__`` when present; otherwise returns the
    public callables actually defined in the module itself.
    """
    if hasattr(module, '__all__'):
        return module.__all__
    return [
        name for name, value in module.__dict__.items()
        if not name.startswith('_')
        and callable(value)
        and inspect.getmodule(value) == module
    ]
def _get_default_object_exports(obj: tp.Union[type, object]) -> tp.List[str]:
    """Gets the public (non-underscore) callable member names of `obj`."""
    return [
        name for name, _member in inspect.getmembers(obj, callable)
        if not name.startswith('_')
    ]
def get_wrappable_items(
    obj: Wrappable,
    include: tp.Optional[tp.Iterable[str]] = None,
    exclude: tp.Optional[tp.Iterable[str]] = None
) -> tp.Iterable[str]:
    """Gets the wrappable items from the `obj` to wrap.

    Parameters
    ----------
    obj : Wrappable
        The object to get the wrappable elements of.
    include : Iterable[str], optional
        The :obj:`callable` elements of the given `obj` to wrap (if
        ``None`` then all callable elements which are on the given `obj`
        will be used).
    exclude : Iterable[str], optional
        The :obj:`callable` elements of the given `obj` to ignore.

    Returns
    -------
    Iterable[str]
        The names of the wrappable elements to export from the given
        `obj` and `include`/`exclude` constraints.

    Raises
    ------
    AttributeError
        If an element to be wrapped (as specified by `include`) doesn't
        exist on the given `obj`.
    ValueError
        If an element to be wrapped from the given `obj` is not
        :obj:`callable`.
    """
    if include:
        items = list(include)
    elif isinstance(obj, ModuleType):
        items = _get_default_module_exports(obj)
    elif isinstance(obj, type):
        items = _get_default_class_exports(obj)
    else:
        items = _get_default_object_exports(obj)
    if exclude:
        excluded = set(exclude)
        items = [name for name in items if name not in excluded]
    # Validate that every selected element is actually callable.
    for name in items:
        element = getattr(obj, name)
        if not callable(element):
            raise ValueError(f"Not callable: {element!r}")
    return items
def module_wrapper(
    module: ModuleType,
    include: tp.Optional[tp.Iterable[str]] = None,
    exclude: tp.Optional[tp.Iterable[str]] = None
) -> tp.Callable[[tp.Type[WrapperInjector]], tp.Type[WrapperInjector]]:
    """Decorator for exporting module functions onto a wrapper class.

    Given a module of related functions that all take a common leading
    parameter (e.g. a ``db`` handle)::

        # person.py
        def add(db, first_name, last_name): ...
        def update(db, first_name, last_name): ...
        def remove(db, first_name, last_name): ...

    a wrapper class can bundle them with that parameter injected::

        @module_wrapper(person)
        class PersonWrapper(WrapperInject):
            pass

    so that ``PersonWrapper(db).add('John', 'Doe')`` calls
    ``person.add(db, 'John', 'Doe')`` — handy for Unit-of-Work style
    facades exposing ``uow.people.add(...)``.

    Parameters
    ----------
    module : ModuleType
        The python module to wrap with the decorated class.
    include : Iterable[str], optional
        The :obj:`callable` attributes of the given `module` to wrap (if
        ``None`` then all callables which are defined *in* the given
        `module` will be used).
    exclude : Iterable[str], optional
        The :obj:`callable` attributes of the given `module` to ignore
        when performing the wrapping.

    Returns
    -------
    Callable[[Type[WrapperInjector]], Type[WrapperInjector]]
        The decorator, tailored for the given module, which will add the
        specified functions to the wrapped class.

    Raises
    ------
    ValueError
        If an attribute being wrapped from the given `module` is not a
        :obj:`callable`.
    """
    return _make_decorator(
        module, get_wrappable_items(module, include=include, exclude=exclude))
def class_wrapper(
    cls: type,
    include: tp.Optional[tp.Iterable[str]] = None,
    exclude: tp.Optional[tp.Iterable[str]] = None
) -> tp.Callable[[tp.Type[WrapperInjector]], tp.Type[WrapperInjector]]:
    """Wraps the given `cls` type (see :func:`module_wrapper`)."""
    return _make_decorator(
        cls, get_wrappable_items(cls, include=include, exclude=exclude))
def object_wrapper(
    obj: object,
    include: tp.Optional[tp.Iterable[str]] = None,
    exclude: tp.Optional[tp.Iterable[str]] = None
) -> tp.Callable[[tp.Type[WrapperInjector]], tp.Type[WrapperInjector]]:
    """Wraps the given `obj` object instance (see :func:`module_wrapper`)."""
    return _make_decorator(
        obj, get_wrappable_items(obj, include=include, exclude=exclude))
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Author : cold
# E-mail : wh_linux@126.com
# Date : 14/01/16 12:00:06
# Desc : 读取URL信息(标题)插件
#
import re
from plugins import BasePlugin
from _linktitle import get_urls, fetchtitle
class URLReaderPlugin(BasePlugin):
    """Detects URLs in incoming messages and replies with their page titles."""

    # Matches http/https URLs: host, optional port, path and query string.
    # FIX: the second fragment was a plain (non-raw) string containing the
    # regex escapes ``\?`` — an invalid string escape that triggers a
    # SyntaxWarning/DeprecationWarning on modern Python. Both fragments are
    # raw strings now; the compiled pattern is unchanged.
    URL_RE = re.compile(r"(http[s]?://(?:[-a-zA-Z0-9_]+\.)+[a-zA-Z]+(?::\d+)"
                        r"?(?:/[-a-zA-Z0-9_%./]+)*\??[-a-zA-Z0-9_&%=.]*)",
                        re.UNICODE)

    def is_match(self, from_uin, content, type):
        """Remember any URLs found in *content*; True when at least one exists."""
        urls = get_urls(content)
        if urls:
            self._urls = urls
            return True
        return False

    def handle_message(self, callback):
        """Fetch titles for the URLs captured by the last is_match() call."""
        fetchtitle(self._urls, callback)
|
from webdnn.backend.webgpu.kernels.elementwise import register_elementwise_kernel
from webdnn.graph.operators.softsign import Softsign
# Softsign(x) = x / (|x| + 1), emitted here as a WebGPU elementwise kernel.
register_elementwise_kernel(Softsign, "y = x0 / (fabs(x0) + 1.0f);")
|
from .constraint_rewriter import rewrite_constraints, rewrite_ast
from .. import SuccessorsNode, SuccessorNode, DecisionNotTakenNode, TextDecisionNode, TextFlatNode, \
InputByteSwitchTableNode, ReadEvalLoopNode, ReadEvalNode, ActionsNode, DecisionNode
from ..visitor import Visitor
def readable_encode_dnf(dnf, conjunction_sep=' AND ', disjunction_sep='\nOR\n'):
    """Render a DNF (list of conjunctions of terms) as human-readable text."""
    conjunctions = [conjunction_sep.join(terms) for terms in dnf]
    return disjunction_sep.join(conjunctions)
def readable_encode_cnf(dnf, conjunction_sep=' AND ', disjunction_sep='\nOR\n'):
    """Render a CNF-style nesting: inner terms ORed, outer clauses ANDed."""
    clauses = [disjunction_sep.join(terms) for terms in dnf]
    return conjunction_sep.join(clauses)
def readable_encode_action(act):
    """Render a single trace action dict ('write' or 'read') as text.

    Raises:
        ValueError: for an unrecognised action type. (Previously this was
        ``assert False``, which is silently stripped under ``python -O``.)
    """
    if act['type'] == 'write':
        return "output @ {} = {}".format(act['addrs'], rewrite_ast(act['value']))
    elif act['type'] == 'read':
        return "Read input @ {}".format(act['addrs'])
    else:
        raise ValueError("Unknown action type: {!r}".format(act['type']))
class ReadabilityCompacter(Visitor):
    """Rewrites an analysis graph into text-oriented nodes for display.

    Each ``_visit_*`` hook receives the original node plus ``replacements``,
    a map from already-visited nodes to their compacted substitutes, and
    returns the list of compacted nodes standing in for the original.

    NOTE(review): the ``.encode('string-escape')`` calls below use a codec
    that exists only in Python 2; under Python 3 they raise LookupError.
    Confirm the targeted interpreter version before porting.
    """

    def _visit_actions_node(self, node, replacements):
        """Collapse an actions node into one flat text node.

        :param node:
        :type node: ActionsNode
        :return:
        """
        succs = replacements[node.successor] if node.successor else []
        assert len(succs) < 2, "An action node cannot be follow by more than one node! Got: {}".format(succs)
        succ = succs[0] if len(succs) > 0 else None
        acts = node.actions_info
        text = '\n'.join(readable_encode_action(act) for act in acts)
        return [TextFlatNode(text, succ)]

    def _visit_successors_node(self, node, replacements):
        """Turn a successors node into a decision node listing passed checks.

        :param node:
        :type node: SuccessorsNode
        :return:
        """
        sat_succs = [compact_succ for succ in node.sat_succs for compact_succ in replacements[succ]]
        # With no taken constraints there is no decision to show; splice the
        # satisfied successors straight through.
        if len(node.taken_constraints) == 0:
            return sat_succs
        unsat_succs = [compact_succ for succ in node.unsat_succs for compact_succ in replacements[succ]]
        text = ''
        text += "Successful checks: \n-> " + '\n-> '.join(rewrite_constraints(node.taken_constraints))
        return [TextDecisionNode(sat_succs, unsat_succs, text=text)]

    def _visit_successor_node(self, node, replacements):
        """Render a single successor's fresh constraints and nearby strings.

        :param node:
        :type node: SuccessorNode
        :return:
        """
        succs = replacements[node.successor] if node.successor else []
        assert len(succs) < 2, "A successor node cannot be follow by more than one node! Got: {}".format(succs)
        succ = succs[0] if len(succs) > 0 else None
        text = ''
        text += "Necessary checks: \n-> " + '\n-> '.join(rewrite_constraints(node.fresh_constraints))
        text += "Nearby reachable text: \n-> "
        text += '\n-> '.join([s.replace('\n', '') for s in node.reachable_string_refs])
        return [TextFlatNode(text, succ)]

    def _visit_decision_not_taken_node(self, node, replacements):
        """Render a dead-end branch: the DNF that would make it reachable.

        :param node:
        :type node: DecisionNotTakenNode
        :return:
        """
        text = ''
        text += "Reachable if: \n-> "
        dnf = [[c_txt for c_txt in rewrite_constraints(constr)] for constr in node.reachable_constraints_options]
        text += readable_encode_dnf(dnf).encode('string-escape')
        text += "\n\nNearby reachable text: \n-> "
        text += '\n-> '.join([s.replace('\n', '<newline>') for s in node.reachable_string_refs])
        return [TextFlatNode(text, None)]

    def _visit_decision_node(self, node, replacements):
        """Render a taken decision with its constraints as a decision node.

        :param node:
        :type node: DecisionNode
        :return:
        """
        sat_succs = [compact_succ for succ in node.taken_successors for compact_succ in replacements[succ]]
        unsat_succs = [compact_succ for succ in node.unsat_successors for compact_succ in replacements[succ]]
        text = ''
        text += readable_encode_dnf([rewrite_constraints(node.taken_constraints)]).encode('string-escape') + '\n'
        return [TextDecisionNode(sat_succs, unsat_succs, text=text)]

    def _visit_input_byte_switch_table_node(self, node, replacements):
        """Switch tables compact exactly like plain decision nodes.

        :param node:
        :type node: InputByteSwitchTableNode
        :return:
        """
        return self._visit_decision_node(node, replacements)

    def _visit_read_eval_node(self, node, replacements):
        """Render a read-then-check node: which bytes were read, then checks.

        :type node: ReadEvalNode
        :param node:
        :param replacements:
        :return:
        """
        sat_succs = [compact_succ for succ in node.taken_successors for compact_succ in replacements[succ]]
        unsat_succs = [compact_succ for succ in node.unsat_successors for compact_succ in replacements[succ]]
        text = ''
        actions = node.read_actions
        read_bytes_str = ''
        addrs = set()
        for action in actions:
            assert action['type'] == 'read'
            addrs.update(action['addrs'])
        read_bytes_str += ','.join(map(str, sorted(addrs)))
        const = readable_encode_dnf([rewrite_constraints(node.taken_constraints)]).encode('string-escape') + '\n'
        text += 'Read input characters at {} => Check {}'.format(read_bytes_str, const)
        return [TextDecisionNode(sat_succs, unsat_succs, text)]

    def _visit_read_eval_loop_node(self, node, replacements):
        """Like _visit_read_eval_node, but over the loop's action/check pairs.

        :type node: ReadEvalLoopNode
        :param node:
        :param replacements:
        :return:
        """
        sat_succs = [compact_succ for succ in node.taken_successors for compact_succ in replacements[succ]]
        unsat_succs = [compact_succ for succ in node.unsat_successors for compact_succ in replacements[succ]]
        text = ''
        for act_const_pair in node.actions_constraints_pairs:
            actions = act_const_pair[0]
            read_bytes_str = ''
            addrs = set()
            for action in actions:
                assert action['type'] == 'read'
                addrs.update(action['addrs'])
            read_bytes_str += ','.join(map(str, sorted(addrs)))
            const = ('(' + ') AND ('.join(rewrite_constraints(act_const_pair[1])) + ')').encode('string-escape') + '\n'
            text += 'Read input characters at {} => Check {}'.format(read_bytes_str, const)
        return [TextDecisionNode(sat_succs, unsat_succs, text)]
|
import operator
class Explanation:
    """Per-feature shift explanation for a single observation."""

    def __init__(self, observation, shifts, ranges):
        """
        Initializes an explanation with a dictionary from features to their shifts.
        """
        self.shifts = shifts
        self.ranges = ranges
        self.observation = observation
        # Shifts whose magnitude is at or below this are treated as noise.
        self.EPSILON = 1e-5

    def top_k(self, k):
        """
        Returns the top k features ranked by absolute shift in descending order.
        """
        return self.features()[:k]

    def features(self):
        """
        Returns all features ranked by absolute shift in descending order.
        Equivalent to calling top_k(n) where n is the number of features.

        (Resolves the old TODO: this previously sorted by raw signed shift,
        inconsistently with top_k.)
        """
        return sorted(self.shifts.items(), key=lambda item: abs(item[1]), reverse=True)

    def confidence(self):
        """
        Returns a quantity representing the robustness of the model. A higher
        confidence value indicates a robust observation given the model; a
        lower value indicates a volatile observation.

        Returns 0.0 when no feature shifted beyond EPSILON (previously this
        raised ZeroDivisionError).
        """
        total = 0.0
        count = 0
        n = len(self.observation[0])
        for i in range(n):
            shift = abs(self.shifts["feature " + str(i)])
            if shift > self.EPSILON:
                total += shift / float(self.ranges[i])
                count += 1
        if count == 0:
            return 0.0
        return total / float(count)
class ExplainableModel(object):
    """Interface for models whose predictions can be explained.

    Every method is a no-op placeholder (returns None) intended to be
    overridden by concrete subclasses.
    """

    def fit(self, X, y):
        """Trains the model with the provided training data."""

    def predict(self, X):
        """Predicts the output given the trained model and an observation."""

    def score(self, X, y):
        """Returns the coefficient of determination of the model."""

    def explain(self, X):
        """Returns an explanation given the trained model and an observation."""
class Explainer:
    """Produces Explanations by delegating the shift search to an optimizer."""

    def __init__(self, optimizer):
        self.optimizer = optimizer

    def explain(self, model, data, X):
        """
        Returns an Explanation given a model and an observation
        """
        result = self.optimizer.optimize(model, data, X)
        return Explanation(X, *result)
|
import json
import os
import re
import shutil
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, cast
import setuptools
import tomlkit
from editables import EditableProject # type: ignore
from ppsetuptools.ppsetuptools import _parse_kwargs # type: ignore
from vulcan import Vulcan, flatten_reqs
from vulcan.plugins import PluginRunner
# ``version`` maps an installed distribution name to its version string.
version: Callable[[str], str]
if sys.version_info >= (3, 8):
    # importlib.metadata is standard library from Python 3.8 onwards.
    from importlib.metadata import version
else:
    from importlib_metadata import version
def _filter_nones(vals_dict: Dict[str, Optional[Any]]) -> Dict[str, Any]:
    """Return a copy of *vals_dict* with every None-valued key dropped."""
    return {key: value for key, value in vals_dict.items() if value is not None}
def setup(**kwargs: Any) -> Any:
    """setuptools.setup wrapper that merges [project] metadata from pyproject.toml.

    Rejects [project] dependency keys (vulcan owns dependency resolution),
    merges the parsed metadata with explicit *kwargs* (kwargs win), and
    translates PEP 621 scripts tables into setuptools entry_points.
    """
    with open('pyproject.toml', 'r') as pptoml:
        pyproject_data = cast(Dict[str, Any], tomlkit.parse(pptoml.read()))
    if 'project' not in pyproject_data:
        return setuptools.setup(**_filter_nones(kwargs))
    if 'dependencies' in pyproject_data['project']:
        raise RuntimeError("May not use [project]:dependencies key with vulcan")
    if 'optional-dependencies' in pyproject_data['project']:
        raise RuntimeError("May not use [project]:optional-dependencies key with vulcan")
    parsed_kwargs = _parse_kwargs(pyproject_data['project'], '.')
    parsed_kwargs.update(_filter_nones(kwargs))
    parsed_kwargs = _filter_nones(parsed_kwargs)
    # ppsetuptools doesn't handle entry points correctly, so translate the
    # scripts / gui-scripts tables into setuptools' entry_points mapping.
    if 'scripts' in parsed_kwargs:
        parsed_kwargs.setdefault('entry_points', {})
        parsed_kwargs['entry_points']['console_scripts'] = parsed_kwargs['scripts']
        del parsed_kwargs['scripts']
    if 'gui-scripts' in parsed_kwargs:
        # BUG FIX: previously assumed 'entry_points' already existed, which
        # was only true when 'scripts' was also present — a project declaring
        # only gui-scripts raised KeyError here.
        parsed_kwargs.setdefault('entry_points', {})
        parsed_kwargs['entry_points']['gui_scripts'] = parsed_kwargs['gui-scripts']
        del parsed_kwargs['gui-scripts']
    if 'entry_points' in parsed_kwargs:
        # Flatten each group's {name: target} table into 'name=target' strings.
        for ep_group in list(parsed_kwargs['entry_points']):
            parsed_kwargs['entry_points'][ep_group] = [
                f'{k}={v}' for k, v in parsed_kwargs['entry_points'][ep_group].items()]
    return setuptools.setup(**parsed_kwargs)
# Public PEP 517 build-backend hooks exposed by this module.
__all__ = ['build_wheel',
           'build_sdist']
@contextmanager
def patch_argv(argv: List[str]) -> Generator[None, None, None]:
    """Temporarily replace ``sys.argv[1:]`` with *argv*.

    BUG FIX: restoration now happens in a ``finally`` block, so the original
    argv is put back even when the managed body raises (previously an
    exception left the patched argv in place).
    """
    old_argv = sys.argv[:]
    sys.argv = [sys.argv[0]] + argv
    try:
        yield
    finally:
        sys.argv = old_argv
def build(outdir: str, config_settings: Optional[Dict[str, str]] = None) -> str:
    """Run the setuptools build selected via sys.argv (see build_wheel /
    build_sdist) and move the artifact into *outdir*.

    Returns:
        The artifact's bare file name.
    """
    config = Vulcan.from_source(Path().absolute())
    options: Dict[str, Any] = {}
    if config.packages:
        options['packages'] = config.packages
    if config.version:
        options['version'] = config.version
    if config.no_lock or (config_settings and config_settings.get('no-lock') == 'true'):
        # unlocked build: ship the loosely-pinned configured requirements
        options['install_requires'] = flatten_reqs(config.configured_dependencies)
        options['extras_require'] = config.configured_extras
    else:
        # locked build: ship the resolved requirements
        options['install_requires'] = config.dependencies
        options['extras_require'] = config.extras
    # https://setuptools.readthedocs.io/en/latest/userguide/keywords.html
    # https://docs.python.org/3/distutils/apiref.html
    with PluginRunner(config):
        dist = setup(**options, include_package_data=True)
    # dist_files entries end with the built artifact's path
    rel_dist = Path(dist.dist_files[0][-1])
    shutil.move(str(rel_dist), Path(outdir) / rel_dist.name)
    return rel_dist.name
def build_wheel(wheel_directory: str, config_settings: Optional[Dict[str, str]] = None,
                metadata_directory: Optional[str] = None) -> str:
    """PEP 517 hook: build a wheel into *wheel_directory*, return its name."""
    with patch_argv(['bdist_wheel']):
        return build(wheel_directory, config_settings)
def build_sdist(sdist_directory: str,
                config_settings: Optional[Dict[str, str]] = None,
                ) -> str:
    """PEP 517 hook: build an sdist into *sdist_directory*, return its name."""
    with patch_argv(['sdist']):
        return build(sdist_directory, config_settings)
def get_virtualenv_python() -> Path:
    """Return the path to the active virtualenv's python executable.

    Raises:
        RuntimeError: if no virtualenv is currently activated.
    """
    venv_root = os.environ.get('VIRTUAL_ENV')
    if venv_root is None:
        raise RuntimeError("No virtualenv active")
    # Windows keeps executables under Scripts/, POSIX under bin/.
    subdir = 'Scripts' if sys.platform == 'win32' else 'bin'
    return Path(venv_root, subdir, 'python')
# tox requires these two for some reason :(
def get_requires_for_build_sdist(config_settings: Optional[Dict[str, str]] = None) -> List[str]:
    """PEP 517 hook (required by tox): no extra sdist build requirements."""
    return []
def get_requires_for_build_wheel(config_settings: Optional[Dict[str, str]] = None) -> List[str]:
    """PEP 517 hook (required by tox): no extra wheel build requirements."""
    return []
def get_pip_version(python_callable: Path) -> Optional[Tuple[int, ...]]:
    """Return the pip version of *python_callable* as an int tuple.

    Returns None when the ``pip --version`` output cannot be parsed.
    """
    out = subprocess.check_output([str(python_callable), '-m', 'pip', '--version'], encoding='utf-8')
    m = re.search(r'pip (\d+\.\d+(\.\d+)?)', out)
    if not m:
        return None
    # tuple(<genexp>) directly — the extra generator parentheses were redundant
    return tuple(int(n) for n in m.group(1).split('.'))
@contextmanager
def maybe_gen_setuppy(venv: Path, config: Vulcan) -> Generator[None, None, None]:
    """Generate a throwaway setup.py when *venv*'s pip is older than 21.3
    (which, per the message below, cannot do PEP 517 editable installs);
    otherwise yield without side effects.

    The generated setup.py loads its keyword arguments from a temporary
    JSON metadata file; both files are removed when the context exits.
    Exits the process if a real setup.py already exists.
    """
    pip_version = get_pip_version(venv)
    if pip_version is None or pip_version < (21, 3):
        print(f"pip version {pip_version} does not support editable installs for PEP517 projects,"
              " falling back to generated setup.py")
        options: Dict[str, Any] = {}
        if config.packages:
            options['packages'] = config.packages
        if config.version:
            options['version'] = config.version
        if config.no_lock:
            # unlocked: loosely-pinned configured requirements
            options['install_requires'] = flatten_reqs(config.configured_dependencies)
            options['extras_require'] = config.configured_extras
        else:
            # locked: resolved requirements
            options['install_requires'] = config.dependencies
            options['extras_require'] = config.extras
        setup = Path('setup.py')
        if setup.exists():
            exit('may not use vulcan develop when setup.py is present')
        try:
            # delete=False so the generated setup.py can re-open the file by
            # name after this handle is closed; cleaned up in the finally.
            with tempfile.NamedTemporaryFile(suffix='.json', mode="w+", delete=False) as mdata_file:
                try:
                    mdata_file.write(json.dumps(options))
                    mdata_file.flush()
                    mdata_file.close()
                    with setup.open('w+') as setup_file:
                        setup_file.write(f"""\
from vulcan.build_backend import setup
import json, pathlib
setup(**json.load(pathlib.Path(r'{mdata_file.name}').open()))
""")
                    yield
                finally:
                    os.unlink(mdata_file.name)
        finally:
            setup.unlink()
    else:
        yield
def install_develop() -> None:
    """pip-install the current project into the active virtualenv in
    editable mode, including any configured extras.

    Exits the process when run outside a virtualenv.
    """
    config = Vulcan.from_source(Path().absolute())
    try:
        virtual_env = get_virtualenv_python()
    except RuntimeError:
        exit('may not use vulcan develop outside of a virtualenv')
    with maybe_gen_setuppy(virtual_env, config):
        path = str(Path().absolute())
        if config.configured_extras:
            # pip extras syntax: /path/to/project[extra1,extra2]
            path = f'{path}[{",".join(config.configured_extras)}]'
        subprocess.check_call([
            str(virtual_env), '-m', 'pip', 'install', '-e', path])
# pep660 functions
def unpack(whl: Path) -> Path:
    """Unpack *whl* (via the ``wheel`` CLI) next to itself and return the
    unpacked directory.

    BUG FIX: the command is now passed as an argument list instead of an
    f-string ``.split()``, so paths containing spaces no longer break.
    """
    with tempfile.TemporaryDirectory() as tmp:
        subprocess.check_output(['wheel', 'unpack', str(whl), '-d', tmp])
        unpacked = list(Path(tmp).glob('*'))
        assert len(unpacked) == 1
        shutil.copytree(unpacked[0], whl.parent / unpacked[0].name)
        return whl.parent / unpacked[0].name
def pack(unpacked_wheel: Path) -> Path:
    """Re-pack *unpacked_wheel* (via the ``wheel`` CLI) next to itself and
    return the resulting .whl path.

    BUG FIX: argument list instead of f-string ``.split()``, so paths with
    spaces survive.
    """
    with tempfile.TemporaryDirectory() as tmp:
        subprocess.check_output(['wheel', 'pack', str(unpacked_wheel), '-d', tmp])
        packed = list(Path(tmp).glob('*.whl'))
        assert len(packed) == 1
        shutil.copy(packed[0], unpacked_wheel.parent)
        return unpacked_wheel.parent / packed[0].name
def add_requirement(unpacked_whl_dir: Path, req: str) -> None:
    """Insert a ``Requires-Dist: <req>`` line into the wheel's METADATA.

    The line goes just before the first existing Requires-Dist entry or,
    failing that, the blank line that terminates the metadata headers.
    """
    metadata = next(unpacked_whl_dir.glob('*.dist-info')) / 'METADATA'  # METADATA is mandatory
    with metadata.open() as handle:
        header_lines = list(handle)
    insert_at = 0
    for insert_at, line in enumerate(header_lines):
        # stop at the first Requires-Dist line, or at the header/body boundary
        if not line.strip() or line.startswith('Requires-Dist: '):
            break
    header_lines.insert(insert_at, f'Requires-Dist: {req}\n')
    metadata.write_text(''.join(header_lines))
def make_editable(whl: Path) -> None:
    """Rewrite *whl* in place into an editable wheel.

    The packaged code directories are replaced with editables-generated
    redirection files, and a runtime dependency on the ``editables``
    package is added to the wheel metadata.
    """
    config = Vulcan.from_source(Path().absolute())
    unpacked_whl_dir = unpack(whl)
    add_requirement(unpacked_whl_dir, f"editables (~={version('editables')})")
    # https://www.python.org/dev/peps/pep-0427/#escaping-and-unicode
    # BUG FIX: re.UNICODE was previously passed positionally, where it landed
    # in re.sub's ``count`` parameter instead of ``flags``.
    project_name = re.sub(r'[^\w\d.]+', '_', config.name, flags=re.UNICODE)
    project = EditableProject(project_name, Path().absolute())
    for package in (config.packages or []):
        project.map(package, package)
        # removing the actual code packages because they will conflict with the .pth files, and take
        # precedence over them
        shutil.rmtree(unpacked_whl_dir / package)
    for name, content in project.files():
        (unpacked_whl_dir / name).write_text(content)
    assert whl == pack(unpacked_whl_dir), 'pre-wheel and post-wheel should be the same path'
    shutil.rmtree(unpacked_whl_dir)
def build_editable(wheel_directory: str, config_settings: Optional[Dict[str, str]] = None,
                   metadata_directory: Optional[str] = None) -> str:
    """PEP 660 hook: build a regular wheel, then convert it in place into an
    editable wheel, returning its file name."""
    whl_path = Path(wheel_directory) / build_wheel(wheel_directory, config_settings, metadata_directory)
    make_editable(whl_path)
    return whl_path.name
|
import os
from collections import defaultdict
import h5py
import numpy as np
class ModelDataset(object):
    """HDF5-backed store for model training data.

    A single ``features`` dataset is shared across difficulties, while the
    label/arrow datasets are stored once per difficulty (their dataset names
    get a ``_<difficulty>`` suffix).  Use as a context manager: ``__enter__``
    opens the underlying h5py file and selects the configured difficulty.
    """
    def __init__(self, dataset_name, overwrite=False, mode='a', difficulty="challenge"):
        # dataset_name is the file path without the .hdf5 extension
        self.dataset_name = dataset_name
        self.dataset_path = self.append_file_type(self.dataset_name)
        self.overwrite = overwrite
        self.mode = mode
        # NOTE(review): the accepted set includes "x" and "w-" but the error
        # message lists only a, r+, w, w+ — confirm which is intended.
        if self.overwrite and self.mode not in {"a", "r+", "w", "w+", "x", "w-"}:
            raise ValueError("Mode must be a, r+, w, or w+ while in overwrite mode!")
        if self.overwrite and os.path.isfile(self.dataset_path):
            os.remove(self.dataset_path)
        if not self.overwrite:
            # non-overwrite access is forced to read-only
            self.mode = "r"
        # ensure these dataset names are somewhat unique
        self.dataset_names = ["features", "labels", "sample_weights", "arrows", "label_encoded_arrows",
                              "binary_encoded_arrows", "string_arrows", "onehot_encoded_arrows", "file_names",
                              "song_index_ranges"]
        # datasets stored once per difficulty (suffixed names)
        self.difficulty_dataset_names = ["labels", "sample_weights", "arrows", "label_encoded_arrows",
                                         "binary_encoded_arrows", "string_arrows", "onehot_encoded_arrows"]
        # string datasets that are rewritten wholesale instead of resized
        self.scaler_dataset_names = ["file_names"]
        # bookkeeping attributes maintained on these datasets
        self.dataset_attr = {"labels": {"num_valid_samples", "pos_samples", "neg_samples"},
                             "features": {"num_samples"}}
        self.difficulties = {"challenge", "hard", "medium", "easy", "beginner"}
        self.difficulty = difficulty
        self.h5py_file = None
    def __getitem__(self, item):
        """Slice all per-sample datasets for the current difficulty;
        returns [features, labels, sample_weights, arrows, label-, binary-,
        string-, onehot-encoded arrows]."""
        data = [self.features[item], self.labels[item], self.sample_weights[item], self.arrows[item],
                self.label_encoded_arrows[item], self.binary_encoded_arrows[item], self.string_arrows[item],
                self.onehot_encoded_arrows[item]]
        return data
    def __len__(self):
        # num_samples raises KeyError until a "features" dataset exists
        try:
            return self.num_samples
        except KeyError:
            return 0
    def __enter__(self):
        self.reset_h5py_file()
        self.set_difficulty(difficulty=self.difficulty)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
    def close(self):
        """Flush and close the underlying h5py file."""
        self.h5py_file.flush()
        self.h5py_file.close()
    def reset_h5py_file(self):
        """(Re)open the h5py file, closing any previously open handle."""
        if self.h5py_file is not None:
            try:
                self.h5py_file.close()
            except IOError:
                pass
        self.h5py_file = h5py.File(self.dataset_path, self.mode, libver='latest')
    def create_dataset(self, data, dataset_name):
        """Create *dataset_name* from *data*: S1024 fixed strings for the
        scaler datasets, otherwise chunked/compressed and resizable along
        axis 0 (maxshape None)."""
        if dataset_name in self.scaler_dataset_names:
            self.h5py_file.create_dataset(dataset_name, data=data, compression="lzf", dtype='S1024')
        else:
            if len(data.shape) > 1:
                data_shape = (None,) + data.shape[1:]
            else:
                data_shape = (None,)
            self.h5py_file.create_dataset(dataset_name, data=data, chunks=True, compression="lzf", maxshape=data_shape)
    def extend_dataset(self, data, dataset_name):
        """Append *data* to an existing dataset; scaler (string) datasets
        are deleted and recreated since they are not resizable."""
        if dataset_name in self.scaler_dataset_names:
            saved_dataset = self.h5py_file[dataset_name][:]
            del self.h5py_file[dataset_name]
            self.create_dataset(data=np.append(saved_dataset, [data]), dataset_name=dataset_name)
        else:
            self.h5py_file[dataset_name].resize((self.h5py_file[dataset_name].shape[0] + data.shape[0]), axis=0)
            self.h5py_file[dataset_name][-data.shape[0]:] = data
    def dump_difficulty_dataset(self, dataset_name, difficulty, value):
        """Create-or-extend the difficulty-suffixed dataset, then refresh
        its bookkeeping attributes."""
        difficulty_dataset_name = self.append_difficulty(dataset_name=dataset_name, difficulty=difficulty)
        if not self.h5py_file.get(difficulty_dataset_name):
            self.create_dataset(value, difficulty_dataset_name)
        else:
            self.extend_dataset(value, difficulty_dataset_name)
        saved_attributes = self.save_attributes(self.h5py_file, difficulty_dataset_name)
        self.set_dataset_attrs(self.h5py_file, difficulty_dataset_name, saved_attributes)
        self.update_dataset_attrs(self.h5py_file, difficulty_dataset_name, value)
    def dump(self, features, labels, sample_weights, arrows, label_encoded_arrows, binary_encoded_arrows, file_names,
             string_arrows, onehot_encoded_arrows):
        """Append one song's worth of data to every dataset.

        Per-difficulty values may arrive as a dict keyed by difficulty;
        difficulties missing from the dict are padded with null rows
        (b'0000' for S4 string data, -1 otherwise) so all difficulty
        datasets stay index-aligned.  On any failure the file is closed
        and the exception re-raised.
        """
        try:
            all_data = self.get_dataset_name_to_data_map(features=features,
                                                         labels=labels,
                                                         sample_weights=sample_weights,
                                                         arrows=arrows,
                                                         label_encoded_arrows=label_encoded_arrows,
                                                         binary_encoded_arrows=binary_encoded_arrows,
                                                         string_arrows=string_arrows,
                                                         onehot_encoded_arrows=onehot_encoded_arrows,
                                                         file_names=file_names,
                                                         song_index_ranges=[[len(self), len(self) + len(features)]])
            for dataset_name, data in all_data.items():
                if data is None:
                    continue
                if dataset_name in self.difficulty_dataset_names and isinstance(data, (dict, defaultdict)):
                    diff_copy = self.difficulties.copy()
                    for difficulty, value in data.items():
                        if difficulty in diff_copy:
                            diff_copy.remove(difficulty)
                            self.dump_difficulty_dataset(dataset_name, difficulty, value)
                    # pad difficulties absent from this song with null rows
                    data_shape = data[next(iter(data))].shape
                    dtype = data[next(iter(data))].dtype
                    if dtype == np.dtype('S4'):
                        null_values = np.chararray(data_shape, itemsize=4)
                        null_values[:] = '0000'
                    else:
                        null_values = np.full(data_shape, fill_value=-1)
                    for remaining_diff in diff_copy:
                        self.dump_difficulty_dataset(dataset_name, remaining_diff, null_values)
                    continue
                else:
                    if not self.h5py_file.get(dataset_name):
                        self.create_dataset(data, dataset_name)
                    else:
                        self.extend_dataset(data, dataset_name)
                    saved_attributes = self.save_attributes(self.h5py_file, dataset_name)
                    self.set_dataset_attrs(self.h5py_file, dataset_name, saved_attributes)
                    self.update_dataset_attrs(self.h5py_file, dataset_name, data)
            self.h5py_file.flush()
        except Exception as ex:
            self.close()
            raise ex
    def set_difficulty(self, difficulty):
        """Select the difficulty whose datasets the accessors return."""
        if difficulty not in self.difficulties:
            raise ValueError(
                "%s is not a valid difficulty! Choose a valid difficulty: %s" % (difficulty, self.difficulties))
        self.difficulty = difficulty
    def get_dataset_name_to_data_map(self, **data_dict):
        """Normalize keyword data to numpy arrays keyed by the known dataset
        names; datasets not supplied map to None."""
        dataset_name_to_data_map = {}
        for dataset_name in self.dataset_names:
            if dataset_name in data_dict:
                data = data_dict[dataset_name]
                if isinstance(data, str):
                    data = np.array([data], dtype='S1024')
                elif isinstance(data, list):
                    data = np.array(data)
                dataset_name_to_data_map[dataset_name] = data
            else:
                dataset_name_to_data_map[dataset_name] = None
        return dataset_name_to_data_map
    def set_dataset_attrs(self, h5py_file, dataset_name, saved_attributes=None):
        """Initialise bookkeeping attrs on labels/features datasets,
        restoring previously saved values when provided (else zero)."""
        if "labels" in dataset_name:
            for dataset_attr in self.dataset_attr["labels"]:
                if saved_attributes is not None and dataset_attr in saved_attributes:
                    h5py_file[dataset_name].attrs[dataset_attr] = saved_attributes[dataset_attr]
                else:
                    h5py_file[dataset_name].attrs[dataset_attr] = 0
        if "features" in dataset_name:
            for dataset_attr in self.dataset_attr["features"]:
                if saved_attributes is not None and dataset_attr in saved_attributes:
                    h5py_file[dataset_name].attrs[dataset_attr] = saved_attributes[dataset_attr]
                else:
                    h5py_file[dataset_name].attrs[dataset_attr] = 0
    @staticmethod
    def update_dataset_attrs(h5py_file, dataset_name, attr_value):
        """Accumulate sample counts; label batches containing any negative
        value (the null padding) are excluded from the valid/pos/neg tallies."""
        if "labels" in dataset_name:
            if not any(attr_value < 0):
                h5py_file[dataset_name].attrs["num_valid_samples"] += len(attr_value)
                h5py_file[dataset_name].attrs["pos_samples"] += attr_value.sum()
                h5py_file[dataset_name].attrs["neg_samples"] += len(attr_value) - attr_value.sum()
        elif "features" in dataset_name:
            h5py_file[dataset_name].attrs["num_samples"] += len(attr_value)
    @staticmethod
    def save_attributes(h5py_file, dataset_name):
        """Snapshot a dataset's attrs (empty dict when it doesn't exist)."""
        saved_attributes = {}
        if dataset_name in h5py_file:
            for attr_name in h5py_file[dataset_name].attrs:
                saved_attributes[attr_name] = h5py_file[dataset_name].attrs[attr_name]
        return saved_attributes
    @staticmethod
    def append_file_type(path):
        """Return *path* with the .hdf5 extension appended."""
        return path + '.hdf5'
    @staticmethod
    def append_difficulty(dataset_name, difficulty):
        """Return the difficulty-suffixed dataset name."""
        return "%s_%s" % (dataset_name, difficulty)
    # --- accessors; difficulty-suffixed datasets follow self.difficulty ---
    @property
    def num_samples(self):
        return self.h5py_file["features"].attrs["num_samples"]
    @property
    def num_valid_samples(self):
        return self.h5py_file[self.append_difficulty("labels", self.difficulty)].attrs["num_valid_samples"]
    @property
    def pos_samples(self):
        return self.h5py_file[self.append_difficulty("labels", self.difficulty)].attrs["pos_samples"]
    @property
    def neg_samples(self):
        return self.h5py_file[self.append_difficulty("labels", self.difficulty)].attrs["neg_samples"]
    @property
    def labels(self):
        return self.h5py_file[self.append_difficulty("labels", self.difficulty)]
    @property
    def sample_weights(self):
        return self.h5py_file[self.append_difficulty("sample_weights", self.difficulty)]
    @property
    def arrows(self):
        return self.h5py_file[self.append_difficulty("arrows", self.difficulty)]
    @property
    def label_encoded_arrows(self):
        return self.h5py_file[self.append_difficulty("label_encoded_arrows", self.difficulty)]
    @property
    def binary_encoded_arrows(self):
        return self.h5py_file[self.append_difficulty("binary_encoded_arrows", self.difficulty)]
    @property
    def string_arrows(self):
        return self.h5py_file[self.append_difficulty("string_arrows", self.difficulty)]
    @property
    def onehot_encoded_arrows(self):
        return self.h5py_file[self.append_difficulty("onehot_encoded_arrows", self.difficulty)]
    @property
    def file_names(self):
        # stored as S1024 bytes; decode back to str for callers
        return [file_name.decode('ascii') for file_name in self.h5py_file["file_names"]]
    @property
    def song_index_ranges(self):
        return self.h5py_file["song_index_ranges"]
    @property
    def features(self):
        return self.h5py_file["features"]
|
import sys
import os
import re
from collections import defaultdict
# Tokenizer: matches ~@, single special characters, double-quoted strings
# (possibly unterminated), ;-comments, and plain atoms.
TK_RE = re.compile(
    r"""[\s,]*(~@|[\[\]{}()'`~^@]|"(?:[\\].|[^\\"])*"?|;.*|[^\s\[\]{}()'"`@,;]+)"""
)
# End-anchored literals classifying whole tokens as ints or floats.
INT_RE = re.compile(r"-?[0-9]+$")
FLOAT_RE = re.compile(r"-?[0-9][0-9.]*$")
class Symbol(str):
    """Marker subclass of str distinguishing parsed symbols from literals."""
    pass
class State:
    """Mutable symbol table mapping names to bound values.

    Backed by a ``defaultdict(str)``: fetching a missing key yields '' and
    records the key as known from then on.
    """
    def __init__(self):
        self._bindings = defaultdict(str)
    def set(self, key, value):
        """Bind *key* to *value*, replacing any earlier binding."""
        self._bindings[key] = value
    def get(self, key):
        """Return the value bound to *key* ('' — and the key is created — if absent)."""
        return self._bindings[key]
    def bindings(self):
        """Return a view over all currently known keys."""
        return self._bindings.keys()
    def find(self, key):
        """Report whether *key* is currently known."""
        return key in self.bindings()
global_state = State()  # module-wide bindings written by defvar/defun and read by lookup()
class LispParser:
    """Parses one s-expression and evaluates it to a LaTeX string.

    Symbols ending in '!' render as LaTeX commands (``\\name{arg}...``),
    arithmetic symbols render infix, and a handful of special forms
    (documentclass!, begin!, defvar, defun, let, quote) are handled
    structurally in :meth:`eval`.
    """
    # class-level default; skip() shadows it with an instance attribute
    pointer = 0
    def __init__(self, code: str):
        self.code = code
        self.tokens = re.findall(TK_RE, code)
        self.back = self.tokens[self.pointer]  # current token
    def peek(self):
        """Return the current token without consuming it."""
        return self.back
    def skip(self):
        """Advance to the next token (asserts one exists)."""
        self.pointer += 1
        assert (self.pointer < len(self.tokens))
        self.back = self.tokens[self.pointer]
    def parse1(self):
        """Classify the current token as int, float, or Symbol."""
        if (re.match(INT_RE, self.peek())):
            return int(self.peek())
        elif (re.match(FLOAT_RE, self.peek())):
            return float(self.peek())
        else:
            return Symbol(self.peek())
    def parse_list(self, l_d='(', r_d=')'):
        """Parse a delimiter-bounded sequence of forms into a Python list."""
        ast = []
        assert self.peek() == l_d
        self.skip()
        while (self.peek() != r_d):
            ast.append(self.parse())
            self.skip()
        return ast
    def parse(self) -> list:
        """Parse one form: a ( ) list, a [ ] list, or a single atom."""
        if self.peek() == '(':
            return self.parse_list()
        elif self.peek() == '[':
            return self.parse_list(l_d='[', r_d=']')
        else:
            return self.parse1()
    def many_curlies(self, a):
        """Wrap *a* in a single pair of LaTeX braces."""
        return f"{{{a}}}"
    def lookup(self, symbol, state):
        """Resolve *symbol* to a value or a LaTeX-emitting callable.

        Resolution order: global bindings, local *state* bindings, built-in
        infix operators, '!'-suffixed LaTeX commands; otherwise the symbol
        itself is returned unchanged.
        """
        if symbol in global_state.bindings():
            return global_state.get(symbol)
        elif symbol in state.bindings():
            return state.get(symbol)
        elif symbol == '+' or symbol == 'plus':
            return lambda *a: " + ".join([f"{i}" for i in a]).strip()
        elif symbol == '-' or symbol == 'minus':
            return lambda a, b: f"{a} - {b}"
        elif symbol == '*' or symbol == 'times':
            return lambda a, b: f"{a} * {b}"
        elif symbol == '/' or symbol == 'div':
            return lambda a, b: f"{a} / {b}"
        elif symbol == '=' or symbol == 'eq':
            return lambda a, b: f"{a} = {b}"
        elif symbol == '%' or symbol == 'mod':
            return lambda a, b: f"{a} % {b}"
        elif symbol == '^' or symbol == 'up':
            return lambda a, b: f"{a}^{{{b}}}"
        elif symbol == '_' or symbol == 'sub':
            return lambda a, b: f"{a}_{{{b}}}"
        elif symbol == '<' or symbol == 'lt':
            return lambda a, b: f"{a} < {b}"
        elif symbol == '>' or symbol == 'gt':
            return lambda a, b: f"{a} > {b}"
        elif symbol.endswith('!'):
            # name! becomes \name{arg1}{arg2}...
            return lambda *a: f"\\{symbol.rstrip('!')}" + "".join(
                [self.many_curlies(i) for i in a])
        else:
            return symbol
    def eval1(self, ast, level, state) -> str:
        """Evaluate an atom (Symbol lookup) or map eval over a list."""
        if type(ast) == Symbol:
            return self.lookup(ast, state)
        elif type(ast) == list:
            return list(map(lambda a: self.eval(a, level, state), ast))
        else:
            return ast
    def indent(self, i):
        """Two spaces of output indentation per nesting level."""
        return " " * (2 * i)
    def eval(self, ast, level=0, state=State()) -> str:
        """Evaluate a parsed form into a LaTeX string.

        NOTE(review): the mutable default ``state=State()`` is shared across
        calls, so bindings written through it persist between top-level
        evaluations — confirm whether that is intended.
        """
        if not type(ast) == list:
            return self.eval1(ast, level, state)
        if len(ast) == 0:
            return ast
        if (ast[0] == "documentclass!"):
            # (documentclass! name [opts...] body)
            name, arglist, body = ast[1], ast[2], self.eval(
                ast[3], level, state).strip()
            if arglist:
                a = ", ".join([_ for _ in arglist])
                header = self.indent(
                    level) + f"\\documentclass{{{name}}}[{a}]\n"
            else:
                header = self.indent(level) + f"\\documentclass{{{name}}}\n"
            body = self.indent(level) + f"{body}"
            return header + body
        elif ast[0] == "begin!":
            # (begin! env body) -> \begin{env} ... \end{env}
            name, body = ast[1], self.eval(ast[2], level + 1, state).strip()
            header = self.indent(level) + f"\\begin{{{name}}}\n"
            body = self.indent(level + 1) + f"{body}\n"
            close = self.indent(level) + f"\\end{{{name}}}"
            return header + body + close
        elif ast[0] == "defvar":
            # (defvar name value) binds globally, emits nothing
            var, decl = ast[1], self.eval(ast[2], level, state)
            global_state.set(var, decl)
            return ""
        elif ast[0] == "defun":
            # (defun name (args...) body) binds a closure globally
            name, arglist, body = ast[1], ast[2], ast[3]
            def f(*args):
                env = State()
                for arg, val in zip(arglist, args):
                    env.set(arg, val)
                return self.eval(body, level, env)
            global_state.set(name, f)
            return ""
        elif ast[0] == "let":
            # (let ((var val)...) body) evaluates body with local bindings
            let_bindings = State()
            varlist, body = ast[1], ast[2]
            for vardecl in varlist:
                var, val = vardecl[0], vardecl[1]
                let_bindings.set(var, self.eval(val, level, state))
            return self.eval(body, level, let_bindings)
        elif ast[0] == "quote":
            return ast[1]
        else:
            # function application: evaluate all elements, call the head
            element = self.eval1(ast, level, state)
            function = element[0]
            return function(*element[1:])
    def value(self, inlined=False):
        """Parse and evaluate self.code; wrap in $...$ when *inlined*."""
        eval = self.eval(self.parse())
        if (inlined):
            return "$" + eval + "$"
        else:
            return eval
class Parser:
    """Scans an input document for ``@lisp (...)`` directives, expanding
    each through LispParser and copying all other text to *stream*."""
    def __init__(self, data, stream):
        self.data = data      # full input text
        self.pointer = 0      # current scan position into data
        self.stream = stream  # output file object
    def eprint(self, *args, **kwargs):
        """print() to the output stream without appending a newline."""
        print(*args, **kwargs, file=self.stream, end="")
    def __str__(self):
        return self.data[self.pointer::]
    def peek(self) -> str:
        """Return the character under the cursor."""
        return self.data[self.pointer]
    def skip(self) -> None:
        """Advance the cursor one character."""
        self.pointer += 1
    def skip_checked(self, token) -> None:
        """Consume exactly *token* at the cursor (asserted)."""
        assert self.peek() == token
        self.pointer += 1
    def is_eof(self) -> bool:
        return self.pointer == len(self.data)
    def find_next_token(self, token: str) -> bool:
        """Advance the cursor to the next *token*; False when EOF is hit."""
        while (not self.is_eof() and self.peek() != token):
            self.skip()
        return False if self.is_eof() else True
    def find_next_token_no_walk(self, token: str) -> int:
        """Return the index of the next *token* (or EOF) without moving
        the cursor."""
        ptr = self.pointer
        while (ptr < len(self.data) and self.data[ptr] != token):
            ptr += 1
        return ptr
    def find_matching_parenthesis(self) -> int:
        """With the cursor on '(', return the index of its matching ')'.

        Note: consumes the opening parenthesis (cursor moves past it).
        """
        assert self.peek() == '('
        self.skip()
        ptr = self.pointer
        st = 1  # nesting depth
        while (ptr < len(self.data)):
            if self.data[ptr] == '(':
                st += 1
            elif self.data[ptr] == ')':
                st -= 1
            if st == 0:
                break
            ptr += 1
        return ptr
    def skip_ws(self) -> str:
        """Consume and return a run of whitespace at the cursor."""
        begin_of_ws = self.pointer
        while (not self.is_eof() and self.peek().isspace()):
            self.skip()
        return self.data[begin_of_ws:self.pointer]
    def next_word(self) -> str:
        """Consume and return the next alphabetic word."""
        self.skip_ws()
        begin_of_word = self.pointer
        while (not self.is_eof() and self.peek().isalpha()):
            self.skip()
        return self.data[begin_of_word:self.pointer]
    def parse_comments(self) -> None:
        """Main loop: emit text up to each '@', expand '@lisp (...)' forms
        in place, and pass other '@'-words through untouched."""
        begin_ptr = self.pointer
        while self.find_next_token('@'):
            self.eprint(self.data[begin_ptr:self.pointer])
            # '@' mid-line means the result should be wrapped as inline math
            inlined = self.data[self.pointer - 1] != '\n'
            self.skip()  # skip '@'
            if self.next_word() == "lisp":
                assert self.find_next_token('(')
                ptr = self.pointer
                parser = LispParser(
                    self.data[self.pointer:self.find_matching_parenthesis() +
                              1])
                value = parser.value(inlined=inlined)
                if value:
                    self.eprint(value)
                else:
                    # empty expansion (defvar/defun): also swallow the
                    # closing parenthesis
                    ptr += 1
                self.pointer = ptr + len(parser.code)
                begin_ptr = self.pointer
        self.eprint(self.data[begin_ptr::])
class Args:
    """Parses command-line modifiers: -f input file, -o output file, -h help.

    NOTE(review): when no ``-f`` modifier is supplied, ``self.file`` is never
    assigned and ``read()`` raises AttributeError; the opened files are also
    never explicitly closed.  Confirm whether a stdin fallback was intended.
    """
    def __init__(self, argv):
        if (len(argv) < 2):
            print("Error: No arguments!", file=sys.stderr)
            self.help()
        self.outfile = sys.stdout  # default output; overridden by -o
        modifiers = [f for f in argv[1::] if f.startswith('-')]
        for mod in modifiers:
            if mod == "-f":
                input_argument_index = argv.index("-f")
                if (input_argument_index + 1 < len(argv)):
                    filepath = argv[input_argument_index + 1]
                    if os.path.exists(filepath):
                        self.path = filepath
                        self.file = open(self.path, "r")
                    else:
                        print("Error: File not found!", file=sys.stderr)
                        self.help()
                else:
                    print("Error: Missing filepath argument for input!",
                          file=sys.stderr)
                    self.help()
            elif mod == "-o":
                output_argument_index = argv.index("-o")
                if (output_argument_index + 1 < len(argv)):
                    self.outpath = argv[output_argument_index + 1]
                    self.outfile = open(self.outpath, "w")
                else:
                    print("Error: Missing filepath argument for output!",
                          file=sys.stderr)
                    self.help()
            elif mod == "-h":
                self.help()
            else:
                print("Error: Invalid argument modifier %s" % mod)
                self.help()
    def read(self):
        """Return the entire contents of the input file."""
        return self.file.read()
    def help(self):
        """Print the usage page to stderr and exit with status 1."""
        print("Lispy LaTeX: Expands lisp snippets to LaTeX", file=sys.stderr)
        print("Usage: python lispytex [FILE]", file=sys.stderr)
        print("Options:", file=sys.stderr)
        print("-f [FILE]" + 10 * " " + "Input to program", file=sys.stderr)
        print("-o [FILE]" + 10 * " " + "Redirect output to [FILE]",
              file=sys.stderr)
        print("-h" + 17 * " " + "Help/usage page", file=sys.stderr)
        exit(1)
if __name__ == '__main__':
    # Parse CLI options, then stream the input through the @lisp expander
    # into the selected output.
    args = Args(sys.argv)
    lex = Parser(data=args.read(), stream=args.outfile)
    lex.parse_comments()
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
import unittest
import os
import json
class MonitorTest(unittest.TestCase):
def test_describe_alarms(self):
cmd = """python ../../main.py monitor describe-alarms """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_alarm(self):
cmd = """python ../../main.py monitor create-alarm --client-token 'xxx' --product 'xxx' --resource-option '{"":""}' --rule-name 'xxx' --rule-option '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_alarm(self):
cmd = """python ../../main.py monitor describe-alarm --alarm-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_update_alarm(self):
cmd = """python ../../main.py monitor update-alarm --alarm-id 'xxx' --product 'xxx' --resource-option '{"":""}' --rule-name 'xxx' --rule-option '{"":""}'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_alarms(self):
cmd = """python ../../main.py monitor delete-alarms --alarm-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_alarm_contacts(self):
cmd = """python ../../main.py monitor describe-alarm-contacts --alarm-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_metrics_for_alarm(self):
cmd = """python ../../main.py monitor describe-metrics-for-alarm """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_products_for_alarm(self):
cmd = """python ../../main.py monitor describe-products-for-alarm """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_enable_alarms(self):
cmd = """python ../../main.py monitor enable-alarms """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_alarm_history(self):
cmd = """python ../../main.py monitor describe-alarm-history """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_metrics(self):
cmd = """python ../../main.py monitor describe-metrics --service-code 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_one_data_point(self):
cmd = """python ../../main.py monitor describe-one-data-point --metric 'xxx' --service-code 'xxx' --resource-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_metric_data(self):
cmd = """python ../../main.py monitor describe-metric-data --metric 'xxx' --resource-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_services(self):
cmd = """python ../../main.py monitor describe-services """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_put_custom_metric_data(self):
cmd = """python ../../main.py monitor put-custom-metric-data """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_put_custom_metric_data(self):
cmd = """python ../../main.py monitor put-custom-metric-data """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
|
from typing import Any
from example8_4birds import *
woody = Bird()
# Exercise the alert helpers from example8_4birds with a plain Bird.
# Presumably a duck-typing vs. static-typing demo (alert_duck/alert_bird
# look type-specific) — confirm against example8_4birds' definitions.
alert(woody)
alert_duck(woody)
alert_bird(woody)
def double(x: Any) -> Any:
    """Return *x* doubled via the ``*`` operator (numbers scale, sequences repeat)."""
    doubled = x * 2
    return doubled
import unittest
import nzmath.ring as ring
import nzmath.rational as rational
import nzmath.finitefield as finitefield
import nzmath.poly.termorder as termorder
import nzmath.poly.univar as univar
import nzmath.poly.ring as poly_ring
import nzmath.poly.uniutil as uniutil
class DivisionProviderTest (unittest.TestCase):
    """
    use DivisionProvider
    """
    def setUp(self):
        """
        Define a polynomial class mixing in OrderProvider/DivisionProvider
        and build fixtures over floats (f, g) and rationals (h, i).
        """
        class BasicPolynomialWithDivision (univar.BasicPolynomial,
                                           uniutil.OrderProvider,
                                           uniutil.DivisionProvider):
            def __init__(self, coefficients, **kwds):
                univar.BasicPolynomial.__init__(self, coefficients, **kwds)
                uniutil.OrderProvider.__init__(self, termorder.ascending_order)
                uniutil.DivisionProvider.__init__(self)
        # f = x**3 + 2x + 1 and g = x + 1 with float coefficients
        self.f = BasicPolynomialWithDivision([(0, 1.0), (3, 1.0), (1, 2.0)])
        self.g = BasicPolynomialWithDivision([(0, 1.0), (1, 1.0)])
        Q = rational.Rational
        # the same polynomials with exact rational coefficients
        self.h = BasicPolynomialWithDivision([(0, Q(1)), (3, Q(1)), (1, Q(2))])
        self.i = BasicPolynomialWithDivision([(0, Q(1)), (1, Q(1))])
    def testDivision(self):
        """
        divisions
        """
        # f/g is not exact, so true division yields a RationalFunction
        self.assertEqual("RationalFunction", (self.f / self.g).__class__.__name__)
        # and the remainder is non-zero (truthy)
        self.assertTrue(self.f % self.g)
    def testGcd(self):
        # h and i share no common factor, so the gcd is the constant 1
        one = univar.BasicPolynomial({0: rational.Rational(1)})
        self.assertEqual(one, self.h.gcd(self.i))
class ContentProviderTest (unittest.TestCase):
    """
    use ContentProvider
    """
    def setUp(self):
        """
        Define a polynomial class mixing in ContentProvider and build two
        rational-coefficient fixtures.
        """
        class PolynomialWithContent (univar.SortedPolynomial,
                                     uniutil.OrderProvider,
                                     uniutil.DivisionProvider,
                                     uniutil.ContentProvider):
            def __init__(self, coefficients, **kwds):
                univar.SortedPolynomial.__init__(self, coefficients, **kwds)
                uniutil.OrderProvider.__init__(self, termorder.ascending_order)
                uniutil.DivisionProvider.__init__(self)
        Q = rational.Rational
        # f = 2x**3 + (1/2)x + 1 ; g = 2x**3 + (2/3)x + 8
        self.f = PolynomialWithContent([(0, Q(1)), (3, Q(2)), (1, Q(1, 2))])
        self.g = PolynomialWithContent([(0, Q(8)), (3, Q(2)), (1, Q(2, 3))])
    def testContent(self):
        # expected contents of the fixtures above
        Q = rational.Rational
        self.assertEqual(Q(1, 2), self.f.content())
        self.assertEqual(Q(2, 3), self.g.content())
    def testPrimitivePart(self):
        # primitive part == polynomial divided by its content
        Q = rational.Rational
        h = self.g.scalar_mul(Q(3, 2))
        self.assertEqual(self.f * 2, self.f.primitive_part())
        self.assertEqual(h, self.g.primitive_part())
class PrimeCharacteristicFunctionsProviderTest (unittest.TestCase):
def setUp(self):
"""
define a class using ContentProvider
"""
self.F2 = F2 = finitefield.FinitePrimeField.getInstance(2)
self.F5 = F5 = finitefield.FinitePrimeField.getInstance(5)
self.f = uniutil.FinitePrimeFieldPolynomial([(0, F2.one), (3, F2.one), (1, F2.one)], coeffring=F2)
self.g = uniutil.FinitePrimeFieldPolynomial([(0, F2.one), (2, F2.one)], coeffring=F2)
self.h = uniutil.FinitePrimeFieldPolynomial([(1, F2.one), (2, F2.one)], coeffring=F2)
self.p = uniutil.FinitePrimeFieldPolynomial([(0, F5.one), (1, F5.createElement(2)), (2, F5.createElement(3)), (5, F5.createElement(3)), (6, F5.createElement(2)), (7, F5.one)], coeffring=F5)
self.q = uniutil.FinitePrimeFieldPolynomial([(0, self.F2.one), (1, self.F2.one), (2, self.F2.one)], coeffring=F2) * self.h
self.thirty = uniutil.FinitePrimeFieldPolynomial([(0, self.F2.one), (3, self.F2.one), (30, self.F2.one)], coeffring=F2)
def testSquareFreeDecomposition(self):
g_decomp = self.g.squarefree_decomposition()
self.assertEqual(1, len(g_decomp))
self.assertEqual(2, list(g_decomp.keys())[0])
h_decomp = self.h.squarefree_decomposition()
self.assertEqual(1, len(h_decomp))
self.assertEqual(1, list(h_decomp.keys())[0])
self.assertEqual(self.h, list(h_decomp.values())[0], str(list(h_decomp.items())[0]))
self.assertTrue(self.thirty.gcd(self.thirty.differentiate()))
p_decomp = self.h.squarefree_decomposition()
self.assertEqual(1, len(p_decomp))
self.assertEqual(1, list(p_decomp.keys())[0])
self.assertEqual(self.h, list(p_decomp.values())[0], str(list(p_decomp.items())[0]))
self.assertTrue(self.thirty.gcd(self.thirty.differentiate()))
def testDistinctDegreeFactorization(self):
h_ddf = self.h.distinct_degree_factorization()
self.assertEqual(1, len(h_ddf))
q_ddf = self.q.distinct_degree_factorization()
self.assertEqual(2, len(q_ddf))
def testSplitSameDegrees(self):
h_ssd = self.h.split_same_degrees(1)
self.assertEqual(self.h.degree(), len(h_ssd))
phi7 = uniutil.FinitePrimeFieldPolynomial(enumerate([self.F2.one]*7), coeffring=self.F2)
result7 = phi7.split_same_degrees(3)
self.assertEqual(2, len(result7))
self.assertEqual(phi7, result7[0] * result7[1])
def testFactor(self):
    """factor() yields (factor, multiplicity) pairs whose product is self."""
    factors = self.p.factor()
    self.assertTrue(isinstance(factors, list))
    self.assertEqual(3, len(factors), str(factors))
    for pair in factors:
        self.assertTrue(isinstance(pair, tuple))
        self.assertEqual(2, len(pair))
        self.assertTrue(isinstance(pair[1], int))
    # Rebuild p from its factorization and compare with the original.
    coeff_ring = self.p.getCoefficientRing()
    unit = ring.getRing(next(self.p.itercoefficients())).one
    rebuilt = self.p.__class__([(0, unit)], coeffring=coeff_ring)
    for base, multiplicity in factors:
        rebuilt = rebuilt * base ** multiplicity
    self.assertEqual(self.p, rebuilt)
    # F2 case
    factors_of_g = self.g.factor()
    self.assertEqual(1, len(factors_of_g), factors_of_g)
    self.assertEqual(2, factors_of_g[0][1])
    factors_of_h = self.h.factor()
    self.assertEqual(2, len(factors_of_h), factors_of_h)
def testRamify(self):
    """A totally ramified cubic over F3 factors as a single cube.

    x^3 + 1 == (x + 1)^3 over F3, so factor() must return exactly one
    (factor, 3) pair.
    """
    # Fixed: was the redundant double assignment `F3 = F3 = ...`.
    F3 = finitefield.FinitePrimeField.getInstance(3)
    # The x^2 coefficient is given explicitly as zero.
    rami = uniutil.FinitePrimeFieldPolynomial([(0, F3.one), (2, F3.zero), (3, F3.one)], coeffring=F3)
    r_factor = rami.factor()
    self.assertEqual(1, len(r_factor), r_factor)
    r_factor = r_factor[0]
    self.assertEqual(3, r_factor[1])
    self.assertEqual(rami, r_factor[0] ** 3)
def testIsIrreducible(self):
    """isirreducible() distinguishes irreducible from composite polynomials."""
    self.assertTrue(self.f.isirreducible())
    self.assertFalse(self.g.isirreducible())
    self.assertFalse(self.h.isirreducible())
    # Any degree-1 polynomial is irreducible, e.g. 2*x over F5.
    linear = uniutil.FinitePrimeFieldPolynomial({1: 2}, coeffring=self.F5)
    self.assertTrue(linear.isirreducible())
class SubresultantGcdProviderTest (unittest.TestCase):
    """Tests for gcd/resultant computations via the subresultant algorithm."""
    def setUp(self):
        """Prepare x over Q, and u, v in a polynomial ring over Q[x]."""
        rat_one = rational.Rational(1)
        Q = rational.theRationalField
        self.up1 = uniutil.polynomial([(1, rat_one)], coeffring=Q)
        tower = self.up1.getRing()
        self.u = uniutil.polynomial([(0, self.up1), (3, self.up1)], tower)
        self.v = uniutil.polynomial([(0, -self.up1)], tower)
    def testSubResultantGcd(self):
        """subresultant_gcd agrees with gcd on these spot cases."""
        self.assertEqual(self.up1, self.up1.gcd(self.up1 * self.up1))
        self.assertEqual(-self.v, self.u.subresultant_gcd(self.v))
        # Over Z: multiplying g by x (gx == x * g) leaves the gcd with f
        # unchanged here.
        Z = rational.theIntegerRing
        f = uniutil.polynomial([(0, 1), (2, -1), (4, 1)], Z)
        gx = uniutil.polynomial([(1, -2), (3, 4)], Z)
        g = uniutil.polynomial([(0, -2), (2, 4)], Z)
        self.assertEqual(f.subresultant_gcd(gx), f.subresultant_gcd(g))
    def testResultant(self):
        """Resultant is zero for operands sharing a factor; spot values."""
        Z = rational.theIntegerRing
        I = rational.Integer
        f = uniutil.polynomial(enumerate(map(I, list(range(5)))), coeffring=Z)
        g = uniutil.polynomial(enumerate(map(I, list(range(7, 10)))), coeffring=Z)
        self.assertEqual(0, f.resultant(f * g))
        cubic = uniutil.polynomial(enumerate(map(I, [-2, 0, 0, 1])), coeffring=Z)
        ident = uniutil.polynomial(enumerate([Z.zero, Z.one]), coeffring=Z)
        self.assertEqual(2, cubic.resultant(ident))
        t = uniutil.polynomial(enumerate(map(I, list(range(7, 0, -1)))), coeffring=Z)
        self.assertEqual(2**16 * 7**4, t.resultant(t.differentiate()))
class IntegerPolynomialTest (unittest.TestCase):
    """Tests for polynomial arithmetic over the integer ring Z."""
    def setUp(self):
        # Shared coefficient ring for all tests.
        self.Z = rational.theIntegerRing
    def testExactDivision(self):
        """9x divided exactly by 3x is the constant 3."""
        # sf bug #1922158
        f = uniutil.OneVariableDensePolynomial([0, 9], 'x')
        g = uniutil.OneVariableDensePolynomial([0, 3], 'x')
        q = uniutil.polynomial([(0, 3)], self.Z)
        self.assertEqual(q, f.exact_division(g))
    def testResultant(self):
        """Spot-check the resultant of two small integer polynomials."""
        Z = self.Z
        f = uniutil.polynomial(enumerate(range(1, 6)), Z)
        g = uniutil.polynomial(enumerate(range(7, 10)), Z)
        self.assertEqual(29661, f.resultant(g))
    def testDiscriminant(self):
        """Discriminant of a*x^2 + b*x + c equals b^2 - 4ac."""
        Z = self.Z
        a, b, c = -2, -1, 1
        q1 = uniutil.polynomial(enumerate([c, b, a]), Z)
        d = b ** 2 - 4 * a * c
        self.assertEqual(d, q1.discriminant())
    def testAddition(self):
        """Sum of polynomials keeps the IntegerPolynomial type; int + poly works."""
        Z = self.Z
        f = uniutil.polynomial(enumerate(range(1, 6)), Z)
        g = uniutil.polynomial(enumerate(range(7, 10)), Z)
        h = uniutil.polynomial(enumerate([8, 10, 12, 4, 5]), Z)
        self.assertEqual(h, f + g)
        self.assertTrue(isinstance(f + g, uniutil.IntegerPolynomial))
        # sf bug # 1937925
        f2 = uniutil.polynomial(enumerate([2, 2, 3, 4, 5]), Z)
        self.assertEqual(f2, f + 1)
        self.assertEqual(f2, 1 + f)
    def testSubtraction(self):
        """Subtracting an int works on both sides of the operator."""
        Z = self.Z
        f = uniutil.polynomial(enumerate(range(1, 6)), Z)
        # sf bug # 1937925
        f2 = uniutil.polynomial([(1, 2), (2, 3), (3, 4), (4, 5)], Z)
        self.assertEqual(f2, f - 1)
        self.assertEqual(-f2, 1 - f)
    def testPseudoDivisions(self):
        """pseudo_divmod and monic_divmod agree for a monic divisor."""
        Z = self.Z
        divisor = uniutil.polynomial({0:1, 1:3}, Z)
        monic_divisor = uniutil.polynomial({0:1, 1:1}, Z)
        dividend = uniutil.polynomial({0:1, 2:1}, Z)
        quotient, remainder = dividend.pseudo_divmod(monic_divisor)
        self.assertEqual(uniutil.polynomial({0:-1, 1:1}, Z), quotient)
        self.assertEqual(uniutil.polynomial({0:2}, Z), remainder)
        quotient, remainder = dividend.monic_divmod(monic_divisor)
        self.assertEqual(uniutil.polynomial({0:-1, 1:1}, Z), quotient)
        self.assertEqual(uniutil.polynomial({0:2}, Z), remainder)
        # Non-monic divisor: the expected values reflect the pseudo-division
        # scaling, 9*(x^2 + 1) == (3x - 1)*(3x + 1) + 10.
        quotient, remainder = dividend.pseudo_divmod(divisor)
        self.assertEqual(uniutil.polynomial({0:-1, 1:3}, Z), quotient)
        self.assertEqual(uniutil.polynomial({0:10}, Z), remainder)
        # stop in the middle of degree descendence
        divident = uniutil.polynomial({0:1, 2:-1, 4:1}, Z)
        divisor = uniutil.polynomial({1:-1, 3:2}, Z)
        quotient, remainder = divident.pseudo_divmod(divisor)
        self.assertEqual(uniutil.polynomial({1:2}, Z), quotient)
        self.assertEqual(uniutil.polynomial({0:4, 2:-2}, Z), remainder)
    def testIsmonic(self):
        """ismonic() is true exactly when the leading coefficient is 1."""
        Z = self.Z
        nonmonic = uniutil.polynomial({0:1, 1:3}, Z)
        monic = uniutil.polynomial({0:1, 1:1}, Z)
        self.assertFalse(nonmonic.ismonic())
        self.assertTrue(monic.ismonic())
class FinitePrimeFieldPolynomialTest (unittest.TestCase):
    """Tests for polynomials over finite prime fields F_p."""
    def testRepr(self):
        """repr() starts with the coefficient-field description ('Finite...')."""
        f = uniutil.FinitePrimeFieldPolynomial([(0, 2), (8, 1)], coeffring=finitefield.FinitePrimeField.getInstance(311))
        self.assertEqual(0, repr(f).index("Finite"))
    def testMod(self):
        """f % f.differentiate() is non-zero (truthy) for this F2 polynomial."""
        f = uniutil.FinitePrimeFieldPolynomial([(0, 1), (3, 1), (30, 1)], coeffring=finitefield.FinitePrimeField.getInstance(2))
        df = f.differentiate()
        self.assertTrue(f % df)
    def testDiscriminant(self):
        """Discriminant of a quadratic over F7 is b^2 - 4ac in the field."""
        F7 = finitefield.FinitePrimeField.getInstance(7)
        a, b, c = 2, 5, 1
        q1 = uniutil.polynomial(enumerate([c, b, a]), coeffring=F7)
        d = F7.createElement(b ** 2 - 4 * a * c)
        self.assertEqual(d, q1.discriminant())
    def testModPow(self):
        """m.mod_pow(q, k) computes q**k % m."""
        F17 = finitefield.FinitePrimeField.getInstance(17)
        a, b, c = 2, 5, 1
        q1 = uniutil.FinitePrimeFieldPolynomial(enumerate([c, b, a]), coeffring=F17)
        m = uniutil.FinitePrimeFieldPolynomial(enumerate([c, a]), coeffring=F17)
        self.assertEqual(q1 ** 3 % m, m.mod_pow(q1, 3))
        self.assertEqual(q1 ** 70 % m, m.mod_pow(q1, 70))
    def testEmptyTerm(self):
        """A missing coefficient reads as the field's zero object, not int 0."""
        F17 = finitefield.FinitePrimeField.getInstance(17)
        q = uniutil.FinitePrimeFieldPolynomial({1:F17.one}, coeffring=F17)
        self.assertTrue(F17.zero is q[0])
        self.assertFalse(0 is q[0])
class InjectVariableTest (unittest.TestCase):
    """Tests for uniutil.inject_variable, which grafts a variable name
    (via VariableProvider) onto an existing polynomial instance."""
    def testInject(self):
        """Injection adds VariableProvider to the instance's class; the
        variable name itself does not survive arithmetic."""
        f = univar.SortedPolynomial([(0, 1), (1, 3), (2, 2)])
        X = "X"
        self.assertTrue(uniutil.VariableProvider not in f.__class__.__bases__)
        uniutil.inject_variable(f, X)
        self.assertEqual(X, f.getVariable())
        self.assertEqual([X], f.getVariableList())
        self.assertTrue(uniutil.VariableProvider in f.__class__.__bases__)
        self.assertEqual("VarSortedPolynomial", f.__class__.__name__)
        f += f
        # The sum keeps the injected class ...
        self.assertEqual("VarSortedPolynomial", f.__class__.__name__)
        # varname lost: ... but not the per-instance variable name, so the
        # method exists yet raises when called.
        self.assertTrue(hasattr(f, "getVariable"))
        self.assertRaises(AttributeError, f.getVariable)
class FieldPolynomialTest (unittest.TestCase):
    """Tests for polynomials with coefficients in a field."""
    def testDivision(self):
        """x // x is the constant 1 and x % x is zero over Q."""
        Q = rational.theRationalField
        numerator = uniutil.FieldPolynomial([(1, 1)], Q)
        denominator = uniutil.FieldPolynomial([(1, 1)], Q)
        one = uniutil.FieldPolynomial([(0, 1)], Q)
        self.assertEqual(one, numerator // denominator)
        self.assertEqual(numerator.getRing().zero, numerator % denominator)
    def testDiscriminant(self):
        """Discriminant of a*x^2 + b*x + c equals b^2 - 4ac over Q."""
        Q = rational.theRationalField
        rat = rational.Rational
        a, b, c = rat(2, 7), rat(5, 14), rat(1, 7)
        quadratic = uniutil.polynomial(enumerate([c, b, a]), Q)
        self.assertEqual(b ** 2 - 4 * a * c, quadratic.discriminant())
def suite(suffix="Test"):
    """Collect every TestCase class in this module whose name ends in *suffix*.

    Returns a unittest.TestSuite holding all "test*" methods of each
    matching class.  Uses TestLoader.loadTestsFromTestCase because
    unittest.makeSuite was deprecated and removed in Python 3.13; "test"
    is the loader's default method prefix, matching the old call.
    """
    result = unittest.TestSuite()
    loader = unittest.defaultTestLoader
    # list() snapshots globals so the dict cannot change during iteration.
    for name, obj in list(globals().items()):
        if name.endswith(suffix):
            result.addTest(loader.loadTestsFromTestCase(obj))
    return result
if __name__ == '__main__':
    # Execute the module's full test suite when run as a script.
    unittest.TextTestRunner().run(suite())
|
# Scan test.txt line by line, echoing each line.  Lines whose text starts
# with '4' are parsed as integers and collected while they continue the
# expected 1, 2, 3, ... sequence; the first one that breaks the sequence
# prints what was collected and stops the scan.
filename = 'test.txt'
row = 1        # next value expected in the sequence
filelist = []  # values collected so far, in order
# Fixed: previously the hard-coded 'test.txt' was opened instead of the
# `filename` variable, and the file handle was never closed.
with open(filename) as filehandle:
    for item in filehandle:
        print(item)
        if item.startswith('4'):
            value = int(item.strip())
            if value == row:
                filelist.append(value)
                row = row + 1
            else:
                print(filelist)
                break
import copy
import pathlib
import random
import string
import typing
import uuid
import ditef_producer_shared.genetic_individual
import ditef_router.api_client
class Individual(ditef_producer_shared.genetic_individual.AbstractIndividual):
    """Genetic individual whose genome is a list of single characters that
    evolves towards the configured target string."""

    def individual_type(self) -> str:
        """Identifier of this individual type within the framework."""
        return 'string'

    @staticmethod
    def configuration_values() -> dict:
        """Default configuration options for string individuals."""
        return {
            'type': 'default',
            # string to strive for
            'target_string': "This is an example string.",
            # Pool for random string operations to choose characters from
            'character_pool': string.ascii_letters + " .",
            # Maximum amount of mutations
            'maximum_amount_of_mutations': 10,
        }

    @staticmethod
    def random(task_api_client: ditef_router.api_client.ApiClient, configuration: dict, state_path: pathlib.Path) -> 'Individual':
        '''Generates a new random individual'''
        target_length = len(configuration['target_string'])
        individual_id = str(uuid.uuid4())
        Individual.individuals[individual_id] = Individual(
            task_api_client,
            configuration,
            individual_id,
            # Genome length is drawn between half and double the target
            # length so the initial population explores different sizes.
            [
                random.choice(configuration['character_pool'])
                for _ in range(random.randrange(target_length // 2, target_length * 2))
            ],
            'random',
            state_path/'individuals'/f'{individual_id}.json',
        )
        Individual.individuals[individual_id].write_to_file()
        return Individual.individuals[individual_id]

    @staticmethod
    def clone(parent: 'Individual', task_api_client: ditef_router.api_client.ApiClient, configuration: dict, creation_type: str, state_path: pathlib.Path) -> 'Individual':
        '''Creates a copy of a parent individual'''
        individual_id = str(uuid.uuid4())
        Individual.individuals[individual_id] = Individual(
            task_api_client,
            configuration,
            individual_id,
            copy.deepcopy(parent.genome),
            creation_type,
            state_path/'individuals'/f'{individual_id}.json',
        )
        Individual.individuals[individual_id].genealogy_parents = [parent.id]
        Individual.individuals[individual_id].write_to_file()
        parent.add_child(individual_id)
        return Individual.individuals[individual_id]

    @staticmethod
    def cross_over_one(parent_a: 'Individual', parent_b: 'Individual', task_api_client: ditef_router.api_client.ApiClient, configuration: dict, state_path: pathlib.Path) -> 'Individual':
        '''Creates one cross-overed individual from two parent individuals.

        Each gene is picked at random from one of the parents; the child is
        as long as the shorter parent (zip stops at the shorter genome).
        '''
        individual_id = str(uuid.uuid4())
        Individual.individuals[individual_id] = Individual(
            task_api_client,
            configuration,
            individual_id,
            [
                random.choice([gene_a, gene_b])
                for gene_a, gene_b in zip(parent_a.genome, parent_b.genome)
            ],
            'cross_over_one',
            state_path/'individuals'/f'{individual_id}.json',
        )
        Individual.individuals[individual_id].genealogy_parents = [
            parent_a.id,
            parent_b.id,
        ]
        Individual.individuals[individual_id].write_to_file()
        parent_a.add_child(individual_id)
        parent_b.add_child(individual_id)
        return Individual.individuals[individual_id]

    def mutate(self):
        '''Applies a random number of random genome edits in place.'''
        for _ in range(random.randrange(self.configuration['maximum_amount_of_mutations'])):
            # Guard on a non-empty genome: previously randrange(0) raised
            # ValueError once all characters had been removed.
            if len(self.genome) > 0 and random.choice([True] * 9 + [False]):
                # change character (9 out of 10 edits, when possible)
                i = random.randrange(len(self.genome))
                self.genome[i] = random.choice(
                    self.configuration['character_pool'])
            else:
                if random.choice([True, False]) and len(self.genome) > 0:
                    # remove one character
                    i = random.randrange(len(self.genome))
                    del self.genome[i]
                else:
                    # insert new character; the +1 allows appending at the
                    # very end and fixes the empty-genome crash
                    i = random.randrange(len(self.genome) + 1)
                    self.genome.insert(i, random.choice(
                        self.configuration['character_pool']))
        self.write_to_file()
        self.update_event.notify()

    async def evaluate(self):
        '''Scores the genome against the target string via a worker task.'''
        self.evaluation_result = await self.task_api_client.run(
            'ditef_worker_genetic_individual_string',
            payload={
                'genome': self.genome,
                'target_string': self.configuration['target_string'],
            },
        )
        self.write_to_file()
        self.update_event.notify()

    def fitness(self) -> typing.Optional[float]:
        '''Fitness of the last evaluation, or None if not evaluated yet.

        Rewards correct characters; penalises length mismatch twice as much.
        '''
        try:
            return self.evaluation_result['correct_characters'] - abs(self.evaluation_result['length_difference']) * 2
        except TypeError:
            # evaluation_result is still None before the first evaluate().
            return None
|
# Copyright 2017 Bernhard Walter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from ...utils.color import rgb2str
import pandas as pd
class D3Colors(object):
    """
    Source: http://d3js.org

    Categorical colour palettes from d3.js, stored as (r, g, b) tuples.
    """
    categorialScheme = {
        "c10": [( 31,119,180), (255,127, 14), ( 44,160, 44), (214, 39, 40), (148,103,189), (140, 86, 75), (227,119,194), (127,127,127), (188,189, 34), ( 23,190,207)],  # noqa E501,E231,E201
        "c20": [( 31,119,180), (174,199,232), (255,127, 14), (255,187,120), ( 44,160, 44), (152,223,138), (214, 39, 40), (255,152,150), (148,103,189), (197,176,213),  # noqa E501,E231,E201
                (140, 86, 75), (196,156,148), (227,119,194), (247,182,210), (127,127,127), (199,199,199), (188,189, 34), (219,219,141), ( 23,190,207), (158,218,229)],  # noqa E501,E231,E201
        "c20b": [( 57, 59,121), ( 82, 84,163), (107,110,207), (156,158,222), ( 99,121, 57), (140,162, 82), (181,207,107), (206,219,156), (140,109, 49), (189,158, 57),  # noqa E501,E231,E201
                 (231,186, 82), (231,203,148), (132, 60, 57), (173, 73, 74), (214, 97,107), (231,150,156), (123, 65,115), (165, 81,148), (206,109,189), (222,158,214)],  # noqa E501,E231,E201
        "c20c": [( 49,130,189), (107,174,214), (158,202,225), (198,219,239), (230, 85, 13), (253,141, 60), (253,174,107), (253,208,162), ( 49,163, 84), (116,196,118),  # noqa E501,E231,E201
                 (161,217,155), (199,233,192), (117,107,177), (158,154,200), (188,189,220), (218,218,235), ( 99, 99, 99), (150,150,150), (189,189,189), (217,217,217)],  # noqa E501,E231,E201
    }
    #
    # Get discrete results
    #
    @classmethod
    def _get(cls, scheme, color, size):
        """Return `size` colours from `scheme[color]`.

        size None -> the full palette; size larger than the palette ->
        the palette repeated ("stacked") and truncated to `size`;
        unknown palette name -> empty list.
        """
        colors = scheme.get(color)
        if colors is None:
            return []
        elif size is None:
            return colors
        elif size <= len(colors):
            return colors[:size]
        else:
            print("Warning: size too large for scheme, stacking largest color list to match size")
            factor = math.ceil(size / (len(colors)))
            return (colors * factor)[:size]
    #
    # Accessors
    #
    # NOTE(review): the parameter name `sizeOrSeries` suggests a pandas
    # Series may also be passed, but _get only handles int/None sizes --
    # confirm the intended use.
    @classmethod
    def c10(cls, sizeOrSeries, asString=False):
        """10-colour palette; returns "r,g,b" strings when asString is set."""
        result = cls._get(cls.categorialScheme, "c10", size=sizeOrSeries)
        return rgb2str(result) if asString else result
    @classmethod
    def c20(cls, sizeOrSeries, asString=False):
        """20-colour palette; returns "r,g,b" strings when asString is set."""
        result = cls._get(cls.categorialScheme, "c20", size=sizeOrSeries)
        return rgb2str(result) if asString else result
    @classmethod
    def c20b(cls, sizeOrSeries, asString=False):
        """20-colour palette (variant b)."""
        result = cls._get(cls.categorialScheme, "c20b", size=sizeOrSeries)
        return rgb2str(result) if asString else result
    @classmethod
    def c20c(cls, sizeOrSeries, asString=False):
        """20-colour palette (variant c)."""
        result = cls._get(cls.categorialScheme, "c20c", size=sizeOrSeries)
        return rgb2str(result) if asString else result
    #
    # Info
    #
    @classmethod
    def info(cls):
        """List the available palette names."""
        return list(cls.categorialScheme.keys())
    @classmethod
    def toDF(cls):
        """Flatten all palettes into a pandas DataFrame, one row per colour."""
        typ = "d3"
        result = []
        for palette, v2 in cls.categorialScheme.items():
            p = cls.categorialScheme[palette]
            for j in range(len(p)):
                result.append({"typ": typ, "palette": palette, "size": len(p), "element": j,
                               "color": "%d,%d,%d" % p[j]})
        return pd.DataFrame(result)
#
# Quick Accessor
#
def getD3(typ, size):
    """Convenience wrapper: dispatch to the D3Colors accessor named *typ*."""
    accessor = getattr(D3Colors, typ)
    return accessor(size)
|
# Imports
from flask_login import LoginManager # Auth
from flask_sqlalchemy import SQLAlchemy # Database
from flask import Flask # App
from flask_socketio import SocketIO # Socket IO for quick-chat feature
import eventlet # Eventlet for async
eventlet.monkey_patch() # Eventlet for async
# Initialize the app
app = Flask(
    __name__,
)
# App configuration
# NOTE(review): the secret key is hard-coded in source control; move it to
# an environment variable before deploying.
app.config["SECRET_KEY"] = "5791628bb0b13ce0c676dfde280ba245"
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///site.db"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
# SocketIO initialization (eventlet was monkey-patched above)
socketio = SocketIO(app, async_mode="eventlet")
# Database setup
db = SQLAlchemy(app)
# Login stuff
# NOTE(review): LoginManager(app) normally initialises the app already,
# which would make the explicit init_app call below redundant -- confirm.
login_manager = LoginManager(app)
login_manager.init_app(app)
# Endpoint users are redirected to when a login-required view is hit.
login_manager.login_view = "login"
# Loading user model, when all else is done (keeps the circular import safe)
from app.models import User
# User loader for login manager
# User loader for login manager
@login_manager.user_loader
def load_user(user_id):
    """
    Required for login manager to work.

    Flask-Login stores the user id (a string) in the session; this callback
    converts it back into a User object looked up by primary key.
    """
    return User.query.get(int(user_id))  # Return user object
# Other imports, to avoid circular imports
from app import routes # Routes
from util import filters # Filters
|
import pytest
from evolution.population import Population
from fixtures import get_example_model, TicTacToeMaker, SmallTicTacToeMaker
from numpy.random import seed
from test_game import first_open_square_player, always_middle_square_player
def test_weights_change_in_evolution():
    """After one evolution step, every layer's weights must all differ."""
    seed(0)
    net = get_example_model(board_dims=[2,2])
    population = Population(2, model=net, game_maker=SmallTicTacToeMaker)
    weights_before = population.players[0].model.get_weights()
    population.score_and_evolve()
    weights_after = population.players[0].model.get_weights()
    for layer_num in range(len(population.layer_shapes)):
        assert (weights_before[layer_num] != weights_after[layer_num]).all()
#def test_weight_centers_move_towards_better_player():
# seed(0)
# model = get_example_model(board_dims=[2,2])
# my_pop = Population(2, model=model, game_maker=TicTacToeMaker)
# all_weights_before = [p[i].model.get_weights() for p in my_pop.players]
# weight_centers_before =
# better_player_before =
# my_pop.score_and_evolve()
# weight_centers_after =
# assert weight centers have moved along gradient
|
import logging
import json
import urllib
from google.appengine.api import urlfetch, urlfetch_errors
class PartOfSpeechTagger(object):
    """Client for a remote part-of-speech tagging service.

    NOTE(review): uses urllib.urlencode and google.appengine.api, so this
    module targets Python 2 on Google App Engine.
    """
    def __init__(self, url, field_name):
        # url: endpoint of the tagging service
        # field_name: form field under which the source text is POSTed
        self.url = url
        self.field_name = field_name
    def tag(self, source):
        """POST `source` to the tagger and return the tagged text.

        Returns an apology string on timeout.
        NOTE(review): implicitly returns None when the service answers with
        a non-200 status -- confirm callers handle that.
        """
        try:
            response = urlfetch.fetch(
                self.url,
                payload = urllib.urlencode({self.field_name: source}),
                method = urlfetch.POST,
                headers = {'Content-Type': 'application/x-www-form-urlencoded'}
            )
        except urlfetch_errors.DeadlineExceededError:
            return "Timed out, your request has. Mmmmmm. Try again later, you must."
        if response.status_code == 200:
            return json.loads(response.content)['text']
|
from sqlalchemy.exc import DBAPIError
from sqlalchemy.orm.exc import FlushError
from pyramid.response import Response
from pyramid.view import view_config
from pyramid.response import Response
from pyramid.httpexceptions import HTTPFound, HTTPNotFound, HTTPBadRequest, HTTPClientError
from pyramid.security import NO_PERMISSION_REQUIRED, remember, forget
from semantics3.error import Semantics3Error
from ..sample_data import MOCK_DATA
from semantics3.error import Semantics3Error
from . import DB_ERR_MSG
import requests
import json
from ..models import Account
from ..models import Product
from ..models import Assoc
from .default import sem3
@view_config(
    route_name='pantry',
    renderer='../templates/pantry.jinja2',
    request_method='GET',
)
def pantry_view(request):
    """
    Directs user to their pantry.

    Loads the authenticated user's account and splits its items into
    pantry and cart lists for the template.
    """
    try:
        query = request.dbsession.query(Account)
        current_account = query.filter(Account.username == request.authenticated_userid).first()
    except DBAPIError:
        # Fixed: previously returned a constructed DBAPIError exception
        # instead of an HTTP 500 Response (as the sibling views do).
        return Response(DB_ERR_MSG, content_type='text/plain', status=500)
    pantry = []
    cart = []
    # An item may appear in the pantry, the cart, or both.
    for assoc in current_account.pantry_items:
        if assoc.in_pantry:
            pantry.append(assoc.item)
        if assoc.in_cart:
            cart.append(assoc.item)
    return {'pantry': pantry, 'cart': cart}
@view_config(
    route_name='detail',
    renderer='../templates/detail.jinja2',
    request_method='GET',
)
def detail_view(request):
    """
    Directs user to a detailed view of an item.

    Responds with a client error when no UPC is in the route, and 404s
    when the UPC is not among the user's pantry items.
    """
    if 'upc' not in request.matchdict:
        return HTTPClientError()
    upc = request.matchdict['upc']
    user = request.dbsession.query(Account).filter(
        Account.username == request.authenticated_userid).first()
    matches = (assoc for assoc in user.pantry_items if assoc.item.upc == upc)
    found = next(matches, None)
    if found is None:
        raise HTTPNotFound
    return {'item': found.item}
def parse_upc_data(data):
    """Flatten the first result of a Semantics3 UPC lookup into a dict.

    Missing optional fields default to None, except 'name' which defaults
    to 'Unknown'.  Raises KeyError/IndexError when 'results' is missing or
    empty, or when the result lacks 'upc' -- callers catch those.
    """
    result = data['results'][0]
    return {
        'upc': result['upc'],  # required; let KeyError propagate
        'name': result.get('name', 'Unknown'),
        'brand': result.get('brand'),
        'description': result.get('description'),
        'category': result.get('category'),
        'image': result.get('images'),  # note: source key is plural
        'size': result.get('size'),
        'manufacturer': result.get('manufacturer'),
    }
@view_config(
    route_name='manage_item',
    renderer='../templates/manage_item.jinja2',
    request_method=('GET', 'POST'))
def manage_items_view(request):
    """Add an item to the user's pantry/cart (GET) or remove it (POST).

    GET: looks up the product by UPC, fetching it from Semantics3 and
    caching it in the products table if unknown, then links it to the
    current account with pantry/cart flags from the 'location' params.
    POST: clears the cart and/or pantry flag on an existing association.
    Both branches redirect back to the pantry on success.
    """
    if request.method == 'GET':
        try:
            upc = request.GET['upc']
        except KeyError:
            # No UPC submitted yet: render the empty form.
            return {}
        try:
            query = request.dbsession.query(Product)
            upc_data = query.filter(Product.upc == upc).one_or_none()
        except DBAPIError:
            return Response(DB_ERR_MSG, content_type='text/plain', status=500)
        acc_query = request.dbsession.query(Account)
        current_acc = acc_query.filter(Account.username == request.authenticated_userid).first()
        if upc_data is None:
            # Product not cached locally: ask the Semantics3 API for it.
            try:
                sem3.products_field("upc", upc)
                query_data = sem3.get_products()
                product = parse_upc_data(query_data)
                upc_data = Product(**product)
            except (KeyError, IndexError, Semantics3Error):
                # import pdb; pdb.set_trace()
                return {'err': 'UPC not in database. Enter your own fields below, submit, we will add it for future use.'}
            try:
                request.dbsession.add(upc_data)
            except DBAPIError:
                return Response(DB_ERR_MSG, content_type='text/plain', status=500)
        # WebOb multidicts have getall(); fall back for plain dicts (tests).
        location = request.GET.getall('location') if hasattr(request.GET, 'getall') else request.GET['location']
        in_pantry = in_cart = False
        if 'both' in location:
            in_pantry = True
            in_cart = True
        if 'pantry' in location:
            in_pantry = True
        if 'cart' in location:
            in_cart = True
        # Reuse an existing association for this UPC; the for/else creates
        # a new one only when the loop finishes without a break.
        for assoc in current_acc.pantry_items:
            if upc == assoc.item.upc:
                break
        else:
            assoc = Assoc()
            assoc.item = upc_data
            current_acc.pantry_items.append(assoc)
        assoc.in_cart = in_cart
        assoc.in_pantry = in_pantry
        request.dbsession.flush()
        return HTTPFound(location=request.route_url('pantry'))
    if request.method == 'POST':
        try:
            upc = request.POST['upc']
        except KeyError:
            print('KeyError')
            return {}
        acc_query = request.dbsession.query(Account)
        current_acc = acc_query.filter(Account.username == request.authenticated_userid).first()
        # NOTE(review): unlike the GET branch, this loop has no `else`;
        # if the UPC is not found the LAST association is modified (or a
        # NameError is raised for an empty pantry) -- confirm and guard.
        for assoc in current_acc.pantry_items:
            if upc == assoc.item.upc:
                break
        if 'cart' in request.POST:
            assoc.in_cart = False
        if 'pantry' in request.POST:
            assoc.in_pantry = False
        request.dbsession.flush()
        return HTTPFound(location=request.route_url('pantry'))
|
class BaseError(Exception):
    """Base class for all exceptions raised by this package."""
class MockServerError(BaseError):
    """Raised for general mock-server failures."""
    pass
class HandlerNotFoundError(MockServerError):
    """Raised when a requested handler cannot be found."""
    pass
|
#!/usr/bin/python
from opencv.cv import *
from opencv.highgui import *
from random import randint
MAX_CLUSTERS = 5  # upper bound on the random cluster count per iteration

if __name__ == "__main__":
    # One drawing colour per possible cluster index (MAX_CLUSTERS entries).
    color_tab = [
        CV_RGB(255,0,0),
        CV_RGB(0,255,0),
        CV_RGB(100,100,255),
        CV_RGB(255,0,255),
        CV_RGB(255,255,0)]
    img = cvCreateImage( cvSize( 500, 500 ), 8, 3 )
    rng = cvRNG(-1)
    cvNamedWindow( "clusters", 1 )
    while True:
        cluster_count = randint(2, MAX_CLUSTERS)
        sample_count = randint(1, 1000)
        points = cvCreateMat( sample_count, 1, CV_32FC2 )
        clusters = cvCreateMat( sample_count, 1, CV_32SC1 )
        # generate random sample from multigaussian distribution
        for k in range(cluster_count):
            center = CvPoint()
            center.x = cvRandInt(rng)%img.width
            center.y = cvRandInt(rng)%img.height
            # Rows [first, last) of `points` receive cluster k's samples.
            first = k*sample_count/cluster_count
            last = sample_count
            # NOTE(review): k ranges over range(cluster_count), so the test
            # below is always true and `last` is always overwritten; for the
            # final chunk the formula already yields sample_count, making
            # the guard redundant. Also `/` is float division on Python 3 --
            # this script uses the legacy opencv.cv bindings (Python 2 era).
            if k != cluster_count:
                last = (k+1)*sample_count/cluster_count
            point_chunk = cvGetRows(points, first, last)
            cvRandArr( rng, point_chunk, CV_RAND_NORMAL,
                       cvScalar(center.x,center.y,0,0),
                       cvScalar(img.width*0.1,img.height*0.1,0,0))
        # shuffle samples
        cvRandShuffle( points, rng )
        cvKMeans2( points, cluster_count, clusters,
                   cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0 ))
        cvZero( img )
        for i in range(sample_count):
            cluster_idx = clusters[i]
            # a multi-channel matrix access returns a scalar of dimension
            # 4, which is not considered a CvPoint; build a tuple from the
            # first two elements instead
            pt = (cvRound(points[i][0]), cvRound(points[i][1]))
            cvCircle( img, pt, 2, color_tab[cluster_idx], CV_FILLED, CV_AA, 0 )
        cvShowImage( "clusters", img )
        key = cvWaitKey(0)
        if( key == 27 or key == 'q' or key == 'Q' ): # 'ESC'
            break
    cvDestroyWindow( "clusters" )
|
# script to convert files into a dataset
import os
import argparse
import glob
import json
# Input directory containing the .wav files (and annotation .json files).
inputDir = r"/Users/prakrutigogia/Documents/Microsoft/AlwaysBeLearning/MSHack/Round7"
dataset = "podcast_round7"  # dataset identifier written to every row
location = "orcasound_lab"  # recording location written to every row
# NOTE(review): all_jsons is currently unused -- the annotation-merging
# code below is commented out.
all_jsons = glob.glob(inputDir + "/*.json")
all_wavs = glob.glob(inputDir + "/round7/*.wav")
# Rebuild annotations.tsv from scratch on every run.
tsv_file = os.path.join(inputDir, "annotations.tsv")
if os.path.exists(tsv_file):
    os.remove(tsv_file)
with open(tsv_file, "a+") as t:
    header = "dataset\twav_filename\tstart_time_s\tduration_s\tlocation\tdate\tpst_or_master_tape_identifier\n"
    t.write(header)
    for wav in all_wavs:
        wav_filename = os.path.basename(wav)
        # Filenames look like <prefix>_<YYYY>_<MM>_<DD>_<HH>_<MM>_<SS>...:
        # tokens 1-3 form the date and tokens 4-6 the PST timestamp.
        date_tokens = os.path.splitext(wav_filename)[0].split("_")
        date = date_tokens[1] + "-" + date_tokens[2] + "-" + date_tokens[3]
        pst = date_tokens[4] + ":" + date_tokens[5] + ":" + date_tokens[6]
        # start_time/duration are 0.0 placeholders until annotations merge.
        line = dataset + "\t" + wav_filename + "\t" + "0.0" + "\t" + "0.0" + "\t" + location + "\t" + date + "\t" + pst + "\n"
        t.write(line)
# for annotation_json in all_jsons:
# with open(annotation_json, "r") as f:
# data = json.load(f)
# wav_filename = os.path.basename(data["uri"])
# date_tokens = data["absolute_time"].split("_")
# pst = date_tokens[3] + ":" + date_tokens[4] + ":" + date_tokens[5]
# if len(data["annotations"]) == 0:
# line = dataset + "\t" + wav_filename + "\t" + "0.0" + "\t" + "0.0" + "\t" + location + "\t" + date + "\t" + pst + "\n"
# t.write(line)
# else:
# for annotation in data["annotations"]:
# line = dataset + "\t" + wav_filename + "\t" + str(annotation["start_s"]) + "\t" + str(annotation["duration_s"]) + "\t" + location + "\t" + date + "\t" + pst + "\n"
# t.write(line)
|
import unittest
from gentsp import TSPSolver
class TestDamages(unittest.TestCase):
    """Unit tests for the genetic-algorithm TSP solver.

    NOTE(review): the class name 'TestDamages' does not describe the
    content (these are TSPSolver tests) -- possibly copied from another
    module; consider renaming.
    """
    def test_creation(self):
        """A solver accepts a plain square distance matrix."""
        distances = [
            [1, 3],
            [3, 5]
        ]
        TSPSolver(distances)
    def test_creation_with_parameters(self):
        """All tuning keyword arguments are accepted together."""
        distances = [
            [1, 3],
            [3, 5]
        ]
        TSPSolver(distances, population_size=1000, mutation_rate=0.5, new_individuals=10, selection='best', breeder_count=100)
    def test_creation_with_wrong_parameters(self):
        """Invalid matrices and out-of-range parameters raise ValueError."""
        distances = [
            [1, 3],
            [3, 5]
        ]
        with self.assertRaises(ValueError):
            TSPSolver([[1, 3], [1]])  # ragged (non-square) matrix
        with self.assertRaises(ValueError):
            TSPSolver(distances, population_size=-1)
        with self.assertRaises(ValueError):
            TSPSolver(distances, mutation_rate=1.5)
        with self.assertRaises(ValueError):
            TSPSolver(distances, mutation_rate=-0.2)
        with self.assertRaises(ValueError):
            TSPSolver(distances, population_size=100, new_individuals=101)
        with self.assertRaises(ValueError):
            TSPSolver(distances, new_individuals=-1)
        with self.assertRaises(ValueError):
            TSPSolver(distances, elitism=-1)
        with self.assertRaises(ValueError):
            TSPSolver(distances, population_size=100, elitism=101)
        with self.assertRaises(ValueError):
            TSPSolver(distances, selection='string')  # unknown strategy
        with self.assertRaises(ValueError):
            TSPSolver(distances, breeder_count=-1)
        with self.assertRaises(ValueError):
            TSPSolver(distances, population_size=100, breeder_count=101)
    def test_fitness_calculation(self):
        """Fitness equals the reciprocal of the (open) route length."""
        distances = [
            [0, 1, 2],
            [1, 0, 3],
            [2, 3, 0]
        ]
        solver = TSPSolver(distances)
        fitness1 = solver._compute_fitness([0, 2, 1])  # legs 0-2 (2) + 2-1 (3)
        fitness2 = solver._compute_fitness([2, 1, 0])  # legs 2-1 (3) + 1-0 (1)
        self.assertAlmostEqual(fitness1, 1 / (3 + 2))
        self.assertAlmostEqual(fitness2, 1 / (3 + 1))
    def test_fitness_sorting(self):
        """_sort_by_fitness orders individuals best-first and sums fitness."""
        distances = [
            [0, 1, 2, 3],
            [1, 0, 4, 5],
            [2, 4, 0, 6],
            [3, 5, 6, 0]
        ]
        solver = TSPSolver(distances)
        individuals = [
            [0, 1, 2, 3],
            [0, 3, 1, 2],
            [2, 3, 0, 1]
        ]
        correct_ordering = [[2, 3, 0, 1], [0, 1, 2, 3], [0, 3, 1, 2]]
        correct_fitnesses = [1 / (6 + 3 + 1), 1 / (1 + 4 + 6), 1 / (3 + 5 + 4)]
        sorted_individuals, total_fitness = solver._sort_by_fitness(individuals)
        self.assertAlmostEqual(total_fitness, 1 / (6 + 3 + 1) + 1 / (1 + 4 + 6) + 1 / (3 + 5 + 4))
        for k, (fitness, individual) in enumerate(sorted_individuals):
            self.assertAlmostEqual(fitness, correct_fitnesses[k])
            self.assertListEqual(individual, correct_ordering[k])
    def test_create_initial_population(self):
        """The initial population has the configured size and each member is
        a permutation of all node indices."""
        NODE_COUNT = 4
        distances = [[0 for _ in range(NODE_COUNT)] for _ in range(NODE_COUNT)]
        POPULATION_SIZE = 50
        solver = TSPSolver(distances, population_size=POPULATION_SIZE)
        self.assertEqual(len(solver.population), POPULATION_SIZE)
        for individual in solver.population:
            self.assertEqual(len(individual), NODE_COUNT)
            self.assertEqual(list(sorted(individual)), list(range(NODE_COUNT)))
    def test_select_mating_pool_method_best(self):
        """'best' selection takes the top breeder_count individuals in order."""
        BREEDER_COUNT = 2
        NODE_COUNT = 3
        distances = [[0 for _ in range(NODE_COUNT)] for _ in range(NODE_COUNT)]
        fitnesses = [
            (0.8, [0, 1, 2]),
            (0.5, [1, 2, 0]),
            (0.2, [2, 0, 1]),
            (0.1, [1, 0, 2])
        ]
        TOTAL_FITNESS = sum(couple[0] for couple in fitnesses)
        solver = TSPSolver(distances, population_size=4, selection='best', breeder_count=BREEDER_COUNT)
        mating_pool = solver._get_mating_pool(fitnesses, TOTAL_FITNESS)
        self.assertListEqual(mating_pool, [[0, 1, 2], [1, 2, 0]])
    def test_select_mating_pool_method_weighted(self):
        """'weighted' selection returns breeder_count pairwise-distinct picks."""
        BREEDER_COUNT = 2
        NODE_COUNT = 3
        distances = [[0 for _ in range(NODE_COUNT)] for _ in range(NODE_COUNT)]
        fitnesses = [
            (0.8, [0, 1, 2]),
            (0.5, [1, 2, 0]),
            (0.2, [2, 0, 1]),
            (0.1, [1, 0, 2])
        ]
        TOTAL_FITNESS = sum(couple[0] for couple in fitnesses)
        solver = TSPSolver(distances, population_size=4, selection='weighted', breeder_count=BREEDER_COUNT)
        mating_pool = solver._get_mating_pool(fitnesses, TOTAL_FITNESS)
        self.assertEqual(len(mating_pool), BREEDER_COUNT)
        for i in range(BREEDER_COUNT):
            for j in range(BREEDER_COUNT):
                if i != j:
                    self.assertNotEqual(mating_pool[i], mating_pool[j])
    def test_breed(self):
        """A child is a permutation of all nodes; two breedings usually differ."""
        BREEDER_COUNT = 2
        NODE_COUNT = 10
        distances = [[0 for _ in range(NODE_COUNT)] for _ in range(NODE_COUNT)]
        solver = TSPSolver(distances, population_size=4, selection='weighted', breeder_count=BREEDER_COUNT)
        parent1 = list(range(NODE_COUNT))
        parent2 = list(reversed(range(NODE_COUNT)))
        child = solver._breed(parent1, parent2)
        child2 = solver._breed(parent1, parent2)
        self.assertEqual(list(sorted(set(child))), list(range(NODE_COUNT)))
        self.assertNotEqual(child, child2) # Will not always be true
    def test_mutate_100(self):
        """Mutation rate 1.0 changes the individual (mutation is in place)."""
        NODE_COUNT = 50
        distances = [[0 for _ in range(NODE_COUNT)] for _ in range(NODE_COUNT)]
        individual = list(range(NODE_COUNT))
        solver = TSPSolver(distances, mutation_rate=1.0)
        mutated = solver._mutate(individual)
        self.assertNotEqual(individual, list(range(NODE_COUNT)))
    def test_mutate_0(self):
        """Mutation rate 0.0 leaves the individual untouched."""
        NODE_COUNT = 50
        distances = [[0 for _ in range(NODE_COUNT)] for _ in range(NODE_COUNT)]
        individual = list(range(NODE_COUNT))
        solver = TSPSolver(distances, mutation_rate=0.0)
        mutated = solver._mutate(individual)
        self.assertEqual(individual, list(range(NODE_COUNT)))
    def test_get_population_stats(self):
        """Stats include best/worst/average fitness, distance and individual."""
        BREEDER_COUNT = 2
        NODE_COUNT = 3
        distances = [[0 for _ in range(NODE_COUNT)] for _ in range(NODE_COUNT)]
        fitnesses = [
            (0.8, [0, 1, 2]),
            (0.5, [1, 2, 0]),
            (0.2, [2, 0, 1]),
            (0.1, [1, 0, 2])
        ]
        TOTAL_FITNESS = sum(couple[0] for couple in fitnesses)
        solver = TSPSolver(distances, population_size=4, selection='weighted', breeder_count=BREEDER_COUNT)
        stats = solver._get_population_stats(fitnesses, TOTAL_FITNESS)
        self.assertDictEqual(
            stats,
            {
                'best_fitness': 0.8,
                'best_distance': 1 / 0.8,
                'best_individual': [0, 1, 2],
                'worst_fitness': 0.1,
                'worst_distance': 1 / 0.1,
                'worst_individual': [1, 0, 2],
                'average_fitness': TOTAL_FITNESS / 4,
                'average_distance': 4 / TOTAL_FITNESS,
                'average_individual': [1, 2, 0],
            }
        )
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import argparse
import os
import utils.spectronaut as post_sn
from utils.ms_prediction import *
from utils.spectronaut import SpectronautLibrary as SNLib
if __name__ == '__main__':
    # Split a Spectronaut library into train/test sets — either by a target
    # protein list (-t) or by a numeric ratio (-r) — and write the input
    # files for the fragment-intensity (pDeep) and retention-time (DeepRT)
    # models.
    # NOTE(review): `pdeep` and `deeprt` are not imported by name in this
    # file — presumably they come from `utils.ms_prediction import *`;
    # confirm.
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description='''''')
    # library
    parser.add_argument('-l', '--library', metavar='Library path', type=str, required=True,
                        help='Library file path')
    # output dir
    parser.add_argument('-o', '--output', metavar='Output dir', type=str, required=True,
                        help='Output directory of library used for training and test, and dataset of training and test')
    # Target list file
    parser.add_argument('-t', '--target', metavar='The target list file for library splitting', default=None,
                        help='''To split a library by target protein family members.
The input is a pure text file with targeted protein accessions in one column (No title).
When this argument is defined, -r or --ratio will not be used''')
    # ratio
    parser.add_argument('-r', '--ratio', metavar='Split ratio', default=None,
                        help='''Split the library to train and test set with a ratio, such as 9:1 or 8:2''')
    args = parser.parse_args()
    lib_path = args.library
    out_dir = args.output
    target_file = args.target
    ratio = args.ratio
    print(f'''Set params:
    library path: {lib_path}
    output dir: {out_dir}
    target file: {target_file}
    split ratio: {ratio}
    ''')
    if not os.path.exists(out_dir):
        print('Creating output directory')
        os.makedirs(out_dir)
    sn = SNLib()
    print('Loading library')
    sn.set_library(lib_path)
    if target_file:
        # Target-based split: the target proteins form the test set, the
        # remainder forms the training set.
        # Test
        # Library
        print(f'Splitting library with target proteins in {target_file}')
        sn.library_keep_target(target_file)
        test_lib_path = os.path.join(out_dir, 'TestLibrary.txt')
        print(f'Storing target library in {test_lib_path}')
        sn.write_current_library(test_lib_path)
        # pDeep
        print('Extracting test precursors for intensity model')
        test_prec = sn.get_prec_list()
        inten_test_path = os.path.join(out_dir, 'TestInput-FragmentIntensity.txt')
        print(f'Storing test input of fragment intensity in {inten_test_path}')
        pdeep.pdeep_input(inten_test_path, test_prec)
        # DeepRT
        print('Extracting test modified peptides for RT model')
        test_modpep = sn.get_modpep_list()
        rt_test_path = os.path.join(out_dir, 'TestInput-RT.txt')
        print(f'Storing test input of RT in {rt_test_path}')
        deeprt.deeprt_input(rt_test_path, test_modpep)
        # Train
        # Library
        # Restore the full library before removing the target proteins.
        sn.backtrack_library()
        print(f'Splitting library with non-target proteins in {target_file}')
        sn.library_remove_target(target_file)
        train_lib_path = os.path.join(out_dir, 'TrainLibrary.txt')
        print(f'Storing non-target library in {train_lib_path}')
        sn.write_current_library(train_lib_path)
        # pDeep
        print('Extracting ion intensity for intensity model training')
        inten_dict = sn.extract_fragment_data()
        inten_train_path = os.path.join(out_dir, 'Trainset-FragmentIntensity.txt')
        print(f'Storing trainset of fragment intensity in {inten_train_path}')
        pdeep.pdeep_trainset(inten_train_path, inten_dict)
        # DeepRT
        print('Extracting RT data for RT model training')
        modpep_rt_list = sn.extract_rt_data(return_type='list')
        rt_train_path = os.path.join(out_dir, 'Trainset-RT.txt')
        print(f'Storing trainset of RT in {rt_train_path}')
        deeprt.deeprt_trainset(rt_train_path, modpep_rt_list)
    else:
        if not ratio:
            print('Argument must contain one of -t or -r. See details with -h')
        else:
            # Normalize "9:1"-style input into fractions that sum to 1.
            ratio = [float(_) for _ in ratio.replace(' ', '').strip(':').split(':')]
            sum_ratio = sum(ratio)
            ratio = [_ / sum_ratio for _ in ratio]
            print(f'Splitting library with ratio {ratio}')
            # Split twice: by precursor for pDeep, by modified peptide for DeepRT.
            pdeep_train_lib, pdeep_test_lib = sn.split_library(ratio, focus_col='Precursor')
            deeprt_train_lib, deeprt_test_lib = sn.split_library(ratio, focus_col='ModifiedPeptide')
            print(f'Storing library in {out_dir}')
            pdeep_train_lib.to_csv(os.path.join(out_dir, 'TrainLibrary-pDeep.txt'), index=False, sep='\t')
            pdeep_test_lib.to_csv(os.path.join(out_dir, 'TestLibrary-pDeep.txt'), index=False, sep='\t')
            deeprt_train_lib.to_csv(os.path.join(out_dir, 'TrainLibrary-DeepRT.txt'), index=False, sep='\t')
            deeprt_test_lib.to_csv(os.path.join(out_dir, 'TestLibrary-DeepRT.txt'), index=False, sep='\t')
            print('')
            # pDeep train
            print('Extracting ion intensity for intensity model training')
            inten_dict = post_sn.get_lib_fragment_info(pdeep_train_lib)
            inten_train_path = os.path.join(out_dir, 'Trainset-pDeep.txt')
            print(f'Storing trainset of fragment intensity in {inten_train_path}')
            pdeep.pdeep_trainset(inten_train_path, inten_dict)
            # pDeep test
            print('Extracting test precursors for intensity model')
            test_prec = post_sn.get_lib_prec(pdeep_test_lib)
            inten_test_path = os.path.join(out_dir, 'TestInput-pDeep.txt')
            print(f'Storing test input of fragment intensity in {inten_test_path}')
            pdeep.pdeep_input(inten_test_path, set(test_prec))
            # DeepRT train
            print('Extracting RT data for RT model training')
            modpep_rt_list = post_sn.get_lib_rt_info(deeprt_train_lib, return_type='list')
            rt_train_path = os.path.join(out_dir, 'Trainset-DeepRT.txt')
            print(f'Storing trainset of RT in {rt_train_path}')
            deeprt.deeprt_trainset(rt_train_path, modpep_rt_list)
            # DeepRT test
            print('Extracting test modified peptides for RT model')
            test_modpep = deeprt_test_lib['ModifiedPeptide'].drop_duplicates().tolist()
            rt_test_path = os.path.join(out_dir, 'TestInput-DeepRT.txt')
            print(f'Storing test input of RT in {rt_test_path}')
            deeprt.deeprt_input(rt_test_path, test_modpep)
    print('Done')
|
from flask import Flask, render_template, request, jsonify
from flask_cors import CORS
# Flask echo server; CORS is enabled so browser clients on other origins
# can POST monitoring notifications to the callback endpoint.
app = Flask(__name__)
CORS(app)
app.config["DEBUG"] = True  # NOTE(review): disable debug mode in production
@app.route('/', methods=['GET'])
def index():
    """Health-check endpoint confirming the echo web-server is up."""
    return "evolved5G echo web-server started"
@app.route('/monitoring/callback', methods=['POST'])
def location_reporter():
    """Echo back the JSON body of a monitoring notification.

    Logs the payload to stdout and returns it unchanged so callers can
    verify exactly what the server received.
    """
    # Parse the request body once instead of calling get_json() twice.
    payload = request.get_json()
    print("New notification retrieved:")
    print(payload)
    return payload
# Start the development server on all interfaces (default port 5000).
if __name__ == '__main__':
    print("initiating")
    app.run(host='0.0.0.0')
|
import asyncio
import json
from concurrent.futures import ProcessPoolExecutor
def sort_in_process(data):
    """Decode a JSON-encoded list of numbers, sort it with a gnome sort,
    and re-encode the result as JSON bytes.

    Runs in a worker process so the CPU-heavy sort never blocks the
    event loop.
    """
    numbers = json.loads(data.decode())
    position = 1
    while position < len(numbers):
        if numbers[position] < numbers[position - 1]:
            # Out of order: swap backwards, then step back to re-check.
            numbers[position - 1], numbers[position] = numbers[position], numbers[position - 1]
            position = max(1, position - 1)
        else:
            position += 1
    return json.dumps(numbers).encode()
async def sort_request(reader, writer):
    """Handle one client: read a length-prefixed JSON list, sort it in a
    worker process, and write the sorted JSON back.

    Wire format: 8-byte big-endian length, then that many bytes of JSON.
    """
    print("Received connection")
    # read(8) may legally return fewer than 8 bytes; readexactly() guarantees
    # the whole length prefix (raises IncompleteReadError on truncation).
    length = await reader.readexactly(8)
    data = await reader.readexactly(int.from_bytes(length, "big"))
    # None -> the loop's default executor (a ProcessPoolExecutor, set at startup).
    result = await asyncio.get_event_loop().run_in_executor(
        None, sort_in_process, data
    )
    print("Sorted list")
    writer.write(result)
    await writer.drain()  # flush the reply before closing the transport
    writer.close()
    print("Connection closed")
# Serve on localhost:2015, off-loading each sort to a process pool so the
# event loop stays responsive under CPU-bound work.
loop = asyncio.get_event_loop()
loop.set_default_executor(ProcessPoolExecutor())
server = loop.run_until_complete(
    asyncio.start_server(sort_request, "127.0.0.1", 2015)
)
print("Sort Service running")
loop.run_forever()  # blocks here; the shutdown lines below run only after loop.stop()
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
|
import sublime
import sublime_plugin
import re
# Settings file name consumed by sublime.load_settings().
SETTINGS_FILE = "JSTemplatify.sublime-settings"
def js_templatify(string):
    """Convert a JS string-concatenation expression into an ES6 template literal.

    Walks the input character by character, tracking whether the cursor is
    inside a quoted string or inside a concatenated (non-string) expression,
    and emits `...${expr}...` accordingly. Returns the input unchanged when
    no expression parts were found.
    """
    string_quote = ''          # quote char of the currently open string, or ''
    expressions_count = 0      # number of ${...} expressions opened so far
    strings_count = 0          # number of quoted string segments seen
    within_expression = False  # True while a ${...} is open
    string_template = '`'
    for c, char in enumerate(string):
        # Opening quote: only counts when not escaped (or when the backslash
        # itself is escaped).
        if string_quote == '' and char in ["'", '"', '`']:
            if c == 0 or (string[c - 1] != '\\') or (c > 1 and string[c - 1] == '\\' and string[c - 2] == '\\'):
                string_quote = char
                strings_count += 1
                continue
        # Matching closing quote, with the same escape handling.
        if string_quote and char == string_quote:
            if c == 0 or (string[c - 1] != '\\') or (c > 1 and string[c - 1] == '\\' and string[c - 2] == '\\'):
                string_quote = ''
                continue
        if string_quote:
            # Inside a quoted segment: copy the character verbatim.
            string_template += char
        elif expressions_count == 0 and strings_count == 0:
            # Leading expression before any string or '+' was seen.
            within_expression = True
            expressions_count += 1
            string_template += '${' + char
        elif char == '+':
            # '+' either closes the current expression or starts a new one.
            if within_expression:
                within_expression = False
                string_template += '}'
            else:
                within_expression = True
                expressions_count += 1
                string_template += '${'
        elif not re.match(r'\s', char):
            # Non-whitespace outside strings belongs to the current expression.
            string_template += char
    if expressions_count == 0:
        # Pure string (no concatenated expressions): leave the text untouched.
        return string
    # Close a trailing, still-open expression.
    if within_expression:
        within_expression = False
        string_template += '}'
    string_template += '`'
    return string_template
def run_on_selections(view, edit, func):
    """Apply *func* to the text of every selection in *view*.

    Empty selections are expanded to the word under the caret. Leading and
    trailing whitespace of each selection is preserved; only the trimmed
    core is transformed.
    """
    # Removed: `settings = sublime.load_settings(SETTINGS_FILE)` — it was
    # loaded but never used.
    for s in view.sel():
        # An empty region is falsy; fall back to the word under the caret.
        region = s if s else view.word(s)
        text = view.substr(region)
        if not text.strip():
            # Whitespace-only selection: leading + trailing below would both
            # equal the whole text and duplicate it — nothing to transform.
            continue
        # Preserve leading and trailing whitespace
        leading = text[:len(text)-len(text.lstrip())]
        trailing = text[len(text.rstrip()):]
        new_text = leading + func(text.strip()) + trailing
        if new_text != text:
            view.replace(edit, region, new_text)
class JsTemplatify(sublime_plugin.TextCommand):
    """Sublime text command: convert concatenated JS strings in the current
    selection(s) into an ES6 template literal."""
    def run(self, edit):
        run_on_selections(self.view, edit, js_templatify)
|
"""Combine bounding boxes and labels obtained from imagej plugin and
then overlay the bounding boxes and labels of cells on raw stack"""
import argparse
import cv2 as cv
import numpy as np
import pandas as pd
import os
import imagej.plot_rois as plot_rois
import imagej.combine_label_annotations as combine_label_annotations
def convert_to_ann_txt(csv_path):
    """
    Given a processed annotation/bounding box csv file, convert it to a txt file
    with only required columns (image_path,x1,y1,x2,y2,class_name)
    :param str csv_path: Full path to csv file with RBC,X,Y,FOV,Width,Height,Slice,ID,Label,image_path
    :return str txt_path: Full path to comma separated txt file with image_path,x1,y1,x2,y2,class_name
    """
    annotations = pd.read_csv(csv_path)
    txt_path = csv_path.replace(".csv", ".txt")
    # Build all lines first, then write in one call.
    lines = ["image_path,x1,y1,x2,y2,class_name\n"]
    for _, row in annotations.iterrows():
        # (x2, y2) is the bottom-right corner derived from width/height.
        x2 = row.X + row.Width
        y2 = row.Y + row.Height
        lines.append("{},{},{},{},{},{}\n".format(row.image_path, row.X, row.Y, x2, y2, row.label))
    with open(txt_path, "w") as out:
        out.writelines(lines)
    return txt_path
def main():
    """Combine bounding boxes with their labels, overlay them on each FOV
    image, and emit a retinanet-style annotation txt file."""
    parser = argparse.ArgumentParser(
        description="Set labels for the bounding box rectangles," +
        "save bounding boxes and annotations overlaid on the input image")
    parser.add_argument(
        "--im_dir", help="Absolute path to folder containing images", required=True, type=str)
    parser.add_argument(
        "--bounding_boxes_txt_file",
        help="Absolute path to the bounding boxes txt file", required=True, type=str)
    parser.add_argument(
        "--labels_txt_file",
        help="Absolute path to selected bounding boxes's labels txt file", required=True, type=str)
    parser.add_argument(
        "--output_dir",
        help="Absolute path to folder to save the roi overlaid images to", required=True, type=str)
    parser.add_argument(
        "--display",
        help="Display overlaid images, Default False", action='store_true')
    args = parser.parse_args()
    im_dir = args.im_dir
    save_path = args.output_dir
    os.makedirs(save_path, exist_ok=True)
    display = args.display
    # Merge the ROI boxes with their labels into one csv.
    csv_path = combine_label_annotations.combine_annotations_rois(
        args.bounding_boxes_txt_file, args.labels_txt_file, im_dir)
    bb_df = pd.read_csv(args.bounding_boxes_txt_file)
    # One overlay image per field of view.
    positions = np.unique(bb_df.FOV.values.tolist())
    for pos in positions:
        # NOTE(review): image file name pattern is hard-coded for this dataset.
        im_name = 'MalariaRefocused_sl3_ch1_p{}_t1.tif'.format(pos)
        im_rgb = plot_rois.plot_bboxes(im_dir, csv_path, pos=pos, display=display)
        cv.imwrite(os.path.join(save_path, "{}".format(im_name)), im_rgb)
    txt_path = convert_to_ann_txt(csv_path)
    print("Formatted annotations file stored at {}".format(txt_path))
# Script entry point.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 12 16:48:00 2017
@author: gianni
"""
from scipy import constants
import numpy as np
from pythonradex import LAMDA_file,atomic_transition
class Molecule():
    "Represents an atom or molecule"
    def __init__(self,levels,rad_transitions,coll_transitions,partition_function=None):
        '''levels is a list of instances of the Level class
        rad_transitions is a list of instances of the RadiativeTransition class
        coll_transitions is a dictionary with an entry for each collision partner, where
        each entry is a list of instances of the CollisionalTransition class'''
        self.levels = levels
        self.rad_transitions = rad_transitions #list
        #dictionary with list of collisional transitions for each collision
        #partner:
        self.coll_transitions = coll_transitions
        self.n_levels = len(self.levels)
        self.n_rad_transitions = len(self.rad_transitions)
        self.set_partition_function(partition_function=partition_function)
    @classmethod
    def from_LAMDA_datafile(cls,datafilepath,read_frequencies=False,
                            partition_function=None):
        """Alternative constructor using a LAMDA data file"""
        data = LAMDA_file.read(datafilepath=datafilepath,
                               read_frequencies=read_frequencies)
        return cls(levels=data['levels'],rad_transitions=data['radiative transitions'],
                   coll_transitions=data['collisional transitions'],
                   partition_function=partition_function)
    def set_partition_function(self,partition_function):
        '''Use the given partition function, or fall back to computing it
        from the level data when None is supplied.'''
        if partition_function is None:
            self.Z = self.Z_from_atomic_data
        else:
            self.Z = partition_function
    def Z_from_atomic_data(self,T):
        '''Computes the partition function for a given temperature T. T can
        be a float or an array'''
        T = np.array(T)
        weights = np.array([l.g for l in self.levels])
        energies = np.array([l.E for l in self.levels])
        if T.ndim > 0:
            #reshape so levels broadcast along a new leading axis against T
            shape = [self.n_levels,]+[1 for i in range(T.ndim)] #needs to come before T is modified
            T = np.expand_dims(T,axis=0) #insert new axis at first position (axis=0)
            weights = weights.reshape(shape)
            energies = energies.reshape(shape)
        #sum of Boltzmann factors g*exp(-E/kT) over all levels
        return np.sum(weights*np.exp(-energies/(constants.k*T)),axis=0)
    def LTE_level_pop(self,T):
        '''Computes the level populations in LTE for a given temperature T.
        Axis 0 of the output runs along levels, the other axes (if any)
        correspond to the shape of T'''
        T = np.array(T)
        Z = self.Z(T)
        pops = [l.LTE_level_pop(T=T,Z=Z) for l in self.levels]
        if T.ndim > 0:
            shape = [1,]+list(T.shape)
            return np.concatenate([p.reshape(shape) for p in pops],axis=0)
        else:
            return np.array(pops)
    def get_rad_transition_number(self,transition_name):
        '''Returns the transition number for a given transition name'''
        candidate_numbers = [i for i,line in enumerate(self.rad_transitions) if
                             line.name==transition_name]
        #the name must identify exactly one transition
        assert len(candidate_numbers) == 1
        return candidate_numbers[0]
class EmittingMolecule(Molecule):
    "Represents an emitting molecule, i.e. a molecule with a specified line profile"
    def __init__(self,levels,rad_transitions,coll_transitions,line_profile_cls,
                 width_v,partition_function=None):
        '''levels is a list of instances of the Level class
        rad_transitions is a list of instances of the RadiativeTransition class
        coll_transitions is a dictionary with an entry for each collision partner, where
        each entry is a list of instances of the CollisionalTransition class
        line_profile_cls is the line profile class used to represent the line profile
        width_v is the width of the line in velocity'''
        Molecule.__init__(self,levels=levels,rad_transitions=rad_transitions,
                          coll_transitions=coll_transitions,
                          partition_function=partition_function)
        #convert radiative transitions to emission lines (but keep the same attribute name)
        self.rad_transitions = [atomic_transition.EmissionLine.from_radiative_transition(
                                   radiative_transition=rad_trans,
                                   line_profile_cls=line_profile_cls,width_v=width_v)
                                for rad_trans in self.rad_transitions]
    @classmethod
    def from_LAMDA_datafile(cls,datafilepath,line_profile_cls,width_v,
                            read_frequencies=False,partition_function=None):
        """Alternative constructor using a LAMDA data file"""
        data = LAMDA_file.read(datafilepath=datafilepath,
                               read_frequencies=read_frequencies)
        return cls(levels=data['levels'],rad_transitions=data['radiative transitions'],
                   coll_transitions=data['collisional transitions'],
                   line_profile_cls=line_profile_cls,width_v=width_v,
                   partition_function=partition_function)
    def get_tau_nu0(self,N,level_population):
        '''For a given total column density N and level population,
        compute the optical depth at line center for all radiative transitions'''
        tau_nu0 = []
        for line in self.rad_transitions:
            #fractional populations of the lower/upper level of this line
            x1 = level_population[line.low.number]
            x2 = level_population[line.up.number]
            tau_nu0.append(line.tau_nu0(N1=x1*N,N2=x2*N))
        return np.array(tau_nu0)
    def get_Tex(self,level_population):
        '''For a given level population, compute the excitation temperature
        for all radiative transitions'''
        Tex = []
        for line in self.rad_transitions:
            x1 = level_population[line.low.number]
            x2 = level_population[line.up.number]
            Tex.append(line.Tex(x1=x1,x2=x2))
        return np.array(Tex)
from core import bot
def cmd_start():
    """Register the /start command handler on the bot.

    The handler greets the user by first name and points them at /help.
    """
    @bot.message_handler(commands=['start'])
    def function_start(message):
        # Implicit string concatenation replaces the '+' chain; the f-prefix
        # is kept only on segments that actually interpolate. Stray `pass`
        # statements removed (they were dead code).
        greeting = (
            f"<b>Welcome {message.from_user.first_name}</b> ༼ つ ◕_◕ ༽つ\n"
            f"I'm <i>{bot.get_me().first_name}</i> and I serve as cloud server\n"
            "assistant helper for my Genemator master!\n"
            "\n"
            "<b>For further information or help, type</b> /help\n"
        )
        bot.send_message(message.chat.id, greeting, parse_mode='HTML')
|
from typing import Optional
class CommandFailed(Exception):
    """Raised when an external command exits unsuccessfully.

    Carries the failed command plus its captured stdout/stderr (either may
    be None when the corresponding stream was not captured).
    """

    def __init__(self, cmd, stdout: Optional[bytes], stderr: Optional[bytes]):
        # Pass a message to the base Exception so str()/logging show which
        # command failed (previously str(exc) was an empty string).
        super().__init__(f"Command failed: {cmd!r}")
        self.cmd = cmd
        self.stdout = stdout
        self.stderr = stderr
|
import requests
import json
# Query the Open Notify API for the people currently in space and print the
# response three ways: HTTP status, parsed dict, pretty-printed JSON.
response = requests.get("http://api.open-notify.org/astros.json")
print(response.status_code)
data = response.json()
print(data)
print(json.dumps(data, sort_keys=True, indent=4))
|
from HyperLogLog import HyperLogLog
def wc_naive(dataset):
    """Return the exact number of distinct whitespace-separated words in the
    file at *dataset* (baseline for the HyperLogLog estimate)."""
    words = set()
    with open(dataset, "r") as f:
        for line in f:
            # split() already ignores surrounding whitespace; update() adds
            # every token from the line in one call.
            words.update(line.split())
    return len(words)
def hloglog(dataset):
    """Estimate the number of distinct words in *dataset* with HyperLogLog.

    Prints the bucket array and shows a plot as side effects, then returns
    the cardinality estimate.
    NOTE(review): presumably b=64 is the hash width and p=14 gives 2**14
    buckets — confirm against the HyperLogLog implementation.
    """
    hobj = HyperLogLog(b = 64, p = 14, plot = True)
    with open(dataset, "r") as f:
        for line in f:
            for w in line.strip().split():
                hobj.add_word(w)
    print(hobj.buckets)
    hobj.plot_result()
    return hobj.get_estimation()
def main():
    """Compare the exact distinct-word count with the HyperLogLog estimate."""
    dataset = "shakespeare.txt"
    correct = wc_naive(dataset)
    estimation = hloglog(dataset)
    print("Correct: %d\nEstimated: %d\n" % (correct, estimation))
# Script entry point.
if __name__ == '__main__':
    main()
from random import randint
# Simple guessing game: the player tries to match the program's secret number.
print('-=-' * 11)
print('Bem-vindo ao jogo da adivinhação')
print('-=-' * 11)
print('Em que número eu pensei?!')
n = int(input('Escolha um número entre 0 e 10: '))
# Bug fix: the prompt advertises the range 0..10, but the secret number was
# drawn from randint(0, 5); draw it from the full advertised range instead.
numero_secreto = randint(0, 10)
if n == numero_secreto:
    print('Parabéns você acertou')
else:
    print('Que pena você errou :(')
|
"""Show some sweeps around the last 2 comments."""
import os
import sys
if not os.path.abspath('../../../') in sys.path:
sys.path.append('../../../')
import swhlab
import matplotlib.pyplot as plt
import numpy as np
import warnings
import time
import webbrowser
# Folder where tagInspect() figures and the generated index.html are written.
OUTPUT_PATH=R"X:\Data Analysis\SCOTT\SWHLab development\phasic2"
def tagInspect(abf,saveToo=False): #TODO: put in ABF class?
    """Plot stacked sweeps around the last two comment tags of an ABF file.

    For each of the two last tagged sweeps, the 20 preceding alternate sweeps
    are lowpass-filtered, vertically offset, and drawn side by side (blue for
    the first tag, green for the second).

    :param abf: swhlab.ABF instance with at least two comment tags
    :param saveToo: when True, also save the figure as a PNG in OUTPUT_PATH
    """
    if len(abf.comment_tags)<2:
        warnings.warn("no tags in ABF!")
        return
    S1,S2=abf.comment_sweeps[-2],abf.comment_sweeps[-1]
    nSweeps = 20
    vertOffset = 50
    # Index of the first half second of points, used both to blank the filter
    # edge artifact and to position the sweep labels. Must be an int: indexing
    # an array with the float .5*pointsPerSec raises IndexError on modern numpy.
    halfSec = int(.5*abf.pointsPerSec)
    plt.close('all')
    plt.figure(figsize=(15,10))
    for i in range(nSweeps):
        abf.setsweep(S1-i*2)
        Y=swhlab.common.lowpass(abf.sweepY,abf.pointsPerMs*5)
        Y=Y-np.nanmean(Y)+i*vertOffset
        Y[:halfSec]=np.nan
        plt.plot(abf.sweepX2,Y,color='b',alpha=.5)
        plt.text(abf.sweepX2[halfSec],i*vertOffset,
                 "%s "%str(abf.sweep),ha='right')
        abf.setsweep(S2-i*2)
        Y=swhlab.common.lowpass(abf.sweepY,abf.pointsPerMs*5)
        Y=Y-np.nanmean(Y)+i*vertOffset
        Y[:halfSec]=np.nan
        plt.plot(abf.sweepX2+abf.sweepLength,Y,color='g',alpha=.5)
        plt.text(abf.sweepX2[halfSec]+abf.sweepLength,i*vertOffset,
                 "%s "%str(abf.sweep),ha='right')
    plt.margins(0,0)
    plt.axis([None,None,-100,nSweeps*vertOffset])
    plt.axis('off')
    plt.title("[%s] sw %d (%s) - sw %d (%s)"%(abf.ID,S1,abf.comment_tags[-2],S2,abf.comment_tags[-1]))
    plt.tight_layout()
    if saveToo:
        plt.savefig(R"X:\Data Analysis\SCOTT\SWHLab development\phasic2\%s.png"%abf.ID)
    plt.show()
    print()
    return
def picpage():
    """Build an index.html linking every PNG/JPG in OUTPUT_PATH and open it
    in the default web browser."""
    parts = ["<html><body>"]
    images = [x for x in sorted(os.listdir(OUTPUT_PATH)) if x.endswith(".png") or x.endswith(".jpg")]
    for fname in images:
        # Named anchor + heading, then the image itself linked to its file.
        parts.append('<a name="%s" href="#%s"><h1>%s</h1></a>'%(fname,fname,fname))
        fullPath = os.path.abspath(os.path.join(OUTPUT_PATH,fname))
        parts.append('<a href="%s"><img src="%s"></a>'%(fullPath,fullPath))
    parts.append("</body></html>")
    htmlFname=os.path.join(OUTPUT_PATH,"index.html")
    with open(htmlFname,'w') as f:
        f.write("".join(parts))
    webbrowser.open(htmlFname)
if __name__=="__main__":
    # Scan a data folder for large ABF files from selected experiments,
    # generate tag-inspection figures for each, then build the HTML index.
    abfPath=R"X:\Data\2P01\2016\2016-09-01 PIR TGOT"
    good=[]
    for fname in sorted(os.listdir(abfPath)):
        if not fname.endswith(".abf"):
            continue
        # restrict to these experiment ID prefixes
        if fname[:5] in ["16831","16906","16907","16909"]:
            fname=os.path.join(abfPath,fname)
            if os.stat(fname).st_size/10**6>10: # only do files > ~10MB
                abf=swhlab.ABF(fname)
                if len(abf.comment_sweeps)<2:
                    print("SKIP:",fname)
                else:
                    good.append(fname)
                    tagInspect(abf,saveToo=True)
    picpage()
    print("\n".join(good))
    print("DONE")
import numpy as np
import pandas as pd
import random
from model import model_fit
def data_from_model(baseline_params, startle_scaling_params, sound_scaling_params,
                    prepulse_conditions, startle_sounds_each_condition, noise=0.01):
    """Generate synthetic startle-response data from the sigmoid model.

    For every prepulse condition, evaluates the model at each startle sound
    level and adds uniform jitter proportional to baseline_params[0].
    Returns rows of [prepulse_level, prepulse_sound, startle_sound, response].

    NOTE(review): `sigmoid` is neither defined nor imported in this file —
    presumably it lives in `model` next to `model_fit`; confirm the import.
    """
    data = []
    for i, condition in enumerate(prepulse_conditions):
        startle_scaling = startle_scaling_params[i]
        sound_scaling = sound_scaling_params[i]
        for startle_sound in startle_sounds_each_condition[condition]:
            model_point = sigmoid(startle_sound, *baseline_params, startle_scaling, sound_scaling)
            # jitter amplitude is scaled by the first baseline parameter
            jitter_point = model_point + random.uniform(-noise * baseline_params[0], noise*baseline_params[0])
            data.append([condition[0], condition[1], startle_sound, jitter_point])
    return data
# Smoke-test scenarios for model_fit, each built from synthetic model data.
# Should accurately fit baseline and one prepulse condition
data = data_from_model([2, 0.2, 35], [1, 0.8], [1, 0.94], [(0, 100), (14, 100)],
                       {(0, 100): [0, 20, 30, 50, 60], (14,100): [0, 20, 30, 50, 60]})
model = model_fit(data)
# Should only fit the baseline condition and throw out the other condition because
# it doesn't have enough startle sound levels
data = data_from_model([2, 0.2, 35], [1, 0.8], [1, 0.94], [(0, 100), (14, 100)],
                       {(0, 100): [0, 20, 30, 50, 60], (14,100): [0, 20, 30]})
model = model_fit(data)
# Make sure it can handle just a baseline curve
data = data_from_model([2, 0.2, 35], [1], [1], [(0, 100)],
                       {(0, 100): [0, 20, 30, 50, 60]})
model = model_fit(data)
# Test situation where a baseline condition doesn't come close to covering the startle curve
# Output a warning that the fractional saturation is low
data = data_from_model([2, 0.2, 35], [1, 0.8], [1, 0.94], [(0, 100), (14, 100)],
                       {(0, 100): [0, 5, 10, 20, 30], (14,100): [0, 20, 30, 40, 50]})
model = model_fit(data)
# Test situation where a prepulse condition doesn't come close to covering the startle curve
# This should not throw an error, although the situation should be avoided.
data = data_from_model([2, 0.2, 35], [1, 0.8], [1, 0.94], [(0, 100), (14, 100)],
                       {(0, 100): [0, 20, 30, 50, 60], (14,100): [0, 5, 10, 20]})
model = model_fit(data)
# Test not having a control startle sound for the baseline prepulse condition
data = data_from_model([4, 0.15, 35], [1, 0.8], [1, 0.94], [(0, 100), (14, 100)],
                       {(0, 100): [20, 30, 50, 60], (14,100): [0, 20, 30, 50, 60]})
model = model_fit(data)
# ---------------------
# Failure cases: uncomment one at a time to run
# --------------------
# # Throw an error if the data isn't an Nx4 array or list
# data = [[0, 1, 2], [0, 1, 2]]
# model = model_fit(data)
# # Throw an error if the input isn't a numpy array or list
# data = 'hello world'
# model = model_fit(data)
# # Throw an error if we see multiple baseline prepulse conditions
# data = data_from_model([4, 0.15, 35], [1, 1, 0.8], [1, 1, 0.94], [(0, 100), (0, 0), (14, 100)],
# {(0, 100): [0, 20, 30, 50, 60],
# (0, 0): [0, 20, 30, 50, 60], (14,100): [0, 20, 30, 50, 60]})
# model = model_fit(data)
# # Throw an error on the empty input
# data = []
# model = model_fit(data)
# # Should throw error that baseline condition doesn't have enough startle sound levels
# data = data_from_model([2, 0.2, 35], [1, 0.8], [1, 0.94], [(0, 100), (14, 100)],
# {(0, 100): [0, 20, 30], (14,100): [0, 20, 30, 50, 60]})
# model = model_fit(data)
# # Throw an error that data needs to include a baseline condition (prepulse sound = 0 dB above baseline)
# data = data_from_model([2, 0.2, 35], [0.9], [0.94], [(14, 100)],
# {(14, 100): [0, 20, 30, 50, 60]})
# model = model_fit(data)
# # Throw an error that the data must contain at least one control stimulus (startle sound = 0 dB above baseline)
# data = data_from_model([2, 0.2, 35], [1, 0.8], [1, 0.94], [(0, 100), (14, 100)],
# {(0, 100): [20, 30, 50, 60], (14,100): [20, 30]})
# model = model_fit(data)
|
# proxy module
from traitsui.wx.themed_slider_editor import *
|
# coding=utf-8
import random
import os
if __name__ == "__main__":
    # Shuffle the sentence order of a corpus file and write the result.
    # NOTE(review): this script appears to target Python 2 — under Python 3
    # `s.encode('utf-8').split()` yields bytes and the later `content+=word`
    # would raise TypeError; the commented-out `print sentence` is py2 syntax.
    DATA_PATH='argoLog_pro_finish.txt'
    DATA_PATH_SF='argoLog_pro_finish_sf.txt'
    sentences=[]
    for sentence in open(DATA_PATH):
        sentences.append(sentence)
    sentences=[s.encode('utf-8').split() for s in sentences]
    # print sentences[0]
    # shuffle the order
    random.shuffle(sentences)
    # with open(DATA_PATH_SF,'w') as f:
    #     for sentence in sentences:
    #         print sentence
    #         f.write(sentence)
    #         f.write("\n")
    #     f.close()
    with open(DATA_PATH_SF,'w') as f:
        content=''
        for sentence in sentences:
            for word in sentence:
                content+=word
                content+=' '
            content+='\n'
        f.write(content)
        f.close()
|
#########################################################################
# Copyright (C) 2007, 2008, 2009
# Alex Clemesha <alex@clemesha.org> & Dorian Raymer <deldotdr@gmail.com>
#
# This module is part of codenode, and is distributed under the terms
# of the BSD License: http://www.opensource.org/licenses/bsd-license.php
#########################################################################
def introspect(item, format='print'):
"""Print useful information about item."""
if item == '?':
print 'Type <object>? for info on that object.'
return
_name = 'N/A'
_class = 'N/A'
_doc = 'No Documentation.'
if hasattr(item, '__name__'):
_name = item.__name__
if hasattr(item, '__class__'):
_class = item.__class__.__name__
_id = id(item)
_type = type(item)
_repr = repr(item)
if callable(item):
_callable = "Yes"
else:
_callable = "No"
if hasattr(item, '__doc__'):
_doc = getattr(item, '__doc__')
_doc = _doc.strip() # Remove leading/trailing whitespace.
info = {'name':_name, 'class':_class, 'type':_type, 'repr':_repr, 'doc':_doc}
if format is 'print':
for k,v in info.iteritems():
print k.capitalize(),': ', v
return
elif format is 'dict':
return info
|
import os
import urllib
from cosalib.cmdlib import run_verbose
from tenacity import (
retry,
stop_after_attempt
)
@retry(reraise=True, stop=stop_after_attempt(3))
def remove_azure_image(image, resource_group, auth, profile):
    """Delete an image from Azure via ore; retried up to 3 times.

    :param image: image name to delete
    :param resource_group: Azure resource group containing the image
    :param auth: path to the Azure auth file
    :param profile: path to the Azure profile file
    :raises Exception: when ore exits non-zero after all retries
    """
    print(f"Azure: removing image {image}")
    try:
        run_verbose([
            'ore', 'azure',
            '--azure-auth', auth,
            '--azure-profile', profile,
            'delete-image',
            '--image-name', image,
            '--resource-group', resource_group
        ])
    except SystemExit as e:
        # ore exits via SystemExit; re-raise as a chained Exception naming
        # the image so callers and logs keep the original context.
        raise Exception(f"Failed to remove image {image}") from e
@retry(reraise=True, stop=stop_after_attempt(3))
def azure_run_ore(build, args):
    """
    Execute ore to upload the vhd image in blob format
    See:
    - https://github.com/coreos/mantle/#azure
    - https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction
    :param build: Build instance to use
    :type build: Build
    :param args: The command line arguments
    :type args: argparse.Namespace
    """
    azure_vhd_name = f"{build.image_name_base}.vhd"
    ore_args = [
        'ore',
        '--log-level', args.log_level,
        'azure', 'upload-blob',
        '--azure-auth', args.auth,
        '--azure-location', args.location,
        '--azure-profile', args.profile,
        '--blob-name', azure_vhd_name,
        '--file', f"{build.image_path}",
        '--container', args.container,
        '--resource-group', args.resource_group,
        '--storage-account', args.storage_account
    ]
    if args.force:
        # --overwrite replaces an existing blob of the same name
        ore_args.append('--overwrite')
    run_verbose(ore_args)
    # percent-encode the blob path so the stored URL stays valid
    url_path = urllib.parse.quote((
        f"{args.storage_account}.blob.core.windows.net/"
        f"{args.container}/{azure_vhd_name}"
    ))
    build.meta['azure'] = {
        'image': azure_vhd_name,
        'url': f"https://{url_path}",
    }
    build.meta_write()  # update build metadata
@retry(reraise=True, stop=stop_after_attempt(3))
def azure_run_ore_replicate(*args):
    """Place-holder: Azure image replication is not implemented.

    Accepts and ignores any arguments so it matches the other cloud
    replicate entry points."""
    print("""
    Azure currently does not produce virtual machine
    registrations. This command is a place-holder only.
    """)
def azure_cli(parser):
    """
    Common Azure CLI

    Registers the Azure options shared by the upload commands; every option
    falls back to the matching AZURE_* environment variable.
    """
    parser.add_argument(
        '--auth',
        help='Path to Azure auth file',
        default=os.environ.get("AZURE_AUTH"))
    parser.add_argument(
        '--container',
        help='Storage location to write to',
        default=os.environ.get("AZURE_CONTAINER")
    )
    parser.add_argument(
        '--location',
        help='Azure location (default westus)',
        default=os.environ.get("AZURE_LOCATION", "westus")
    )
    parser.add_argument(
        '--profile',
        help='Path to Azure profile',
        default=os.environ.get('AZURE_PROFILE')
    )
    parser.add_argument(
        '--resource-group',
        help='Resource group',
        default=os.environ.get('AZURE_RESOURCE_GROUP')
    )
    parser.add_argument(
        '--storage-account',
        help='Storage account',
        default=os.environ.get('AZURE_STORAGE_ACCOUNT')
    )
    return parser
|
#!/usr/bin/env python
import numpy as np
import time
# Time a large NumPy reduction. np.flush() is not part of the public NumPy
# API; the dir() guard means it only runs on builds that provide it —
# presumably to flush pending asynchronous work before/after timing (confirm
# against the specific NumPy build in use).
if "flush" in dir(np):
    np.flush()
begin = time.time()
#a = np.sum(((np.ones(100)+1.0)*2.0)/2.0)
a = np.sum(np.random.random(50000000))
#a = np.multiply.accumulate(np.ones((8,8), dtype=np.float32))
print(a)
if "flush" in dir(np):
    np.flush()
end = time.time() - begin
print(end)
|
#!/usr/bin/python3
import timeit
from jk_tokenizingparsing import *
from jk_tokenizingparsing.tokenmatching import *
# Three token streams for "someVar = a + b" style assignments:
# ts1: a + b (canonical order), ts2: b + a (operands swapped),
# ts3: '+' before both operands (should not match an a+b expression).
tokens1 = [
	Token("w", "someVar", None, None, None, None, None),
	Token("d", "=", None, None, None, None, None),
	Token("w", "a", None, None, None, None, None),
	Token("d", "+", None, None, None, None, None),
	Token("w", "b", None, None, None, None, None),
	Token("eos", "", None, None, None, None, None),
]
ts1 = TokenStream(tokens1)
tokens2 = [
	Token("w", "someVar", None, None, None, None, None),
	Token("d", "=", None, None, None, None, None),
	Token("w", "b", None, None, None, None, None),
	Token("d", "+", None, None, None, None, None),
	Token("w", "a", None, None, None, None, None),
	Token("eos", "", None, None, None, None, None),
]
ts2 = TokenStream(tokens2)
tokens3 = [
	Token("w", "someVar", None, None, None, None, None),
	Token("d", "=", None, None, None, None, None),
	Token("d", "+", None, None, None, None, None),
	Token("w", "b", None, None, None, None, None),
	Token("w", "a", None, None, None, None, None),
	Token("eos", "", None, None, None, None, None),
]
ts3 = TokenStream(tokens3)
# Pattern: 'someVar' '=' followed by the tokens 'a', 'b' and '+' in any
# order (TPUnordSeq); match it against each of the three streams above.
x = TPSeq(
	TP("w", "someVar", emitName="varName"),
	TP("d", "="),
	TPUnordSeq(
		TP("w", "a", emitName="value"),
		TP("w", "b", emitName="value"),
		TP("d", "+")
	),
)
m = x.match(ts1)
print(m)
print(m.values())
print()
m = x.match(ts2)
print(m)
print(m.values())
print()
m = x.match(ts3)
print(m)
print(m.values())
print()
|
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_array
import numpy as np
from scipy import sparse
from varsvm import CD
class weightsvm(BaseEstimator, ClassifierMixin):
    """
    the function use coordinate descent to update the drift linear SVM
    C \sum_{i=1}^n w_i V(y_i(\beta^T x_i + drift_i)) + 1/2 \beta^T \beta

    NOTE(review): the objective above mentions drift_i, but fit() takes no
    drift argument (the drift handling lives only in the commented-out
    pure-Python solver below) — confirm against varsvm.CD.
    """
    def __init__(self, C=1., max_iter=1000, print_step=1, eps=1e-4, loss='hinge'):
        self.loss = loss
        self.alpha = []   # dual variables, filled by fit()
        self.beta = []    # primal weight vector, filled by fit()
        self.C = C
        self.max_iter = max_iter
        self.eps = eps
        self.print_step = print_step
    def get_params(self, deep=True):
        # NOTE(review): max_iter and eps are not reported here, which breaks
        # sklearn cloning of those settings — confirm whether intentional.
        return {"C": self.C, "loss": self.loss, 'print_step': self.print_step}
    def set_params(self, **parameters):
        for parameter, value in parameters.items():
            setattr(self, parameter, value)
        return self
    def fit(self, X, y, sample_weight=1.):
        """Fit the weighted SVM by coordinate descent (delegated to varsvm.CD)."""
        X, y = check_X_y(X, y)
        n, d = X.shape
        self.alpha = np.zeros(n)
        diff = 1.
        # per-sample weights scaled by C and broadcast to length n
        sample_weight = self.C*np.array(sample_weight)
        sample_weight = sample_weight * np.ones(n)
        ## compute Xy matrix
        if sparse.issparse(X):
            Xy = sparse.csr_matrix(X.multiply(y.reshape(-1, 1)))
        else:
            Xy = X * y[:, np.newaxis]
        ## compute diag vector
        if sparse.issparse(X):
            diag = np.array([Xy[i].dot(Xy[i].T).toarray()[0][0] for i in range(n)])
        else:
            diag = np.array([Xy[i].dot(Xy[i]) for i in range(n)])
        self.beta = np.dot(self.alpha, Xy)
        # coordinate descent
        alpha_C, beta_C = CD(Xy, diag, self.alpha, self.beta, sample_weight, self.max_iter, self.eps, self.print_step)
        self.alpha, self.beta = np.array(alpha_C), np.array(beta_C)
        # for ite in range(self.max_iter):
        #     if diff < self.eps:
        #         break
        #     beta_old = np.copy(self.beta)
        #     for i in range(n):
        #         if diag[i] != 0:
        #             delta_tmp = (1. - drift[i] - np.dot(self.beta, Xy[i])) / diag[i]
        #             delta_tmp = max(-self.alpha[i], min(sample_weight[i] - self.alpha[i], delta_tmp))
        #         if diag[i] == 0:
        #             if np.dot(self.beta, Xy[i]) < 1 - drift[i]:
        #                 delta_tmp = sample_weight[i] - self.alpha[i]
        #             else:
        #                 delta_tmp = -self.alpha[i]
        #         self.alpha[i] = self.alpha[i] + delta_tmp
        #         self.beta = self.beta + delta_tmp*Xy[i]
        #     obj = self.dual_obj(Xy=Xy, drift=drift)
        #     diff = np.sum(np.abs(beta_old - self.beta))/np.sum(np.abs(beta_old+1e-10))
        #     if self.print_step:
        #         if ite > 0:
        #             print("ite %s coordinate descent with diff: %.3f; obj: %.3f" %(ite, diff, obj))
    def dual_obj(self, Xy):
        ## compute the dual objective function
        # NOTE(review): np.dot(1., self.alpha) multiplies rather than sums the
        # dual variables — sum(alpha) looks intended; this method appears
        # unused (only referenced from the commented-out solver). Confirm.
        sum_tmp = np.dot(self.alpha, Xy)
        return np.dot(1., self.alpha) - .5 * np.dot(sum_tmp, sum_tmp)
    def decision_function(self, X):
        # signed distance to the separating hyperplane (no intercept term)
        return np.dot(X, self.beta)
    def predict(self, X):
        X = check_array(X)
        return np.sign(self.decision_function(X))
|
import plotly
import pandas as pd
import numpy as np
import plotly.express as px
from pytest import approx
import pytest
import random
def test_facets():
    """Facet spacing: defaults, wrap behavior, and explicit overrides."""
    df = px.data.tips()

    # no facets: a single axis pair spanning the full domain
    fig = px.scatter(df, x="total_bill", y="tip")
    assert "xaxis2" not in fig.layout
    assert "yaxis2" not in fig.layout
    assert fig.layout.xaxis.domain == (0.0, 1.0)
    assert fig.layout.yaxis.domain == (0.0, 1.0)

    # gap between the first facet's axis and its neighbor (axis 4)
    def x_gap(f):
        return f.layout.xaxis4.domain[0] - f.layout.xaxis.domain[1]

    def y_gap(f):
        return f.layout.yaxis4.domain[0] - f.layout.yaxis.domain[1]

    # default spacings for a row/col grid
    fig = px.scatter(df, x="total_bill", y="tip", facet_row="sex", facet_col="smoker")
    assert x_gap(fig) == approx(0.02)
    assert y_gap(fig) == approx(0.03)

    # default spacings under facet_col_wrap
    fig = px.scatter(df, x="total_bill", y="tip", facet_col="day", facet_col_wrap=2)
    assert x_gap(fig) == approx(0.02)
    assert y_gap(fig) == approx(0.07)

    # explicit spacing overrides on a row/col grid
    fig = px.scatter(
        df,
        x="total_bill",
        y="tip",
        facet_row="sex",
        facet_col="smoker",
        facet_col_spacing=0.09,
        facet_row_spacing=0.08,
    )
    assert x_gap(fig) == approx(0.09)
    assert y_gap(fig) == approx(0.08)

    # explicit spacing overrides with wrapping
    fig = px.scatter(
        df,
        x="total_bill",
        y="tip",
        facet_col="day",
        facet_col_wrap=2,
        facet_col_spacing=0.09,
        facet_row_spacing=0.08,
    )
    assert x_gap(fig) == approx(0.09)
    assert y_gap(fig) == approx(0.08)
def test_facets_with_marginals():
    """A marginal is kept only when it does not conflict with the facet direction."""
    df = px.data.tips()

    # histograms: (kwargs, expected trace count)
    hist_cases = [
        (dict(x="total_bill", facet_col="sex", marginal="rug"), 4),
        (dict(x="total_bill", facet_row="sex", marginal="rug"), 2),
        (dict(y="total_bill", facet_col="sex", marginal="rug"), 2),
        (dict(y="total_bill", facet_row="sex", marginal="rug"), 4),
    ]
    for kwargs, n_traces in hist_cases:
        assert len(px.histogram(df, **kwargs).data) == n_traces

    # scatter plots: (extra kwargs, expected trace count)
    scatter_cases = [
        (dict(facet_col="sex", marginal_x="rug"), 4),
        # ignore the wrap when marginal is used
        (dict(facet_col="day", facet_col_wrap=2, marginal_x="rug"), 8),
        # ignore the marginal in the facet direction
        (dict(facet_col="sex", marginal_y="rug"), 2),
        (dict(facet_row="sex", marginal_x="rug"), 2),
        (dict(facet_row="sex", marginal_y="rug"), 4),
        # ignore the marginal in the facet direction
        (dict(facet_row="sex", marginal_y="rug", marginal_x="rug"), 4),
        (dict(facet_col="sex", marginal_y="rug", marginal_x="rug"), 4),
        # ignore all marginals when faceting on both axes
        (dict(facet_row="sex", facet_col="sex", marginal_y="rug", marginal_x="rug"), 2),
    ]
    for kwargs, n_traces in scatter_cases:
        fig = px.scatter(df, x="total_bill", y="tip", **kwargs)
        assert len(fig.data) == n_traces
@pytest.fixture
def bad_facet_spacing_df():
    """Frame with enough categories (101) that default facet spacing is invalid."""
    n_rows = 101
    n_data = 1000
    return pd.DataFrame(
        {
            "x": [random.random() for _ in range(n_data)],
            "y": [random.random() for _ in range(n_data)],
            # 101 distinct category values
            "category": [i % n_rows for i in range(n_data)],
        }
    )
def test_bad_facet_spacing_eror(bad_facet_spacing_df):
    """Spacing just above the per-facet budget raises; exactly at the limit it passes.

    Fixes: the match patterns used "\." inside plain string literals,
    which only works because Python passes unknown escapes through and
    now triggers a SyntaxWarning -- use raw strings. Also replaces the
    try/except ValueError/assert-False anti-pattern: calling directly
    lets an unexpected ValueError fail the test with a real traceback.
    """
    df = bad_facet_spacing_df
    with pytest.raises(
        ValueError, match=r"Use the facet_row_spacing argument to adjust this spacing\."
    ):
        px.scatter(df, x="x", y="y", facet_row="category", facet_row_spacing=0.01001)
    with pytest.raises(
        ValueError, match=r"Use the facet_col_spacing argument to adjust this spacing\."
    ):
        px.scatter(df, x="x", y="y", facet_col="category", facet_col_spacing=0.01001)
    # No error expected when the spacing is OK; a raise fails the test directly
    px.scatter(df, x="x", y="y", facet_row="category", facet_row_spacing=0.01)
    px.scatter(df, x="x", y="y", facet_col="category", facet_col_spacing=0.01)
def test_mismatched_facet_weights():
    """facet_row/col_weights must have exactly one entry per facet.

    Fixes the try/except ValueError/assert-False anti-pattern on the
    success-path checks: calling directly lets an unexpected ValueError
    fail the test with an informative traceback instead of a bare
    AssertionError.
    """
    dates = [pd.to_datetime("2010-1-1") + pd.DateOffset(days=i * 10) for i in range(300)]
    # three random-walk series spread over two panes (same draw order as before)
    y1 = pd.Series(np.random.normal(0.25, 1.0, 300)).cumsum()
    y2 = pd.Series(np.random.normal(0.1, 1.0, 300)).cumsum()
    y3 = pd.Series(np.random.normal(0.1, 1.0, 300)).cumsum()
    df = pd.concat([
        pd.DataFrame({'date': dates, 'value': y1, 'what': 'v1', 'pane': 'price'}),
        pd.DataFrame({'date': dates, 'value': y2, 'what': 'v2', 'pane': 'price'}),
        pd.DataFrame({'date': dates, 'value': y3, 'what': 'v3', 'pane': 'metrics'})
    ])
    # two panes + two weights: no error expected
    px.line(df, x='date', y='value', color='what', facet_row='pane', facet_row_weights=[2, 1])
    px.line(df, x='date', y='value', color='what', facet_col='pane', facet_col_weights=[2, 1])
    # two panes + three weights: must raise
    with pytest.raises(ValueError, match="mismatched facet_row_weights and # of facet rows"):
        px.line(df, x='date', y='value', color='what', facet_row='pane', facet_row_weights=[2, 1, 1])
    with pytest.raises(ValueError, match="mismatched facet_col_weights and # of facet columns"):
        px.line(df, x='date', y='value', color='what', facet_col='pane', facet_col_weights=[2, 1, 1])
|
'''
Plots a histogram showing the durations of the sound sampled.
This script was written to verify that the vast majority of sound samples
was exactly 4s of length (>95%).
@date 2017-05-19
'''
import wave
import contextlib
import sys
import os
from os import listdir
from os.path import isfile, isdir, join
import matplotlib.pyplot as plt
# if sys input is given to specify directory, take it
try:
    mypath = sys.argv[1]
# else use this default location
except IndexError:
    mypath = "../../TrainingData/UrbanSound8K/audio/"
# Get list of all directories/classes
dirs = [f for f in listdir(mypath) if isdir(join(mypath, f))]
durations = []
for dir_name in dirs:  # classes
    # adjust path variable
    path = mypath + dir_name + "/processed/"
    # Get list of all files in directory
    onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
    for fname in onlyfiles:  # for files in resp. class
        if fname.split(".")[-1] == "wav":
            # contextlib.closing guarantees the reader is closed; previously
            # the wave handle leaked (contextlib was imported but never used)
            with contextlib.closing(wave.open(path + fname, "r")) as f:
                frames = f.getnframes()
                rate = f.getframerate()
            # duration in seconds = frame count / sample rate
            durations.append(frames / float(rate))
print("Longest duration: " + str(max(durations)))
print("Shortest duration: " + str(min(durations)))
print("How often 4 seconds: " + str(durations.count(4.0)))
plt.hist(durations, bins=8)
plt.xlabel("duration in seconds")
plt.ylabel("number of occurences")
plt.show()
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
    """Patch: null out the budget dimension that contradicts budget_against.

    A Budget against a Cost Center must not keep a project value, and a
    Budget against a Project must not keep a cost_center value.
    """
    fields = {"Cost Center": "project", "Project": "cost_center"}
    for budget_against, field in fields.items():
        # `field` comes only from the fixed mapping above, so interpolating
        # it into the SQL with .format() is safe here
        frappe.db.sql(""" update `tabBudget` set {field} = null
            where budget_against = %s """.format(field = field), budget_against)
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import json
import os
import pkg_resources
import shutil
from urllib.parse import quote
import scipy
import numpy as np
import pandas as pd
import qiime2
from statsmodels.sandbox.stats.multicomp import multipletests
import q2templates
TEMPLATES = pkg_resources.resource_filename('q2_diversity', '_alpha')
def alpha_group_significance(output_dir: str, alpha_diversity: pd.Series,
                             metadata: qiime2.Metadata) -> None:
    """Render Kruskal-Wallis group-significance results into output_dir.

    For each categorical metadata column, tests whether alpha diversity
    differs across groups (overall and pairwise with FDR correction) and
    writes one jsonp payload per column plus a q2templates index page.
    """
    metadata_df = metadata.to_dataframe()
    # coerce what can be coerced, then keep only the *non*-numeric columns:
    # group significance is only meaningful for categorical metadata
    metadata_df = metadata_df.apply(pd.to_numeric, errors='ignore')
    pre_filtered_cols = set(metadata_df.columns)
    metadata_df = metadata_df.select_dtypes(exclude=[np.number])
    post_filtered_cols = set(metadata_df.columns)
    # columns dropped because they were numeric (reported in the template)
    filtered_numeric_categories = pre_filtered_cols - post_filtered_cols
    filtered_group_comparisons = []
    categories = metadata_df.columns
    metric_name = alpha_diversity.name
    if len(categories) == 0:
        raise ValueError('Only numeric data is present in metadata file.')
    filenames = []
    filtered_categories = []
    for category in categories:
        metadata_category = metadata.get_category(category).to_series()
        # align to the diversity samples, then drop empty-string/NaN entries
        metadata_category = metadata_category[alpha_diversity.index]
        metadata_category = metadata_category.replace(r'', np.nan).dropna()
        initial_data_length = alpha_diversity.shape[0]
        data = pd.concat([alpha_diversity, metadata_category], axis=1,
                         join='inner')
        filtered_data_length = data.shape[0]
        names = []
        groups = []
        # one group of diversity values per category level, labeled with n
        for name, group in data.groupby(metadata_category.name):
            names.append('%s (n=%d)' % (name, len(group)))
            groups.append(list(group[alpha_diversity.name]))
        # skip columns with a single group or with one group per sample --
        # neither yields a meaningful test
        if (len(groups) > 1 and len(groups) != len(data.index)):
            escaped_category = quote(category)
            filename = 'category-%s.jsonp' % escaped_category
            filenames.append(filename)
            # perform Kruskal-Wallis across all groups
            kw_H_all, kw_p_all = scipy.stats.mstats.kruskalwallis(*groups)
            # perform pairwise Kruskal-Wallis across all pairs of groups and
            # correct for multiple comparisons
            kw_H_pairwise = []
            for i in range(len(names)):
                for j in range(i):
                    try:
                        H, p = scipy.stats.mstats.kruskalwallis(groups[i],
                                                                groups[j])
                        kw_H_pairwise.append([names[j], names[i], H, p])
                    except ValueError:
                        # record the untestable pair (e.g. all-identical
                        # values) for reporting instead of failing
                        filtered_group_comparisons.append(
                            ['%s:%s' % (category, names[i]),
                             '%s:%s' % (category, names[j])])
            kw_H_pairwise = pd.DataFrame(
                kw_H_pairwise, columns=['Group 1', 'Group 2', 'H', 'p-value'])
            kw_H_pairwise.set_index(['Group 1', 'Group 2'], inplace=True)
            # Benjamini-Hochberg FDR correction over the pairwise p-values
            kw_H_pairwise['q-value'] = multipletests(
                kw_H_pairwise['p-value'], method='fdr_bh')[1]
            kw_H_pairwise.sort_index(inplace=True)
            pairwise_fn = 'kruskal-wallis-pairwise-%s.csv' % escaped_category
            pairwise_path = os.path.join(output_dir, pairwise_fn)
            kw_H_pairwise.to_csv(pairwise_path)
            # write the jsonp payload the visualization's JS consumes:
            # load_data(category, data, filter-counts, KW stats, html table,
            # csv name, metric name) -- argument order matters to the JS
            with open(os.path.join(output_dir, filename), 'w') as fh:
                df = pd.Series(groups, index=names)
                fh.write("load_data('%s'," % category)
                df.to_json(fh, orient='split')
                fh.write(",")
                json.dump({'initial': initial_data_length,
                           'filtered': filtered_data_length}, fh)
                fh.write(",")
                json.dump({'H': kw_H_all, 'p': kw_p_all}, fh)
                fh.write(",'")
                table = kw_H_pairwise.to_html(classes="table table-striped "
                                              "table-hover")
                table = table.replace('border="1"', 'border="0"')
                # the table is embedded in a single-quoted JS string literal,
                # so newlines must be stripped
                fh.write(table.replace('\n', ''))
                fh.write("','%s', '%s');" % (quote(pairwise_fn), metric_name))
        else:
            filtered_categories.append(category)
    index = os.path.join(
        TEMPLATES, 'alpha_group_significance_assets', 'index.html')
    q2templates.render(index, output_dir, context={
        'categories': [quote(fn) for fn in filenames],
        'filtered_numeric_categories': ', '.join(filtered_numeric_categories),
        'filtered_categories': ', '.join(filtered_categories),
        'filtered_group_comparisons':
            '; '.join([' vs '.join(e) for e in filtered_group_comparisons])})
    # static JS/CSS assets for the rendered page
    shutil.copytree(
        os.path.join(TEMPLATES, 'alpha_group_significance_assets', 'dst'),
        os.path.join(output_dir, 'dist'))
# Correlation methods supported by alpha_correlation, keyed by method name
_alpha_correlation_fns = {'spearman': scipy.stats.spearmanr,
                          'pearson': scipy.stats.pearsonr}
def alpha_correlation(output_dir: str,
                      alpha_diversity: pd.Series,
                      metadata: qiime2.Metadata,
                      method: str='spearman') -> None:
    """Correlate alpha diversity with each numeric metadata column.

    Writes one jsonp payload per column (consumed by the visualization's
    JS) plus a q2templates index page into output_dir.

    Raises ValueError for an unknown method or when the metadata has no
    numeric columns at all.
    """
    try:
        alpha_correlation_fn = _alpha_correlation_fns[method]
    except KeyError:
        raise ValueError('Unknown alpha correlation method %s. The available '
                         'options are %s.' %
                         (method, ', '.join(_alpha_correlation_fns.keys())))
    metadata_df = metadata.to_dataframe()
    # coerce what can be coerced, then keep only the numeric columns:
    # correlation is only meaningful for numeric metadata
    metadata_df = metadata_df.apply(pd.to_numeric, errors='ignore')
    pre_filtered_cols = set(metadata_df.columns)
    metadata_df = metadata_df.select_dtypes(include=[np.number])
    post_filtered_cols = set(metadata_df.columns)
    # columns dropped because they were non-numeric (reported in the template)
    filtered_categories = pre_filtered_cols - post_filtered_cols
    categories = metadata_df.columns
    if len(categories) == 0:
        raise ValueError('Only non-numeric data is present in metadata file.')
    filenames = []
    for category in categories:
        metadata_category = metadata_df[category]
        # align to the diversity samples and drop missing values
        metadata_category = metadata_category[alpha_diversity.index]
        metadata_category = metadata_category.dropna()
        # create a dataframe containing the data to be correlated, and drop
        # any samples that have no data in either column
        df = pd.concat([metadata_category, alpha_diversity], axis=1,
                       join='inner')
        # compute correlation
        correlation_result = alpha_correlation_fn(df[metadata_category.name],
                                                  df[alpha_diversity.name])
        # warn in the UI when samples were dropped by the inner join
        warning = None
        if alpha_diversity.shape[0] != df.shape[0]:
            warning = {'initial': alpha_diversity.shape[0],
                       'method': method.title(),
                       'filtered': df.shape[0]}
        escaped_category = quote(category)
        filename = 'category-%s.jsonp' % escaped_category
        filenames.append(filename)
        # jsonp payload: load_data(category, data, warning, stats)
        with open(os.path.join(output_dir, filename), 'w') as fh:
            fh.write("load_data('%s'," % category)
            df.to_json(fh, orient='split')
            fh.write(",")
            json.dump(warning, fh)
            fh.write(",")
            json.dump({
                'method': method.title(),
                'testStat': '%1.4f' % correlation_result[0],
                'pVal': '%1.4f' % correlation_result[1],
                'sampleSize': df.shape[0]}, fh)
            fh.write(");")
    index = os.path.join(TEMPLATES, 'alpha_correlation_assets', 'index.html')
    q2templates.render(index, output_dir, context={
        'categories': [quote(fn) for fn in filenames],
        'filtered_categories': ', '.join(filtered_categories)})
    # static JS/CSS assets for the rendered page
    shutil.copytree(os.path.join(TEMPLATES, 'alpha_correlation_assets', 'dst'),
                    os.path.join(output_dir, 'dist'))
|
import numpy as np
import time
import torch
from torch import nn, optim, distributions
import torch.backends.cudnn as cudnn
cudnn.benchmark = False
from batchgenerators.dataloading import MultiThreadedAugmenter
from trixi.util import Config, ResultLogDict
from trixi.experiment import PytorchExperiment
from model import GenerativeQueryNetwork
from util import get_default_experiment_parser, set_seeds, run_experiment
from data import loader
DESCRIPTION = """This experiment just tries to reproduce GQN results,
specifically for the Shepard-Metzler-5 dataset."""
def make_defaults():
    """Build the default experiment Config and named modifier configs.

    Returns ({"DEFAULTS": Config}, MODS) as expected by run_experiment;
    MODS entries are partial configs merged over DEFAULTS when selected.
    """
    DEFAULTS = Config(
        # Base
        name="gqn",
        description=DESCRIPTION,
        n_epochs=1000000,
        batch_size=36,
        batch_size_val=36,
        seed=1,
        device="cuda",
        # Data
        split_val=3,  # index for set of 5
        split_test=4,  # index for set of 5
        data_module=loader,
        dataset="shepard_metzler_5_parts",
        data_dir=None,  # will be set for data_module if not None
        debug=0,  # 1 for single repeating batch, 2 for single viewpoint (i.e. reconstruct known images)
        generator_train=loader.RandomBatchGenerator,
        generator_val=loader.LinearBatchGenerator,
        num_viewpoints_val=8,  # use this many viewpoints in validation
        shuffle_viewpoints_val=False,
        augmenter=MultiThreadedAugmenter,
        augmenter_kwargs={"num_processes": 8},
        # Model
        model=GenerativeQueryNetwork,
        model_kwargs={
            "in_channels": 3,
            "query_channels": 7,
            "r_channels": 256,
            "encoder_kwargs": {
                "activation_op": nn.ReLU
            },
            "decoder_kwargs": {
                "z_channels": 64,
                "h_channels": 128,
                "scale": 4,
                "core_repeat": 12
            }
        },
        model_init_weights_args=None,  # e.g. [nn.init.kaiming_normal_, 1e-2],
        model_init_bias_args=None,  # e.g. [nn.init.constant_, 0],
        # Learning
        optimizer=optim.Adam,
        optimizer_kwargs={"weight_decay": 1e-5},
        lr_initial=5e-4,
        lr_final=5e-5,
        lr_cutoff=16e4,  # lr is increased linearly in cutoff epochs
        sigma_initial=2.0,
        sigma_final=0.7,
        sigma_cutoff=2e4,  # sigma is increased linearly in cutoff epochs
        kl_weight_initial=0.05,
        kl_weight_final=1.0,
        kl_weight_cutoff=1e5,  # kl_weight is increased linearly in cutoff epochs
        nll_weight=1.0,
        # Logging
        backup_every=10000,
        validate_every=1000,
        validate_subset=0.01,  # validate only this percentage randomly
        show_every=100,
        val_example_samples=10,  # draw this many random samples for last validation item
        test_on_val=True,  # test on the validation set
    )
    # modifier: share the recurrent decoder core across generation steps
    SHAREDCORES = Config(
        model_kwargs={"decoder_kwargs": {"core_shared": True}}
    )
    MODS = {
        "SHAREDCORES": SHAREDCORES
    }
    return {"DEFAULTS": DEFAULTS}, MODS
class GQNExperiment(PytorchExperiment):
    """Trixi experiment reproducing GQN training on Shepard-Metzler data.

    Fix: setup_model contained a leftover interactive debug hook
    (`import IPython; IPython.embed()`) in the init_bias branch, which
    would hang any non-interactive run with model_init_bias_args set;
    it has been removed.
    """

    def setup(self):
        """Seed RNGs, then build data pipelines and the model."""
        set_seeds(self.config.seed, "cuda" in self.config.device)
        self.setup_data()
        self.setup_model()
        # zero-padded epoch formatting used for checkpoint/image names
        self.config.epoch_str_template = "{:0" + str(len(str(self.config.n_epochs))) + "d}"
        self.clog.show_text(self.model.__repr__(), "Model")

    def setup_data(self):
        """Create train/val/test splits, batch generators and augmenters."""
        c = self.config
        if c.data_dir is not None:
            c.data_module.data_dir = c.data_dir
        # set actual data (memory-mapped so the arrays are not loaded eagerly)
        self.data_train_val = c.data_module.load(c.dataset, "train", image_kwargs={"mmap_mode": "r"})
        self.data_test = c.data_module.load(c.dataset, "test", image_kwargs={"mmap_mode": "r"})
        # train, val, test split: two of the five index sets are held out
        indices_split = c.data_module.split(c.dataset)
        indices_val = indices_split[c.split_val]
        indices_test = indices_split[c.split_test]
        indices_train = []
        for i in range(5):
            if i not in (c.split_val, c.split_test):
                indices_train += indices_split[i]
        indices_train = sorted(indices_train)
        # for debugging we only use a single batch and validate on training data
        if c.debug > 0:
            indices_train = indices_train[:c.batch_size]
            indices_val = indices_train
            indices_test = indices_test[:c.batch_size_val]
        # construct generators
        self.generator_train = c.generator_train(
            self.data_train_val,
            c.batch_size,
            data_order=indices_train,
            num_viewpoints=1 if c.debug == 2 else "random",
            shuffle_viewpoints=not c.debug,
            number_of_threads_in_multithreaded=c.augmenter_kwargs.num_processes)
        self.generator_val = c.generator_val(
            self.data_train_val,
            c.batch_size_val,
            data_order=indices_val,
            num_viewpoints=1 if c.debug == 2 else c.num_viewpoints_val,
            shuffle_viewpoints=c.shuffle_viewpoints_val,
            number_of_threads_in_multithreaded=c.augmenter_kwargs.num_processes)
        self.generator_test = c.generator_val(
            self.data_test,
            c.batch_size_val,
            data_order=indices_test,
            num_viewpoints=1 if c.debug == 2 else c.num_viewpoints_val,
            number_of_threads_in_multithreaded=c.augmenter_kwargs.num_processes)
        # construct augmenters (no actual augmentation at the moment, just multithreading)
        self.augmenter_train = c.augmenter(self.generator_train, None, **c.augmenter_kwargs)
        self.augmenter_val = c.augmenter(self.generator_val, None, **c.augmenter_kwargs)
        self.augmenter_test = c.augmenter(self.generator_test, None, **c.augmenter_kwargs)

    def setup_model(self):
        """Instantiate model, optional weight/bias init, and the optimizer."""
        c = self.config
        # intialize model and weights
        self.model = c.model(**c.model_kwargs)
        if c.model_init_weights_args is not None and hasattr(self.model, "init_weights"):
            self.model.init_weights(*c.model_init_weights_args)
        if c.model_init_bias_args is not None and hasattr(self.model, "init_bias"):
            # NOTE: a stray `import IPython; IPython.embed()` debug hook was
            # removed from this branch
            self.model.init_bias(*c.model_init_bias_args)
        # optimization
        self.optimizer = c.optimizer(self.model.parameters(), lr=c.lr_initial, **c.optimizer_kwargs)
        self.lr = c.lr_initial
        self.sigma = c.sigma_initial

    def _setup_internal(self):
        """Extend trixi internals: persist full config, use running-mean results."""
        super(GQNExperiment, self)._setup_internal()
        self.elog.save_config(self.config, "config")  # default PytorchExperiment only saves self._config_raw
        # we want a results dictionary with running mean, so close default and construct new
        self.results.close()
        self.results = ResultLogDict("results-log.json", base_dir=self.elog.result_dir, mode="w", running_mean_length=self.config.show_every)

    def prepare(self):
        """Move all registered modules to the configured device."""
        # move everything to selected device
        for name, model in self.get_pytorch_modules().items():
            model.to(self.config.device)

    def train(self, epoch):
        """Run one training step (one batch) and log its summary."""
        c = self.config
        t0 = time.time()
        # set learning rates, sigmas, loss weights
        self.train_prepare(epoch)
        # get data
        data = next(self.augmenter_train)
        data["data"] = torch.from_numpy(data["data"]).to(dtype=torch.float32, device=c.device)
        data["viewpoints"] = torch.from_numpy(data["viewpoints"]).to(dtype=torch.float32, device=c.device)
        # forward
        image_pred, image_query, representation, kl = self.model(data["data"], data["viewpoints"], data["num_viewpoints"])
        loss_elbo, loss_nll, loss_kl = self.criterion(image_pred, image_query, kl)
        # backward
        loss_elbo.backward()
        self.optimizer.step()
        self.optimizer.zero_grad()
        training_time = time.time() - t0
        # use data dictionary as training summary
        data["data"] = data["data"].cpu()
        data["viewpoints"] = data["viewpoints"].cpu()
        data["image_query"] = image_query.cpu()  # also in "data" but we're lazy
        data["image_pred"] = image_pred.cpu()
        data["loss_elbo"] = loss_elbo.item()
        data["loss_nll"] = loss_nll.item()
        data["loss_kl"] = loss_kl.item()
        data["training_time"] = training_time
        self.train_log(data, epoch)

    def train_prepare(self, epoch):
        """Anneal lr/sigma/KL weight for this epoch and reset gradients."""
        c = self.config
        # sets parameters as is done in the paper, additionally start with lower KL weight
        self.lr = max(c.lr_final + (c.lr_initial - c.lr_final) * (1 - epoch / c.lr_cutoff), c.lr_final)
        self.sigma = max(c.sigma_final + (c.sigma_initial - c.sigma_final) * (1 - epoch / c.sigma_cutoff), c.sigma_final)
        # Adam bias-correction-style rescaling of the annealed lr
        _lr = self.lr * np.sqrt(1 - 0.999**(epoch+1)) / (1 - 0.9**(epoch+1))
        for group in self.optimizer.param_groups:
            group["lr"] = _lr
        self.nll_weight = c.nll_weight
        self.kl_weight = min(c.kl_weight_final, c.kl_weight_initial + (c.kl_weight_final - c.kl_weight_initial) * epoch / c.kl_weight_cutoff)
        self.model.train()
        self.optimizer.zero_grad()

    def criterion(self, image_predicted, image_query, kl, batch_mean=True):
        """Weighted ELBO: returns (elbo, nll, kl), per batch or per sample."""
        # mean over batch but sum over individual
        nll = -distributions.Normal(image_predicted, self.sigma).log_prob(image_query)
        # nll = nn.MSELoss(reduction="none")(image_predicted, image_query)
        nll = nll.view(nll.shape[0], -1)
        kl = kl.view(kl.shape[0], -1)
        if batch_mean:
            nll = nll.mean(0)
            kl = kl.mean(0)
        nll = nll.sum(-1)
        kl = kl.sum(-1)
        elbo = self.nll_weight * nll + self.kl_weight * kl
        return elbo, nll, kl

    def train_log(self, summary, epoch):
        """Log losses and reference/reconstruction image grids for one step."""
        _backup = (epoch + 1) % self.config.backup_every == 0
        _show = (epoch + 1) % self.config.show_every == 0
        self.elog.show_text("{}/{}: {}".format(epoch, self.config.n_epochs, summary["training_time"]), name="Training Time")
        # add_result will show graphs and log to json file at the same time
        self.add_result(summary["loss_elbo"], "loss_elbo", epoch, "Loss", plot_result=_show, plot_running_mean=True)
        self.add_result(summary["loss_nll"], "loss_nll", epoch, "Loss", plot_result=_show, plot_running_mean=True)
        self.add_result(summary["loss_kl"], "loss_kl", epoch, "Loss", plot_result=_show, plot_running_mean=True)
        self.make_images(summary["image_query"],
                         "reference",
                         epoch,
                         save=_backup,
                         show=_show)
        self.make_images(summary["image_pred"],
                         "reconstruction",
                         epoch,
                         save=_backup,
                         show=_show)

    def validate(self, epoch):
        """Validate on a (possibly random) subset every validate_every epochs."""
        c = self.config
        if (epoch+1) % c.validate_every == 0:
            with torch.no_grad():
                t0 = time.time()
                self.model.eval()
                validation_scores = []
                info = {}  # holds info on score array axes
                info["dims"] = ["Object Index", "Loss"]
                info["coords"] = {"Object Index": [], "Loss": ["NLL", "KL", "ELBO"]}
                example_output_shown = False
                for d, data in enumerate(self.augmenter_val):
                    # this ensures we always validate at least one item even for very small subset ratios
                    if c.validate_subset not in (False, None, 1.) and c.debug == 0:
                        rand_number = np.random.rand()
                        if rand_number < 1 - c.validate_subset:
                            if not (d * c.batch_size_val >= len(self.generator_val) - 1 and len(validation_scores) == 0):
                                continue
                    # get data
                    data["data"] = torch.from_numpy(data["data"]).to(dtype=torch.float32, device=c.device)
                    data["viewpoints"] = torch.from_numpy(data["viewpoints"]).to(dtype=torch.float32, device=c.device)
                    # forward (per-sample losses, no batch mean)
                    image_pred, image_query, representation, kl = self.model(data["data"], data["viewpoints"], data["num_viewpoints"])
                    loss_elbo, loss_nll, loss_kl = self.criterion(image_pred, image_query, kl, batch_mean=False)
                    # use data dict as summary dict
                    data["data"] = data["data"].cpu()
                    data["viewpoints"] = data["viewpoints"].cpu()
                    data["image_query"] = image_query.cpu()
                    data["image_pred"] = image_pred.cpu()
                    data["loss_elbo"] = loss_elbo.cpu()
                    data["loss_nll"] = loss_nll.cpu()
                    data["loss_kl"] = loss_kl.cpu()
                    current_scores = np.array([data["loss_nll"].cpu().numpy(),
                                               data["loss_kl"].cpu().numpy(),
                                               data["loss_elbo"].cpu().numpy()]).T
                    validation_scores.append(current_scores)
                    info["coords"]["Object Index"].append(data["data_indices"])
                    self.make_images(data["image_query"],
                                     "val/{}_reference".format(d),
                                     epoch,
                                     save=True,
                                     show=False)
                    self.make_images(data["image_pred"],
                                     "val/{}_prediction".format(d),
                                     epoch,
                                     save=True,
                                     show=False)
                    self.make_images(data["data"],
                                     "val/{}_seen".format(d),
                                     epoch,
                                     save=True,
                                     show=False,
                                     images_per_row=data["data"].shape[0] // c.batch_size_val)
                    # only show one validation item
                    if not example_output_shown:
                        self.make_images(data["image_query"],
                                         "val_reference",
                                         epoch,
                                         save=False,
                                         show=True)
                        self.make_images(data["image_pred"],
                                         "val_prediction",
                                         epoch,
                                         save=False,
                                         show=True)
                        self.make_images(data["data"],
                                         "val_seen",
                                         epoch,
                                         save=False,
                                         show=True,
                                         images_per_row=data["data"].shape[0] // c.batch_size_val)
                        example_output_shown = True
                validation_time = time.time() - t0
                validation_scores = np.concatenate(validation_scores, 0)
                info["coords"]["Object Index"] = np.concatenate(info["coords"]["Object Index"], 0)
                # there can be duplicates in the last batch
                for i in range(c.batch_size_val):
                    if info["coords"]["Object Index"][-(i+1)] not in info["coords"]["Object Index"][:-(i+1)]:
                        break
                if i > 0:
                    validation_scores = validation_scores[:-i]
                    info["coords"]["Object Index"] = info["coords"]["Object Index"][:-i]
                summary = {}
                summary["validation_time"] = validation_time
                summary["validation_scores"] = validation_scores
                summary["validation_info"] = info
                self.validate_log(summary, epoch)
                # draw a few different samples for the last data item
                # item could have been skipped, so we might need to transfer again
                if c.val_example_samples > 0:
                    if isinstance(data["data"], np.ndarray):
                        data["data"] = torch.from_numpy(data["data"]).to(dtype=torch.float32, device=c.device)
                        data["viewpoints"] = torch.from_numpy(data["viewpoints"]).to(dtype=torch.float32, device=c.device)
                    images_context, viewpoints_context, _, viewpoint_query =\
                        self.model.split_batch(data["data"],
                                               data["viewpoints"],
                                               data["num_viewpoints"])
                    images_context = images_context.to(device=c.device)
                    viewpoints_context = viewpoints_context.to(device=c.device)
                    viewpoint_query = viewpoint_query.to(device=c.device)
                    samples = []
                    for i in range(c.val_example_samples):
                        samples.append(self.model.sample(images_context, viewpoints_context, viewpoint_query, data["num_viewpoints"] - 1, self.sigma).cpu())
                    samples = torch.cat(samples, 0)
                    # samples should now be (batch * samples, 3, 64, 64)
                    self.make_images(samples, "samples", epoch, save=True, show=True, images_per_row=c.batch_size_val)

    def validate_log(self, summary, epoch):
        """Persist per-object scores and log their means for one validation run."""
        epoch_str = self.config.epoch_str_template.format(epoch)
        # mean over objects; columns are [NLL, KL, ELBO]
        validation_scores_mean = np.nanmean(summary["validation_scores"], 0)
        self.elog.save_numpy_data(summary["validation_scores"], "validation/{}.npy".format(epoch_str))
        self.elog.save_dict(summary["validation_info"], "validation/{}.json".format(epoch_str))
        self.elog.show_text("{}/{}: {}".format(epoch, self.config.n_epochs, summary["validation_time"]), name="Validation Time")
        self.add_result(float(validation_scores_mean[2]), "loss_elbo_val", epoch, "Loss")
        self.add_result(float(validation_scores_mean[0]), "loss_nll_val", epoch, "Loss")
        self.add_result(float(validation_scores_mean[1]), "loss_kl_val", epoch, "Loss")

    def _end_epoch_internal(self, epoch):
        """Save results every epoch and a checkpoint every backup_every epochs."""
        self.save_results()
        if (epoch+1) % self.config.backup_every == 0:
            self.save_temp_checkpoint()

    def make_images(self,
                    images,
                    name,
                    epoch,
                    save=False,
                    show=True,
                    images_per_row=None):
        """Show and/or save an image grid; defaults to a square layout."""
        n_images = images.shape[0]
        if images_per_row is None:
            images_per_row = int(np.sqrt(n_images))
        if show and self.vlog is not None:
            self.vlog.show_image_grid(images, name,
                                      image_args={"normalize": True,
                                                  "nrow": images_per_row,
                                                  "pad_value": 1})
        if save and self.elog is not None:
            name = self.config.epoch_str_template.format(epoch) + "/" + name
            self.elog.show_image_grid(images, name,
                                      image_args={"normalize": True,
                                                  "nrow": images_per_row,
                                                  "pad_value": 1})

    def test(self):
        """Intentionally a no-op; evaluation happens via validate()."""
        pass
if __name__ == '__main__':
    # parse known CLI args, build default config (+mods), launch the experiment
    parser = get_default_experiment_parser()
    args, _ = parser.parse_known_args()
    DEFAULTS, MODS = make_defaults()
    run_experiment(GQNExperiment,
                   DEFAULTS,
                   args,
                   mods=MODS,
                   explogger_kwargs=dict(folder_format="{experiment_name}_%Y%m%d-%H%M%S"),
                   globs=globals(),
                   resume_save_types=("model", "simple", "th_vars", "results"))
|
from setuptools import find_packages
from setuptools import setup
# Package manifest for the ros2srv CLI extension.
setup(
    # distribution identity
    name='ros2srv',
    version='0.6.3',
    description='The srv command for ROS 2 command line tools.',
    long_description="""\
The package provides the srv command for the ROS 2 command line tools.""",
    license='Apache License, Version 2.0',
    # contents and dependencies
    packages=find_packages(exclude=['test']),
    install_requires=['ros2cli'],
    tests_require=['pytest'],
    zip_safe=True,
    # people and links
    author='Dirk Thomas',
    author_email='dthomas@osrfoundation.org',
    maintainer='Dirk Thomas',
    maintainer_email='dthomas@osrfoundation.org',
    url='https://github.com/ros2/ros2cli/tree/master/ros2srv',
    download_url='https://github.com/ros2/ros2cli/releases',
    keywords=[],
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
    ],
    # plugin registration: the srv command, its verb extension point, and verbs
    entry_points={
        'ros2cli.command': [
            'srv = ros2srv.command.srv:SrvCommand',
        ],
        'ros2cli.extension_point': [
            'ros2srv.verb = ros2srv.verb:VerbExtension',
        ],
        'ros2srv.verb': [
            'list = ros2srv.verb.list:ListVerb',
            'package = ros2srv.verb.package:PackageVerb',
            'packages = ros2srv.verb.packages:PackagesVerb',
            'show = ros2srv.verb.show:ShowVerb',
        ],
    }
)
|
# Init Solution
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
from IPython.display import display, Markdown
# Init Solution completed
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.metrics import accuracy_score, confusion_matrix
# --- Load the raw loan data set ---
display(Markdown("##### Loading Data"))
data = pd.read_csv("./Ex07_02_Data.csv")
display(data.head(5))
# --- Build label names and the feature column list ---
display(Markdown("##### Creating Labels and Features"))
# class display names; order assumed to match the model's class order -- TODO
# confirm against model.classes_ after fitting
labels = ["No", "Yes"]
# every column except the identifier and the target is used as a feature
features = data.columns.drop(["Loan_ID", "LoanStatus"]).values
display(labels)
display(features)
# --- 70/30 train/test split with a fixed seed for reproducibility ---
display(Markdown("##### Creating Train & Test sets"))
X = data[features]
y = data["LoanStatus"]
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.7, random_state=42)
display(f"Train set size: {len(X_train)}")
display(f"Test set size: {len(X_test)}")
# --- Fit a leaf-size-limited decision tree and score it on the test set ---
display(Markdown("##### Creating, training and using the model"))
# min_samples_leaf limits overfitting to tiny sample groups
model = DecisionTreeClassifier(min_samples_leaf=24)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
display(accuracy_score(y_test, y_pred))
# --- Confusion matrix (transposed: heatmap rows are predictions, columns truth) ---
display(Markdown("##### Confusion Matrix"))
matrix = confusion_matrix(y_test, y_pred)
fig, ax = plt.subplots(figsize=(5,5))
sns.heatmap(matrix.T, ax=ax, square=True, annot=True, fmt="d", cbar=False, xticklabels=labels, yticklabels=labels)
ax.set(xlabel="True Labels", ylabel="Predicted Labels")
plt.show()
# --- Visualize the fitted tree ---
display(Markdown("##### Plotting the Tree"))
fig, ax = plt.subplots(figsize=(20, 10))
plot_tree(model, ax=ax, filled=True, rounded=True, feature_names=features, class_names=labels, fontsize=10)
plt.show()
display(Markdown("##### The Problem"))
display(Markdown("The decision is basically made if a credit history exists or not. After that decision, classes won't change."))
# --- Score new, unlabeled applications with the trained model ---
display(Markdown("##### Loading new Data"))
data_use = pd.read_csv("./Ex07_02_Data_Use.csv")
display(data_use.head(5))
display(Markdown("##### Predicting the Probabilities"))
y_proba = model.predict_proba(data_use.drop("Loan_ID", axis=1))
display(y_proba[:5])
# --- Wrap probabilities in a DataFrame, putting the "Yes" column first ---
display(Markdown("##### Converting Probabilities to DataFrame"))
loans = pd.DataFrame(y_proba, columns=["No", "Yes"])[["Yes", "No"]]
display(loans.head(5))
display(Markdown("##### Combining Probabilities with Data"))
loan_proba = pd.concat([data_use["Loan_ID"], loans, data_use.drop("Loan_ID", axis=1)], axis=1)
display(loan_proba.head(5))
from itertools import product
import random
import sys
# Make the study's utility modules importable regardless of the CWD.
sys.path.append(
    '/home/rpl/Documents/rasmus/crazyswarm/ros_ws/src/crazyswarm/scripts/perceived-safety-study/utils'
)
from Participant import Participant
from globalVariables import DRONE_START_X, DRONE_START_Y, GOAL_OFFSET_X, GOAL_OFFSET_Y, HEIGHT_OFFSET, PATH_TO_ROOT, POSSIBLE_ACCELERATIONS
from helpers import userInput
import numpy as np
from matplotlib import pyplot as plt
from SimpleTrajectory import SimpleTrajectory
import os
import platform
# The Crazyflie hardware controller is only importable on the (non-mac) lab machine.
USING_MAC = platform.system() == "Darwin"
if not USING_MAC:
    from crazyflieController import CrazyflieController, Pose
from planner.cbFunctions import CBF
from planner.drone import DroneGoalState, DroneState
from planner.flyZone import MOCAP_FLY_ZONE
from planner.motionPlanner import Planner, recordFinalTrajectory
from position import Position
import seaborn as sns
from trajectory import Trajectory
from trajectoryUtils import getLatestTrajectory
sns.set_theme()
from mpl_toolkits.mplot3d import Axes3D
import csv
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
# Ratings are collected on the integer scale [-MAX_INPUT_VAL, MAX_INPUT_VAL].
MAX_INPUT_VAL = 3
class GPValue:
    """One grid cell of the GP posterior: its grid indices, the parameter
    values, the predicted perceived safety and its standard deviation."""

    def __init__(self, aMaxIdx, epsIdx, aMax, eps, safety, std):
        self.aMaxIdx = aMaxIdx
        self.epsIdx = epsIdx
        self.aMax = aMax
        self.eps = eps
        self.safety = safety
        self.std = std

    def __str__(self):
        fields = (f"safety={self.safety}", f"std={self.std}",
                  f"aMax={self.aMax}", f"eps={self.eps}")
        return "\n".join(fields)
class GaussianProcess:
    """Gaussian-process model of perceived safety over the CBF parameter
    space (epsilon x max deceleration).

    Each round: pick the grid point with the highest posterior uncertainty,
    plan (and optionally fly) the corresponding trajectory, ask the
    participant for a rating in [-MAX_INPUT_VAL, MAX_INPUT_VAL], refit the
    GP, and persist the full observation history to CSV.
    """

    # Grid resolution along each parameter axis.
    NUM_VALUES = 50

    def __init__(self, pID, safetyFunction, csvFileName, savedTrajectoriesDir):
        self.currentParticipant = Participant.getParticipantById(pID)
        self.sfName = safetyFunction
        self.csvFileName = csvFileName
        self.savedTrajectoriesDir = savedTrajectoriesDir
        self.initGP()
        self.initDataFromCsv()

    def initGP(self):
        """Create the GP regressor and discretize both parameter ranges."""
        noise_std = 0.75  # assumed rating noise; enters the GP as alpha = noise^2
        self.gp = GaussianProcessRegressor(kernel=RBF(),
                                           alpha=noise_std**2,
                                           n_restarts_optimizer=5)
        self.epsilonRange = np.linspace(CBF.EPSILON_MIN, CBF.EPSILON_MAX,
                                        self.NUM_VALUES)
        self.decelerationMaxRange = np.linspace(CBF.DECELERATION_MAX_MIN,
                                                CBF.DECELERATION_MAX_MAX,
                                                self.NUM_VALUES)
        # predictions[i, j] / standard_deviations[i, j] refer to the pair
        # (epsilonRange[i], decelerationMaxRange[j]).
        self.predictions = np.zeros(
            (self.epsilonRange.size, self.decelerationMaxRange.size))
        self.standard_deviations = np.zeros(
            (self.epsilonRange.size, self.decelerationMaxRange.size))
        # The first query point is a random grid point.
        self.nextEpsilonToCheck = round(np.random.choice(self.epsilonRange), 2)
        self.nextdecelerationMaxToCheck = round(
            np.random.choice(self.decelerationMaxRange), 2)

    def initDataFromCsv(self):
        """Restore previous observations from the CSV file, if present."""
        self.X = []
        self.perceivedSafety = []
        self.bestParameterPair = []
        self.bestPerceivedSafety = []
        self.participantID = []
        try:
            with open(self.csvFileName, newline='') as csvfile:
                csvData = list(csv.reader(csvfile, delimiter=','))[1:]
                eps = [float(row[1]) for row in csvData]
                a_max = [float(row[2]) for row in csvData]
                numDataPoints = len(eps)
                self.X = [[eps[i], a_max[i]] for i in range(numDataPoints)]
                self.perceivedSafety = [int(row[3]) for row in csvData]
                curr_best_eps = [float(row[4]) for row in csvData]
                curr_best_a_max = [float(row[5]) for row in csvData]
                self.bestParameterPair = [[
                    curr_best_eps[i], curr_best_a_max[i]
                ] for i in range(numDataPoints)]
                self.bestPerceivedSafety = [float(row[6]) for row in csvData]
                self.participantID = [int(row[7]) for row in csvData]
                self.updatePredictions()
                self.updateNextValuesToAskFor()
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # are not silently swallowed.
        except Exception:
            print("No previous data found")
        self.updateCsv()

    @classmethod
    def throughTerminalInput(cls):
        """Alternate constructor: prompt for the participant id and the
        safety-function name on the terminal."""
        pID = userInput("Participant id: ", int)
        safetyFunction = userInput("Safety function name: ")
        return cls(pID=pID,
                   safetyFunction=safetyFunction,
                   csvFileName=f"gpData/{safetyFunction}.csv",
                   savedTrajectoriesDir=
                   f"{PATH_TO_ROOT}/savedTrajectories/{safetyFunction}")

    def addData(self, perceivedSafety):
        """Record a rating for the last queried parameter pair, refit the GP
        and append the current best pair (posterior mean closest to 0)."""
        self.X.append(
            [self.nextEpsilonToCheck, self.nextdecelerationMaxToCheck])
        self.perceivedSafety.append(perceivedSafety)
        self.updatePredictions()
        # Best pair = grid point whose predicted safety is closest to 0 ("perfect").
        bestParameterPair = np.argmin(np.abs(self.predictions))
        eIdx, aMaxIdx = np.unravel_index(bestParameterPair,
                                         (self.NUM_VALUES, self.NUM_VALUES),
                                         order='C')
        self.bestParameterPair.append(
            [self.epsilonRange[eIdx], self.decelerationMaxRange[aMaxIdx]])
        self.bestPerceivedSafety.append(self.predictions[eIdx][aMaxIdx])
        self.participantID.append(self.currentParticipant.id)
        self.updateCsv()

    def updateCsv(self):
        """Rewrite the full observation history to the CSV file."""
        # newline='' per the csv module docs so rows aren't double-spaced
        # when the script runs on Windows.
        with open(self.csvFileName, 'w', encoding='UTF8', newline='') as f:
            writer = csv.writer(f)
            header = [
                "id", "eps", "aMax", "perceivedSafety", "bestEps", "bestAMax",
                "bestPerceivedSafety", "participantID"
            ]
            writer.writerow(header)
            numDataPoints = len(self.perceivedSafety)
            for i in range(numDataPoints):
                writer.writerow([
                    i, self.X[i][0], self.X[i][1], self.perceivedSafety[i],
                    self.bestParameterPair[i][0], self.bestParameterPair[i][1],
                    self.bestPerceivedSafety[i], self.participantID[i]
                ])

    def updatePredictions(self):
        """Refit the GP on all observations and recompute the posterior
        mean and std on every grid point."""
        formatedperceivedSafety = np.array([
            np.array([currentperceivedSafety])
            for currentperceivedSafety in self.perceivedSafety
        ])
        formattedX = np.array(self.X)
        self.gp.fit(formattedX, formatedperceivedSafety)
        for i, eCurr in enumerate(self.epsilonRange):
            for j, aMaxCurr in enumerate(self.decelerationMaxRange):
                X_curr_to_predict = np.array([[eCurr, aMaxCurr]])
                mean_prediction, std_prediction = self.gp.predict(
                    X_curr_to_predict, return_std=True)
                self.predictions[i, j] = mean_prediction[0]
                self.standard_deviations[i, j] = std_prediction[0]

    def updateNextValuesToAskFor(self):
        """Active-learning step: query next where the posterior std is maximal."""
        parameterComboWithLeastInfo = np.argmax(self.standard_deviations)
        eIdx, aMaxIdx = np.unravel_index(parameterComboWithLeastInfo,
                                         (self.NUM_VALUES, self.NUM_VALUES),
                                         order='C')
        self.nextEpsilonToCheck = self.epsilonRange[eIdx]
        self.nextdecelerationMaxToCheck = self.decelerationMaxRange[aMaxIdx]

    def plotCurrentPredictionAsHeatmap(self):
        """Heatmap of the posterior mean (rows = epsilon, cols = a_max)."""
        ax = sns.heatmap(self.predictions)
        ax.set_ylabel("Epsilon")
        ax.set_yticklabels([str(round(e, 2)) for e in self.epsilonRange])
        ax.set_xlabel("a_max")
        ax.set_xticklabels(
            [str(round(aMax, 2)) for aMax in self.decelerationMaxRange])
        plt.show()

    def plotCurrentPredictionAs3d(self):
        """3-D surface of the posterior mean, with the latest best pair
        marked in red and the zero-level ("perfect") contour drawn."""
        ax = plt.axes(projection='3d')
        X, Y = np.meshgrid(self.epsilonRange, self.decelerationMaxRange)
        xx = np.vstack((X.flatten(), Y.flatten())).T
        p = self.gp.predict(xx).reshape(len(X), len(Y))
        ax.set_xlabel("Epsilon")
        ax.set_ylabel("Max deceleration")
        ax.set_zlabel("Perceived safety")
        ax.set_zlim(-MAX_INPUT_VAL, MAX_INPUT_VAL)
        ax.plot_surface(X,
                        Y,
                        p,
                        rstride=1,
                        cstride=1,
                        cmap='viridis',
                        edgecolor='none',
                        alpha=0.75,
                        zorder=100)
        ax.scatter3D(self.bestParameterPair[-1][0],
                     self.bestParameterPair[-1][1],
                     self.bestPerceivedSafety[-1],
                     color="red",
                     s=50,
                     marker="o",
                     zorder=500)
        ax.contour(X, Y, p, [0.0])
        plt.draw()
        if len(sys.argv) > 1 and sys.argv[1] == "plot":
            # In plot-only mode keep the figure open (effectively) forever.
            plt.pause(9999999999999)
        plt.show()

    def getperceivedSafety(self):
        """Prompt for an integer rating in [-MAX_INPUT_VAL, MAX_INPUT_VAL].

        'q' exits the program; non-integer input is rejected with a message
        instead of crashing (the original raised ValueError on int()).
        """

        def approvedRating(rating):
            return -MAX_INPUT_VAL <= rating and rating <= MAX_INPUT_VAL

        rating = -99
        while not approvedRating(rating):
            answer = input(
                f"\nScale: (-{MAX_INPUT_VAL} -> {MAX_INPUT_VAL} where -{MAX_INPUT_VAL} = too unsafe, 0 = perfect, {MAX_INPUT_VAL} = too safe)\nScore the previous two trajectories: "
            )
            print("")
            if answer == "q":
                exit()
            try:
                rating = int(answer)
            except ValueError:
                print("ERROR: Rating must be an integer")
                continue
            if not approvedRating(rating):
                print(
                    f"ERROR: Rating must be in the interval [-{MAX_INPUT_VAL}, {MAX_INPUT_VAL}]"
                )
        return rating

    def startProcess(self):
        """Run one study round: plan, (optionally) fly, collect the rating,
        update the model and show the posterior."""
        print(f"\n- Round #{len(self.perceivedSafety)} -")
        print(f"Epsilon = {self.nextEpsilonToCheck}")
        print(f"Max deceleration = {self.nextdecelerationMaxToCheck}")
        trajectoryKeys = self.findTrajectories(saveAnimation=False)
        if not USING_MAC:
            # Only the lab machine can actually fly the Crazyflie.
            self.executeTrajectories(trajectoryKeys)
        perceivedSafety = self.getperceivedSafety()
        self.addData(perceivedSafety)
        self.updateNextValuesToAskFor()
        self.plotCurrentPredictionAs3d()

    def findTrajectories(self, saveAnimation):
        """Plan a trajectory with the current candidate (eps, a_max), save
        its plot/CSV under savedTrajectoriesDir, return the animation keys."""
        cbf = CBF(decceleration_max=self.nextdecelerationMaxToCheck,
                  epsilon=self.nextEpsilonToCheck)
        planner = Planner(dt=0.1,
                          obstacles=[],
                          flyZone=MOCAP_FLY_ZONE,
                          verboseLevel=3,
                          sf=cbf,
                          possibleAccelerations=POSSIBLE_ACCELERATIONS)
        currentDroneState = Position(x=DRONE_START_X, y=DRONE_START_Y)
        # The goal sits just past the human, offset by the configured margins.
        goal_x, goal_y = planner.HUMAN.x + planner.HUMAN.radius + GOAL_OFFSET_X, planner.HUMAN.y + GOAL_OFFSET_Y
        goalState = DroneGoalState(x=goal_x, y=goal_y, radius=0.1)
        currentDroneState = DroneState(parent=None,
                                       x=currentDroneState.x,
                                       y=currentDroneState.y,
                                       yaw=planner.getYawForInitDroneState(
                                           currentDroneState, goalState))
        trajectoryKeys = []
        currentDroneState, foundGoalState = planner.findPathToGoal(
            currentDroneState, goalState)
        if foundGoalState:
            print("Found goal :)")
            planner.setKey(currentDroneState, True)
            try:
                os.mkdir(f"{self.savedTrajectoriesDir}")
            except OSError:  # directory most likely exists already
                pass
            dirs = self.savedTrajectoriesDir.split("/")
            if "mainStudy" in self.savedTrajectoriesDir:
                planner.animationKey = f"{dirs[-2]}/{dirs[-1]}/GP | Round #{len(self.perceivedSafety)}.{self.currentParticipant.id} - " + planner.animationKey
                filePath = f"{'/'.join(dirs[:-2])}/{planner.animationKey}"
            else:
                planner.animationKey = f"{dirs[-1]}/Round #{len(self.perceivedSafety)}.{self.currentParticipant.id} - " + planner.animationKey
                filePath = f"{'/'.join(dirs[:-1])}/{planner.animationKey}"
            os.mkdir(filePath)
            trajectory = Trajectory(finalDroneState=currentDroneState)
            trajectory.plot(f"{filePath}/trajectoryPlot.png",
                            cbf,
                            show=False,
                            planner=planner)
            trajectory.saveToCsv(f"{filePath}/trajectoryData.csv")
            trajectoryKeys.append(planner.animationKey)
            if saveAnimation:
                os.mkdir(f"{filePath}/animationFrames")
        else:
            recordFinalTrajectory(currentDroneState,
                                  planner,
                                  onlyFinalFrame=True,
                                  fileName="trajectory")
        planner.resetParams()
        return trajectoryKeys

    def executeTrajectories(self, trajectoryKeys):
        """Fly each planned trajectory on the Crazyflie, recording the
        executed path and plotting planned vs. executed."""
        droneController = CrazyflieController()
        trajectoriesToStartPoses = []
        pathsToTrajectories = []
        plannedTrajectories = []
        recordedTrajectoriesToStartPoses = []
        recordingsOfPlannedTrajectories = []
        for i, trajectoryKey in enumerate(trajectoryKeys):
            # Flight height is derived from the participant's height (cm -> m).
            z_height = (self.currentParticipant.height - HEIGHT_OFFSET) / 100
            # The trajectory CSV may live in one of three locations; try each.
            try:
                currentPathToTrajectoryFolder = f"savedTrajectories/{trajectoryKey}"
                currentPlannedTrajectory = SimpleTrajectory(
                    csv=f"{currentPathToTrajectoryFolder}/trajectoryData.csv",
                    z_height=z_height)
            except Exception:
                try:
                    currentPathToTrajectoryFolder = f"{PATH_TO_ROOT}/preStudy/{trajectoryKey}"
                    currentPlannedTrajectory = SimpleTrajectory(
                        csv=
                        f"{currentPathToTrajectoryFolder}/trajectoryData.csv",
                        z_height=z_height)
                except Exception:
                    currentPathToTrajectoryFolder = f"{PATH_TO_ROOT}/mainStudy/participants/{trajectoryKey}"
                    currentPlannedTrajectory = SimpleTrajectory(
                        csv=
                        f"{currentPathToTrajectoryFolder}/trajectoryData.csv",
                        z_height=z_height)
            currentStartPose = Pose(currentPlannedTrajectory.x[0],
                                    currentPlannedTrajectory.y[0],
                                    currentPlannedTrajectory.z[0],
                                    currentPlannedTrajectory.yaw[0])
            currentTrajectoryToStartPose = droneController.getTrajectoryToPose(
                goalPose=currentStartPose, velocity=0.5)
            pathsToTrajectories.append(currentPathToTrajectoryFolder)
            plannedTrajectories.append(currentPlannedTrajectory)
            trajectoriesToStartPoses.append(currentTrajectoryToStartPose)
        for i, trajectoryKey in enumerate(trajectoryKeys):
            trajectoryToStartPose = trajectoriesToStartPoses[i]
            plannedTrajectory = plannedTrajectories[i]
            if i == 0:
                # Only fly to the start pose before the first trajectory.
                recordedTrajectoryToStartPose = droneController.followTrajectory(
                    trajectoryToStartPose)[1]
                recordedTrajectoriesToStartPoses.append(
                    recordedTrajectoryToStartPose)
                droneController.hover(duration=2)
            recordingOfPlannedTrajectory = droneController.followTrajectory(
                plannedTrajectory)[1]
            recordingsOfPlannedTrajectories.append(
                recordingOfPlannedTrajectory)
            droneController.hover(duration=2)
        droneController.land(velocity=0.5)
        for i, trajectoryKey in enumerate(trajectoryKeys):
            plannedTrajectory = plannedTrajectories[i]
            pathToTrajectoryFolder = pathsToTrajectories[i]
            recordingOfPlannedTrajectory = recordingsOfPlannedTrajectories[i]
            plannedTrajectory.plotTrajectory(
                otherTrajectory=recordingOfPlannedTrajectory,
                fileName=f"{pathToTrajectoryFolder}/executedTrajectory")

    def setBestParameterPair(self):
        """Among the 15 grid points whose predicted safety is closest to 0,
        pick one at random and overwrite the last best-pair entry."""

        def getScore(gpValue):
            return abs(gpValue.safety)

        self.updatePredictions()
        gpValues = []
        for epsIdx, eps in enumerate(self.epsilonRange):
            for aMaxIdx, aMax in enumerate(self.decelerationMaxRange):
                gpValues.append(
                    GPValue(aMaxIdx=aMaxIdx,
                            epsIdx=epsIdx,
                            aMax=aMax,
                            eps=eps,
                            safety=self.predictions[epsIdx][aMaxIdx],
                            std=self.standard_deviations[epsIdx][aMaxIdx]))
        numToChoose = 15
        gpValues.sort(key=getScore)
        chosenGPValue = np.random.choice(gpValues[:numToChoose])
        self.bestParameterPair[-1] = [chosenGPValue.eps, chosenGPValue.aMax]
        self.updateCsv()
def main():
    """CLI entry point: build the GP from terminal input, then either show
    the current posterior (argv[1] == 'plot') or run one study round."""
    gp = GaussianProcess.throughTerminalInput()
    wants_plot = len(sys.argv) > 1 and sys.argv[1] == "plot"
    if wants_plot:
        gp.plotCurrentPredictionAs3d()
    elif len(sys.argv) <= 1:
        gp.startProcess()


if __name__ == "__main__":
    main()
|
"""
29. 两数相除
https://leetcode-cn.com/problems/divide-two-integers/
1. 如果被除数是0 则直接返回0
2. 先对结果的 符号 进行判定, 然后将被除数和除数 全部取正数
3. 由于不能用除法和乘法, 所以准备一个哈希 来存 除数的倍数, 例如 10/3, 哈希存储 {6:2, 3:1}
4. 对哈希的 倍数除数进行排序, 期望先取到最大的
5. 使用DFS 更新结果, 在循环时只要存在除数有效时 直接跳出循环
6. 对临界条件进行判定
"""
def divide(dividend, divisor):
    """LeetCode 29: integer division without *, / or %.

    Works on absolute values with a doubling table {d: 1, 2d: 2, 4d: 4, ...},
    then greedily subtracts the largest multiple that still fits. The sign is
    applied at the end and the result is clamped to the signed 32-bit range.
    Replaces the original recursive DFS (with `nonlocal`) by an equivalent
    iterative greedy loop.
    """
    INT_MIN, INT_MAX = -(2 ** 31), 2 ** 31 - 1
    if dividend == 0:
        return 0
    # Result is negative iff the operand signs differ.
    is_minus = (dividend < 0) != (divisor < 0)
    dividend, divisor = abs(dividend), abs(divisor)
    # Build the doubling table of divisor multiples not exceeding dividend.
    dp = {divisor: 1}
    sum_v = divisor
    while sum_v + sum_v <= dividend:
        count = dp[sum_v]
        sum_v += sum_v
        dp[sum_v] = count + count
    # Greedy subtraction from the largest multiple downwards.
    res = 0
    remaining = dividend
    for v in sorted(dp, reverse=True):
        while remaining >= v:
            remaining -= v
            res += dp[v]
    if is_minus:
        res = -res
    # Clamp to the 32-bit signed range (e.g. -2**31 / -1 overflows).
    return res if INT_MIN <= res <= INT_MAX else INT_MAX
# Smoke tests: expected outputs are 3, -2, and the clamped INT_MAX.
print(divide(10, 3))
print(divide(7, -3))
print(divide(-2147483648, -1)) # 2147483647
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.utils
from torchvision import models
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torchattacks import PGD, FGSM, MultiAttack
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import matplotlib.pyplot as plt
# Download (if needed) and load MNIST; ToTensor scales pixels to [0, 1].
mnist_train = dsets.MNIST(root='data/',
                          train=True,
                          transform=transforms.ToTensor(),
                          download=True)
mnist_test = dsets.MNIST(root='data/',
                         train=False,
                         transform=transforms.ToTensor(),
                         download=True)
# Sanity check: print the tensor shape and show channel 0 of the first test image.
image, label = mnist_test[0]
print(image.shape)
plt.imshow(image[0],cmap='gray')
plt.show()
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from collections import defaultdict
import sys
import logging
import re
import time
from numpy.core.fromnumeric import alltrue
import pandas as pd
import numpy as np
from torch.nn.modules import transformer
from tqdm import tqdm
# import tensorflow as tf
# import tensorflow.keras.backend as K
import os
# from transformers import *
# print(tf.__version__)
from sklearn.metrics import roc_auc_score, f1_score, confusion_matrix, precision_recall_fscore_support
from pathlib import Path
import logging
from torch.utils.data import DataLoader
from sklearn.model_selection import GroupKFold, StratifiedShuffleSplit, StratifiedKFold
import transformers
from transformers.file_utils import ModelOutput
from house_dataset import HouseDataset, BaseDataset
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertModel, BertTokenizer, BertConfig, AutoModel, AutoTokenizer
import random
from early_stopping import EarlyStopping
from losses import SupConLoss
import sys
sys.path.append(str(Path(__file__).resolve().parent.parent.parent))
# from src.utils.eda import eda, eda_one, get_eda
# formater = logging.Formatter(
# '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# logger = logging.getLogger(__name__)
# logger.setFormatter(formater)
# logger.setLevel(logging.INFO)
# Module-level logger; handlers/formatters are left to the application config.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Training requires a GPU — fail fast at import time if CUDA is unavailable.
assert torch.cuda.is_available()
def setup_seed(seed):
    """Seed every RNG in use (stdlib, numpy, torch CPU and all CUDA devices)
    and force cuDNN into deterministic mode for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
# Set random seed
RANDOM_SEED = 245680987
setup_seed(RANDOM_SEED)
# Project layout: <root>/data/raw_data holds the competition TSV files.
PROJECT_ROOT_PATH = (Path(__file__)).resolve().parent.parent.parent
DATA_PATH = PROJECT_ROOT_PATH / "data"
RAW_DATA_PATH = DATA_PATH/'raw_data'
# %%
# Training queries and replies are joined on the shared query id.
train_left = pd.read_csv(
    RAW_DATA_PATH/'./train/train.query.tsv', sep='\t', header=None)
train_left.columns = ['id', 'q1']
train_right = pd.read_csv(
    RAW_DATA_PATH/'./train/train.reply.tsv', sep='\t', header=None)
train_right.columns = ['id', 'id_sub', 'q2', 'label']
df_train = train_left.merge(train_right, how='left')
# Replies can be missing; fill with a neutral "好的" ("OK").
df_train['q2'] = df_train['q2'].fillna('好的')
# Augmented training data (loaded but not merged into df_train here).
df_train_aug = pd.read_csv(RAW_DATA_PATH/'./train/train_aug_full.tsv', sep='\t', header=None,
                           names=['id', 'q1', 'id_sub', 'q2', 'label'])
# df_train_ex = pd.read_csv(RAW_DATA_PATH/'./train/train_ex.tsv', sep='\t', header=None, names=['id', 'q1', 'id_sub', 'q2', 'label'])
# train_reply_bt = pd.read_csv(RAW_DATA_PATH/'./train/train.reply.bt.tsv', sep='\t',
#                              header=None, names=['id','id_sub', 'q2', 'label','q2_bt'])
# train_reply_bt_pos = train_reply_bt[train_reply_bt['label'] == 1]
# train_reply_bt_pos['q2'] = train_reply_bt_pos['q2_bt']
# train_reply_bt_pos = train_reply_bt_pos.drop('q2_bt', axis='columns')
# Test files ship in GBK encoding, unlike the UTF-8 training files.
test_left = pd.read_csv(RAW_DATA_PATH/'./test/test.query.tsv',
                        sep='\t', header=None, encoding='gbk')
test_left.columns = ['id', 'q1']
test_right = pd.read_csv(
    RAW_DATA_PATH/'./test/test.reply.tsv', sep='\t', header=None, encoding='gbk')
test_right.columns = ['id', 'id_sub', 'q2']
df_test = test_left.merge(test_right, how='left')
# HIT stopword list, used by the word-overlap feature.
STOPWORS_PATH = PROJECT_ROOT_PATH / 'src/utils/stopwords/HIT_stop_words.txt'
stopwords = set()
with open(STOPWORS_PATH, 'r') as f:
    stopwords.update([x.strip() for x in f.readlines()])
# %%
# PATH = './'
# BERT_PATH = './data/pretrain_model'
# WEIGHT_PATH = './'
MAX_SEQUENCE_LENGTH = 100
# 'chinese_roberta_wwm_ext_pytorch', 'chinese_roberta_wwm_large_ext_pytorch', 'bert-base-chinese', 'chinese_wwm_ext_pytorch', 'ernie'
MODEL_NAME = 'chinese_roberta_wwm_large_ext_pytorch'
# Checkpoints are written to a per-run, timestamped directory.
TIME_STR = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
CHECKPOINT_PATH = DATA_PATH / f"model_record/{MODEL_NAME}/{TIME_STR}"
PRETRAIN_MODEL_PATH = PROJECT_ROOT_PATH/'./data/pretrain_model'
input_categories = ['q1', 'q2']
output_categories = 'label'
print('train shape =', df_train.shape)
print('test shape =', df_test.shape)
# %%
def _convert_to_transformer_inputs(question, answer, tokenizer, max_sequence_length):
"""Converts tokenized input to ids, masks and segments for transformer (including bert)"""
def return_id(str1, str2, truncation_strategy, length):
# inputs = tokenizer.encode_plus(str1, str2,
# add_special_tokens=True,
# max_length=length,
# truncation_strategy=truncation_strategy,
# truncation=True,
# )
# input_ids = inputs["input_ids"]
# input_masks = [1] * len(input_ids)
# input_segments = inputs["token_type_ids"]
# padding_length = length - len(input_ids)
# padding_id = tokenizer.pad_token_id
# input_ids = input_ids + ([padding_id] * padding_length)
# input_masks = input_masks + ([0] * padding_length)
# input_segments = input_segments + ([0] * padding_length)
# inputs = tokenizer.encode_plus(str1, str2,
# add_special_tokens=True,
# max_length=length,
# # truncation_strategy='longest_first',
# truncation='longest_first',
# padding='max_length'
# )
# Tokenizer的__call__方法可以直接处理str batch或者单独的str
inputs = tokenizer(str1, str2,
add_special_tokens=True,
max_length=length,
# truncation_strategy='longest_first',
truncation='longest_first',
padding='max_length'
)
# print(inputs.keys())
input_ids = inputs["input_ids"]
input_segments = inputs["token_type_ids"]
input_masks = inputs['attention_mask']
return [input_ids, input_masks, input_segments]
input_ids_q, input_masks_q, input_segments_q = return_id(
question, answer, 'longest_first', max_sequence_length)
return [input_ids_q, input_masks_q, input_segments_q]
def get_overlap_count(a, b):
    """Total occurrence count of tokens that appear in BOTH sequences.

    For every token present in *a* and *b*, sums its count in *a* plus its
    count in *b*; returns 0 when the sequences share no token. Replaces the
    original O(k*m) per-token argmax scan with a single vectorized
    np.intersect1d lookup (same results).
    """
    unique_a, counts_a = np.unique(a, return_counts=True)
    unique_b, counts_b = np.unique(b, return_counts=True)
    # np.unique outputs are sorted and duplicate-free, so assume_unique holds.
    _, idx_a, idx_b = np.intersect1d(unique_a, unique_b,
                                     assume_unique=True,
                                     return_indices=True)
    if idx_a.shape[0] == 0:
        return 0
    return int(counts_a[idx_a].sum() + counts_b[idx_b].sum())
def get_overlap_feature(str_a, str_b):
    """Two-element overlap feature for a question/answer pair:
    [character-level overlap count, stopword-filtered word-level overlap count]."""
    import jieba
    chars_a, chars_b = list(str_a), list(str_b)
    words_a = [w for w in jieba.cut(str_a) if w not in stopwords]
    words_b = [w for w in jieba.cut(str_b) if w not in stopwords]
    return np.asarray([get_overlap_count(chars_a, chars_b),
                       get_overlap_count(words_a, words_b)])
def compute_input_arrays(df, columns, tokenizer, max_sequence_length):
    """Tokenize all (q1, q2) pairs of *df* in batches and compute the
    char/word overlap feature for every pair.

    Returns ([ids, masks, segments] as int32 arrays, overlap features of
    shape [N, 2] int32). NOTE(review): *columns* is unused — the q1/q2
    columns are read directly; confirm this matches callers' intent.
    """
    input_ids_q, input_masks_q, input_segments_q = [], [], []
    # Answer-side buffers are declared but never filled (kept for parity).
    input_ids_a, input_masks_a, input_segments_a = [], [], []
    input_overlap = []
    # Batch the tokenizer calls through a DataLoader for speed.
    dataset = BaseDataset(df['q1'], df['q2'])
    batch_size = 2048
    loader = DataLoader(dataset, batch_size=batch_size)
    steps = int(np.ceil(len(dataset) / batch_size))
    pbar = tqdm(desc='Computing input arrays', total=steps)
    for i, sample in enumerate(loader):
        q_batch, a_batch = sample[0], sample[1]
        ids_q, masks_q, segments_q = _convert_to_transformer_inputs(
            q_batch, a_batch, tokenizer, max_sequence_length)
        # TODO: overlap feature — computed per pair, not batched.
        overlap_feat = np.asarray([get_overlap_feature(q, a)
                                   for q, a in zip(q_batch, a_batch)])
        input_ids_q.extend(ids_q)
        input_masks_q.extend(masks_q)
        input_segments_q.extend(segments_q)
        input_overlap.extend(overlap_feat)
        pbar.update()
    pbar.close()
    # for _, instance in tqdm(df[columns].iterrows()):
    #     q, a = instance.q1, instance.q2
    #     ids_q, masks_q, segments_q = _convert_to_transformer_inputs(q, a, tokenizer, max_sequence_length)
    #     input_ids_q.append(ids_q)
    #     input_masks_q.append(masks_q)
    #     input_segments_q.append(segments_q)
    return [np.asarray(input_ids_q, dtype=np.int32),
            np.asarray(input_masks_q, dtype=np.int32),
            np.asarray(input_segments_q, dtype=np.int32),
            ], np.asarray(input_overlap, dtype=np.int32)
def compute_output_arrays(df, columns):
    """Return the target column(s) of *df* as a numpy array."""
    return df[columns].to_numpy()
def search_f1(y_true, y_pred):
    """Sweep decision thresholds 0.30..0.69 (step 0.01) over the predicted
    probabilities and return (best F1, best threshold); prints both."""
    best, best_t = 0, 0
    for step in range(30, 70):
        threshold = step / 100
        score = f1_score(y_true, (y_pred > threshold).astype(int))
        if score > best:
            best, best_t = score, threshold
    print('best', best)
    print('thres', best_t)
    return best, best_t
# %%
# 'ernie' is fetched from the HuggingFace hub; other models are local checkpoints.
if MODEL_NAME == 'ernie':
    tokenizer = AutoTokenizer.from_pretrained(
        'nghuyong/ernie-1.0', cache_dir=PRETRAIN_MODEL_PATH/MODEL_NAME)
else:
    tokenizer = BertTokenizer.from_pretrained(
        str(PRETRAIN_MODEL_PATH/MODEL_NAME))
inputs_path = Path(RAW_DATA_PATH/'./train/inputs.npy')
outputs_path = Path(RAW_DATA_PATH/'./train/outputs.npy')
test_inputs_path = Path(RAW_DATA_PATH/'./test/test_inputs.npy')
# Cache preprocessed arrays on disk; recompute only when the .npy is missing.
if not outputs_path.exists():
    outputs = compute_output_arrays(df_train, output_categories)
    np.save(outputs_path, outputs)
else:
    outputs = np.load(outputs_path, allow_pickle=True)
if not inputs_path.exists():
    inputs, inputs_overlap = compute_input_arrays(
        df_train, input_categories, tokenizer, MAX_SEQUENCE_LENGTH)
    np.save(inputs_path, inputs)
    np.save(inputs_path.parent / 'inputs_overlap.npy', inputs_overlap)
else:
    inputs = np.load(inputs_path, allow_pickle=True)
    inputs_overlap = np.load(inputs_path.parent /
                             'inputs_overlap.npy', allow_pickle=True)
if not test_inputs_path.exists():
    test_inputs, test_inputs_overlap = compute_input_arrays(
        df_test, input_categories, tokenizer, MAX_SEQUENCE_LENGTH)
    np.save(test_inputs_path, test_inputs)
    np.save(test_inputs_path.parent /
            'test_inputs_overlap.npy', test_inputs_overlap)
else:
    test_inputs = np.load(test_inputs_path, allow_pickle=True)
    test_inputs_overlap = np.load(
        test_inputs_path.parent / 'test_inputs_overlap.npy', allow_pickle=True)
# Anjuke dataset (external positives, precomputed with the same pipeline).
anjuke_inputs = np.load(DATA_PATH / 'anjuke/inputs_pos.npy', allow_pickle=True)
anjuke_outputs = np.load(
    DATA_PATH / 'anjuke/outputs_pos.npy', allow_pickle=True)
# def prepare_data(part='train'):
# input_categories = ['q1','q2']
# output_categories = 'label'
# tokenizer = BertTokenizer.from_pretrained(PRETRAIN_MODEL_PATH/MODEL_NAME/'vocab.txt')
# if part == 'train':
# inputs_path = RAW_DATA_PATH / './train/inputs.npy'
# outputs_path = RAW_DATA_PATH / './train/outputs.npy'
# train_left = pd.read_csv(RAW_DATA_PATH/'./train/train.query.tsv',sep='\t',header=None)
# train_left.columns=['id','q1']
# train_right = pd.read_csv(RAW_DATA_PATH/'./train/train.reply.tsv',sep='\t',header=None)
# train_right.columns=['id','id_sub','q2','label']
# df_train = train_left.merge(train_right, how='left')
# df_train['q2'] = df_train['q2'].fillna('好的')
# # df_train_ex = pd.read_csv(RAW_DATA_PATH/'./train/train_ex.tsv', sep='\t', header=None, names=['id', 'q1', 'id_sub', 'q2', 'label'])
# # train_reply_bt = pd.read_csv(RAW_DATA_PATH/'./train/train.reply.bt.tsv', sep='\t',
# # header=None, names=['id','id_sub', 'q2', 'label','q2_bt'])
# # train_reply_bt_pos = train_reply_bt[train_reply_bt['label'] == 1]
# # train_reply_bt_pos['q2'] = train_reply_bt_pos['q2_bt']
# # train_reply_bt_pos = train_reply_bt_pos.drop('q2_bt', axis='columns')
# logger.info(f'Train shape = {df_train.shape}')
# df = df_train
# elif part == 'test':
# inputs_path = RAW_DATA_PATH / './test/test_inputs.npy'
# outputs_path = None
# outputs = None
# test_left = pd.read_csv(RAW_DATA_PATH/'./test/test.query.tsv',sep='\t',header=None, encoding='gbk')
# test_left.columns = ['id','q1']
# test_right = pd.read_csv(RAW_DATA_PATH/'./test/test.reply.tsv',sep='\t',header=None, encoding='gbk')
# test_right.columns=['id','id_sub','q2']
# df_test = test_left.merge(test_right, how='left')
# logger.info(f'Test shape = {df_test.shape}')
# df = df_test
# else:
# raise ValueError()
# if not inputs_path.exists():
# inputs = compute_input_arrays(
# df, input_categories, tokenizer, MAX_SEQUENCE_LENGTH
# )
# np.save(inputs_path, inputs)
# else:
# inputs = np.load(inputs_path, allow_pickle=True)
# if not outputs_path.exists() and part == 'train':
# outputs = compute_output_arrays(df, output_categories)
# np.save(outputs_path, outputs)
# else:
# outputs = np.load(outputs_path, allow_pickle=True)
# return inputs, outputs
# # 保存处理好的数据集
# train_inputs, train_outputs = prepare_data(part="train")
# test_inputs, _ = prepare_data(part="test")
# Pytorch版BERT模型
class BertForHouseQA(nn.Module):
    """BERT encoder with a sigmoid head scoring query/reply relevance in [0, 1].

    forward() returns both the [CLS] feature (used e.g. for contrastive
    losses) and the scalar relevance score.
    """

    def __init__(self):
        super(BertForHouseQA, self).__init__()
        # 'ernie' is fetched from the HuggingFace hub; every other model name
        # is loaded from the local pretrain_model directory.
        if MODEL_NAME == 'ernie':
            self.bert = AutoModel.from_pretrained('nghuyong/ernie-1.0',
                                                  cache_dir=PRETRAIN_MODEL_PATH / MODEL_NAME,
                                                  output_hidden_states=False,
                                                  output_attentions=False)
        else:
            self.bert = BertModel.from_pretrained(os.path.join(PRETRAIN_MODEL_PATH, MODEL_NAME),
                                                  output_hidden_states=False,
                                                  output_attentions=False)
        self.dropout = nn.Dropout(0.5)
        self.fc = nn.Linear(self.bert.config.hidden_size, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x: torch.Tensor):
        """x stacks input_ids / attention_mask / token_type_ids along dim 1,
        i.e. shape [batch, 3, seq].

        Returns (feat, logit): the dropped-out [CLS] feature [batch, hid]
        and the sigmoid-activated score [batch, 1].
        """
        hidden_state = self.bert(
            input_ids=x[:, 0, :], attention_mask=x[:,
                                                   1, :], token_type_ids=x[:, 2, :]
        )[0]  # [b, seq, hid]
        feat_cls = hidden_state[:, 0, :]  # [CLS] token representation
        feat = self.dropout(feat_cls)
        logit = self.sigmoid(self.fc(feat))
        return feat, logit
class BertClsToReg(nn.Module):
    """Fresh sigmoid regression head on top of a trained BertForHouseQA's
    encoder (the wrapped model's first child must be named 'bert')."""

    def __init__(self, org_bert: BertForHouseQA):
        super(BertClsToReg, self).__init__()
        self.org_bert = org_bert
        self.dropout = nn.Dropout(0.5)
        self.fc = nn.Linear(self.org_bert.bert.config.hidden_size, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x: torch.Tensor):
        # Reuse only the encoder of the wrapped model.
        child_name, encoder = next(self.org_bert.named_children())
        assert child_name == 'bert'
        sequence_output = encoder(input_ids=x[:, 0, :],
                                  attention_mask=x[:, 1, :],
                                  token_type_ids=x[:, 2, :])[0]  # [b, seq, hid]
        cls_feature = sequence_output[:, 0, :]  # [CLS]
        return self.sigmoid(self.fc(self.dropout(cls_feature)))
class BertClsToCls(nn.Module):
    """Re-head a trained BertForHouseQA with a fresh 2-class classifier.

    Reuses only the encoder (the ``bert`` child module) of the wrapped model;
    the [CLS] vector goes through dropout -> ReLU -> linear to 2 logits.
    """

    def __init__(self, org_bert: BertForHouseQA):
        super(BertClsToCls, self).__init__()
        # Keep the full original model so its encoder weights stay registered.
        self.org_bert = org_bert
        self.dropout = nn.Dropout(0.5)
        self.fc = nn.Linear(self.org_bert.bert.config.hidden_size, 2)
        self.relu = nn.ReLU()

    def forward(self, x: torch.Tensor):
        """x: [batch, 3, seq] stacking (input_ids, attention_mask, token_type_ids)."""
        input_ids, attention_mask, token_type_ids = x[:, 0, :], x[:, 1, :], x[:, 2, :]
        # The encoder is expected to be the first registered child module.
        name, encoder = next(self.org_bert.named_children())
        assert name == 'bert'
        sequence_output = encoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
        )[0]  # [batch, seq, hidden]
        cls_vec = sequence_output[:, 0, :]  # [CLS] token representation
        return self.fc(self.relu(self.dropout(cls_vec)))
def get_hinge_loss(model_outputs, qa_id, criterion):
    """Pairwise hinge (margin-ranking) loss grouped by question id.

    Args:
        model_outputs: [batch, 1] tensor of predicted answer scores.
        qa_id: array-like [batch, 3] of (question id, answer id, label).
        criterion: a ``torch.nn.MarginRankingLoss`` instance.

    Returns:
        Scalar tensor: the sum of the per-question ranking losses.  Returns a
        graph-connected zero tensor when no question in the batch has a
        positive answer (the original code crashed in ``torch.stack([])``).
    """
    qids = set(qa_id[:, 0])
    losses = []
    for qid in qids:
        # Rows belonging to the current question.
        mask = qa_id[:, 0] == qid
        # Rows of the current question with a positive / negative label.
        pos_mask = mask & (qa_id[:, 2] == 1)
        neg_mask = mask & (qa_id[:, 2] == 0)
        if pos_mask.sum().item() == 0:
            # No correct answer for this question: hinge loss is undefined.
            continue
        pos_probs = model_outputs[pos_mask]
        if neg_mask.sum().item() == 0:
            # No wrong answer for this question: randomly sample negatives
            # from the rest of the batch (excluding this question's positives).
            candidate_probs = model_outputs[~pos_mask]
            num_candidate = candidate_probs.shape[0]
            # idx = torch.multinomial(torch.ones([num_candidate]), num_samples=4)
            idx = torch.randint(high=num_candidate, size=(4,))
            neg_probs = candidate_probs[idx]
        else:
            neg_probs = model_outputs[neg_mask]
        # BUG FIX: rank every positive against every negative.  The original
        # used plain .repeat() on both sides, which only covers the full
        # cartesian product when the counts are coprime; repeat_interleave on
        # the positives aligns (p_i, n_j) for all i, j.
        num_pos, num_neg = pos_probs.shape[0], neg_probs.shape[0]
        input1 = pos_probs.repeat_interleave(num_neg, dim=0)
        input2 = neg_probs.repeat(num_pos, 1)
        target = torch.ones_like(input1)
        losses.append(criterion(input1, input2, target))
    if not losses:
        # No question had a positive answer: return zero instead of crashing.
        return model_outputs.sum() * 0.0
    return torch.stack(losses).sum()
def train_pytorch(**kwargs):
    """Train BertForHouseQA with grouped K-fold CV and a pairwise hinge loss.

    Expected kwargs: batch_size, valid_batch_size, epoch, lr, weight_decay,
    n_splits, patience, device, inputs, outputs (test_inputs optional).

    Returns:
        (all_f1, CHECKPOINT_PATH): out-of-fold f1 over the whole training set
        and the directory where per-fold checkpoints were written.
    """
    CHECKPOINT_PATH.mkdir(parents=True, exist_ok=True)
    # Calling logging.basicConfig installs a root logger on the process so
    # messages from child loggers reach the console (child -> root -> the
    # root's StreamHandler).  Without it, every child logger would need its
    # own StreamHandler, which is tedious.
    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
    formater = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # Print logs to the terminal.
    # stream_handler = logging.StreamHandler()
    # stream_handler.setFormatter(formater)
    # # Save logs to file.
    log_path = CHECKPOINT_PATH / 'train.log'
    file_handler = logging.FileHandler(
        filename=log_path, mode='w', encoding='utf-8')
    file_handler.setFormatter(formater)
    # logger.addHandler(stream_handler)
    logger.addHandler(file_handler)
    inputs = kwargs['inputs']
    outputs = kwargs['outputs']
    # test_inputs = kwargs['test_inputs']
    # Group the folds by question id so one question never straddles folds.
    gkf = GroupKFold(n_splits=kwargs['n_splits']).split(
        X=df_train.q2, groups=df_train.id)
    # Alternative splitters:
    # sss = StratifiedShuffleSplit(n_splits=kwargs['n_splits'], test_size=0.2,
    #                              random_state=RANDOM_SEED).split(X=df_train.q2, y=df_train.label)
    # skf = StratifiedKFold(n_splits=kwargs['n_splits'], shuffle=True, random_state=RANDOM_SEED).split(X=df_train.q2, y=outputs)
    # oof = np.zeros((len(df_train),1))
    # all_pred = np.zeros(shape=(len(df_train), 2))  # classification head
    all_pred = np.zeros(shape=(len(df_train)))  # regression head
    all_true = np.zeros(shape=(len(df_train)))
    for fold, (train_idx, valid_idx) in enumerate(gkf):
        # for fold, (train_idx, valid_idx) in enumerate(skf):
        logger.info(f'Fold No. {fold}')
        train_inputs = [inputs[i][train_idx] for i in range(len(inputs))]
        train_outputs = outputs[train_idx]
        train_qa_id = df_train[['id', 'id_sub', 'label']].iloc[train_idx]
        # ===============================================================
        # (Disabled experiments) Sample augmentation via back-translation /
        # EDA of the fold's rows, and appending the Anjuke data, e.g.:
        # train_id_set = set([f'{x[0]},{x[1]}' for x in df_train.iloc[train_idx][['id', 'id_sub']].to_numpy()])
        # mask = df_train_aug[['id', 'id_sub']].apply(lambda x: f'{x["id"]},{x["id_sub"]}' in train_id_set, axis=1)
        # df_train_fold = df_train_aug[mask]
        # train_inputs, train_inputs_overlap = compute_input_arrays(df_train_fold, input_categories, tokenizer, MAX_SEQUENCE_LENGTH)
        # train_outputs = compute_output_arrays(df_train_fold, output_categories)
        # train_inputs = [np.concatenate([train_inputs[i], anjuke_inputs[i]], axis=0) for i in range(len(inputs))]
        # train_outputs = np.concatenate([train_outputs, anjuke_outputs], axis=0)
        # ================================================================
        valid_inputs = [inputs[i][valid_idx] for i in range(len(inputs))]
        valid_outputs = outputs[valid_idx]
        valid_qa_id = df_train[['id', 'id_sub', 'label']].iloc[valid_idx]
        train_set = HouseDataset(train_inputs, train_outputs, train_qa_id)
        valid_set = HouseDataset(valid_inputs, valid_outputs, valid_qa_id)
        # test_set = HouseDataset(test_inputs, np.zeros_like(test_inputs[0]))  # the test set has no labels
        logger.info('Train set size: {}, valid set size {}'.format(
            len(train_set), len(valid_set)))
        train_loader = DataLoader(train_set,
                                  batch_size=kwargs['batch_size'],
                                  # shuffle=True  # set True when training as a classifier
                                  )
        valid_loader = DataLoader(valid_set,
                                  batch_size=kwargs['valid_batch_size'])
        # test_loader = DataLoader(test_set,
        #                          batch_size=512)
        device = torch.device(f"cuda:{kwargs['device']}")
        # model = BertForHouseQA().cuda(device)
        model = torch.nn.DataParallel(
            BertForHouseQA(), device_ids=[1, 2, 3]).cuda(device)
        # (Disabled) Find and load the highest-scoring checkpoint:
        # best_score_ = max([float(x.name[len(MODEL_NAME)+1:-3]) for x in CHECKPOINT_PATH.iterdir() if x.is_file()])
        # best_ckpt_path = CHECKPOINT_PATH/f'{MODEL_NAME}_{best_score_}.pt'
        # ckpt = torch.load(best_ckpt_path)
        # model.load_state_dict(ckpt['model_state_dict'])
        # (Disabled) Load a point-wise model and continue pair-wise training,
        # or load the Anjuke-pretrained model:
        # =====================================================
        # org_model = BertForHouseQA().cuda(device)
        # time_str = '2020-11-18-12:49:44'
        # org_ckpt_path = DATA_PATH / f"model_record/{MODEL_NAME}/{time_str}"
        # org_ckpt_path = DATA_PATH / f'anjuke/model_record/{MODEL_NAME}/{time_str}'
        # org_ckpt_paths = [x for x in org_ckpt_path.iterdir() if x.is_file() and x.suffix == '.pt']
        # prefix = f'{MODEL_NAME}_'
        # best_ckpt_path = [x for x in org_ckpt_paths if str(x.name).startswith(prefix)][0]
        # ckpt = torch.load(best_ckpt_path)
        # org_model.load_state_dict(ckpt['model_state_dict'])
        # model = BertClsToReg(org_model).cuda(device)
        # model = BertClsToCls(org_model).cuda(device)
        # =====================================================
        # List all modules inside the model.
        logger.info('Model modules:')
        for i, m in enumerate(model.named_children()):
            logger.info('{} -> {}'.format(i, m))
        # # Get the number of total parameters.
        # total_params = sum(p.numel() for p in model.parameters())
        # trainable_params = sum(p.numel()
        #                        for p in model.parameters() if p.requires_grad)
        # logger.info("Total params: {:,}".format(total_params))
        # logger.info("Trainable params: {:,}".format(trainable_params))
        # Pairwise hinge loss.
        criterion = torch.nn.MarginRankingLoss(margin=1.0)
        # criterion = torch.nn.MSELoss()
        # criterion = torch.nn.CrossEntropyLoss()
        # criterion_scl = SupConLoss(temperature=0.1, device=device)
        # optimizer = torch.optim.Adam(
        #     model.parameters(), lr=kwargs['lr'], weight_decay=kwargs['weight_decay'])
        optimizer = transformers.AdamW(
            model.parameters(), lr=kwargs['lr'], weight_decay=kwargs['weight_decay'])
        logger.info('Optimizer:')
        logger.info(optimizer)
        # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
        #                                                        mode='min',
        #                                                        patience=int(kwargs['patience']/2),
        #                                                        verbose=True
        #                                                        )
        scheduler = transformers.get_cosine_schedule_with_warmup(optimizer,
                                                                 num_warmup_steps=4,
                                                                 num_training_steps=kwargs['epoch'])
        # best_score = 0.0
        stopper = EarlyStopping(patience=kwargs['patience'], mode='max')
        ckpt_path = None
        for epoch in range(kwargs['epoch']):
            # =======================Training===========================
            # Set model to train mode.
            model.train()
            steps = int(np.ceil(len(train_set) / kwargs['batch_size']))
            pbar = tqdm(desc='Epoch {}, loss {}'.format(epoch, 'NAN'),
                        total=steps)
            for i, sample in enumerate(train_loader):
                x, y = sample[0].cuda(device).long(
                ), sample[1].cuda(device).long()
                optimizer.zero_grad()
                feat, model_outputs = model(x)  # [batch_size, 2]
                # CrossEntropy
                # loss = criterion(model_outputs, y)
                # MSE
                # loss = criterion(model_outputs, y.float().unsqueeze(-1))
                # Pairwise hinge loss over (question, answer) groups.
                train_qa_id_sub = sample[2].cpu().detach().numpy()
                loss = get_hinge_loss(model_outputs, train_qa_id_sub, criterion)
                # (Disabled) Supervised contrastive loss mix-in:
                # feat = F.normalize(feat, dim=-1).unsqueeze(1)
                # scl = criterion_scl(feat, y)
                # scl_weight = 0.3
                # loss = (1-scl_weight)*loss + scl_weight*scl
                # loss += scl
                loss.backward()
                optimizer.step()
                pbar.set_description(
                    'Epoch {}, train loss {:.4f}'.format(epoch, loss.item()))
                pbar.update()
            pbar.close()
            # =========================================================
            # =======================Validation========================
            # Set model to evaluation mode.
            model.eval()
            with torch.no_grad():
                # Validation step
                valid_loss = []
                valid_pred = []
                valid_true = []
                steps = int(np.ceil(len(valid_set) /
                                    kwargs['valid_batch_size']))
                pbar = tqdm(desc='Validating', total=steps)
                for i, sample in enumerate(valid_loader):
                    y_true_local = sample[1].numpy()
                    x, y_true = sample[0].cuda(
                        device).long(), sample[1].cuda(device).long()
                    feat, model_outputs = model(x)
                    # MSELoss
                    # loss = criterion(model_outputs, y_true.float().unsqueeze(-1)).cpu().detach().item()
                    # HingeLoss
                    valid_qa_id_sub = sample[2].cpu().detach().numpy()
                    loss = get_hinge_loss(model_outputs, valid_qa_id_sub, criterion).cpu().detach().item()
                    y_pred = model_outputs.cpu().detach().squeeze(-1).numpy()
                    # CrossEntropy
                    # loss = criterion(
                    #     model_outputs, y_true).cpu().detach().item()
                    # y_pred = F.softmax(
                    #     model_outputs.cpu().detach(), dim=1).numpy()
                    valid_loss.append(loss)
                    valid_pred.append(y_pred)
                    valid_true.append(y_true_local)
                    pbar.update()
                pbar.close()
                valid_loss = np.asarray(valid_loss).mean()
                valid_pred = np.concatenate(valid_pred, axis=0)
                valid_true = np.concatenate(valid_true, axis=0)
                # Regression head: search the best threshold and its f1.
                valid_f1, thr = search_f1(valid_true, valid_pred)
                logger.info("Epoch {}, valid loss {:.5f}, valid f1 {:.4f}".format(epoch, valid_loss, valid_f1))
                # (Disabled) Classification-head metrics:
                # valid_pred_label = np.argmax(valid_pred, axis=1)
                # valid_auc = roc_auc_score(valid_true, valid_pred_label)
                # valid_p, valid_r, valid_f1, _ = precision_recall_fscore_support(
                #     valid_true, valid_pred_label, average='binary')
                # logger.info(
                #     "Epoch {}, valid loss {:.5f}, valid P {:.4f}, valid R {:.4f}, valid f1 {:.4f}, valid auc {:.4f}".format(
                #         epoch, valid_loss, valid_p, valid_r, valid_f1, valid_auc)
                # )
                # logger.info('Confusion Matrix: ')
                # logger.info(confusion_matrix(y_true=valid_true,
                #                              y_pred=valid_pred_label, normalize='all'))
                # BUG FIX: the cosine schedule is LambdaLR-based; its step()
                # takes no metric.  The old call scheduler.step(valid_f1)
                # (a ReduceLROnPlateau leftover) passed a float that would be
                # misread as an epoch index.
                scheduler.step()
                stop_flag, best_flag = stopper.step(valid_f1)
                if best_flag:
                    # Remove the checkpoint saved previously for this fold.
                    if ckpt_path is not None:
                        ckpt_path.unlink()
                    ckpt_path = CHECKPOINT_PATH / \
                        f"{MODEL_NAME}_{fold}_{epoch}_{stopper.best_score}.pt"
                    # Save the best model so far.
                    torch.save(
                        {
                            "model_name": "BertForHouseQA",
                            "epoch": epoch,
                            "valid_loss": valid_loss,
                            "valid_f1": valid_f1,
                            "model_state_dict": model.state_dict(),
                            "train_idx": train_idx,
                            "valid_idx": valid_idx,
                            "fold": fold,
                            # "optimizer_state_dict": optimizer.state_dict(),
                            "thr": thr
                            # 'scheduler_state_dict': scheduler.state_dict()
                        },
                        f=ckpt_path,
                    )
                    logger.info("A best score! Saved to checkpoints.")
                    # Keep this fold's out-of-fold predictions for the final
                    # whole-training-set f1 evaluation.
                    all_pred[valid_idx] = valid_pred
                    all_true[valid_idx] = valid_true
                if stop_flag:
                    logger.info("Stop training due to early stopping.")
                    # Terminate training for this fold.
                    break
        # (Disabled) Per-fold bookkeeping alternatives:
        # oof[valid_idx] = valid_pred
        # valid_f1, _ = search_f1(valid_outputs, valid_pred)  # best threshold + f1
        # print('Valid f1 score = ', valid_f1)
        # ==========================================================
    # After all folds: evaluate the whole training set out-of-fold.
    # (Disabled) CrossEntropy variant:
    # all_pred = np.argmax(all_pred, axis=1)
    # all_auc = roc_auc_score(all_true, all_pred)
    # all_p, all_r, all_f1, _ = precision_recall_fscore_support(
    #     all_true, all_pred, average='binary')
    # logger.info("all P {:.4f}, all R {:.4f}, all f1 {:.4f}, all auc {:.4f}".format(
    #     all_p, all_r, all_f1, all_auc))
    # logger.info(confusion_matrix(y_true=all_true, y_pred=all_pred, normalize='all'))
    # Regression head:
    all_f1, all_thr = search_f1(all_true, all_pred)
    logger.info("All f1 {:.4f}, all thr {:.4f}".format(all_f1, all_thr))
    return all_f1, CHECKPOINT_PATH
def predict_pytorch(**kwargs):
    """Ensemble-predict the test set from each fold's best checkpoint.

    Expected kwargs: test_inputs, batch_size, device, checkpoint_path, score.
    Writes a tab-separated submission file into checkpoint_path; no return.
    """
    test_inputs = kwargs['test_inputs']
    batch_size = kwargs['batch_size']
    test_set = HouseDataset(
        test_inputs, np.zeros_like(test_inputs[0]))  # the test set has no labels
    test_loader = DataLoader(test_set,
                             batch_size=batch_size)
    device = torch.device(f"cuda:{kwargs['device']}")
    # model = BertForHouseQA().cuda(device)
    model = torch.nn.DataParallel(
        BertForHouseQA(), device_ids=[1, 2, 3]).cuda(device)
    # model = BertClsToCls(model).cuda(device)
    # model_name = kwargs['model_name']
    # time_str = kwargs['time_str']
    # checkpoint_path = DATA_PATH / f"model_record/{model_name}/{time_str}"
    checkpoint_path = kwargs['checkpoint_path']
    model_name = checkpoint_path.parent.name
    ckpt_paths = [x for x in checkpoint_path.iterdir() if x.is_file()
                  and x.suffix == '.pt']
    # Find the largest epoch each fold's saved model reached.  Checkpoint
    # names follow f"{model_name}_{fold}_{epoch}_{score}.pt" (see training).
    fold2epoch = defaultdict(int)
    for path in ckpt_paths:
        fold, epoch = str(path.name)[len(model_name)+1:].split('_')[:2]
        fold = int(fold)
        epoch = int(epoch)
        if fold2epoch[fold] < epoch:
            fold2epoch[fold] = epoch
    test_preds = []
    # Load each fold's best (latest-epoch) checkpoint and predict with it.
    for fold, epoch in fold2epoch.items():
        # pattern = f'^{model_name}_{fold}_{epoch}'
        prefix = f'{model_name}_{fold}_{epoch}'
        # best_ckpt_path = [re.match(pattern, str(x)) for x in ckpt_paths][0]
        best_ckpt_path = [x for x in ckpt_paths if str(
            x.name).startswith(prefix)][0]
        ckpt = torch.load(best_ckpt_path)
        model.load_state_dict(ckpt['model_state_dict'])
        # =======================Prediction========================
        # Set model to evaluation mode.
        model.eval()
        with torch.no_grad():
            # Prediction step
            test_pred = []
            steps = int(np.ceil(len(test_set) / batch_size))
            for i, sample in tqdm(enumerate(test_loader), desc='Predicting', total=steps):
                # y_true_local = sample[1].numpy()
                x, y_true = sample[0].cuda(
                    device).long(), sample[1].cuda(device).float()
                feat, model_outputs = model(x)
                # loss = criterion(model_outputs, y_true.unsqueeze(-1)).cpu().detach().item()
                # y_pred = outputs.argmax(dim=1).cpu().numpy()
                y_pred = model_outputs.cpu().detach().numpy()
                test_pred.append(y_pred)
            test_pred = np.concatenate(test_pred, axis=0)
        test_preds.append(test_pred)
    # Average the per-fold predictions.
    sub = np.average(test_preds, axis=0)
    # NOTE(review): model_outputs appears to be a [batch, 1] regression score,
    # so argmax over axis=1 would always yield 0; the thresholding line below
    # (cf. the 'thr' value saved in each checkpoint) looks like the intended
    # path — confirm before trusting the submission labels.
    sub = np.argmax(sub, axis=1)
    # sub = sub > best_t  # threshold the scores to produce the test labels
    df_test['label'] = sub.astype(int)
    # fpath = DATA_PATH / f"model_record/{MODEL_NAME}/{test_time_str}/submission_beike_0.7947.csv"
    fpath = checkpoint_path / f"submission_beike_{kwargs['score']}.csv"
    df_test[['id', 'id_sub', 'label']].to_csv(
        fpath, index=False, header=None, sep='\t')
    # return test_preds
# %%
# def create_model():
# q_id = tf.keras.layers.Input((MAX_SEQUENCE_LENGTH,), dtype=tf.int32)
# q_mask = tf.keras.layers.Input((MAX_SEQUENCE_LENGTH,), dtype=tf.int32)
# q_atn = tf.keras.layers.Input((MAX_SEQUENCE_LENGTH,), dtype=tf.int32)
# config = BertConfig.from_pretrained('./bert-base-chinese-config.json')
# config.output_hidden_states = False
# bert_model = TFBertModel.from_pretrained('./bert-base-chinese-tf_model.h5',
# config=config)
# q_embedding = bert_model(q_id, attention_mask=q_mask, token_type_ids=q_atn)[0]
# q = tf.keras.layers.GlobalAveragePooling1D()(q_embedding)
# a = tf.keras.layers.GlobalMaxPooling1D()(q_embedding)
# t = q_embedding[:,-1]
# e = q_embedding[:, 0]
# x = tf.keras.layers.Concatenate()([q, a, t, e])
# x = tf.keras.layers.Dropout(0.5)(x)
# x = tf.keras.layers.Dense(1, activation='sigmoid')(x)
# model = tf.keras.models.Model(inputs=[q_id, q_mask, q_atn], outputs=x)
# return model
# %%
# gkf = GroupKFold(n_splits=5).split(X=df_train.q2, groups=df_train.id)
# valid_preds = []
# test_preds = []
# oof = np.zeros((len(df_train),1))
# for fold, (train_idx, valid_idx) in enumerate(gkf):
# train_inputs = [inputs[i][train_idx] for i in range(len(inputs))]
# train_outputs = outputs[train_idx]
# valid_inputs = [inputs[i][valid_idx] for i in range(len(inputs))]
# valid_outputs = outputs[valid_idx]
# K.clear_session()
# model = create_model()
# optimizer = tf.keras.optimizers.Adam(learning_rate=2e-5)
# model.compile(loss='binary_crossentropy', optimizer=optimizer,metrics=[tf.keras.metrics.AUC()])
# model.fit(train_inputs, train_outputs, validation_data = (valid_inputs, valid_outputs), epochs=3, batch_size=64)
# oof_p = model.predict(valid_inputs, batch_size=512)
# oof[valid_idx] = oof_p
# valid_preds.append(oof_p)
# test_preds.append(model.predict(test_inputs, batch_size=512))
# f1,t = search_f1(valid_outputs, valid_preds[-1])
# print('validation score = ', f1)
# %%
# 整个训练集的f1评估,寻找最佳的分类阈值
# best_score, best_t = search_f1(outputs, oof)
# best_score = np.average(best_scores)
# logger.info(f'Best score: {best_score}')
# %%
# sub = np.average(test_preds, axis=0)
# sub = np.argmax(sub, axis=1)
# sub = sub > best_t # 用该分类阈值来输出测试集
# df_test['label'] = sub.astype(int)
# fpath = DATA_PATH / f"model_record/{MODEL_NAME}/{test_time_str}/submission_beike_0.7947.csv"
# fpath = DATA_PATH / f"model_record/{MODEL_NAME}/{test_time_str}/submission_beike_{score}.csv"
# df_test[['id','id_sub','label']].to_csv(fpath,index=False, header=None,sep='\t')
if __name__ == "__main__":
# ERNIE需要的初始学习率较高,参考https://github.com/ymcui/Chinese-BERT-wwm
# 由于BERT/BERT-wwm使用了维基百科数据进行训练,故它们对正式文本建模较好;
# 而ERNIE使用了额外的百度贴吧、知道等网络数据,它对非正式文本(例如微博等)建模有优势。
# 在长文本建模任务上,例如阅读理解、文档分类,BERT和BERT-wwm的效果较好。
# 使用ERNIE请删除.npy文件并重新生成,因为ERNIR的vocab和wwm系列模型不一样
# all_f1, checkpoint_path = train_pytorch(batch_size=128, valid_batch_size=512, epoch=15, lr=2e-5, weight_decay=1e-3,
# n_splits=10, patience=8, device=1, inputs=inputs,
# outputs=outputs, test_inputs=test_inputs)
checkpoint_path = DATA_PATH / f'model_record/{MODEL_NAME}/{TIME_STR}'
predict_pytorch(batch_size=1024, test_inputs=test_inputs, device=1, checkpoint_path=checkpoint_path, score=all_f1)
|
from pyrogram import Client, filters
import os, shutil
from creds import my
from telegraph import upload_file
import logging
logging.basicConfig(level=logging.INFO)
# Pyrogram client for the Telegraph uploader bot; credentials come from creds.my.
TGraph = Client(
    "Image upload bot",
    bot_token = my.BOT_TOKEN,
    api_id = my.API_ID,
    api_hash = my.API_HASH
)
@TGraph.on_message(filters.command("start"))
async def start(client, message):
await message.reply_text(f"<b>Hello {message.from_user.first_name}, My Name Is MeG Telegraph Bot 🥳\n\nI'm A <u>Telegraph Uploader Bot.</u>\n\nSend Me Any <u>Image</u>& I'll Upload It To Telegra.ph & Send You Back A Link\n\n🙂 Join & Support Us Via 👉 @MeGLeech.\n\n 🌟 Powered By @MeGBots</b>", True)
@TGraph.on_message(filters.command("help"))
async def help(client, message):
await message.reply_text(f"<b> 💁 Hey Its Not Tough To Ise Me...!!!\n\n Just Follow These Steps\n\n ▪️ Send Me Any Image (or) GIF (or) MP4 Below 5MB \n ▪️ Wait For To Generate Link For U\n\n 🌟 Powered By @MeGBots || @MeGLeech</b>", True)
@TGraph.on_message(filters.photo)
async def getimage(client, message):
    """Download an incoming photo, upload it to Telegra.ph, reply with the link."""
    tmp = os.path.join("downloads", str(message.chat.id))
    # exist_ok avoids the isdir check (and the race between check and create).
    os.makedirs(tmp, exist_ok=True)
    # Build the download path with os.path.join instead of "/" concatenation.
    imgdir = os.path.join(tmp, str(message.message_id) + ".jpg")
    dwn = await message.reply_text("Downloading Please Wait...🤗", True)
    await client.download_media(
        message=message,
        file_name=imgdir
    )
    await dwn.edit_text("Starting Upload...🤗")
    try:
        response = upload_file(imgdir)
    except Exception as error:
        await dwn.edit_text(f"Oops something went wrong\n{error}")
        return
    finally:
        # BUG FIX: clean up the per-chat download directory even when the
        # upload fails (the original leaked the file on the error path).
        shutil.rmtree(tmp, ignore_errors=True)
    await dwn.edit_text(f"https://telegra.ph{response[0]}")
TGraph.run()
|
import os
import torch
from torch import nn, optim
from l5kit.configs import load_config_data
from l5kit.data import LocalDataManager, ChunkedDataset
from l5kit.data import get_dataset_path
from l5kit.dataset import EgoDatasetVectorized
from l5kit.planning.vectorized.closed_loop_model import VectorizedUnrollModel
from l5kit.simulation.dataset import SimulationConfig
from l5kit.vectorization.vectorizer_builder import build_vectorizer
############################################################################################
def inspection(dataset_name="train_data_loader"):
########################################################################
# Load data and configurations
########################################################################
# set env variable for data
os.environ["L5KIT_DATA_FOLDER"], project_dir = get_dataset_path()
dm = LocalDataManager(None)
cfg = load_config_data(project_dir + "/scenario_generation/config.yaml")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# ===== INIT DATASET
train_zarr = ChunkedDataset(dm.require(cfg["train_data_loader"]["key"])).open()
vectorizer = build_vectorizer(cfg, dm)
train_dataset = EgoDatasetVectorized(cfg, train_zarr, vectorizer)
########################################################################
## Setup the simulator class to be used to unroll the scene
########################################################################
# ==== DEFINE CLOSED-LOOP SIMULATION
num_simulation_steps = 50
sim_cfg = SimulationConfig(use_ego_gt=False, use_agents_gt=False, disable_new_agents=True,
distance_th_far=500, distance_th_close=50, num_simulation_steps=num_simulation_steps,
start_frame_index=0, show_info=True)
weights_scaling = [1.0, 1.0, 1.0]
_num_predicted_frames = cfg["model_params"]["future_num_frames"]
_num_predicted_params = len(weights_scaling)
model = VectorizedUnrollModel(
history_num_frames_ego=cfg["model_params"]["history_num_frames_ego"],
history_num_frames_agents=cfg["model_params"]["history_num_frames_agents"],
num_targets=_num_predicted_params * _num_predicted_frames,
weights_scaling=weights_scaling,
criterion=nn.L1Loss(reduction="none"),
global_head_dropout=cfg["model_params"]["global_head_dropout"],
disable_other_agents=cfg["model_params"]["disable_other_agents"],
disable_map=cfg["model_params"]["disable_map"],
disable_lane_boundaries=cfg["model_params"]["disable_lane_boundaries"],
detach_unroll=cfg["model_params"]["detach_unroll"],
warmup_num_frames=cfg["model_params"]["warmup_num_frames"],
discount_factor=cfg["model_params"]["discount_factor"],
)
train_cfg = cfg["train_data_loader"]
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
model.train()
torch.set_grad_enabled(True)
scene_index = 3
scene_dataset = train_dataset.get_scene_dataset(scene_index)
pass
############################################################################################
if __name__ == "__main__":
inspection(dataset_name="train_data_loader")
|
# -*- coding: utf-8 -*-
"""
:author: Grey Li (李辉)
:url: http://greyli.com
:copyright: © 2018 Grey Li <withlihui@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import logging
import os
from logging.handlers import SMTPHandler, RotatingFileHandler
import click
from flask import Flask, render_template, request
from flask_login import login_required, current_user
from flask_sqlalchemy import get_debug_queries
from flask_wtf.csrf import CSRFError
from bluelog.blueprints.admin import admin_bp
from bluelog.blueprints.auth import auth_bp
from bluelog.blueprints.blog import blog_bp
from bluelog.extensions import bootstrap, db, login_manager, csrf, ckeditor, mail, moment, toolbar, migrate
from bluelog.models import Admin, Post, Category, Comment, Link
from bluelog.settings import config
basedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def create_app(config_name=None):
    """Application factory: build and fully wire up the Bluelog app.

    Falls back to the FLASK_CONFIG environment variable (default
    'development') when no config name is given.
    """
    if config_name is None:
        config_name = os.getenv('FLASK_CONFIG', 'development')

    app = Flask('bluelog')
    app.config.from_object(config[config_name])

    # Run every registration step in the original order: logging first so the
    # later steps can log, request handlers last.
    setup_steps = (
        register_logging,
        register_extensions,
        register_blueprints,
        register_commands,
        register_errors,
        register_shell_context,
        register_template_context,
        register_request_handlers,
    )
    for setup in setup_steps:
        setup(app)
    return app
def register_logging(app):
    """Attach rotating-file and SMTP log handlers (active outside debug mode)."""

    class RequestFormatter(logging.Formatter):
        # Enrich each log record with the request that triggered it.
        def format(self, record):
            record.url = request.url
            record.remote_addr = request.remote_addr
            return super(RequestFormatter, self).format(record)

    request_formatter = RequestFormatter(
        '[%(asctime)s] %(remote_addr)s requested %(url)s\n'
        '%(levelname)s in %(module)s: %(message)s'
    )
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # BUG FIX: RotatingFileHandler raises FileNotFoundError when the logs/
    # directory does not exist yet; create it up front.
    os.makedirs(os.path.join(basedir, 'logs'), exist_ok=True)
    file_handler = RotatingFileHandler(os.path.join(basedir, 'logs/bluelog.log'),
                                       maxBytes=10 * 1024 * 1024, backupCount=10)
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.INFO)
    mail_handler = SMTPHandler(
        mailhost=app.config['MAIL_SERVER'],
        fromaddr=app.config['MAIL_USERNAME'],
        # NOTE(review): 'ADMIN_EMAIL' is a literal string, not an address —
        # presumably this should read app.config['ADMIN_EMAIL']; confirm the
        # config key before changing it.
        toaddrs=['ADMIN_EMAIL'],
        subject='Bluelog Application Error',
        credentials=(app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD']))
    mail_handler.setLevel(logging.ERROR)
    mail_handler.setFormatter(request_formatter)
    if not app.debug:
        app.logger.addHandler(mail_handler)
        app.logger.addHandler(file_handler)
    logging.basicConfig(level=logging.INFO)
def register_extensions(app):
    """Bind every Flask extension instance to the application."""
    bootstrap.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    csrf.init_app(app)
    ckeditor.init_app(app)
    # BUG FIX: removed a stray no-op `ckeditor` expression statement that
    # followed the init_app call.
    mail.init_app(app)
    moment.init_app(app)
    toolbar.init_app(app)
    migrate.init_app(app, db)
def register_blueprints(app):
    """Mount the blog (root), admin and auth blueprints on *app*."""
    mounts = (
        (blog_bp, None),       # served at the site root
        (admin_bp, '/admin'),
        (auth_bp, '/auth'),
    )
    for blueprint, prefix in mounts:
        app.register_blueprint(blueprint, url_prefix=prefix)
def register_shell_context(app):
    """Expose the db handle and the core models inside `flask shell`."""
    @app.shell_context_processor
    def make_shell_context():
        return {
            'db': db,
            'Admin': Admin,
            'Post': Post,
            'Category': Category,
            'Comment': Comment,
        }
def register_template_context(app):
    """Inject admin info, categories, links and the unread-comment count into
    every rendered template."""
    @app.context_processor
    def make_template_context():
        admin = Admin.query.first()
        categories = Category.query.order_by(Category.name).all()
        links = Link.query.order_by(Link.name).all()
        if current_user.is_authenticated:
            # Only logged-in admins see the unread (unreviewed) comment badge.
            unread_comments = Comment.query.filter_by(reviewed=False).count()
        else:
            unread_comments = None
        if not current_user.is_authenticated:
            # Anonymous visitors only get non-private posts: each category's
            # `posts` attribute is overwritten with a filtered list for this
            # request.  NOTE(review): this shadows the SQLAlchemy relationship
            # on the loaded instances — confirm templates read category.posts.
            for category in categories:
                category.posts = Post.query.filter(Post.is_private == False ).\
                    filter(Post.category_id == category.id).all()
        return dict(
            admin=admin, categories=categories,
            links=links, unread_comments=unread_comments)
def register_errors(app):
    """Install HTML error pages for common HTTP errors and CSRF failures."""

    @app.errorhandler(400)
    def bad_request(error):
        """Render the generic bad-request page."""
        return render_template('errors/400.html'), 400

    @app.errorhandler(404)
    def page_not_found(error):
        """Render the not-found page."""
        return render_template('errors/404.html'), 404

    @app.errorhandler(500)
    def internal_server_error(error):
        """Render the internal-error page."""
        return render_template('errors/500.html'), 500

    @app.errorhandler(CSRFError)
    def handle_csrf_error(error):
        """CSRF failures reuse the 400 page, showing the failure description."""
        return render_template('errors/400.html', description=error.description), 400
def register_commands(app):
    """Attach Bluelog's custom Flask CLI commands: initdb, init and forge."""
    @app.cli.command()
    @click.option('--drop', is_flag=True, help='Create after drop.')
    def initdb(drop):
        """Initialize the database."""
        if drop:
            # Destructive path: require explicit confirmation before dropping.
            click.confirm('This operation will delete the database, do you want to continue?', abort=True)
            db.drop_all()
            click.echo('Drop tables.')
        db.create_all()
        click.echo('Initialized database.')
    @app.cli.command()
    @click.option('--username', prompt=True, help='The username used to login.')
    @click.option('--password', prompt=True, hide_input=True,
                  confirmation_prompt=True, help='The password used to login.')
    def init(username, password):
        """Building Bluelog, just for you."""
        click.echo('Initializing the database...')
        db.create_all()
        admin = Admin.query.first()
        if admin is not None:
            # An admin already exists: just update its credentials.
            click.echo('The administrator already exists, updating...')
            admin.username = username
            admin.set_password(password)
        else:
            click.echo('Creating the temporary administrator account...')
            admin = Admin(
                username=username,
                blog_title='Bluelog',
                blog_sub_title="No, I'm the real thing.",
                name='Admin',
                about='Anything about you.'
            )
            admin.set_password(password)
            db.session.add(admin)
        category = Category.query.first()
        if category is None:
            # Ensure at least one category exists for posts to land in.
            click.echo('Creating the default category...')
            category = Category(name='Default')
            db.session.add(category)
        db.session.commit()
        click.echo('Done.')
    @app.cli.command()
    @click.option('--category', default=10, help='Quantity of categories, default is 10.')
    @click.option('--post', default=50, help='Quantity of posts, default is 50.')
    @click.option('--comment', default=500, help='Quantity of comments, default is 500.')
    def forge(category, post, comment):
        """Generate fake data."""
        # Deferred import: the fake-data helpers are only needed when forging.
        from bluelog.fakes import fake_admin, fake_categories, fake_posts, fake_comments, fake_links
        db.drop_all()
        db.create_all()
        click.echo('Generating the administrator...')
        fake_admin()
        click.echo('Generating %d categories...' % category)
        fake_categories(category)
        click.echo('Generating %d posts...' % post)
        fake_posts(post)
        click.echo('Generating %d comments...' % comment)
        fake_comments(comment)
        click.echo('Generating links...')
        fake_links()
        click.echo('Done.')
def register_request_handlers(app):
    """Install per-request hooks: slow-query warnings and visitor logging."""

    @app.after_request
    def query_profiler(response):
        # Warn about any SQL query slower than the configured threshold.
        for q in get_debug_queries():
            if q.duration >= app.config['BLUELOG_SLOW_QUERY_THRESHOLD']:
                app.logger.warning(
                    'Slow query: Duration: %fs\n Context: %s\nQuery: %s\n '
                    % (q.duration, q.context, q.statement)
                )
        return response

    @app.before_request
    def before_request():
        if not current_user.is_authenticated:
            ip = request.remote_addr
            # BUG FIX: request.headers['User-Agent'] raises KeyError when the
            # header is absent; .get() logs None instead of failing the request.
            client = request.headers.get('User-Agent')
            app.logger.info("IP: {0} Client: {1}".format(ip, client))
|
import os
from copy import deepcopy
from core import string_to_component
from _constants import *
from plotting_settings import *
from plotting_settings import plotting_parameters_GUI
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
# Directory where the rendered component icons (.png/.jpg) are cached.
png_directory = os.path.join(os.path.dirname(__file__), ".graphics")
# exist_ok=True replaces the old try/except FileExistsError around os.mkdir.
os.makedirs(png_directory, exist_ok=True)
# Output resolution for the exported icon images.
dpi = 300
class DummyCircuit(object):
    """Minimal stand-in for a circuit object: exposes only the plotting
    parameters (``_pp``) that component drawing code reads through
    ``comp._circuit``."""
    def __init__(self):
        self._pp = plotting_parameters_GUI
def generate_icon(comp, hover=False, selected=False):
    """Render a component's icon for one UI state and save it as PNG and JPG.

    Args:
        comp: component object providing the private plotting hooks used below
            (_set_plot_coordinates, _draw, ...).
        hover: thicken line widths (and, combined with selected, draw a blue
            selection rectangle).
        selected: draw a selection rectangle around the icon.
    """
    pp = deepcopy(plotting_parameters_GUI)
    comp._node_minus_plot = '0,0'
    comp._node_plus_plot = '1,0'
    comp._set_plot_coordinates()
    comp._circuit = DummyCircuit()
    xs, ys, line_type = comp._draw()

    fig = plt.figure(figsize=(1, 0.5))
    ax = fig.add_subplot(111)
    ax.set_axis_off()
    plt.margins(x=0., y=0.)
    ax.set_ylim(-0.25, 0.25)
    ax.set_xlim(0., 1.)
    plt.subplots_adjust(left=0., right=1., top=1., bottom=0.)
    rect_args = pp['rect_args']
    rect_kwargs = pp['rect_kwargs']

    def _thicken_lines():
        # Hovering thickens the line width of every element type.
        # (Deduplicates the per-key += that was copy-pasted in two branches.)
        for key in ('W', 'C', 'L', 'R', 'J', 'D'):
            pp[key]['lw'] += pp['hover_increment']

    if hover and selected:
        _thicken_lines()
        state_string = '_hover_selected'
        ax.add_patch(Rectangle(*rect_args, edgecolor=blue, **rect_kwargs))
    elif hover:
        _thicken_lines()
        state_string = '_hover'
        # ax.add_patch(Rectangle(*rect_args, edgecolor=lighter_blue, **rect_kwargs))
    elif selected:
        state_string = '_selected'
        ax.add_patch(
            Rectangle(*rect_args, edgecolor=light_blue, **rect_kwargs))
    else:
        state_string = ''

    for i in range(len(xs)):
        ax.plot(xs[i], ys[i], color=pp["color"], lw=pp[line_type[i]]['lw'])

    base = os.path.join(png_directory,
                        comp.__class__.__name__ + state_string)
    fig.savefig(base + '.png', transparent=True, dpi=dpi)
    # NOTE(review): JPEG has no alpha channel, so transparent=True cannot take
    # effect on this export — confirm the .jpg copy is still wanted.
    fig.savefig(base + '.jpg', transparent=True, dpi=dpi)
    plt.close()
# Pre-render icons for every supported component type in all four UI states
# (normal, hover, selected, hover+selected).
for el in ['R', 'C', 'L', 'J', 'G']:
    generate_icon(string_to_component(el, None, None, ''))
    generate_icon(string_to_component(el, None, None, ''), hover=True)
    generate_icon(string_to_component(el, None, None, ''), selected=True)
    generate_icon(
        string_to_component(el, None, None, ''), hover=True, selected=True)
from brazilnum.cnpj import format_cnpj
from brazilnum.cpf import format_cpf
from django.contrib.postgres.search import SearchQuery, SearchRank
from django.db.models import F
from django.utils.safestring import mark_safe
from jarbas.chamber_of_deputies.models import Reimbursement
from jarbas.dashboard.admin import list_filters, widgets
from jarbas.dashboard.admin.paginators import CachedCountPaginator
from jarbas.dashboard.admin.subquotas import Subquotas
from jarbas.public_admin.admin import PublicAdminModelAdmin
from jarbas.public_admin.sites import public_admin
# All model fields sorted by their human-readable name (drives form order).
ALL_FIELDS = sorted(Reimbursement._meta.fields, key=lambda f: f.verbose_name)

# Fields rendered with custom admin widgets (see formfield_for_dbfield).
CUSTOM_WIDGETS = ('receipt_url', 'subquota_description', 'suspicions')

# Every non-custom field is read-only. A tuple rather than the original
# generator expression: a generator is exhausted after one consumption and
# would silently yield nothing the second time it is used.
READONLY_FIELDS = tuple(f.name for f in ALL_FIELDS if f.name not in CUSTOM_WIDGETS)
class ReimbursementModelAdmin(PublicAdminModelAdmin):
    """Read-only public admin for Chamber of Deputies reimbursements.

    Adds formatted list columns (supplier document, currency value, links
    to Jarbas and Rosie's tweet) and full-text search over search_vector.
    """

    list_display = (
        'short_document_id',
        'jarbas',
        'rosies_tweet',
        'receipt_link',
        'congressperson_name',
        'year',
        'subquota_translated',
        'supplier_info',
        'value',
        'suspicious'
    )
    search_fields = ('search_vector',)
    list_filter = (
        list_filters.SuspiciousListFilter,
        list_filters.HasReceiptFilter,
        list_filters.StateListFilter,
        list_filters.YearListFilter,
        list_filters.MonthListFilter,
        list_filters.DocumentTypeListFilter,
        list_filters.SubquotaListFilter,
    )
    fields = tuple(f.name for f in ALL_FIELDS)
    readonly_fields = tuple(READONLY_FIELDS)
    list_select_related = ('tweet',)
    paginator = CachedCountPaginator

    def _format_document(self, obj):
        """Format cnpj_cpf as CNPJ (14 digits) or CPF (11 digits).

        Falls back to the raw value for any other length; returns None when
        the field is empty (mirrors the original behavior).
        """
        if obj.cnpj_cpf:
            if len(obj.cnpj_cpf) == 14:
                return format_cnpj(obj.cnpj_cpf)
            if len(obj.cnpj_cpf) == 11:
                return format_cpf(obj.cnpj_cpf)
            return obj.cnpj_cpf

    def supplier_info(self, obj):
        """Supplier name plus its formatted CNPJ/CPF."""
        return mark_safe(f'{obj.supplier}<br>{self._format_document(obj)}')

    supplier_info.short_description = 'Fornecedor'

    def jarbas(self, obj):
        """Icon link to the reimbursement's page on the Jarbas frontend."""
        base_url = '/layers/#/documentId/{}/'
        url = base_url.format(obj.document_id)
        image_src = '/static/favicon/favicon-16x16.png'
        image = '<img alt="Ver no Jarbas" src="{}">'.format(image_src)
        return mark_safe('<a href="{}">{}</a>'.format(url, image))

    jarbas.short_description = ''

    def rosies_tweet(self, obj):
        """Link to Rosie's tweet about this reimbursement, if one exists."""
        try:
            return mark_safe('<a href="{}">🤖</a>'.format(obj.tweet.get_url()))
        except Reimbursement.tweet.RelatedObjectDoesNotExist:
            return ''

    rosies_tweet.short_description = ''

    def receipt_link(self, obj):
        """External link to the receipt, when already fetched."""
        if not obj.receipt_url:
            return ''
        return mark_safe(f'<a target="_blank" href="{obj.receipt_url}">📃</a>')

    receipt_link.short_description = ''

    def suspicious(self, obj):
        return obj.suspicions is not None

    suspicious.short_description = 'suspeito'
    suspicious.boolean = True

    def has_receipt_url(self, obj):
        return obj.receipt_url is not None

    has_receipt_url.short_description = 'recibo'
    has_receipt_url.boolean = True

    def value(self, obj):
        """Total net value formatted as Brazilian currency."""
        return 'R$ {:.2f}'.format(obj.total_net_value).replace('.', ',')

    value.short_description = 'valor'
    value.admin_order_field = 'total_net_value'

    def short_document_id(self, obj):
        return obj.document_id

    short_document_id.short_description = 'Reembolso'

    def subquota_translated(self, obj):
        """Subquota description translated to pt-BR."""
        return Subquotas.pt_br(obj.subquota_description)

    def get_object(self, request, object_id, from_field=None):
        """Fetch the receipt URL lazily the first time an object is viewed."""
        obj = super().get_object(request, object_id, from_field)
        if obj and not obj.receipt_fetched:
            obj.get_receipt_url()
        return obj

    def formfield_for_dbfield(self, db_field, **kwargs):
        """Swap in the custom widget for the three special fields."""
        if db_field.name in CUSTOM_WIDGETS:
            custom_widgets = dict(
                subquota_description=widgets.SubquotaWidget,
                receipt_url=widgets.ReceiptUrlWidget,
                suspicions=widgets.SuspiciousWidget
            )
            kwargs['widget'] = custom_widgets.get(db_field.name)
        return super().formfield_for_dbfield(db_field, **kwargs)

    def get_search_results(self, request, queryset, search_term):
        """Full-text search on search_vector, ranked by relevance."""
        queryset, distinct = super().get_search_results(request, queryset, None)
        if search_term:
            query = SearchQuery(search_term, config='portuguese')
            rank = SearchRank(F('search_vector'), query)
            queryset = queryset.annotate(rank=rank).filter(search_vector=query)
            if not queryset.was_ordered():
                # BUG FIX: order_by returns a NEW queryset; the original
                # discarded it, so results were never relevance-ordered.
                queryset = queryset.order_by('-rank')
        return queryset, distinct
# Expose Reimbursement through the read-only public admin site.
public_admin.register(Reimbursement, ReimbursementModelAdmin)
|
#!/usr/bin/env python
"""Dump selected rows of the ``pdata`` PostgreSQL table as an HTML table."""
from sqlalchemy import create_engine
from sqlalchemy import MetaData, Column, Table, PrimaryKeyConstraint
from sqlalchemy import String, DateTime, Float
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import datetime

Base = declarative_base()
metadata = MetaData()
engine = create_engine('postgresql:///siki', echo=False)


class pdata(Base):
    # Reflect the existing table instead of declaring columns explicitly.
    __table__ = Table('pdata', Base.metadata, autoload=True,
                      autoload_with=engine)


session = sessionmaker()
session.configure(bind=engine)
# session and ORM
s = session()

# html preamble.
# BUG FIX: the title must live in <head>, not the <header> content element.
print("""<!DOCTYPE html>
<html>
<head>
<title>Python postgres table to html</title>
</head>
<body>
<table>
""")
for row in s.query(pdata).filter(pdata.id.in_(['1', '2'])).order_by(pdata.id).all():
    # BUG FIX: '%H:%m' printed hour:MONTH; minutes are '%M'.
    print("<tr><td>%s</td><td>%.3f</td><td>%.3f</td><td>%s</td></tr>" %
          (row.id, row.easting, row.northing, row.d.strftime('%Y-%m-%d %H:%M')))
print("""</table>
</body>
</html>
""")
s.close()
|
"""Benchmark symbolic-regression implementations (fastgplearn vs gplearn vs
bgp) on the Boston housing data, timing runs with mgetool's ``tt`` marker."""
from mgetool.tool import tt  # BUG FIX: this import was duplicated
from sklearn.datasets import load_boston
from fastgplearn.skflow import SymbolicRegressor as FSR
from gplearn.genetic import SymbolicRegressor as SR
from bgp.skflow import SymbolLearning

# NOTE(review): load_boston was removed in scikit-learn 1.2 — this script
# requires an older scikit-learn to run.
x, y = load_boston(return_X_y=True)

sr1 = FSR(population_size=10000, generations=10, stopping_criteria=0.95,
          store=False, p_mutate=0.2, p_crossover=0.5, select_method="tournament",
          tournament_size=5, hall_of_fame=3, store_of_fame=50,
          constant_range=None, constants=None, depth=(2, 5),
          function_set=('add',"sub","mul","sin"), n_jobs=1, verbose=True,
          random_state=0, method_backend='p_numpy', func_p=None,
          # sci_template="default")
          sci_template=None)
sr2 = SR(population_size=10000, generations=10, stopping_criteria=0.95, p_crossover=0.5,
         tournament_size=5, function_set=('add', 'sub', 'mul', 'div'), n_jobs=8, verbose=True, random_state=0,)
sr3 = SymbolLearning(loop="MultiMutateLoop", pop=10000, gen=10, random_state=0, add_coef=False, n_jobs=4)

# tt.t records a timestamp and tt.p prints the intervals — presumably, per
# mgetool's timer API; confirm against mgetool.tool documentation.
tt.t
sr1.fit(x, y)
tt.t
sr1.top_n(5)
# sr2.fit(x, y)
# tt.t
# sr3.fit(x, y)
# tt.t
tt.p
|
"""Streamlit app: upload an image and find which X-men character it shows."""
import streamlit as st  # BUG FIX: this import appeared twice in the original
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import load_model
from PIL import Image, ImageOps
import numpy as np

# Page chrome; set_page_config must be the first Streamlit call in the script.
st.set_page_config(
    page_title='Find your X-men',
    page_icon='icon.png'
)
st.image('icon.png')
st.subheader("Upload an image and find which X-men character it contains")
st.markdown('''
Currently this app best recognises 10 characters:-
Angel,
Beast,
Cyclops,
Iceman,
Magneto,
Mystique,
Phoenix,
Professor X,
Storm and
Wolverine
But feel free to upload any fun images you like.
''')
st.set_option('deprecation.showfileUploaderEncoding', False)

# Load the trained classifier once at startup.
# NOTE(review): consider caching with @st.cache_resource so reruns do not
# reload the model from disk.
model = load_model("final.h5")
def import_and_predict(image_data, model):
    """Resize/normalise an uploaded PIL image and run the classifier.

    Args:
        image_data: PIL.Image as returned by ``Image.open``.
        model: loaded Keras model expecting a (1, 256, 256, 3) batch.

    Returns:
        The raw ``model.predict`` output (per-class scores).
    """
    size = (256, 256)
    # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the
    # identical resampling filter under its current name.
    image = ImageOps.fit(image_data, size, Image.LANCZOS)
    # ImageOps.fit already yields a 256x256 image; the resize below is a
    # harmless no-op kept alongside the RGB conversion.
    image = image.convert("RGB").resize(size)
    batch = np.asarray(image)[np.newaxis, ...]
    return model.predict(batch)
# Uploaded image file (None until the user picks one).
file = st.file_uploader("Please upload an image file", type=["jpg", "png"])
class_btn = st.button("CLASSIFY!!")
# Preview the upload immediately, before any classification happens.
if file is not None:
    image = Image.open(file)
    st.image(image, caption='Uploaded Image', use_column_width=True)
if class_btn:
    # The button can be clicked before a file is chosen, so re-check here.
    if file is None:
        st.text("Please upload an image file")
    else:
        prediction = import_and_predict(image, model)
        class_names=['Angel','Beast','Cyclops', 'Iceman', 'Magneto', 'Mystique', 'Phoenix', 'Professor X', 'Storm', 'Wolverine']
        # argmax over the score vector selects the most likely class.
        string="This image is most likely: "+class_names[np.argmax(prediction)]
        st.success(string)
st.markdown('''
This model uses Transfer learning from the ResNet50 model to classify the images into classes.
''')
st.image('bottom.jpg')
st.markdown('''
*Built with :heart: by [Pranjal Singh](https://github.com/Pranjal198).*
*If you like the project do star and share the repository on [GitHub](https://github.com/Pranjal198/X-men-classifier) !*
''')
|
from foo.bar import Grill
class FooBarExample:
    """Example factory wrapping :class:`Grill` construction."""

    def make_grill(self, grill):
        """Build and return a fresh Grill instance.

        :rtype: Grill
        """
        new_grill = Grill()
        return new_grill
|
################################################################################################
# #
# G code interpreter and executer for 2D CNC laser engraver using Raspberry Pi #
# Xiang Zhai, Oct 1, 2013 #
# zxzhaixiang at gmail.com #
# Improved and updated for 3D support by dionys rosario #
# dionyself at gmail.com #
# #
################################################################################################
import RPi.GPIO as GPIO
import motor_control
from motor import Motor
import time
from numpy import pi, sin, cos, sqrt, arccos, arcsin
filename = 'Xiang.nc' # file name of the G code commands
GPIO.setmode(GPIO.BOARD)  # use physical board pin numbering
Laser_switch = 15  # board pin driving the laser enable line
GPIO.setup(Laser_switch, GPIO.OUT)
GPIO.output(Laser_switch, False)  # make sure the laser starts switched off
def XYposition(lines):
    """Parse the X and Y coordinates out of a G-code movement command.

    Args:
        lines (str): one G-code line containing 'X<num>' and 'Y<num>'.

    Returns:
        (float, float): the X and Y positions.

    Raises:
        ValueError: if 'X' or 'Y' is not present in the line.
    """
    def _number_after(tag):
        # Scan the run of number characters (digits, '.', '-') following the
        # axis letter. BUG FIX: the original indexed past the end of the
        # string (IndexError) when the number terminated the line.
        start = lines.index(tag) + 1
        i = start
        while i < len(lines) and ((47 < ord(lines[i]) < 58) or lines[i] in '.-'):
            i += 1
        return float(lines[start:i])

    return _number_after('X'), _number_after('Y')
def IJposition(lines):
    """Parse the I and J arc-center offsets from a G02/G03 command.

    Args:
        lines (str): one G-code line containing 'I<num>' and 'J<num>'.

    Returns:
        (float, float): the I and J offsets (relative to current position).

    Raises:
        ValueError: if 'I' or 'J' is not present in the line.
    """
    def _number_after(tag):
        # BUG FIX: bounds-checked scan; the original raised IndexError when
        # the number ran to the very end of the string.
        start = lines.index(tag) + 1
        i = start
        while i < len(lines) and ((47 < ord(lines[i]) < 58) or lines[i] in '.-'):
            i += 1
        return float(lines[start:i])

    return _number_after('I'), _number_after('J')
def moveto(MX, x_pos, dx, MY, y_pos, dy, speed, engraving):
"""Move to (x_pos,y_pos) (in real unit)"""
stepx = int(round(x_pos/dx))-MX.position
stepy = int(round(y_pos/dy))-MY.position
Total_step = sqrt((stepx**2+stepy**2))
if Total_step > 0:
if lines[0:3] == 'G0 ': # fast movement
print 'No Laser, fast movement: Dx=', stepx, ' Dy=', stepy
Motor_control.Motor_Step(MX, stepx, MY, stepy, 50)
else:
print 'Laser on, movement: Dx=', stepx, ' Dy=', stepy
Motor_control.Motor_Step(MX, stepx, MY, stepy, speed)
return 0
def print_gcode_file(filename, **config):
global Laser_switch
MX = Motor(23, 22, 24, 26) # pin number for a1,a2,b1,b2. a1 and a2 form coil A b1 and b2 form coil B
MY = Motor(11, 7, 5, 3)
MZ = Motor(11, 7, 5, 3) # experimental 3d support by Diony
dx = congig.get('dx', 0.075) # resolution in x direction. Unit: mm
dy = congig.get('dy', 0.075) # resolution in y direction. Unit: mm
Engraving_speed = congig.get('speed', 0.1) # unit=mm/sec=0.04in/sec
speed=Engraving_speed/min(dx,dy) #step/sec
try:#read and execute G code
with open(filename,'r') as gcode_file:
## validategcode(filename, gcode_file)
for lines in gcode_file:
if lines==[]:
1 #blank lines
elif lines[0:3]=='G90':
print 'start'
elif lines[0:3]=='G20':# working in inch
dx/=25.4
dy/=25.4
print 'Working in inch'
elif lines[0:3]=='G21':# working in mm
print 'Working in mm'
elif lines[0:3]=='M05':
GPIO.output(Laser_switch,False)
print 'Laser turned off'
elif lines[0:3]=='M03':
GPIO.output(Laser_switch,True)
print 'Laser turned on'
elif lines[0:3]=='M02':
GPIO.output(Laser_switch,False)
print 'finished. shuting down'
break
elif (lines[0:3]=='G1F')|(lines[0:4]=='G1 F'):
1#do nothing
elif (lines[0:3]=='G0 ')|(lines[0:3]=='G1 ')|(lines[0:3]=='G01'): #|(lines[0:3]=='G02')|(lines[0:3]=='G03'):
#linear engraving movement
if (lines[0:3]=='G0 '):
engraving=False
else:
engraving=True
[x_pos,y_pos]=XYposition(lines)
moveto(MX,x_pos,dx,MY,y_pos,dy,speed,engraving)
elif (lines[0:3]=='G02')|(lines[0:3]=='G03'): #circular interpolation
old_x_pos=x_pos
old_y_pos=y_pos
[x_pos,y_pos]=XYposition(lines)
[i_pos,j_pos]=IJposition(lines)
xcenter=old_x_pos+i_pos #center of the circle for interpolation
ycenter=old_y_pos+j_pos
Dx=x_pos-xcenter
Dy=y_pos-ycenter #vector [Dx,Dy] points from the circle center to the new position
r=sqrt(i_pos**2+j_pos**2) # radius of the circle
e1=[-i_pos,-j_pos] #pointing from center to current position
if (lines[0:3]=='G02'): #clockwise
e2=[e1[1],-e1[0]] #perpendicular to e1. e2 and e1 forms x-y system (clockwise)
else: #counterclockwise
e2=[-e1[1],e1[0]] #perpendicular to e1. e1 and e2 forms x-y system (counterclockwise)
#[Dx,Dy]=e1*cos(theta)+e2*sin(theta), theta is the open angle
costheta=(Dx*e1[0]+Dy*e1[1])/r**2
sintheta=(Dx*e2[0]+Dy*e2[1])/r**2 #theta is the angule spanned by the circular interpolation curve
if costheta>1: # there will always be some numerical errors! Make sure abs(costheta)<=1
costheta=1
elif costheta<-1:
costheta=-1
theta=arccos(costheta)
if sintheta<0:
theta=2.0*pi-theta
no_step=int(round(r*theta/dx/5.0)) # number of point for the circular interpolation
for i in range(1,no_step+1):
tmp_theta=i*theta/no_step
tmp_x_pos=xcenter+e1[0]*cos(tmp_theta)+e2[0]*sin(tmp_theta)
tmp_y_pos=ycenter+e1[1]*cos(tmp_theta)+e2[1]*sin(tmp_theta)
moveto(MX,tmp_x_pos,dx,MY, tmp_y_pos,dy,speed,True)
except KeyboardInterrupt:
pass
finally:
GPIO.output(Laser_switch,False) # turn off laser
moveto(MX,0,dx,MY,0,dy,50,False) # move back to Origin
MX.unhold()
MY.unhold()
GPIO.cleanup()
|
class Stoplight:
    """A stoplight holding a single color state."""

    def __init__(self, color):
        # color: display color of the light, e.g. "Green".
        self.color = color


sl = Stoplight("Green")
# BUG FIX: 'print sl.color' is Python-2-only syntax; the parenthesized form
# behaves identically under both Python 2 and 3.
print(sl.color)
|
from fastapi import FastAPI, HTTPException
from fastapi.params import Depends
from crawler_api.crawlers import COURTS
from crawler_api.models.requests import LegalProcess
from crawler_api.models.response import LegalProcessDetailResponse, Message
from crawler_api.session import HttpAsyncSession
# Public FastAPI application exposing the court crawlers as a JSON API.
app = FastAPI(
    title='Legal Process Crawler',
    description=(
        'It is a simple API to get legal process detail on TJAL or TJMS website '
        'and convert the search result HTML to JSON'
    )
)
# Single shared HTTP session, opened/closed with the application lifecycle
# (see the startup/shutdown handlers below).
http_async_session = HttpAsyncSession()
@app.on_event("startup")
def startup():
http_async_session.start()
@app.on_event("shutdown")
async def shutdown_event():
await http_async_session.stop()
@app.post(
    "/legal-process",
    response_model=LegalProcessDetailResponse,
    description='Get Legal Process detail',
    responses={404: {"model": Message}}
)
async def show_legal_process_detail(
    legal_process: LegalProcess,
    session: HttpAsyncSession = Depends(http_async_session)
) -> LegalProcessDetailResponse:
    """Crawl the requested court for a process number and return its detail.

    422 when no crawler exists for the court; 404 when nothing is found.
    """
    crawler_cls = COURTS.get(legal_process.court)
    if crawler_cls is None:
        raise HTTPException(status_code=422, detail="Crawler not implemented")
    degrees = tuple(await crawler_cls(session).execute(number=legal_process.number))
    if not degrees:
        raise HTTPException(status_code=404, detail="Legal Process not found")
    return LegalProcessDetailResponse(degrees=degrees)
|
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import defusedxml_local.defusedxml.sax
import sys
from shared.instruction import Instruction, Operand, GroupOperand, Asm
import shared.builder_utils as builder_utils
import copy
from shared.builder_exception import BuilderException
# Needed to create class to interpret parsed XML data
from xml.sax.handler import ContentHandler
def asm_op_index(op):
    """Sort key for asm attributes: numeric suffix of 'opN' names, else 0."""
    return int(op[2:]) if op.startswith("op") else 0
class InstructionFileHandler(ContentHandler, object):
    """SAX content handler that builds Instruction objects from an
    instruction XML file and registers them with the given container.

    Element protocol: <I> opens an instruction, <O> an operand (a group-type
    operand collects the plain operands that follow it), <asm> the assembly
    template. Parsing state is threaded through the current* attributes.
    """
    def __init__(self, file_path, instr_file):
        super().__init__()
        self.instructionFile = instr_file  # container receiving parsed instructions
        self.filePath = file_path  # source path (used for log messages only)
        self.nameStack = list()  # stack of currently-open element names
        self.currentInstruction = None  # <I> element being built
        self.currentGroupOperand = None  # open group operand collecting children
        self.currentOperand = None  # <O> element being built
        self.currentChars = None  # character data accumulated for the current element
        self.currentAsm = None  # <asm> element being built
    def startDocument(self):
        """SAX callback: invoked once before any element callbacks."""
        print('Starting parsing "%s" ...' % self.filePath)
    def endDocument(self):
        """SAX callback: invoked once after the whole document is parsed."""
        print("End parsing.")
    def startElement(self, name, attrs):
        """Dispatch <I>/<O>/<asm> openings to the matching start_* method."""
        self.nameStack.append(name)
        self.currentChars = ""
        if name in ["I", "O", "asm"]:
            self.__getattribute__("start_" + name)(attrs)
    def endElement(self, name):
        """Dispatch </I>/</O>/</asm> closings to the matching end_* method."""
        self.nameStack.pop()
        if name in ["I", "O", "asm"]:
            self.__getattribute__("end_" + name)()
    def characters(self, content):
        """SAX callback: accumulate text content (may arrive in chunks)."""
        self.currentChars += content
    def start_I(self, attrs):
        """Open a new Instruction and copy the recognized attributes onto it."""
        self.currentInstruction = Instruction()
        names = attrs.getNames()
        for name in names:
            if name in [
                "name",
                "form",
                "isa",
                "group",
                "aliasing",
                "extension",
            ]:
                setattr(self.currentInstruction, name, attrs[name])
            elif name == "class":
                # 'class' is a Python keyword, so it is stored as 'iclass'.
                self.currentInstruction.iclass = attrs[name]
            else:
                print("WARNING: not handled instruction attribute : %s" % name)
    def end_I(self):
        """Register the completed instruction with the instruction file."""
        if self.currentInstruction:
            self.instructionFile.add_instruction(self.currentInstruction)
    def create_operand(self, opr_type):
        """Return a GroupOperand for container types, a plain Operand otherwise.

        A group operand also becomes the open group that collects the plain
        operands parsed after it.
        """
        if opr_type in [
            "Group",
            "Branch",
            "AuthBranch",
            "LoadStore",
            "AuthLoadStore",
            "ALU",
            "DataProcessing",
            "CacheOp",
            "SystemOp",
        ]:
            self.currentGroupOperand = GroupOperand()
            return self.currentGroupOperand
        else:
            return Operand()
    def start_O(self, attrs):
        """Open a new operand, copy attributes, and derive its bit width."""
        self.currentOperand = self.create_operand(attrs["type"])
        names = attrs.getNames()
        my_attr_list = [
            "name",
            "type",
            "bits",
            "value",
            "reserved",
            "access",
            "choices",
            "choices2",
            "choices3",
            "exclude",
            "differ",
            "slave",
            "layout-type",
            "layout-multiple",
            "reg-count",
            "reg-index-alignment",
            "elem-width",
            "uop-param-type",
            "sizeType",
        ]
        for name in names:
            if name in my_attr_list:
                setattr(self.currentOperand, name, attrs[name])
            elif name == "class":
                # 'class' is a Python keyword, so it is stored as 'oclass'.
                self.currentOperand.oclass = attrs[name]
            else:
                # Unknown attributes are preserved rather than dropped.
                self.currentOperand.set_extra_attribute(name, attrs[name])
        if not self.currentOperand.bits:
            self.currentOperand.width = 0
        else:
            self.currentOperand.width = builder_utils.get_bits_size(self.currentOperand.bits)
    def end_O(self):
        """Attach the finished operand to its group or to the instruction.

        Closing the group operand itself finalizes the group; a plain operand
        parsed while a group is open is added to that group instead of
        directly to the instruction.
        """
        if self.currentOperand:
            if self.currentGroupOperand:
                if self.currentGroupOperand == self.currentOperand:
                    self.currentInstruction.add_operand(self.currentGroupOperand)
                    self.currentGroupOperand = None
                else:
                    self.currentGroupOperand.add_operand(self.currentOperand)
            else:
                self.currentInstruction.add_operand(self.currentOperand)
        elif self.currentGroupOperand:
            self.currentInstruction.add_operand(self.currentGroupOperand)
            self.currentGroupOperand = None
        self.currentOperand = None
    def start_asm(self, attrs):
        """Open the asm template; opN attributes are collected in numeric order."""
        self.currentAsm = Asm()
        names = sorted(attrs.getNames(), key=lambda x: asm_op_index(x))
        for name in names:
            if name.find("op") == 0:
                self.currentAsm.ops.append(attrs[name])
            elif name == "format":
                self.currentAsm.format = attrs["format"]
            else:
                print("WARNING: not handled asm attribute : %s" % name)
    def end_asm(self):
        """Attach the finished asm template to the current instruction."""
        self.currentInstruction.asm = self.currentAsm
        self.currentAsm = None
class InstructionFileParser(object):
    """Drives SAX parsing of an instruction XML file into an instruction-file
    container, reporting the error location on failure."""

    def __init__(self, instr_file):
        self.instrFile = instr_file  # container passed to the SAX handler

    def parse(self, file_path):
        """Parse file_path; on a parsing error, print the traceback and the
        file/line/column where it occurred, then exit(1)."""
        ifile_handler = InstructionFileHandler(file_path, self.instrFile)
        try:
            defusedxml_local.defusedxml.sax.parse(file_path, ifile_handler)
        except Exception:
            # BUG FIX: narrowed from BaseException so KeyboardInterrupt and
            # SystemExit propagate instead of being converted to exit(1).
            import traceback

            traceback.print_exc()
            loc = ifile_handler._locator  # SAX locator installed by the parser
            print(
                'Parsing error, file "%s", line %d, column %d'
                % (file_path, loc.getLineNumber(), loc.getColumnNumber())
            )
            sys.exit(1)
|
import hfo
class Goalie:
    """Placeholder goalie agent for the HFO (Half Field Offense) framework."""
    def __init__(self):
        # No state yet; behaviour is expected to be added later.
        pass
|
#
# Copyright 2014 ARM Limited and Contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of ARM Limited nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
'''change the copy right year
there are 4 cases
1 2014 => 2014
2 20xx => 20xx-14
3 20xx-bb => 20xx-14
4 20xx-14 => 20xx-14
'''
import re, os, sys
import fileinput
import argparse
def extend_copyright(line, year):
    """Extend the copyright year in *line* up to *year* (two digits).

    Cases handled:
      'Copyright 2014 ...'    -> unchanged (already mentions *year*)
      'Copyright 2011-12 ...' -> 'Copyright 2011-14 ...'
      'Copyright 2012 ...'    -> 'Copyright 2012-14 ...'

    BUG FIX: lines with no recognizable 20xx year are now returned unchanged;
    the original fell through and returned None, which crashed the caller's
    replace_line with a non-string replacement.
    """
    # Already mentions the target year: nothing to do. re.escape guards
    # against regex metacharacters in the caller-supplied year string.
    if re.search(re.escape(year), line):
        return line
    # match the format like 'Copyright 2011-12 ARM Limited'
    p20xx_bb = re.compile(r'(.*)(20\d\d)(-)(\d\d)(.*)')
    if p20xx_bb.match(line):
        return p20xx_bb.sub(r'\g<1>\g<2>\g<3>{}\g<5>'.format(year), line)
    # match the format like 'Copyright 2012 ARM Limited'
    p20xx = re.compile(r'(.*)(20\d\d)(.*)')
    if p20xx.match(line):
        return p20xx.sub(r'\g<1>\g<2>-{}\g<3>'.format(year), line)
    # No recognizable year: return the line untouched.
    return line
def replace_line(file,search_exp,replace_exp):
    # Rewrite *file* in place: every line containing search_exp has that
    # substring replaced with replace_exp. fileinput with inplace=1 redirects
    # stdout into the file, so writing each (possibly modified) line to
    # sys.stdout emits the new file contents.
    for line in fileinput.input(file, inplace=1):
        if search_exp in line:
            line = line.replace(search_exp, replace_exp)
        sys.stdout.write(line)
def test():
    """Self-test for extend_copyright covering the four documented cases.

    Prints 'test failed' on the first mismatch, 'test success.' otherwise.
    The parenthesized single-argument print form behaves identically under
    Python 2 and 3 (the original used Python-2-only print statements).
    """
    year = '14'
    # (input, expected) pairs, one per copyright-line shape.
    cases = [
        ('Copyright 2011-12 ARM Limited', 'Copyright 2011-14 ARM Limited'),
        ('Copyright 2013-14 ARM Limited', 'Copyright 2013-14 ARM Limited'),
        ('Copyright 2012 ARM Limited', 'Copyright 2012-14 ARM Limited'),
        ('Copyright 2014 ARM Limited', 'Copyright 2014 ARM Limited'),
    ]
    for line, expected in cases:
        if extend_copyright(line, year) != expected:
            print("test failed")
            return
    print("test success.")
def extend_copyright_all(extend_to_year):
    """Extend the copyright year of every file under the current directory.

    Skips this script itself; only lines matching the ARM copyright notice
    are rewritten (via replace_line).
    """
    all_files = []
    for root, dirs, files in os.walk(os.getcwd()):
        for f in files:
            # exclude this script file
            if f != os.path.basename(sys.argv[0]):
                all_files.append(os.path.join(root, f))
    pcopy_right = re.compile(r'.*Copyright [0-9-]* ARM Limited.*')
    for f in all_files:
        # BUG FIX: read the whole file under a context manager. The original
        # only closed the handle inside the match branch, leaking it for
        # every file with no copyright line.
        with open(f, 'r') as fd:
            file_lines = fd.readlines()
        for line in file_lines:
            m = pcopy_right.match(line)
            if m:
                old_line = m.group(0)
                new_line = extend_copyright(old_line, extend_to_year)
                replace_line(f, old_line, new_line)
def main():
    """CLI entry point: run the self-tests, or extend copyright years."""
    parser = argparse.ArgumentParser(description='Extend copyright year to the year you specified.')
    parser.add_argument('year', nargs='?', help='year you want to extend, only 2 digitals, e.g.\'14\'')
    parser.add_argument('-t', '--test', action='store_true', help='run the test')
    args = parser.parse_args()

    if args.test:
        test()
        return

    # check input year includes 2 digitals
    two_digits = re.compile(r'^\d\d$')
    if args.year and two_digits.search(args.year):
        extend_copyright_all(args.year)
    else:
        parser.print_help()


if __name__ == '__main__':
    main()
|
from pandas import DataFrame
from shutil import rmtree
from math import isclose
from glob import glob
import chemw
import re, os
def test_inits():
    """Constructors expose boolean flags and a removable output folder."""
    # import the class modules
    chem_mw = chemw.ChemMW()
    phreeq_db = chemw.PHREEQdb()
    print(os.getcwd())
    for flag in [chem_mw.verbose, chem_mw.final, chem_mw.end, phreeq_db.verbose]:
        # isinstance is the idiomatic type check (the original used
        # 'type(x) is bool', which rejects subclasses).
        assert isinstance(flag, bool)
    rmtree(phreeq_db.output_path)
def test_accuracy():
    """ChemMW reproduces known molecular weights to within 0.1%."""
    # chemicals with known MW (formula -> expected g/mol)
    test_chemicals = {
        'Na2.43_Cl_(OH)2_(OH)1.2_(OH)': 162.7,
        'Na2.43Cl(Ca(OH)2)1.2':180.2,
        'Na2.43Cl:2H2O': 127.3,
        'Na2.43Cl2.5:2H2O': 180.5,
        'CaCl2:(MgCl2)2:12H2O': 517.6,
        'Na2SO4:3K2SO4': 664.8,
        'K2SO4:CaSO4:H2O': 328.4,
        'Na.96Al.96Si2.04O6:H2O ': 219.2,
        'Ca1.019Na.136K.006Al2.18Si6.82O18:7.33H2O': 714.4
    }
    # calculate the MW for the dictionary of chemicals
    chem_mw = chemw.ChemMW()
    for chemical, expected_mw in test_chemicals.items():
        chem_mw.mass(chemical)
        tolerance = chem_mw.mw * 0.001  # 99.9% accuracy
        # Assert the comparison directly instead of the original
        # 'if not isclose: assert False / else: assert True' branches.
        assert isclose(chem_mw.raw_mw, expected_mw, rel_tol=tolerance)
    # affirm that iterated entities are reset to zero
    for counter in [chem_mw.groups, chem_mw.layer, chem_mw.skip_characters]:
        assert counter == 0
def test_phreeq_db():
    """PHREEQdb.process writes one JSON file per input .dat database."""
    # process the PHREEQ databases
    phreeq_databases = [db for db in glob('databases/*.dat')]
    # Raw string avoids the invalid-escape DeprecationWarning of the original
    # '...\.dat...' literal; compiled once instead of per-use.
    db_stem = re.compile(r'([A-Za-z0-9_\.]+(?=\.dat))')
    phreeq_db = chemw.PHREEQdb()
    for db in phreeq_databases:
        print('\n\n\n', db_stem.search(db).group(), 'database\n', '='*len(db))
        phreeq_db.process(db)
    # verify the output folder and its contents
    for db in phreeq_databases:
        json_name = db_stem.search(db).group() + '.json'
        assert os.path.exists(os.path.join(phreeq_db.output_path, json_name))
    assert isinstance(phreeq_db.db_name, str)
    assert isinstance(phreeq_db.db, DataFrame)
    # delete the directory
    rmtree(phreeq_db.output_path)
"""
Set the min/max for each panel based on input data
"""
from typing import Dict
from ....tk.arrayTK import minmax
from ..readFileOrList import readFileOrList
import numpy
def set_panel_minmax(params):
    # type: (Dict) -> Dict
    """Set the min/max extents for each panel based on its input data.

    Args:
        params (dict): plotting parameter dictionary

    Returns:
        an updated parameter dictionary (params is also modified in place,
        under params['internal']['panel']['minmax'], keyed by panel id)
    """
    params['internal']['panel']['minmax'] = dict()
    p_minmax = params['internal']['panel']['minmax']
    def update(old, new):
        # NOTE(review): 'x' keeps the lesser and 'y' the greater of the two
        # stored values; when the values are [min, max] pairs (see mm below)
        # min/max compare the lists lexicographically — confirm this is the
        # intended merge semantics rather than elementwise min/max.
        return {'x': min(old['x'], new['x']), 'y': max(old['y'], new['y'])}
    def aux(new_minmax_dict, panel_id):
        # Merge with a previously-seen extent for the same panel, if any.
        if panel_id in p_minmax:
            return update(new_minmax_dict, p_minmax[panel_id])
        else:
            return new_minmax_dict
    for p in params['data']:
        panel_id = p['which_panel']
        if p['file'] is None and p['values'] is None:
            # Nothing to read for this entry.
            continue
        else:
            data = readFileOrList(p['file'], p['values'], p['skip_rows'])
            row_count, column_count = numpy.shape(data)
            if row_count == 0 or column_count == 0:
                continue
            elif column_count == 1:
                # TODO(review): leftover debug prints below.
                print(p['file'])
                print("data.shape", data.shape)
                # Single column: x spans the row indices, y the data range.
                mm = [
                    [0, row_count - 1],
                    minmax(data[:, 0])
                ]
            else:
                mm = minmax(data[:, 0], data[:, 1])
            p_minmax[panel_id] = aux({'x': mm[0], 'y': mm[1]}, panel_id)
    return params
|
import time
import cv2
import numpy as np
def run():
    """Detect Harris corners on a sample hand image, refine them to
    sub-pixel accuracy, and write an annotated copy under out/.

    Component centroids are drawn in red, refined corners in green.
    """
    filename = '../../dataset/Hands/Hand_0000083.jpg'
    img = cv2.imread(filename)
    # small = cv2.resize(img, (0, 0), fx=0.9, fy=0.9)
    small = img
    gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)
    # subpixel - further refinement
    gray = np.float32(gray)
    dst = cv2.cornerHarris(gray, 2, 3, 0.04)
    dst = cv2.dilate(dst, None)
    # Keep only strong responses (>1% of the maximum).
    ret, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)
    dst = np.uint8(dst)
    # find centroids of the connected corner regions
    ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)
    # define the criteria to stop and refine the corners
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
    corners = cv2.cornerSubPix(gray, np.float32(centroids), (5, 5), (-1, -1), criteria)
    # draw the centroids (red) and the refined corners (green)
    res = np.hstack((centroids, corners))
    # BUG FIX: np.int0 was removed in NumPy 2.0; np.intp is the same
    # platform-sized integer type it aliased.
    res = np.intp(res)
    img[res[:, 1], res[:, 0]] = [0, 0, 255]
    img[res[:, 3], res[:, 2]] = [0, 255, 0]
    cv2.imwrite('out/subpixel' + str(time.time()) + '.png', img)
|
#!/dark/usr/anaconda/bin/python
#/usr/bin/env python
# Legacy Python 2 CGI script: plot a FITS spectrum and embed the PNG in a
# minimal HTML response.
import sys,os,cgi,string,glob
os.environ['HOME']='../tmp/'
from socket import gethostname, gethostbyname
ip = gethostbyname(gethostname())
import urllib,urllib2
hostname=gethostname()
# Pick the site-packages path (and a location tag) for the host we run on.
if hostname in ['engs-MacBook-Pro-4.local','valenti-macbook.physics.ucsb.edu','valenti-mbp-2',\
'svalenti-lcogt.local','svalenti-lcogt.lco.gtn','valenti-mbp-2.lco.gtn',\
'valenti-mbp-2.attlocal.net','dhcp43168.physics.ucdavis.edu',\
'valenti-MacBook-Pro-2.local']:
    sys.path.append('/Users/svalenti/lib/python2.7/site-packages/')
    location='SV'
elif hostname in ['dark']:
    sys.path.append('/dark/hal/lib/python2.7/site-packages/')
    location='dark'
else:
    location='deneb'
    sys.path.append('/home/cv21/lib/python2.7/site-packages/')
from numpy import array
import scipy
import pyfits,os,glob
import matplotlib
#matplotlib.use('Agg')
import numpy as np
from pylab import *
# CGI parameters: SN (FITS file name) and directory; default to a test file.
form = cgi.FieldStorage()
SN = form.getlist('SN')
directory = form.getlist('directory')
if not SN:
    SN=['ttMrk1048_20140907_None_2014-09-07.fits']
    directory=['/dark/hal/public_html/AGNKEY/test/']
print "Content-Type: text/html\n"
print '<html><body>'
#print str(directory[0])+str(SN[0])
# Open the requested spectrum.
fi=str(directory[0])+str(SN[0])
spec = pyfits.open(fi)
head = spec[0].header
graf=1
# Extract a 1-D flux array regardless of the FITS data dimensionality.
if spec[0].data.ndim == 1: fl = spec[0].data
elif spec[0].data.ndim == 2: fl = spec[0].data[:,0]
elif spec[0].data.ndim == 3: fl = spec[0].data[0,0,:]
naxis1 = head['naxis1']
# Build the wavelength axis from linear WCS keywords; fall back to parsing
# the IRAF multispec WAT2_001 card when they are missing.
try:
    crpix1 = head['crpix1']
    crval1 = head['crval1']
    try: cdelt1 = head['cdelt1']
    except: cdelt1 = head['cd1_1']
    pix = array(range(1,naxis1+1,1))
    # NOTE(review): the line below immediately overwrites the one above,
    # using len(fl) instead of naxis1 — presumably a guard against a
    # mismatched header; confirm.
    pix = array(range(1,len(fl)+1,1))
    lam = (pix-crpix1)*cdelt1+crval1
except:
    try:
        WAT= head['WAT2_001']
        pix = array(range(1,naxis1+1,1))
        crpix1=string.split(string.split(WAT,'"')[1])[0]
        crval1=string.split(string.split(WAT,'"')[1])[3]
        cdelt1=string.split(string.split(WAT,'"')[1])[4]
        lam = (pix-float(crpix1))*float(cdelt1)+float(crval1)
    except:
        # Could not reconstruct a wavelength axis: skip plotting.
        graf=0
if graf:
    # Clip the y-range around the 10th/90th flux deciles.
    # NOTE(review): len(fl)/10 relies on Python 2 integer division — it would
    # break under Python 3.
    massimo=np.sort(fl)[len(fl)-len(fl)/10]
    minimo=np.sort(fl)[len(fl)/10]
    delta=(massimo-minimo)/10
    fileoutput='../tmp/pippo.png'
    titlin=SN[0]
    xlabel('Angstrom')
    ylabel('Flux')
    title(' '+str(titlin)+'')
    plot(lam,fl)
    ylim(minimo-4*delta,massimo+5*delta)
    savefig(fileoutput, format='png')
if graf:
    print '<img src="'+fileoutput+'" alt="" height="500" width="800">'
else:
    print '<h2> ERROR: problem to read the fits </h2>'
print '</body></html>'
|
import pytest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from ..cifi import analyzeObservations
from .create_test_data import createTestDataSet
# Minimum-observation thresholds exercised by each test below.
MIN_OBS = range(5, 10)
def test_analyzeObservations_noClasses():
    """analyzeObservations with classes=None: the per-truth table and the
    single 'All' summary row must match the synthetic expectations."""
    for min_obs in MIN_OBS:
        # Fresh synthetic data set for this findability threshold.
        (observations_test, all_truths_test,
         _linkage_members, _all_linkages, summary_test) = createTestDataSet(
            min_obs, 5, 20)

        all_truths, findable_observations, summary = analyzeObservations(
            observations_test, min_obs=min_obs, classes=None)

        assert_frame_equal(
            all_truths, all_truths_test[["truth", "num_obs", "findable"]])
        expected_summary = summary_test[summary_test["class"] == "All"]
        assert_frame_equal(
            summary,
            expected_summary[["class", "num_members", "num_obs", "findable"]])
def test_analyzeObservations_withClassesColumn():
    """analyzeObservations with the truth classes given as a column name."""
    for min_obs in MIN_OBS:
        # Fresh synthetic data set for this findability threshold.
        (observations_test, all_truths_test,
         _linkage_members, _all_linkages, summary_test) = createTestDataSet(
            min_obs, 5, 20)

        all_truths, findable_observations, summary = analyzeObservations(
            observations_test, min_obs=min_obs, classes="class")

        assert_frame_equal(
            all_truths, all_truths_test[["truth", "num_obs", "findable"]])
        assert_frame_equal(
            summary,
            summary_test[["class", "num_members", "num_obs", "findable"]])
def test_analyzeObservations_withClassesDictionary():
    """analyzeObservations with classes given as {class name: member truths} dict."""
    for threshold in MIN_OBS:
        # Synthetic observations with known findability at this threshold
        observations, expected_truths, _, _, expected_summary = createTestDataSet(
            threshold, 5, 20)
        # Map each colour class to the truths whose names contain it
        class_dict = {
            colour: observations[observations["truth"].str.contains(colour)]["truth"].unique()
            for colour in ["blue", "red", "green"]
        }
        all_truths, findable_observations, summary = analyzeObservations(
            observations,
            min_obs=threshold,
            classes=class_dict)
        # Per-class summary rows are all expected to be present
        assert_frame_equal(all_truths, expected_truths[["truth", "num_obs", "findable"]])
        assert_frame_equal(summary, expected_summary[["class", "num_members", "num_obs", "findable"]])
def test_analyzeObservations_noObservations():
    """An empty observations frame must raise a ValueError."""
    observations, *_ = createTestDataSet(5, 5, 20)
    # Keep the schema, drop every row
    empty_observations = observations.drop(observations.index)
    with pytest.raises(ValueError):
        analyzeObservations(
            empty_observations,
            min_obs=5,
            classes=None)
def test_analyzeObservations_errors():
    """An unknown metric name must raise a ValueError."""
    observations, *_ = createTestDataSet(5, 5, 20)
    with pytest.raises(ValueError):
        analyzeObservations(
            observations,
            min_obs=5,
            metric="wrong_metric",
            classes=None)
def test_analyzeObservations_metrics():
    """Built-in metrics run without raising.

    Only smoke-tests the calls; actual metric values are covered in test_metrics.py.
    """
    mapping = {
        "obs_id": "obs_id",
        "truth": "truth",
        "time": "time",
        "night": "night"
    }
    observations, *_ = createTestDataSet(5, 5, 20)
    # The nightly_linkages metric needs night and time columns
    observations["night"] = np.arange(0, len(observations))
    observations["time"] = np.arange(0, len(observations))
    # min_obs metric should run cleanly
    analyzeObservations(
        observations,
        metric="min_obs",
        min_obs=5,
        classes=None,
        column_mapping=mapping)
    # nightly_linkages metric should run cleanly
    analyzeObservations(
        observations,
        metric="nightly_linkages",
        linkage_min_obs=1,
        max_obs_separation=10,
        min_linkage_nights=1,
        classes=None,
        column_mapping=mapping)
def test_analyzeObservations_customMetric():
    """analyzeObservations accepts a user-supplied findability metric callable."""
    def _customMetric(observations, min_observations=5, column_mapping={}):
        # Same as the built-in min_obs metric; only exists to prove a custom
        # callable can be forwarded through analyzeObservations.
        object_num_obs = observations[column_mapping["truth"]].value_counts().to_frame("num_obs")
        # Bug fix: filter on this function's own `min_observations` parameter.
        # Previously this read `min_obs`, the enclosing test's loop variable,
        # which only worked by accident through late-binding closure lookup.
        object_num_obs = object_num_obs[object_num_obs["num_obs"] >= min_observations]
        findable_objects = object_num_obs.index.values
        findable_observations = observations[observations[column_mapping["truth"]].isin(findable_objects)]
        findable = findable_observations.groupby(by=[column_mapping["truth"]])[column_mapping["obs_id"]].apply(np.array).to_frame("obs_ids")
        findable.reset_index(
            inplace=True,
            drop=False
        )
        return findable
    # Create test data
    for min_obs in MIN_OBS:
        # Generate test data set
        observations_test, all_truths_test, linkage_members_test, all_linkages_test, summary_test = createTestDataSet(
            min_obs,
            5,
            20
        )
        # Build the all_truths and summary data frames using the custom metric
        all_truths, findable_observations, summary = analyzeObservations(
            observations_test,
            metric=_customMetric,
            min_observations=min_obs,
            classes=None
        )
        # Assert equality among the returned columns
        assert_frame_equal(all_truths, all_truths_test[["truth", "num_obs", "findable"]])
        assert_frame_equal(summary, summary_test[summary_test["class"] == "All"][["class", "num_members", "num_obs", "findable"]])
    return
|
from collections import Counter
def friends_of_friend_ids_bad(user):
    """Naive friend-of-a-friend ("foaf") ids.

    Deliberately flawed: the result includes the user themselves, their direct
    friends, and duplicate entries.
    """
    ids = []
    for friend in user["friends"]:        # for each of the user's friends
        for foaf in friend["friends"]:    # for each of *their* friends
            ids.append(foaf["id"])
    return ids
def not_the_same(user, other_user):
    """Two users are distinct iff their ids differ."""
    user_id, other_id = user["id"], other_user["id"]
    return user_id != other_id
def not_friends(user, other_user):
    """True when other_user is not among user's direct friends (compared by id)."""
    for friend in user["friends"]:
        if not not_the_same(friend, other_user):
            return False
    return True
def friends_of_friend_ids(user):
    """Count mutual-acquaintance candidates.

    For each friend-of-a-friend who is neither the user themselves nor already
    a direct friend, count how many mutual friends connect them to the user.
    """
    candidate_ids = (
        foaf["id"]
        for friend in user["friends"]      # for each of my friends
        for foaf in friend["friends"]      # count *their* friends
        if not_the_same(user, foaf)        # who aren't me
        and not_friends(user, foaf)        # and aren't my friends
    )
    return Counter(candidate_ids)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import datetime
import django_extensions.db.fields
class Migration(migrations.Migration):
    """Create the profiling models: PointComment, RoastProfile and TempPoint."""

    # RoastProfile.coffee references coffee.Coffee, hence the dependency.
    dependencies = [
        ('coffee', '0002_auto_20140903_2207'),
    ]
    operations = [
        migrations.CreateModel(
            name='PointComment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # django-extensions timestamps: set on create / refreshed on save
                ('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
                ('comment', models.TextField(null=True, blank=True)),
            ],
            options={
                'ordering': ('-modified', '-created'),
                'abstract': False,
                'get_latest_by': 'modified',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='RoastProfile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
                # NOTE(review): this default is a literal timestamp frozen when
                # the migration was generated, not "now" at row-creation time —
                # confirm that was intended (timezone.now would give the latter).
                ('date', models.DateTimeField(default=datetime.datetime(2014, 9, 2, 22, 49, 5, 228766))),
                ('coffee', models.ForeignKey(to='coffee.Coffee', null=True)),
            ],
            options={
                'verbose_name': 'Roast Profile',
                'verbose_name_plural': 'Roast Profiles',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='TempPoint',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('temperature', models.CharField(default='212.0', max_length=255)),
                # Seconds offset into the roast (see verbose_name below)
                ('time', models.PositiveIntegerField()),
                ('roast_profile', models.ForeignKey(to='profiling.RoastProfile')),
            ],
            options={
                'verbose_name': 'Temperature Point',
                'verbose_name_plural': 'Temperature Points',
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='pointcomment',
            name='point',
            field=models.ForeignKey(blank=True, to='profiling.TempPoint', null=True),
            preserve_default=True,
        ),
    ]
|
import datetime
from pymongo import MongoClient
from bson import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
from flask_restful import abort
# Single local MongoDB connection shared by every helper in this module.
client = MongoClient(host="localhost", port=27017)
db = client.blog
# Collection handles for the three document types.
User = db.user
Post = db.post
Comment = db.comment
def create_user(username, password=None, is_admin=False):
    """Insert a new user and return the stored document.

    Aborts with 400 when the username is already taken.
    """
    if list(User.find({"username": username})):
        abort(400)  # duplicate username
    if not password:
        password = 123456  # default password; hashed below like any other
    new_user = {
        "username": username.replace(" ", ""),
        "password": generate_password_hash(str(password)),
        "nickname": username,
        "avatar": "http://q1.qlogo.cn/g?b=qq&nk=286183317&s=640",
        "is_admin": is_admin,
    }
    inserted_id = User.insert_one(new_user).inserted_id
    return retrieve_user(inserted_id)
def retrieve_user(id):
    """Fetch one user by id (string or ObjectId); aborts with 404 when absent."""
    matches = list(User.find({"_id": ObjectId(id)}))
    if not matches:
        abort(404)
    user = matches[0]
    user["_id"] = str(user["_id"])  # make the document JSON-serialisable
    return user
def retrieve_all_user():
    """Return every user document with `_id` stringified."""
    users = list(db.user.find())
    for user in users:
        user["_id"] = str(user["_id"])
    return users
def update_user(id, **kwargs):
    """Apply the truthy keyword fields to a user document and return the result.

    Aborts with 404 when the user does not exist. Falsy values are ignored,
    matching the previous merge semantics.
    """
    result = list(User.find({"_id": ObjectId(id)}))
    if not result:
        abort(404)
    # Keep only truthy values and never touch the immutable _id field.
    updates = {k: v for k, v in kwargs.items() if v and k != "_id"}
    if updates:
        # Bug fix: the previous version built a merged copy of the document
        # but never wrote it back, so every update was silently dropped.
        User.update_one({"_id": ObjectId(id)}, {"$set": updates})
    return retrieve_user(id)
def create_post(title: str, body: str = None, category: str = None, tags: list = None):
    """Insert a post and return the stored document.

    The description is the first 50 characters of the body.
    """
    body = body or ""
    category = category or ""
    tags = tags or []
    document = {
        "title": title,
        "description": body[:50],
        "body": body,
        "pubdate": datetime.datetime.utcnow(),
        "category": category,
        "tags": tags,
    }
    return retrieve_post(Post.insert_one(document).inserted_id)
def retrieve_post(id):
    """Fetch one post by id; aborts with 404 when absent.

    The short description is stripped since the detail view carries the body.
    """
    matches = list(Post.find({"_id": ObjectId(id)}))
    if not matches:
        abort(404)
    post = matches[0]
    post["_id"] = str(post["_id"])
    post["pubdate"] = str(post["pubdate"])
    post.pop("description")
    return post
def retrieve_all_post():
    """List all posts as summaries (body removed, ids/dates stringified)."""
    posts = list(Post.find())
    for post in posts:
        post["_id"] = str(post["_id"])
        post["pubdate"] = str(post["pubdate"])
        post.pop("body")
    return posts
def update_post(id, **kwargs):
    """Apply the truthy keyword fields to a post document and return the result.

    Aborts with 404 when the post does not exist. Falsy values are ignored,
    matching the previous merge semantics.
    """
    result = list(Post.find({"_id": ObjectId(id)}))
    if not result:
        abort(404)
    # Keep only truthy values and never touch the immutable _id field.
    updates = {k: v for k, v in kwargs.items() if v and k != "_id"}
    if updates:
        # Bug fix: the previous version built a merged copy of the document
        # but never wrote it back, so every update was silently dropped.
        Post.update_one({"_id": ObjectId(id)}, {"$set": updates})
    return retrieve_post(id)
def delete_post(ids):
    """Delete a single post id or a list of ids; always returns True."""
    id_list = ids if isinstance(ids, list) else [ids]
    for post_id in id_list:
        Post.delete_one({"_id": ObjectId(post_id)})
    return True
def create_comment(content, name=None, site=None):
    """Insert a comment and return the stored document.

    Anonymous authors default to "无名氏" (anonymous).
    """
    document = {
        "name": name or "无名氏",
        "site": site or "",
        "content": content,
        "pubdate": datetime.datetime.utcnow(),
    }
    return retrieve_comment(Comment.insert_one(document).inserted_id)
def retrieve_comment(id):
    """Fetch one comment by id; aborts with 404 when absent."""
    matches = list(Comment.find({"_id": ObjectId(id)}))
    if not matches:
        abort(404)
    comment = matches[0]
    comment["_id"] = str(comment["_id"])
    comment["pubdate"] = str(comment["pubdate"])
    return comment
def retrieve_all_comment():
    """Return every comment with ids and dates stringified."""
    comments = list(Comment.find())
    for comment in comments:
        comment["_id"] = str(comment["_id"])
        comment["pubdate"] = str(comment["pubdate"])
    return comments
def delete_comment(ids):
    """Delete a single comment id or a list of ids; always returns True."""
    id_list = ids if isinstance(ids, list) else [ids]
    for comment_id in id_list:
        Comment.delete_one({"_id": ObjectId(comment_id)})
    return True
|
import unittest
import math
from batchlib.util.elisa_results_parser import ElisaResultsParser
class TestElisaResultsParser(unittest.TestCase):
    """Regression checks for ElisaResultsParser.get_elisa_values on known entries."""

    def test_elisa_results_parsing(self):
        parser = ElisaResultsParser()
        # (patient id, requested test names, expected values) triples
        cases = [
            ('C14d', ['ELISA IgG', 'ELISA IgA'], [0.96, 0.39]),
            ('C23i', ['ELISA IgG', 'ELISA IgA'], [6.36, 6.16]),
            ('Z24', ['ELISA IgG', 'ELISA IgA'], [0.05, None]),
            ('A9', ['ELISA IgG', 'ELISA IgA'], [0.14, 0.12]),
            ('P12', ['ELISA IgG', 'ELISA IgA'], [7.81, 11]),
            ('3-0010 K', ['ELISA IgG', 'ELISA IgA'], [0.24, None]),
            ('K97', ['IF IgG', 'IF IgA', 'Roche', 'Abbot', 'Luminex'],
             [1.07, 1.13, 'pos', 'neg', 'neg']),
            ('P21', ['ELISA IgG', 'ELISA IgA'], [2.14, 2.08]),
            ('C2b', ['ELISA IgG', 'ELISA IgA', 'days_after_onset'], [1.68, 4.36, 12]),
        ]
        for patient, test_names, expected in cases:
            results = parser.get_elisa_values(patient, test_names=test_names)
            for index, value in enumerate(expected):
                if value is None:
                    assert results[index] is None
                else:
                    assert results[index] == value
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014, 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# Probe for the IPython notebook widget machinery; fall back to inert stubs so
# this module can still be imported and report why the backend is unavailable.
try:
    from IPython.html.widgets import DOMWidget
    from IPython.utils.traitlets import Unicode, Int, Bool
except Exception as exp:
    # Dummy objects so the module imports without errors; on success they are
    # all replaced by the real IPython imports above.
    # NOTE(review): `Float` is only defined in this fallback branch and never
    # imported in the try branch — presumably unused; confirm before relying on it.
    DOMWidget = object
    Unicode = Int = Float = Bool = lambda *args, **kwargs: None
    available, testable, why_not, which = False, False, str(exp), None
else:
    # Backend capability flags consumed by vispy's backend selection machinery.
    available, testable, why_not, which = True, False, None, None
from vispy.app.backends._ipynb_util import create_glir_message
from vispy.app import Timer
# ---------------------------------------------------------- IPython Widget ---
def _stop_timers(canvas):
    """Stop every vispy Timer attached to *canvas*."""
    for name in dir(canvas):
        try:
            value = getattr(canvas, name)
        except NotImplementedError:
            # Some backend properties (e.g. canvas.position) are not
            # implemented in this backend and raise when accessed.
            continue
        if isinstance(value, Timer):
            value.stop()
class VispyWidget(DOMWidget):
    """IPython notebook widget that hosts a vispy canvas via the webgl backend.

    Traffic flows both ways over the widget comm: DOM events arrive through
    `events_received`, GLIR draw commands leave through `send_glir_commands`.
    """
    _view_name = Unicode("VispyView", sync=True)
    _view_module = Unicode('/nbextensions/vispy/webgl-backend.js', sync=True)
    #height/width of the widget is managed by IPython.
    #it's a string and can be anything valid in CSS.
    #here we only manage the size of the viewport.
    width = Int(sync=True)
    height = Int(sync=True)
    resizable = Bool(value=True, sync=True)
    def __init__(self, **kwargs):
        """Create the widget; `set_canvas` must be called before it is useful."""
        super(VispyWidget, self).__init__(**kwargs)
        # Route front-end (JS) messages to events_received.
        self.on_msg(self.events_received)
        self.canvas = None
        self.canvas_backend = None
        self.gen_event = None
    def set_canvas(self, canvas):
        """Attach a vispy canvas: size the viewport and wire up its backend."""
        self.width, self.height = canvas._backend._default_size
        self.canvas = canvas
        self.canvas_backend = self.canvas._backend
        self.canvas_backend.set_widget(self)
        self.gen_event = self.canvas_backend._gen_event
        #setup the backend widget then.
    def events_received(self, _, msg):
        """Dispatch a message from the JS view: 'init', 'events' or 'status'."""
        if msg['msg_type'] == 'init':
            self.canvas_backend._reinit_widget()
        elif msg['msg_type'] == 'events':
            events = msg['contents']
            for ev in events:
                self.gen_event(ev)
        elif msg['msg_type'] == 'status':
            if msg['contents'] == 'removed':
                # Stop all timers associated to the widget.
                _stop_timers(self.canvas_backend._vispy_canvas)
    def send_glir_commands(self, commands):
        """Serialize GLIR commands and send them to the front-end view."""
        # TODO: check whether binary websocket is available (ipython >= 3)
        # Until IPython 3.0 is released, use base64.
        array_serialization = 'base64'
        # array_serialization = 'binary'
        if array_serialization == 'base64':
            msg = create_glir_message(commands, 'base64')
            msg['array_serialization'] = 'base64'
            self.send(msg)
        elif array_serialization == 'binary':
            msg = create_glir_message(commands, 'binary')
            msg['array_serialization'] = 'binary'
            # Remove the buffers from the JSON message: they will be sent
            # independently via binary WebSocket.
            buffers = msg.pop('buffers')
            self.comm.send({"method": "custom", "content": msg},
                           buffers=buffers)
|
# Doxygen configuration template rendered by DoxygenProcessHandle.process().
# Only XML output is enabled (HTML/LaTeX/man/RTF off) — breathe consumes the XML.
# NOTE(review): JAVADOC_AUTOBRIEF appears twice; doxygen keeps the last value
# (NO) — confirm the first line is not the intended setting.
AUTOCFG_TEMPLATE = r"""
PROJECT_NAME = "{project_name}"
OUTPUT_DIRECTORY = {output_dir}
GENERATE_LATEX = NO
GENERATE_MAN = NO
GENERATE_RTF = NO
CASE_SENSE_NAMES = NO
INPUT = {input}
ENABLE_PREPROCESSING = YES
QUIET = YES
JAVADOC_AUTOBRIEF = YES
JAVADOC_AUTOBRIEF = NO
GENERATE_HTML = NO
GENERATE_XML = YES
ALIASES = "rst=\verbatim embed:rst"
ALIASES += "endrst=\endverbatim"
""".strip()
class DoxygenProcessHandle(object):
    """Runs doxygen over a project's sources and returns the XML output dir.

    Collaborators (path_handler with a join(), a run_process callable and a
    write_file callable) are injected for testability.
    """

    def __init__(self, path_handler, run_process, write_file):
        self.path_handler = path_handler
        self.run_process = run_process
        self.write_file = write_file

    def process(self, auto_project_info, files):
        """Render the config, write it to the build dir, run doxygen, return the XML dir."""
        project_name = auto_project_info.name()
        config_filename = "%s.cfg" % project_name
        source_paths = map(auto_project_info.abs_path_to_source_file, files)
        config = AUTOCFG_TEMPLATE.format(
            project_name=project_name,
            output_dir=project_name,
            input=" ".join(source_paths)
        )
        build_dir = self.path_handler.join(
            auto_project_info.build_dir(), "breathe", "doxygen")
        self.write_file(build_dir, config_filename, config)
        self.run_process(['doxygen', config_filename], cwd=build_dir)
        return self.path_handler.join(build_dir, project_name, "xml")
|
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from distutils.version import StrictVersion
import tensorflow as tf
from PIL import Image
import numpy as np
import cv2
# Check the version of the TensorFlow
# This module uses TF 1.x graph/session APIs; refuse to run on older builds.
if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
    raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')
def load_image_into_numpy_array(image):
    """Convert a PIL image (RGB) to an (height, width, 3) uint8 numpy array."""
    width, height = image.size
    pixels = np.array(image.getdata())
    return pixels.reshape((height, width, 3)).astype(np.uint8)
def run_inference_for_single_image(image, graph):
    """Run the TF1 detection graph on one image and return numpy outputs.

    Returns a dict with num_detections (int), detection_boxes, detection_scores,
    detection_classes and, when the graph produces them, detection_masks —
    all squeezed to single-image (no batch) arrays.
    """
    with graph.as_default():
        with tf.Session() as sess:
            # Get handles to input and output tensors
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            # Only request the output tensors this graph actually defines.
            for key in ['num_detections',
                        'detection_boxes',
                        'detection_scores',
                        'detection_classes',
                        'detection_masks']:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
            if 'detection_masks' in tensor_dict:
                # The following processing is only for single image
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, image.shape[0], image.shape[1])
                # Binarize masks at a 0.5 confidence threshold.
                detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
            # Run inference
            output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})
            # all outputs are float32 numpy arrays, so convert types as appropriate
            output_dict['num_detections'] = int(output_dict['num_detections'][0])
            output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][0]
            if 'detection_masks' in output_dict:
                output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict
def detection(path_to_frozen_model,
              path_to_labels,
              path_to_images,
              path_to_results,
              min_score_thresh = 0.2
              ):
    """
    Run object detection on one image and write the annotated result to disk.

    :param path_to_frozen_model: Path to frozen detection graph. This is the actual model that is used for the object detection.
    :param path_to_labels: Path to the label map used to name each detected box.
    :param path_to_images: The image which you want to process.
    :param min_score_thresh: The least confidence level for a box to be drawn.
    :param path_to_results: Path where the annotated result image is written.
    :return: (output_dict, path_to_results) — raw detection outputs and the result path.
    """
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(path_to_frozen_model, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    category_index = label_map_util.create_category_index_from_labelmap(path_to_labels, use_display_name=True)
    # NOTE(review): path_to_images is a single path wrapped in a list, so only
    # one image is processed per call — confirm whether a list was intended.
    for image_path in [path_to_images]:
        image = Image.open(image_path)
        # the array based representation of the image will be used later in order to prepare the
        # result image with boxes and labels on it.
        image_np = load_image_into_numpy_array(image)
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image_np, axis=0)
        # Actual detection.
        output_dict = run_inference_for_single_image(image_np, detection_graph)
        # Visualization of the results of a detection.
        result = vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            output_dict['detection_boxes'],
            output_dict['detection_classes'],
            output_dict['detection_scores'],
            category_index,
            instance_masks= output_dict.get('detection_masks'),
            use_normalized_coordinates= True,
            line_thickness= 2,
            min_score_thresh= min_score_thresh)
        cv2.imwrite(path_to_results, result)
    return output_dict, path_to_results
|
"""
Test various functions regarding chapter 8: MDI, MDA, SFI importance.
"""
import os
import unittest
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, accuracy_score
from mlfinlab.util.volatility import get_daily_vol
from mlfinlab.filters.filters import cusum_filter
from mlfinlab.labeling.labeling import get_events, add_vertical_barrier, get_bins
from mlfinlab.sampling.bootstrapping import get_ind_mat_label_uniqueness, get_ind_matrix
from mlfinlab.ensemble.sb_bagging import SequentiallyBootstrappedBaggingClassifier
from mlfinlab.feature_importance.importance import (feature_importance_mean_decrease_impurity,
feature_importance_mean_decrease_accuracy, feature_importance_sfi,
plot_feature_importance)
from mlfinlab.feature_importance.orthogonal import feature_pca_analysis, get_orthogonal_features
from mlfinlab.cross_validation.cross_validation import PurgedKFold, ml_cross_val_score
# pylint: disable=invalid-name
def _generate_label_with_prob(x, prob, random_state=np.random.RandomState(1)):
"""
Generates true label value with some probability(prob)
"""
choice = random_state.choice([0, 1], p=[1 - prob, prob])
if choice == 1:
return x
return int(not x)
def _get_synthetic_samples(ind_mat, good_samples_thresh, bad_samples_thresh):
    """
    Indices of samples whose average uniqueness is either above
    good_samples_thresh (barely overlapping) or below bad_samples_thresh
    (heavily overlapping).
    """
    unique_samples = []
    for idx, label in enumerate(get_ind_mat_label_uniqueness(ind_mat)):
        mean_uniqueness = np.mean(label[label > 0])
        if mean_uniqueness > good_samples_thresh or mean_uniqueness < bad_samples_thresh:
            unique_samples.append(idx)
    return unique_samples
class TestFeatureImportance(unittest.TestCase):
    """
    Test Feature importance: MDI, MDA, SFI, orthogonal (PCA) features and plotting.
    """
    def setUp(self):
        """
        Set the file path for the sample dollar bars data and get triple barrier events, generate features
        """
        project_path = os.path.dirname(__file__)
        self.path = project_path + '/test_data/dollar_bar_sample.csv'
        self.data = pd.read_csv(self.path, index_col='date_time')
        self.data.index = pd.to_datetime(self.data.index)
        # Compute moving averages
        self.data['fast_mavg'] = self.data['close'].rolling(window=20, min_periods=20,
                                                            center=False).mean()
        self.data['slow_mavg'] = self.data['close'].rolling(window=50, min_periods=50,
                                                            center=False).mean()
        # Compute sides (moving-average crossover: long above, short below)
        self.data['side'] = np.nan
        long_signals = self.data['fast_mavg'] >= self.data['slow_mavg']
        short_signals = self.data['fast_mavg'] < self.data['slow_mavg']
        self.data.loc[long_signals, 'side'] = 1
        self.data.loc[short_signals, 'side'] = -1
        # Remove Look ahead bias by lagging the signal
        self.data['side'] = self.data['side'].shift(1)
        daily_vol = get_daily_vol(close=self.data['close'], lookback=50) * 0.5
        cusum_events = cusum_filter(self.data['close'], threshold=0.005)
        vertical_barriers = add_vertical_barrier(t_events=cusum_events, close=self.data['close'],
                                                 num_hours=2)
        # Triple-barrier meta-labeling of the filtered events
        meta_labeled_events = get_events(close=self.data['close'],
                                         t_events=cusum_events,
                                         pt_sl=[1, 4],
                                         target=daily_vol,
                                         min_ret=5e-5,
                                         num_threads=3,
                                         vertical_barrier_times=vertical_barriers,
                                         side_prediction=self.data['side'])
        meta_labeled_events.dropna(inplace=True)
        labels = get_bins(meta_labeled_events, self.data['close'])
        # Generate data set which shows the power of SB Bagging vs Standard Bagging
        ind_mat = get_ind_matrix(meta_labeled_events.t1, self.data.close)
        unique_samples = _get_synthetic_samples(ind_mat, 0.5, 0.1)
        X = self.data.loc[labels.index,].iloc[unique_samples].dropna() # get synthetic data set with drawn samples
        labels = labels.loc[X.index, :]
        X.loc[labels.index, 'y'] = labels.bin
        # Generate features (some of them are informative, others are just noise)
        # label_prob_p agrees with y with probability p, so high-p columns are informative
        for index, value in X.y.iteritems():
            X.loc[index, 'label_prob_0.6'] = _generate_label_with_prob(value, 0.6)
            X.loc[index, 'label_prob_0.5'] = _generate_label_with_prob(value, 0.5)
            X.loc[index, 'label_prob_0.3'] = _generate_label_with_prob(value, 0.3)
            X.loc[index, 'label_prob_0.2'] = _generate_label_with_prob(value, 0.2)
            X.loc[index, 'label_prob_0.1'] = _generate_label_with_prob(value, 0.1)
        features = ['label_prob_0.6', 'label_prob_0.2', 'label_prob_0.1'] # Two super-informative features
        # Smoothed (SMA) variants of the noisy columns add correlated features
        for prob in [0.5, 0.3, 0.2, 0.1]:
            for window in [2, 5]:
                X['label_prob_{}_sma_{}'.format(prob, window)] = X['label_prob_{}'.format(prob)].rolling(
                    window=window).mean()
                features.append('label_prob_{}_sma_{}'.format(prob, window))
        X.dropna(inplace=True)
        y = X.pop('y')
        # shuffle=False keeps the time ordering needed by PurgedKFold
        self.X_train, self.X_test, self.y_train_clf, self.y_test_clf = train_test_split(X[features], y, test_size=0.4,
                                                                                        random_state=1, shuffle=False)
        self.y_train_reg = (1 + self.y_train_clf)
        self.y_test_reg = (1 + self.y_test_clf)
        self.samples_info_sets = meta_labeled_events.loc[self.X_train.index, 't1']
        self.price_bars_trim = self.data[
            (self.data.index >= self.X_train.index.min()) & (self.data.index <= self.X_train.index.max())].close
    def test_orthogonal_features(self):
        """
        Test orthogonal features: PCA features, importance vs PCA importance analysis
        """
        # Init classifiers
        clf_base = RandomForestClassifier(n_estimators=1, criterion='entropy', bootstrap=False,
                                          class_weight='balanced_subsample')
        sb_clf = SequentiallyBootstrappedBaggingClassifier(base_estimator=clf_base, max_features=1.0, n_estimators=100,
                                                           samples_info_sets=self.samples_info_sets,
                                                           price_bars=self.price_bars_trim, oob_score=True,
                                                           random_state=1)
        pca_features = get_orthogonal_features(self.X_train)
        # PCA features should have mean of 0
        self.assertAlmostEqual(np.mean(pca_features[:, 2]), 0, delta=1e-7)
        self.assertAlmostEqual(np.mean(pca_features[:, 5]), 0, delta=1e-7)
        self.assertAlmostEqual(np.mean(pca_features[:, 6]), 0, delta=1e-7)
        # Check particular PCA values std
        self.assertAlmostEqual(np.std(pca_features[:, 1]), 1.499, delta=0.2)
        self.assertAlmostEqual(np.std(pca_features[:, 3]), 1.047, delta=0.2)
        self.assertAlmostEqual(np.std(pca_features[:, 4]), 0.948, delta=0.2)
        sb_clf.fit(self.X_train, self.y_train_clf)
        mdi_feat_imp = feature_importance_mean_decrease_impurity(sb_clf, self.X_train.columns)
        pca_corr_res = feature_pca_analysis(self.X_train, mdi_feat_imp)
        # Check correlation metrics results
        self.assertAlmostEqual(pca_corr_res['Weighted_Kendall_Rank'][0], 0.0677, delta=1e-1)
    def test_feature_importance(self):
        """
        Test features importance: MDI, MDA, SFI and plot function
        """
        sb_clf, cv_gen = self._prepare_clf_data_set(oob_score=False)
        # MDI feature importance
        mdi_feat_imp = feature_importance_mean_decrease_impurity(sb_clf, self.X_train.columns)
        # MDA feature importance
        mda_feat_imp_log_loss = feature_importance_mean_decrease_accuracy(sb_clf, self.X_train, self.y_train_clf,
                                                                          cv_gen,
                                                                          sample_weight=np.ones(
                                                                              (self.X_train.shape[0],)))
        mda_feat_imp_f1 = feature_importance_mean_decrease_accuracy(sb_clf, self.X_train, self.y_train_clf,
                                                                    cv_gen, scoring=f1_score)
        # SFI feature importance
        sfi_feat_imp_log_loss = feature_importance_sfi(sb_clf, self.X_train[self.X_train.columns[:5]], self.y_train_clf,
                                                       cv_gen=cv_gen, sample_weight=np.ones((self.X_train.shape[0],)))
        sfi_feat_imp_f1 = feature_importance_sfi(sb_clf, self.X_train[self.X_train.columns[:5]], self.y_train_clf,
                                                 cv_gen=cv_gen,
                                                 scoring=f1_score)  # Take only 5 features for faster test run
        # MDI assertions: importances are normalised to sum to 1
        self.assertAlmostEqual(mdi_feat_imp['mean'].sum(), 1, delta=0.001)
        # The most informative features
        self.assertAlmostEqual(mdi_feat_imp.loc['label_prob_0.1', 'mean'], 0.19598, delta=0.01)
        self.assertAlmostEqual(mdi_feat_imp.loc['label_prob_0.2', 'mean'], 0.164, delta=0.01)
        # Noisy feature
        self.assertAlmostEqual(mdi_feat_imp.loc['label_prob_0.1_sma_5', 'mean'], 0.08805, delta=0.01)
        # MDA(log_loss) assertions
        # NOTE(review): delta=10 (and delta=3 below) makes these assertions
        # nearly vacuous — they only guard against NaN/huge regressions.
        self.assertAlmostEqual(mda_feat_imp_log_loss.loc['label_prob_0.1', 'mean'], 0.23685, delta=10)
        self.assertAlmostEqual(mda_feat_imp_log_loss.loc['label_prob_0.2', 'mean'], 0.3222, delta=10)
        # MDA(f1) assertions
        self.assertAlmostEqual(mda_feat_imp_f1.loc['label_prob_0.1', 'mean'], 0.25, delta=3)
        self.assertAlmostEqual(mda_feat_imp_f1.loc['label_prob_0.2', 'mean'], 0.3, delta=3)
        # SFI(log_loss) assertions
        self.assertAlmostEqual(sfi_feat_imp_log_loss.loc['label_prob_0.1', 'mean'], -2.14, delta=1)
        self.assertAlmostEqual(sfi_feat_imp_log_loss.loc['label_prob_0.2', 'mean'], -2.15, delta=1)
        # SFI(accuracy) assertions
        self.assertAlmostEqual(sfi_feat_imp_f1.loc['label_prob_0.1', 'mean'], 0.81, delta=1)
        self.assertAlmostEqual(sfi_feat_imp_f1.loc['label_prob_0.2', 'mean'], 0.74, delta=1)
        self.assertAlmostEqual(sfi_feat_imp_f1.loc['label_prob_0.5_sma_2', 'mean'], 0.224, delta=1)
    def test_plot_feature_importance(self):
        """
        Test plot_feature_importance function
        """
        sb_clf, cv_gen = self._prepare_clf_data_set(oob_score=True)
        oos_score = ml_cross_val_score(sb_clf, self.X_train, self.y_train_clf, cv_gen=cv_gen, sample_weight=None,
                                       scoring=accuracy_score).mean()
        sb_clf.fit(self.X_train, self.y_train_clf)
        mdi_feat_imp = feature_importance_mean_decrease_impurity(sb_clf, self.X_train.columns)
        plot_feature_importance(mdi_feat_imp, oob_score=sb_clf.oob_score_, oos_score=oos_score)
        plot_feature_importance(mdi_feat_imp, oob_score=sb_clf.oob_score_, oos_score=oos_score,
                                savefig=True, output_path='test.png')
        # Clean up the file written by the savefig branch
        os.remove('test.png')
    def _prepare_clf_data_set(self, oob_score):
        """
        Helper function for preparing data sets for feature importance
        :param oob_score: (bool): bool flag for oob_score in classifier
        """
        clf_base = RandomForestClassifier(n_estimators=1, criterion='entropy', bootstrap=False,
                                          class_weight='balanced_subsample', random_state=1)
        sb_clf = SequentiallyBootstrappedBaggingClassifier(base_estimator=clf_base, max_features=1.0, n_estimators=100,
                                                           samples_info_sets=self.samples_info_sets,
                                                           price_bars=self.price_bars_trim, oob_score=oob_score,
                                                           random_state=1)
        sb_clf.fit(self.X_train, self.y_train_clf)
        cv_gen = PurgedKFold(n_splits=4, samples_info_sets=self.samples_info_sets)
        return sb_clf, cv_gen
|
"""Add subtitles to a DASH OnDemand MPD."""
import os
import re
import sys
from collections import namedtuple
from argparse import ArgumentParser
from backup_handler import make_backup, BackupError
Format = namedtuple('Format', 'name mime_type extension')
FORMATS = {
'ttml': Format('ttml', 'application/ttml+xml', '.ttml'),
'webvtt': Format('webvtt', 'text/wvtt', '.vtt'),
'srt': Format('srt', 'text/srt', '.srt')}
AS_TEMPLATE = '''\
<AdaptationSet contentType="text" mimeType="%(mime_type)s" lang="%(lang)s">
<Role schemeIdUri="urn:mpeg:dash:role:2011" value="subtitle"/>
<Representation id="%(rep_id)s" bandwidth="10000">
<BaseURL>%(filename)s</BaseURL>
</Representation>
</AdaptationSet>
'''
class SubtitleFile():
    """A subtitle file plus the DASH AdaptationSet XML fragment describing it."""

    def __init__(self, filename, lang=None, format=None):
        self.filename = filename
        self.lang = lang if lang is not None else 'und'
        self.base, ext = os.path.splitext(filename)
        if format is None:
            # Infer the format from the file extension
            if ext == '':
                raise ValueError("No file extension and no format specified")
            ext = ext.lower()
            for candidate in FORMATS.values():
                if candidate.extension == ext:
                    format = candidate.name
                    break
            else:
                raise ValueError('File extension %s not in supported '
                                 'extensions: %s' % (ext, [f.extension for f in FORMATS.values()]))
        self.format = FORMATS[format]

    @property
    def adaptation_set(self):
        """AdaptationSet XML for this subtitle file."""
        fields = {
            'mime_type': self.format.mime_type,
            'lang': self.lang,
            'filename': self.filename,
            'rep_id': self.base,
        }
        return AS_TEMPLATE % fields
def add_subtitles(mpd_file, subtitle_files):
    """Add subtitle AdaptationSets to a DASH manifest file, in place.

    A backup of the original manifest is made first; if a backup already
    exists the file is left untouched.

    :raises ValueError: when the manifest contains no AdaptationSet.
    """
    with open(mpd_file, 'r') as ifh:
        mpd_content = ifh.read()
    as_end = r"\</AdaptationSet\>[\r\n]*"
    # Insert the new AdaptationSets right after the end of the first
    # existing one (including its trailing newlines).
    sub_xml = ''.join(sub_file.adaptation_set for sub_file in subtitle_files)
    as_end_mobj = re.search(as_end, mpd_content)
    if as_end_mobj is None:
        raise ValueError('Cound not find AdaptationSet in %s' % mpd_file)
    end_pos = as_end_mobj.end()
    output_mpd = mpd_content[:end_pos] + sub_xml + mpd_content[end_pos:]
    try:
        make_backup(mpd_file)
    except BackupError:
        print("Backup-file already exists. Skipping file %s" % mpd_file)
        # Bug fix: previously the skip message was printed but the manifest
        # was rewritten anyway; now the file really is left untouched.
        return
    with open(mpd_file, 'w') as ofh:
        ofh.write(output_mpd)
def main():
    """Parse `mpd file lang [file lang ...]` arguments and insert the subtitles."""
    parser = ArgumentParser()
    parser.add_argument('mpd')
    parser.add_argument('files_and_langs', nargs='+', help='List of file language pairs')
    args = parser.parse_args()
    if len(args.files_and_langs) % 2 != 0:
        print("You must list pairs of files and languages")
        # Bug fix: ArgumentParser has no callable usage() — `usage` is a plain
        # attribute (None by default), so calling it raised TypeError.
        # print_usage() is the API for showing the usage line.
        parser.print_usage()
        sys.exit(1)
    subfiles = []
    # Consume the positional arguments as (file, language) pairs
    for i in range(0, len(args.files_and_langs), 2):
        subfile = args.files_and_langs[i]
        lang = args.files_and_langs[i + 1]
        subfiles.append(SubtitleFile(subfile, lang))
    add_subtitles(args.mpd, subfiles)
# Script entry point.
if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.