Dataset columns (name: type, observed range):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 2-616
- content_id: string, length 40
- detected_licenses: list, length 0-69
- license_type: string, 2 classes
- repo_name: string, length 5-118
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, length 4-63
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 2.91k-686M, nullable
- star_events_count: int64, 0-209k
- fork_events_count: int64, 0-110k
- gha_license_id: string, 23 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 213 classes
- src_encoding: string, 30 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 2-10.3M
- extension: string, 246 classes
- content: string, length 2-10.3M
- authors: list, length 1
- author_id: string, length 0-212

Each record below gives these metadata fields on one line, then the file content, then the authors and author_id fields.
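A minimal sketch of a row type that mirrors a subset of these columns; the class and helper below are illustrative only and not part of any dataset tooling:

```python
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class CodeFileRow:
    # A subset of the columns listed above, typed per the schema.
    blob_id: str
    path: str
    repo_name: str
    license_type: str              # "no_license" or "permissive" in this dump
    detected_licenses: List[str] = field(default_factory=list)
    star_events_count: int = 0
    fork_events_count: int = 0
    language: str = "Python"
    is_vendor: bool = False
    is_generated: bool = False
    length_bytes: int = 0
    extension: str = "py"
    content: str = ""
    authors: List[str] = field(default_factory=list)
    author_id: Optional[str] = None


def permissive_rows(rows):
    """Yield rows that are permissively licensed and not vendored/generated."""
    for r in rows:
        if r.license_type == "permissive" and not (r.is_vendor or r.is_generated):
            yield r
```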
blob_id: f215b321dbc65408cbfafabd6f87c0b42985c9ea | directory_id: 93d68ae55337bee755e630aa842c3ea6f0d1e007 | path: /gen_template_c.py | content_id: bba2735817963fddddb873201776538ba131c82d | detected_licenses: [] | license_type: no_license | repo_name: CathalHarte/c_cpp_cmake_templates | snapshot_id: efd78df100a8d4c69ee4d48cb1336f157e9cac04 | revision_id: 01a2d59130beb41630760417aa145df854318b7d | branch_name: refs/heads/master | visit_date: 2021-02-17T15:33:40.181562 | revision_date: 2020-04-07T15:45:20 | committer_date: 2020-04-07T15:45:20 | github_id: 245,107,766 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,224 | extension: py | content:
#!/usr/bin/python3
import argparse
parser = argparse.ArgumentParser(description="This creates a template .c file in the folder \
(Title, section headers, etc) it is called in.")
parser.add_argument("filename", help="The name of the file that you want to generate omitting .c")
args = parser.parse_args()
filename = args.filename
lines = []
# lines.append(" \n")
lines.append("/******************************************************************************/\n")
lines.append("/*!\n")
lines.append(" * @file %s.c\n" %filename)
lines.append(" * @brief\n")
lines.append(" * \n")
lines.append(" * @author Cathal Harte <cathal.harte@protonmail.com>\n")
lines.append(" *\n")
lines.append(" */\n")
lines.append("\n")
lines.append("/*******************************************************************************\n")
lines.append("* Includes\n")
lines.append("******************************************************************************/\n")
lines.append("\n")
lines.append("\n")
lines.append("/*******************************************************************************\n")
lines.append("* Definitions and types\n")
lines.append("*******************************************************************************/\n")
lines.append("\n")
lines.append("\n")
lines.append("/*******************************************************************************\n")
lines.append("* Internal function prototypes\n")
lines.append("*******************************************************************************/\n")
lines.append("\n")
lines.append("\n")
lines.append("/*******************************************************************************\n")
lines.append("* Data\n")
lines.append("*******************************************************************************/\n")
lines.append("\n")
lines.append("\n")
lines.append("/*******************************************************************************\n")
lines.append("* Functions\n")
lines.append("*******************************************************************************/\n")
lines.append("\n")
# Create and fill the file.
file = open("%s.c" % filename, "w")
for line in lines:
    file.write(line)
file.close()
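A hedged usage sketch for the generator above; the module name and the checks are made up for illustration:

```python
import pathlib
import subprocess

# Run the script exactly as its argparse interface expects: one positional
# filename without the .c suffix (assumes gen_template_c.py is in the cwd).
subprocess.run(["python3", "gen_template_c.py", "my_module"], check=True)

text = pathlib.Path("my_module.c").read_text()
assert "@file my_module.c" in text   # Doxygen-style header written by the script
assert "* Functions" in text         # last section banner
```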
authors: ["cathal.harte@rapttouch.com"] | author_id: cathal.harte@rapttouch.com

blob_id: 2573aab5a2471963704c00ae85c7efb0ca6e854c | directory_id: 5c147ad2bb9b91ed33409e5a5724ceb20e585435 | path: /flaskr/__init__.py | content_id: 92bc96423f129b206fd3b2dc492a4e9a7198262a | detected_licenses: [] | license_type: no_license | repo_name: finisher1017/smarter | snapshot_id: 1fca6d923c63b2f395b1624257cf66829efc9ebd | revision_id: 07a23f84da71d488cc52c05b8dab693f4ac8f48a | branch_name: refs/heads/master | visit_date: 2020-03-24T22:08:41.079902 | revision_date: 2018-08-01T17:12:09 | committer_date: 2018-08-01T17:12:09 | github_id: 141,936,112 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 850 | extension: py | content:
import os

from flask import Flask, render_template


def create_app(test_config=None):
    # create and configure the app
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(SECRET_KEY='dev',
                            DATABASE=os.path.join(app.instance_path, 'flaskr.psql'),)

    if test_config is None:
        # load the instance config, if it exists, when not testing
        app.config.from_pyfile('config.py', silent=True)
    else:
        # load the test config if passed in
        app.config.from_mapping(test_config)

    # ensure the instance folder exists
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass

    @app.route("/")
    def home():
        return render_template("homepage.html")

    @app.route("/hiking")
    def hiking():
        return render_template("hiking.html")

    @app.route("/skiing")
    def skiing():
        return render_template("skiing.html")

    return app
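A minimal test sketch for the application factory above, assuming the file is importable as the flaskr package and that the referenced templates exist:

```python
from flaskr import create_app

# Passing a mapping exercises the test_config branch of create_app().
app = create_app({"SECRET_KEY": "test", "TESTING": True})

with app.test_client() as client:
    assert client.get("/").status_code == 200        # homepage.html
    assert client.get("/hiking").status_code == 200  # hiking.html
```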
authors: ["finisher1017@gmail.com"] | author_id: finisher1017@gmail.com

blob_id: 4ebb84f81063fc2f6ba05d750ff8d4b3761caa04 | directory_id: 2948eda77e7530b4252827313fd9a11acd751da8 | path: /startpage/main.py | content_id: fcfe2bd3f44e3d49ce8a1ea902b29ea52e96b461 | detected_licenses: [] | license_type: no_license | repo_name: mrtopf/adhc-bt-startpage | snapshot_id: bd00fbcc386f3b7c7a85e3437ae837900a8aa68e | revision_id: a985e85d9cd58918250ebb419c95271a792a93ce | branch_name: refs/heads/master | visit_date: 2021-01-10T21:37:36.265035 | revision_date: 2011-02-21T07:08:15 | committer_date: 2011-02-21T07:08:15 | github_id: 1,391,857 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,125 | extension: py | content:
from framework import Handler, Application
from framework.decorators import html
from logbook import Logger
from logbook import FileHandler
import uuid
import werkzeug
import datetime
import pkg_resources
from jinja2 import Environment, PackageLoader, TemplateNotFound
import setup
class StaticHandler(Handler):
def get(self, path_info):
return self.settings.staticapp
class CSSResourceHandler(Handler):
def get(self, path_info):
return self.settings['css'].render_wsgi
class JSResourceHandler(Handler):
def get(self, path_info):
return self.settings['js'].render_wsgi
class Page(Handler):
"""show a page"""
@html
def get(self, page=None):
if page is None:
page = "index.html"
try:
tmpl = self.app.pts.get_template(page)
except TemplateNotFound:
raise werkzeug.exceptions.NotFound()
out = tmpl.render(
css = self.settings.css(),
js = self.settings.js(),
)
return out
class App(Application):
logfilename = "/tmp/frontend.log"
def setup_handlers(self, map):
"""setup the mapper"""
map.connect(None, "/css/{path_info:.*}", handler=CSSResourceHandler)
map.connect(None, "/js/{path_info:.*}", handler=JSResourceHandler)
map.connect(None, "/img/{path_info:.*}", handler=StaticHandler)
map.connect(None, "/extensions/{path_info:.*}", handler=StaticHandler)
map.connect(None, "/", handler=Page)
map.connect(None, "/{page}", handler=Page)
self.logger = Logger('app')
self.pts = Environment(loader=PackageLoader("startpage","templates"))
def main():
port = 7652
app = App(setup.setup())
return webserver(app, port)
def frontend_factory(global_config, **local_conf):
settings = setup.setup(**local_conf)
return App(settings)
def webserver(app, port):
import wsgiref.simple_server
wsgiref.simple_server.make_server('', port, app).serve_forever()
if __name__=="__main__":
main()
else:
settings = setup.setup()
app = App(settings)
authors: ["website@ip-10-226-178-16.eu-west-1.compute.internal"] | author_id: website@ip-10-226-178-16.eu-west-1.compute.internal

blob_id: 785c3c15eaf032737edc2ee120b7320ddca17890 | directory_id: 2f44c31cb1ba4b92f5575b91915a6624fcf8dc50 | path: /simple_softmax.py | content_id: 0bb67cd09e22041bba5f7418ac9e7a46c7d86ccc | detected_licenses: [] | license_type: no_license | repo_name: grissiom/captcha-tensorflow | snapshot_id: 322859bc8bce442cdfe6afbd170a4ad701644510 | revision_id: d0f428524cdc2072d7b192da188a64afc016bbfc | branch_name: refs/heads/master | visit_date: 2021-09-03T13:51:27.820290 | revision_date: 2018-01-07T16:30:44 | committer_date: 2018-01-07T16:30:44 | github_id: 116,818,872 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2018-01-09T13:22:38 | gha_created_at: 2018-01-09T13:22:37 | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,343 | extension: py | content:
# -*- coding:utf-8 -*-
import argparse
import sys
import tensorflow as tf
import datasets.base as input_data
MAX_STEPS = 10000
BATCH_SIZE = 1000
FLAGS = None
def main(_):
# load data
meta, train_data, test_data = input_data.load_data(FLAGS.data_dir, flatten=True)
print 'data loaded'
print 'train images: %s. test images: %s' % (train_data.images.shape[0], test_data.images.shape[0])
LABEL_SIZE = meta['label_size']
IMAGE_SIZE = meta['width'] * meta['height']
print 'label_size: %s, image_size: %s' % (LABEL_SIZE, IMAGE_SIZE)
# variable in the graph for input data
x = tf.placeholder(tf.float32, [None, IMAGE_SIZE])
y_ = tf.placeholder(tf.float32, [None, LABEL_SIZE])
# define the model
W = tf.Variable(tf.zeros([IMAGE_SIZE, LABEL_SIZE]))
b = tf.Variable(tf.zeros([LABEL_SIZE]))
y = tf.matmul(x, W) + b
# Define loss and optimizer
diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
cross_entropy = tf.reduce_mean(diff)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# forword prop
predict = tf.argmax(y, axis=1)
expect = tf.argmax(y_, axis=1)
# evaluate accuracy
correct_prediction = tf.equal(predict, expect)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
tf.global_variables_initializer().run()
# Train
for i in range(MAX_STEPS):
batch_xs, batch_ys = train_data.next_batch(BATCH_SIZE)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
if i % 100 == 0:
# Test trained model
r = sess.run(accuracy, feed_dict={x: test_data.images, y_: test_data.labels})
print 'step = %s, accuracy = %.2f%%' % (i, r * 100)
# final check after looping
r_test = sess.run(accuracy, feed_dict={x: test_data.images, y_: test_data.labels})
print 'testing accuracy = %.2f%%' % (r_test * 100, )
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='images/char-1-epoch-2000/',
help='Directory for storing input data')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
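For reference, the softmax-regression loss that the TF1 graph above builds can be written in a few lines of NumPy; the shapes and sample labels below are illustrative only:

```python
import numpy as np

def softmax(z):
    z = z - z.max(axis=1, keepdims=True)            # numerical stability
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

def cross_entropy(y_true, logits):
    p = softmax(logits)
    return -np.mean(np.sum(y_true * np.log(p + 1e-12), axis=1))

x = np.random.rand(4, 8)                            # 4 samples, IMAGE_SIZE = 8
W, b = np.zeros((8, 3)), np.zeros(3)                # LABEL_SIZE = 3
y_true = np.eye(3)[[0, 1, 2, 1]]                    # one-hot labels
print(cross_entropy(y_true, x @ W + b))             # ~ln(3) for an all-zero model
```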
authors: ["i@jackon.me"] | author_id: i@jackon.me

blob_id: 4fbdbe5e494379c5bfa4f47fee381e212fefc707 | directory_id: e0b6f5bd451aa8af3273fbc948799637681342e1 | path: /scripts/old_codes/combine_ips_visual_bysubj.py | content_id: 71bd1a76218fe56ac5bea30f853a5bcbbc877385 | detected_licenses: [] | license_type: no_license | repo_name: davidbestue/encoding | snapshot_id: 6b304f6e7429f94f97bd562c7544d1fdccf7bdc1 | revision_id: c27319aa3bb652b3bfc6b7340044c0fda057bc62 | branch_name: refs/heads/master | visit_date: 2022-05-05T23:41:42.419252 | revision_date: 2022-04-27T08:34:52 | committer_date: 2022-04-27T08:34:52 | github_id: 144,248,690 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 10,336 | extension: py | content:
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 4 11:56:47 2019
@author: David
"""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from functions_encoding_loop import *
root = '/mnt/c/Users/David/Desktop/together_mix_2TR/Conditions/'
dfs_visual = {}
dfs_ips = {}
#Parameters
presentation_period= 0.35
presentation_period_cue= 0.50
inter_trial_period= 0.1
pre_cue_period= 0.5
pre_stim_period= 0.5
limit_time=5
ref_angle=45
def circ_dist(a1,a2):
## Returns the minimal distance in angles between to angles
op1=abs(a2-a1)
angs=[a1,a2]
op2=min(angs)+(360-max(angs))
options=[op1,op2]
return min(options)
def circ_dist_0(a1):
##returns positive and negative values to angle 0
if a1>180:
distance = -circ_dist(0, a1)
else:
distance = circ_dist(0, a1)
return distance
def ub_wind_path(PATH, system):
if system=='wind':
A = PATH
B = A.replace('/', os.path.sep)
C= B.replace('\\mnt\\c\\', 'C:\\')
if system=='unix':
C=PATH
###
return C
def decode(RE):
N=len(RE)
R = []
angles = np.arange(0,N)*2*np.pi/N
R=np.dot(RE,np.exp(1j*angles)) / N
angle = np.angle(R)
if angle < 0:
angle +=2*np.pi
return np.degrees(angle)
def decode_0_90(RE):
N=len(RE)
R = []
angles = np.arange(0,N)*(np.pi/2)/N
R=np.dot(RE,np.exp(1j*angles)) / N
angle = np.angle(R)
if angle < 0:
angle +=2*np.pi
return np.degrees(angle)
###
for CONDITION in ['1_0.2', '1_7', '2_0.2', '2_7']:
for SUBJECT_USE_ANALYSIS in ['d001', 'n001', 'r001', 'b001', 'l001', 's001']:
for algorithm in ["visual", "ips"]:
Method_analysis = 'together'
distance='mix'
#CONDITION = '1_0.2' #'1_0.2', '1_7', '2_0.2', '2_7'
## Load Results
Matrix_results_name = root + CONDITION + '/' + SUBJECT_USE_ANALYSIS + '_' + algorithm + '_' + CONDITION + '_' + distance + '_' + Method_analysis + '.xlsx'
Matrix_results_name= ub_wind_path(Matrix_results_name, system='wind')
xls = pd.ExcelFile(Matrix_results_name)
sheets = xls.sheet_names
##
if algorithm == 'visual':
for sh in sheets:
Matrix_results = pd.read_excel(Matrix_results_name, sheet_name=sh)
df_rolled=np.roll(Matrix_results, -2*ref_angle, 0)
df_rolled=pd.DataFrame(df_rolled)
dfs_visual[ SUBJECT_USE_ANALYSIS + '_' + sh] = df_rolled
if algorithm == 'ips':
for sh in sheets:
Matrix_results = pd.read_excel(Matrix_results_name, sheet_name=sh)
df_rolled=np.roll(Matrix_results, -2*ref_angle, 0)
df_rolled=pd.DataFrame(df_rolled)
dfs_ips[ SUBJECT_USE_ANALYSIS + '_' + sh] = df_rolled
#####
#####
panel_v=pd.Panel(dfs_visual)
df_visual=panel_v.mean(axis=0)
df_visual.columns = [float(df_visual.columns[i])*2 for i in range(0, len(df_visual.columns))]
panel_i=pd.Panel(dfs_ips)
df_ips=panel_i.mean(axis=0)
df_ips.columns = [float(df_ips.columns[i])*2 for i in range(0, len(df_ips.columns))]
df_heatmaps = {}
df_heatmaps['ips'] = df_ips
df_heatmaps['visual'] = df_visual
df_heatmaps_by_subj = {}
df_heatmaps_by_subj['ips'] = dfs_ips
df_heatmaps_by_subj['visual'] = dfs_visual
#####
#####
b_reg = []
b_reg_by_subj = []
b_reg360=[]
TIMES = list(np.array([float(Matrix_results.columns.values[i]) for i in range(len(Matrix_results.columns.values))]) * 2 )
for algorithm in ['visual', 'ips']:
# plt.figure()
# TITLE_HEATMAP = algorithm + '_' + CONDITION + '_' +distance + '_' + Method_analysis + ' heatmap'
# plt.title(TITLE_HEATMAP)
# #midpoint = df.values.mean() # (df.values.max() - df.values.min()) / 2
# ax = sns.heatmap(df_heatmaps[algorithm], yticklabels=list(df_heatmaps[algorithm].index), cmap="coolwarm", vmin=-0.1, vmax=0.1) # cmap= viridis "jet", "coolwarm" RdBu_r, gnuplot, YlOrRd, CMRmap , center = midpoint
# #ax.invert_yaxis()
# ax.plot([0.25, shape(df_heatmaps[algorithm])[1]-0.25], [posch1_to_posch2(4),posch1_to_posch2(4)], 'k--')
# plt.yticks([posch1_to_posch2(4), posch1_to_posch2(13), posch1_to_posch2(22), posch1_to_posch2(31)] ,['45','135','225', '315'])
# plt.ylabel('Angle')
# plt.xlabel('time (s)')
# plt.show(block=False)
#### TSplot preferred
## mean
ref_angle=45
Angle_ch = ref_angle * (len(df_heatmaps[algorithm]) / 360)
values= [ decode(df_heatmaps[algorithm].iloc[:, TR]) for TR in range(0, np.shape(df_heatmaps[algorithm])[1])]
#times= list(df_heatmaps[algorithm].columns)
df_together = pd.DataFrame({'Decoding':values, 'timepoint':TIMES})
df_together['ROI'] = [algorithm for i in range(0, len(df_together))]
b_reg.append(df_together)
## by_subj
for Subj in df_heatmaps_by_subj[algorithm].keys():
values= [ decode(df_heatmaps_by_subj[algorithm][Subj].iloc[:, TR]) for TR in range(0, np.shape(df_heatmaps_by_subj[algorithm][Subj])[1])]
#times= list(df_heatmaps[algorithm].columns)
df_together_s = pd.DataFrame({'Decoding':values, 'timepoint':TIMES})
df_together_s['ROI'] = [algorithm for i in range(0, len(df_together_s))]
df_together_s['subj'] = Subj.split('_')[0]
b_reg_by_subj.append(df_together_s)
#####
#####
## for whole area
# Angle_ch = ref_angle * (len(df_heatmaps[algorithm]) / 360)
# df_all360 = df_heatmaps[algorithm]
# df_together = df_all360.melt()
# df_together['ROI'] = [algorithm for i in range(0, len(df_together))]
# df_together['voxel'] = [i+1 for i in range(0, len(df_all360))]*np.shape(df_all360)[1]
# df_together.columns = ['timepoint', 'Decoding', 'ROI', 'voxel']
# df_together['timepoint'] = [float(df_together['timepoint'].iloc[i]) for i in range(0, len(df_together))]
# b_reg360.append(df_together)
### FactorPlot all brain region
#12.35 in 1 and 12 in 2 ( :S :S aghhhhhhh should not affect both in beh and imaging )
### depending on condition
if CONDITION == '1_0.2':
delay1 = 0.2
delay2 = 11.8
cue=0
t_p = cue + presentation_period_cue + pre_stim_period
d_p = t_p + presentation_period +delay1
r_t = d_p + presentation_period + delay2
elif CONDITION == '1_7':
delay1 = 7
delay2 = 5
cue=0
t_p = cue + presentation_period_cue + pre_stim_period
d_p = t_p + presentation_period +delay1
r_t = d_p + presentation_period + delay2
elif CONDITION == '2_0.2':
delay1 = 0.2
delay2 = 12
cue=0
d_p = cue + presentation_period_cue + pre_stim_period
t_p = d_p + presentation_period +delay1
r_t = t_p + presentation_period + delay2
elif CONDITION == '2_7':
delay1 = 7
delay2 = 12
cue=0
d_p = cue + presentation_period_cue + pre_stim_period
t_p = d_p + presentation_period +delay1
r_t = t_p + presentation_period + delay2
## position in axes
plt.figure()
df_all = pd.concat(b_reg)
df_all_by_subj = pd.concat(b_reg_by_subj)
df_all_by_subj['Decoding_error'] = [circ_dist(df_all_by_subj.Decoding.values[i], 0) for i in range(len(df_all_by_subj))]
#df_all_by_subj['Decoding_error'] = [circ_dist_0(df_all_by_subj.Decoding.values[i]) for i in range(len(df_all_by_subj))]
x_bins = len(df_all.timepoint.unique()) -1
max_val_x = df_all.timepoint.max()
start_hrf = 4
sec_hdrf = 2
d_p1 = (start_hrf + d_p) * x_bins/ max_val_x
t_p1 = (start_hrf +t_p)* x_bins/ max_val_x
r_t1= (start_hrf + r_t)* x_bins/ max_val_x
#
d_p2 = d_p1 + sec_hdrf * x_bins/ max_val_x
t_p2 = t_p1 + sec_hdrf * x_bins/ max_val_x
r_t2= r_t1 + sec_hdrf * x_bins/ max_val_x
y_vl_min = df_all_by_subj.Decoding_error.min()
y_vl_max = df_all_by_subj.Decoding_error.max()
range_hrf = [float(5)/x_bins, float(6)/x_bins] #
paper_rc = {'lines.linewidth': 2, 'lines.markersize': 2}
sns.set_context("paper", rc = paper_rc)
sns.pointplot(x='timepoint', y='Decoding_error', hue='ROI', data=df_all_by_subj, size=5, aspect=1.5)
##all subj visual
paper_rc = {'lines.linewidth': 0.25, 'lines.markersize': 0.5}
sns.set_context("paper", rc = paper_rc)
for a in ['visual', 'ips']:
if a=='visual':
c='b'
elif a =='ips':
c='darkorange'
for s in df_all_by_subj.subj.unique():
sns.pointplot(x='timepoint', y='Decoding_error',
data=df_all_by_subj.loc[ (df_all_by_subj['ROI']==a) & (df_all_by_subj['subj']==s) ],
linestyles='--', color=c, legend=False, size=5, aspect=1.5)
#
plt.fill_between( [ t_p1, t_p2 ], [y_vl_min, y_vl_min], [y_vl_max, y_vl_max], color='b', alpha=0.3, label='target' )
plt.fill_between( [ d_p1, d_p2 ], [y_vl_min, y_vl_min], [y_vl_max, y_vl_max], color='g', alpha=0.3, label='distractor' )
plt.fill_between( [ r_t1, r_t2 ], [y_vl_min, y_vl_min], [y_vl_max, y_vl_max], color='y', alpha=0.3, label='response' )
plt.ylabel('Decoding value')
plt.xlabel('time (s)')
TITLE_BR = CONDITION + '_' +distance + '_' + Method_analysis + ' preferred b_r'
plt.legend(frameon=False)
plt.title(TITLE_BR)
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.gca().get_xaxis().tick_bottom()
plt.gca().get_yaxis().tick_left()
plt.tight_layout()
plt.show(block=False)
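The decode() helper above collapses a response vector over N angle channels into one angle via its first circular (Fourier) component. A standalone sketch of the same idea on a toy tuning curve:

```python
import numpy as np

def decode(RE):
    # Population-vector readout: angle of the resultant of channel responses.
    N = len(RE)
    angles = np.arange(N) * 2 * np.pi / N
    R = np.dot(RE, np.exp(1j * angles)) / N
    angle = np.angle(R)
    if angle < 0:
        angle += 2 * np.pi
    return np.degrees(angle)

# A Gaussian bump of activity centred on channel 12 of 36 (10 degrees/channel)
# decodes to roughly 120 degrees.
resp = np.exp(-0.5 * ((np.arange(36) - 12) / 3.0) ** 2)
print(round(decode(resp)))   # -> 120
```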
authors: ["davidsanchezbestue@hotmail.com"] | author_id: davidsanchezbestue@hotmail.com

blob_id: 52a470f19ad47696fc3099bbb68edd42a9983fd4 | directory_id: 899b8f35ec099abdb8965b39e3f63641c38b336f | path: /headless-chrome/example/chrome_driver_demo.py | content_id: 671934ce6d82f28ab023c3d6070a0c01eb1c72d2 | detected_licenses: ["MIT"] | license_type: permissive | repo_name: einverne/dockerfile | snapshot_id: 51862e36a5cc26f33e42708cdce36f0b76e552f4 | revision_id: 54064d3f61ba3c61dc0b7454a86efd2a0e05d230 | branch_name: refs/heads/master | visit_date: 2023-08-31T20:51:42.479471 | revision_date: 2023-08-29T05:36:52 | committer_date: 2023-08-29T05:36:52 | github_id: 121,220,501 | star_events_count: 146 | fork_events_count: 49 | gha_license_id: MIT | gha_event_created_at: 2023-09-02T02:38:18 | gha_created_at: 2018-02-12T08:32:53 | gha_language: Dockerfile | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,497 | extension: py | content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
def get_jd_price(driver):
driver.get("https://item.jd.com/5218185.html")
# wait up to 10 seconds for the elements to become available
driver.implicitly_wait(10)
price = driver.find_element_by_class_name("p-price")
driver.implicitly_wait(10)
print price.text
driver.implicitly_wait(10)
driver.page_source # 获取源代码
return price.text
def search(driver):
driver.get("http://www.douban.com")
driver.implicitly_wait(10)
search_ele = driver.find_element_by_name('q')
search_ele.send_keys("call me")
search_ele.send_keys(Keys.ENTER)
driver.save_screenshot('screenshot.png')
# driver.get_screenshot_as_file('main-page.png')
if __name__ == '__main__':
options = webdriver.ChromeOptions()
# options.add_argument("--headless")
options.add_argument("--no-sandbox")
options.add_argument("--disable-gpu")
options.binary_location = '/usr/bin/google-chrome-stable'
chromedriver_path = '/usr/local/bin/chromedriver'
driver = webdriver.Chrome(chrome_options=options, executable_path=chromedriver_path)
get_jd_price(driver)
search(driver)
driver.close()
# # use css selectors to grab the login inputs
# email = driver.find_element_by_css_selector('input[type=email]')
# password = driver.find_element_by_css_selector('input[type=password]')
# login = driver.find_element_by_css_selector('input[value="Log In"]')
#
# driver.find_element_by_xpath('//*[@id="_ctl0__ctl0_LoginLink"]').click()
#
# email.send_keys('evan@intoli.com')
# password.send_keys('hunter2')
# driver.get_screenshot_as_file('main-page.png')
# # login
# login.click()
#
# # navigate to my profile
# driver.get('https://www.facebook.com/profile.php?id=100009447446864')
#
# # take another screenshot
# driver.get_screenshot_as_file('evan-profile.png')
# posts = driver.find_elements_by_css_selector('#stream_pagelet .fbUserContent')
# for post in posts:
# try:
# author = post.find_elements_by_css_selector('a[data-hovercard*=user]')[-1].get_attribute('innerHTML')
# content = post.find_elements_by_css_selector('div.userContent')[-1].get_attribute('innerHTML')
# except IndexError:
# # it's an advertisement
# pass
# print(f'{author}: "{content}"')
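The snippet above is Python 2 / Selenium 3 (print statements, find_element_by_* helpers, the chrome_options= keyword). A rough equivalent against the current Selenium 4 API; the URL, element name, and arguments are carried over only as placeholders:

```python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

options = webdriver.ChromeOptions()
options.add_argument("--headless=new")
options.add_argument("--no-sandbox")

# Recent Selenium versions resolve a matching chromedriver automatically.
driver = webdriver.Chrome(options=options)
try:
    driver.get("https://www.douban.com")
    driver.implicitly_wait(10)
    box = driver.find_element(By.NAME, "q")
    box.send_keys("call me", Keys.ENTER)
    driver.save_screenshot("screenshot.png")
finally:
    driver.quit()
```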
authors: ["einverne@gmail.com"] | author_id: einverne@gmail.com

blob_id: b84b7acadbb7f125057cd865a2952c7aa3a68ee9 | directory_id: 608cbb8a2cfa7be360fbdc1d3338a0b348226c00 | path: /SimulationCode/RunAllSims.py | content_id: 1cb5864b5d4e52a4db2dd4bcf4eb83cb57cf921a | detected_licenses: ["MIT"] | license_type: permissive | repo_name: MacIver-Lab/Ergodic-Information-Harvesting | snapshot_id: aa89d3900f7b1106d3d1326f8851544632171950 | revision_id: 6b06033852d511c682f1a38d84d6c3e0d735659b | branch_name: refs/heads/master | visit_date: 2021-07-01T14:53:29.893326 | revision_date: 2020-08-18T04:10:02 | committer_date: 2020-08-18T04:10:02 | github_id: 132,499,485 | star_events_count: 6 | fork_events_count: 4 | gha_license_id: MIT | gha_event_created_at: 2020-08-13T14:45:29 | gha_created_at: 2018-05-07T18:18:02 | gha_language: Jupyter Notebook | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,490 | extension: py | content:
from ErgodicHarvestingLib.SimulationMainQueue import SimulationMainQueue
from datetime import datetime
import time
import warnings
import sys
from multiprocessing import cpu_count
from os import scandir

import numpy as np

# Suppress all warnings
np.seterr(all="ignore")
warnings.filterwarnings("ignore")

if __name__ == "__main__":
    # get number of parallel threads, EAFP way
    try:
        nThread = int(sys.argv[1])
    except BaseException:
        nThread = cpu_count()  # default
    print(f"using {nThread} threads")
    timeStampStart0 = datetime.fromtimestamp(time.time())
    timeStampStart = datetime.fromtimestamp(time.time())
    params = []
    # load parameter files
    paramPath = "./SimParameters/"
    for f in scandir(paramPath):
        if f.is_file() and ".json" in f.name:
            params.append(paramPath + f.name)
    # sort the file list so we have deterministic ordering
    params.sort()
    nSimJobs = len(params)
    print(f"Submitting {nSimJobs} total simulation jobs...")
    print("---------------------------------------------------------------")
    SimulationMainQueue(params, nThread=nThread)
    timeStampEnd = datetime.fromtimestamp(time.time())
    timeString = timeStampEnd.strftime("%b-%d-%Y %T")
    durationMinutes = (timeStampEnd - timeStampStart0).total_seconds() / 60.0
    print(
        "All done! EOF at {0}, total time taken for all simulation(s) {1:.2f} minutes".format(
            timeString, durationMinutes
        )
    )
authors: ["chenchen.bme@gmail.com"] | author_id: chenchen.bme@gmail.com

blob_id: e2034247a869695e430d6d1219d75fd72bf5b6a7 | directory_id: d81f09e9d1caac1cd216d2a077ff4669d6b89b59 | path: /SerialBoot16/SerialBoot16.py | content_id: e7f4083932a93cee3ea071c31d04fa386aad0497 | detected_licenses: [] | license_type: no_license | repo_name: luciodj/PIC16-Serial-Bootloader | snapshot_id: 203bb6113cbeb721578843b61cb691205da433a5 | revision_id: 4b18ba65353227b92e9fef21391a30b5ec62e96c | branch_name: refs/heads/master | visit_date: 2020-07-21T03:21:13.098216 | revision_date: 2015-01-20T20:54:35 | committer_date: 2015-01-20T20:54:35 | github_id: 206,748,947 | star_events_count: 1 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 16,641 | extension: py | content:
#!usr/bin/python
#
# Serial Bootloader for PIC16
#
# Author: Lucio Di Jasio
# url: blog.flyingpic24.com
#
import serial
import serial.tools.list_ports as lp
import time
import sys
import intelhex
from Tkinter import *
from tkFileDialog import askopenfilename
__version__ = 0.1
STX = '[' #0x0f
cmdSYNC = 'S' #1
cmdINFO = 'I' #2
cmdBOOT = 'B' #3
cmdREBOOT = 'R' #4
cmdWRITE = 'W' #11
cmdERASE = 'E' #21
"""
Protocol Description.
USB protocol is a typical master-slave communication protocol, where
master (PC) sends commands and slave (bootloader equipped device) executes
them and acknowledges execution.
* Command format.
<STX[0]><CMD_CODE[0]><ADDRESS[0..3]><COUNT[0..1]> <DATA[0..COUNT-1]>
|-- 1 --|---- 1 -----|------ 4 -----|----- 2 ----|------ COUNT -----|
STX - Command start delimiter (for future upgrades).
Length: 1 byte. Mandatory.
CMD_CODE - Command index (TCmd).
Length: 1 byte. Mandatory.
ADDRESS - Address field. Flash start address for
CMD_CODE command operation.
Length: 4 bytes. Optional (command specific).
COUNT - Count field. Amount of data/blocks for
CMD_CODE command operation.
Length: 2 bytes. Optional (command specific).
DATA - Data array.
Length: COUNT bytes. Optional (command specific).
Some commands do not utilize all of these fields.
See 'Command Table' below for details on specific command's format.
* Command Table.
--------------------------+---------------------------------------------------
| Description | Format |
| Synchronize with PC tool | <STX><cmdSYNC> |
| Send bootloader info | <STX><cmdINFO> |
| Go to bootloader mode | <STX><cmdBOOT> |
| Restart MCU | <STX><cmdREBOOT> |
| Write to MCU flash | <STX><cmdWRITE><START_ADDR><DATA_LEN><DATA_ARRAY> |
| Erase MCU flash. | <STX><cmdERASE><START_ADDR><ERASE_BLOCK_COUNT> |
------------------------------------------------------------------------------
* Acknowledge format.
<STX[0]><CMD_CODE[0]>
|-- 1 --|---- 1 -----|
STX - Response start delimiter (for future upgrades).
Length: 1 byte. Mandatory.
CMD_CODE - Index of command (TCmd) we want to acknowledge.
Length: 1 byte. Mandatory.
See 'Acknowledgement Table' below for details on specific command's
acknowledgement process.
* Acknowledgement Table.
--------------------------+---------------------------------------------------
| Description | Acknowledgement |
|--------------------------+---------------------------------------------------|
| Synchronize with PC tool | upon reception |
| Send bootloader info | no acknowledge, just send info |
| Go to bootloader mode | upon reception |
| Restart MCU | no acknowledge |
| Write to MCU flash | upon each write of internal buffer data to flash |
| Erase MCU flash. | upon execution |
"""
# Supported MCU families/types.
dMcuType = { "PIC16" : 1, 'PIC18':2, 'PIC18FJ':3, 'PIC24':4, 'dsPIC':10, 'PIC32': 20}
#define an INFO record
class info:
McuType = ''
McuId = 0
McuSize = 0
WriteBlock = 0
EraseBlock = 0
BootloaderRevision = 0
DeviceDescription = ''
BootStart = 0
# additional fields
dHex = None
def getMCUtype( list, i):
for key, value in dMcuType.items():
if value == list[i]:
info.McuType = key
print "MCU type is:", info.McuType
return i+1
print "MCU type (%d) not recognized" % list[i]
return i+1
def getMCUid( list, i):
# MCUId appears not to be used anymore, report error
print 'MCUId Info field found!?'
exit(1)
def getMCUSIZE( list, i):
low = int(list[i+0]) + int(list[i+1])*256
high = int(list[i+2]) + int(list[i+3])*256
info.McuSize = high*65536 + low
print "MCU size = %d" % info.McuSize
return i+3
def getERASEB( list, i):
info.EraseBlock = (int(list[i+0])+int( list[i+1])*256)
print "ERASE Block = %d" % info.EraseBlock
return i+1
def getWRITEB( list, i):
info.WriteBlock = ( int(list[i+0])+int(list[i+1])*256)
print "WRITE Block = %d" % info.WriteBlock
return i+1
def getBOOTR( list, i):
info.BootloaderRevision = ( int(list[i+0])+int(list[i+1])*256)
print "Bootloader Revision = %x" % info.BootloaderRevision
return i+1
def getBOOTS( list, i):
low = int(list[i+0]) + int(list[i+1])*256
high = int(list[i+2]) + int(list[i+3])*256
info.BootStart = (high*65536 + low)
print "BOOT Start = 0x%x" % info.BootStart
return i+3
def getDEVDSC( list, i):
info.DeviceDescription = "".join(map( lambda x: chr(x), list[i : i+20]))
#print "Device Description: %s" % info.DeviceDescription
return i+20
# Bootloader info field ID's enum
dBIF = {
# 0: ("ALIGN", skip_align),
1: ('MCUTYPE', getMCUtype), # MCU type/family (byte)
2: ('MCUID', getMCUid ), # MCU ID number ()
3: ('ERASEBLOCK', getERASEB), # MCU flash erase block size (int)
4: ('WRITEBLOCK', getWRITEB), # MCU flash write block size (int)
5: ('BOOTREV', getBOOTR), # Bootloader revision (int)
6: ('BOOTSTART', getBOOTS), # Bootloader start address (long)
7: ('DEVDSC', getDEVDSC), # Device descriptor (string[20])
8: ('MCUSIZE', getMCUSIZE) # MCU flash size (long)
}
def DecodeINFO( size, list):
index = 0
while index<size:
print "index:",index
try:
f = dBIF[list[index]] # find in the dictionary of valid fields
except:
print "Field %d at location %d not recognized!" % (list[index], index)
return
index = f[1](list, index+1) # call decoding function
index += 1
#----------------------------------------------------------------------
def Connect():
global h
portgen = lp.grep( 'tty.usb')
for port,_,_ in portgen: break # catch the first one
print 'port=',port
if port:
h = serial.Serial( port, baudrate=19200)
print h
h.flushInput()
else: raise ConnectionFailed
def ConnectLoop():
print "Connecting..."
while True:
try:
Connect()
except:
print "Reset board and keep checking ..."
time.sleep(1)
else:
break;
# succeeded, obtained a handle
print "Connected!"
def Boot():
print "Send the BOOT command ..",
h.write( bytearray([ STX, cmdBOOT]))
r = h.read(2)
if r[1] == cmdBOOT:
print "Ready!"
def Sync():
print "Send the Sync command",
h.timeout=0.5 # temporarily set a max time for sync response
r =[]
while len(r)<2:
h.write( bytearray([ STX, cmdSYNC]))
r = h.read(2)
if len(r)<2: # timeout detected
print "timeout!"
h.flushInput() # flush all the remaining garbage in the input buffer
h.timeout = None # wait forever
if r[1] == cmdSYNC:
print "Ready!"
def Info():
print "Send the INFO command",
h.write( bytearray([ STX, cmdINFO]))
size = ord(h.read()) # get the info block length
print "Size", size
ilist = bytearray(h.read( size))
#print ilist
DecodeINFO( size, ilist)
def Erase( waddr):
#print "Erase: 0x%x " % waddr
cmd = bytearray([ STX, cmdERASE])
cmd = extend32bit( cmd, waddr) # starting address
cmd = extend16bit( cmd, 1) # no of words
h.write( cmd)
r = h.read(2) # check reply
if r[1] != cmdERASE: raise ERASE_ERROR
def WriteRow( waddr):
# print "Write: 0x%x " % waddr
iaddr = waddr*2 # get the byte address
count = info.WriteBlock # number of words
cmd = bytearray([ STX, cmdWRITE])
cmd = extend32bit( cmd, waddr)
cmd = extend16bit( cmd, count)
d = info.dHex
# pick count words out of the hex array
for x in xrange( iaddr, iaddr+count*2, 2):
cmd.extend( [ d[x], d[x+1]])
# print "cmd: ",cmd
h.write(cmd) # send the command
r = h.read(2)
if r[1] != cmdWRITE: raise WRITE_ERROR
def ReBoot():
# global h
print "Rebooting the MCU!"
h.write(bytearray( [ STX, cmdREBOOT]))
Close()
def Close():
# global h
if h:
h.close()
def Load( name):
# init and empty code dictionary
info.dHex = None
try:
info.dHex = intelhex.IntelHex( name)
return True
except:
return False
def extend16bit( lista, word):
lista.extend([ word%256, word/256])
return lista
def extend32bit( lista, long):
lista = extend16bit( lista, long%65536)
lista = extend16bit( lista, long/65536)
return lista
# write test
# def WriteTest():
# print "Test erasing the first block at 0x20, 32 words"
# waddr = info.EraseBlock
# # print "waddr", waddr
# Erase( waddr)
# print "Test writing a first block 0x20, 32 words"
# waddr = info.EraseBlock
# iaddr = waddr*2;
# d = info.dHex
# for x in xrange( iaddr, iaddr*2): d[x]=x
# WriteRow( waddr)
def EmptyRow( waddr):
iaddr = waddr*2
for x in xrange( info.WriteBlock*2):
if info.dHex[ iaddr+x] != 0xff: return False
return True
def Execute():
# 1. fix the App reset vector
d = info.dHex
a = (info.BootStart*2)-4 # copy it to appReset = BootStart -4
for x in xrange(4): # copy
d[a+x] = d[x]
# 2. fix the reset vector to point to BootStart
v = extend32bit( [], info.BootStart)
# high movlp low goto
d[0]=0x80+(v[1]); d[1]=0x31; d[2]=v[0]; d[3]=0x28+( v[1] & 0x7)
# print "Reset Vector ->", v[1], v[0]
# d[0] = 0x8E; d[1]=0x31; d[2]=0x00; d[3]=0x2E
print d[0], d[1], d[2], d[3]
# 3. erase blocks 1..last
eblk = info.EraseBlock # compute erase block size in word
last = info.BootStart / eblk # compute number of erase blocks excluding Bootloader
print "Erasing ..."
for x in xrange( 1, last):
#print "Erase( %d, %d)" % ( x * eblk, 1)
Erase( x * eblk) # erase one at a time
# 4. program blocks 1..last (if not FF)
wwblk = info.WriteBlock # compute the write block size
last = info.BootStart / wwblk # compute number of write blocks excluding Bootloader
print "writeBlock= %d, last block = %d" % ( wwblk, last)
for x in xrange( eblk/wwblk, last): # write all rows starting from second erase block
if not EmptyRow( x * wwblk): # skip empty rows
# print "WriteRow( %X)" % (x * wwblk)
WriteRow( x*wwblk) # write to device
pass
# 5. erase block 0
Erase( 0)
# print "Erase( 0)"
# 6. program all rows of block 0
for x in xrange( eblk/wwblk):
WriteRow( x * wwblk)
# print "WriteRow( %X)" % (x * wwblk)
###################################################################
# main window definition
#
class MainWindow():
def __init__( self):
global root
bgc = 'light gray'
bgd = 'ghost white'
root = Tk()
root.title( "PIC16 Serial Bootloader")
#root.configure( bg=bgc)
root.focus_set()
root.geometry( '+400+100')
root.protocol( 'WM_DELETE_WINDOW', root.quit) # intercept red button
root.bind( sequence='<Command-q>', func= lambda e: e.widget.quit)
root.grid_columnconfigure( 1, minsize=200)
rowc = 0
#------- top icon
rowc += 1
self.img = PhotoImage(file='mikroBootloader.png')
Label( root, image=self.img).grid( padx=10, pady=5, columnspan=2, row=rowc, sticky=W)
#---------- grid
rowc += 1
self.MCUType = StringVar()
self.MCUType.set( 'None')
Label( root, text="MCU Type:", width=10, bg=bgc).grid( padx=10, pady=5, row=rowc, sticky=W)
Label( root, textvariable=self.MCUType, width=30, bg=bgd).grid( padx=10, pady=5, row=rowc, column=1, sticky=W)
Button( root, text='1:Connect', width=15, bg=bgc, command=self.cmdInit).grid(
padx=10, pady=5, row = rowc, column=2, sticky=N+W)
rowc += 1
self.Device = StringVar()
self.Device.set( 'None')
Label( root, text="Device:", width=10, bg=bgc).grid( padx=10, pady=5, row=rowc, sticky=W)
Label( root, textvariable=self.Device, width=30, bg=bgd).grid( padx=10, pady=5, row=rowc, column=1, sticky=W)
Button( root, text='2: Browse for HEX', width=15, command=self.cmdLoad).grid(
padx=10, pady=5, row=rowc, column=2)
rowc += 1
self.fileHex = StringVar()
Label( root, text="Hex:", width=10, bg=bgc).grid( padx=10, pady=5, row=rowc, sticky=W)
Label( root, textvariable=self.fileHex, width=30, bg=bgd).grid( padx=10, pady=5, row=rowc, column=1, sticky=W)
Button( root, text='3: Begin Uploading', width=15, command=self.cmdProgram).grid(
padx=10, pady=5, row=rowc, column=2)
#------- bottom row
#------- status bar --------------------------------------
rowc += 1
self.Status = StringVar()
self.Status.set( 'Uninitialized')
Label( root, text="Status:", width=10, bg=bgc).grid( padx=10, pady=10, row=rowc, sticky=W)
Label( root, textvariable=self.Status, width=30, bg=bgd).grid( padx=10, pady=10, row=rowc, column=1, columnspan=2, sticky=W)
Button( root, text='Quit', width=15, command=root.quit).grid( padx=10, pady=10, row=rowc, column=2, sticky=E+S)
# check if the file name is loadable
global dHex
name = ''
if len(sys.argv) > 1:
name = sys.argv[1]
if not Load( name):
self.Status.set( "File: %s not found!")
self.fileHex.set( name)
#------------------ main commands
def cmdInit( self):
# check if serial port available
try:
Connect()
except:
self.Status.set( "Serial Bootloader Not Found, connection failed")
else:
self.Status.set( "Serial Bootloader connected!")
Sync() # check the sync
Info() # get the device infos
Boot() # lock into boot mode
self.Device.set( info.DeviceDescription)
self.MCUType.set( info.McuType)
def cmdLoad( self):
name = askopenfilename()
if Load(name):
self.Status.set( "Hex file loaded")
self.fileHex.set( name)
else:
self.Status.set( "Invalid file name")
self.fileHex.set( '')
def cmdProgram( self):
# Execute()
# try:
# WriteTest()
Execute()
# except:
# programming error
# self.Status.set( "Programming failed")
# else:
self.Status.set( "Programming successful")
ReBoot()
#root.destroy()
#----------------------------------------------------------------------------
if __name__ == '__main__':
#discriminate if process is called with the check option
if len(sys.argv) > 1:
if sys.argv[1] == '-gui':
sys.argv.pop(1) # remove the option
MainWindow()
mainloop()
exit(0)
# command line mode
# if a file name is passed
if len(sys.argv) == 1:
print "Usage: %s (-gui) file.hex"
exit(1)
else:
name = sys.argv[1]
# load the hex file provided
if not Load(name):
print "File %s not found" % name
exit(1)
# loops until gets a connection
ConnectLoop()
# run the erase/program sequence
Execute()
#
ReBoot()
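Per the protocol docstring above, a command frame is STX, a command byte, a little-endian 4-byte address, a little-endian 2-byte count, then the data bytes (extend16bit/extend32bit emit little-endian). A small sketch of packing a cmdWRITE frame with struct; the address, count, and payload here are arbitrary:

```python
import struct

STX, cmdWRITE = ord('['), ord('W')

def write_frame(address, count, payload):
    """<STX><cmdWRITE><ADDRESS: 4 bytes LE><COUNT: 2 bytes LE><payload>"""
    return struct.pack('<BBIH', STX, cmdWRITE, address, count) + bytes(payload)

# One 32-word row at word address 0x20, payload built elsewhere.
frame = write_frame(0x20, 32, bytes(64))
print(frame[:8].hex())   # 5b57200000002000
```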
authors: ["pilot@flyingpic24.com"] | author_id: pilot@flyingpic24.com

blob_id: 54d58dff53e43a7372192dc56d2ea6c3f9d782e0 | directory_id: e93ddb893fccfb94a4240e4c36e547a10c33f5a5 | path: /data/process_data.py | content_id: 896fa0ae57010eebc9f28f3ea9eeaee9218959f0 | detected_licenses: [] | license_type: no_license | repo_name: imadarsh1001/Disaster_Response_Pipeline_NLP | snapshot_id: 8e3a8b58c0f5b5b8c9e41388b354ba8ae4244a7c | revision_id: 6f089812ee18ef1f4ba0858bbfb7ab07129fb452 | branch_name: refs/heads/master | visit_date: 2022-09-13T14:39:32.664825 | revision_date: 2020-05-26T22:38:26 | committer_date: 2020-05-26T22:38:26 | github_id: 264,499,135 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,402 | extension: py | content:
import sys
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
- Loads the data from csv files into dataframe
- Merge the 2 dataframe
Args:
messages_filepath (str): File path of message
categories_filepath (str): File pathe of category
Returns:
pandas dataframe: Merged data that is loaded
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = pd.merge(messages, categories, on="id")
return df
def clean_data(df):
"""
- Cleans the data
Args:
df (pandas dataframe): Merged dataframe
Returns:
pandas dataframe: Cleaned and structured dataframe
"""
# create a dataframe of the 36 individual category columns
categories = df.categories.str.split(";", expand=True)
# select the first row of the categories dataframe
row = categories.iloc[0,:]
# use this row to extract a list of new column names for categories.
# one way is to apply a lambda function that takes everything
# up to the second to last character of each string with slicing
category_colnames = row.apply(lambda name: name[:-2]).tolist()
# rename the columns of `categories`
categories.columns = category_colnames
for column in categories:
# set each value to be the last character of the string
categories[column] = categories[column].str.split("-").str[-1]
# convert column from string to numeric
categories[column] = categories[column].astype(int)
# drop the original categories column from `df`
df = df.drop(["categories"], axis=1)
# concatenates the original dataframe with the new `categories` dataframe
df = pd.concat([df, categories], axis=1)
# drop the duplicates
df = df.drop_duplicates()
return df
def save_data(df, database_filename):
"""
- Save clean data into sqlite database
Args:
df (pandas dataframe): Cleaned data
database_filename (str): Database name to save
"""
engine = create_engine('sqlite:///' + database_filename)
# import pdb; pdb.set_trace()
df.to_sql('Disaster_Response', engine, index=False)
engine.dispose()
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
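A tiny self-contained check of the category-splitting logic in clean_data() above; the column values are made up to mirror the disaster-response layout:

```python
import pandas as pd

toy = pd.DataFrame({
    "id": [1, 2],
    "message": ["help", "water needed"],
    "categories": ["related-1;water-0;food-0", "related-1;water-1;food-0"],
})
print(clean_data(toy)[["related", "water", "food"]])
#    related  water  food
# 0        1      0     0
# 1        1      1     0
```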
authors: ["imadarsh1001@gmail.com"] | author_id: imadarsh1001@gmail.com

blob_id: 3a93bf306e8f160d8e5f9c0ed472b015bb94941e | directory_id: ee401da90cfa0e100da89d493712d1371ca0bbd9 | path: /prpl/apis/hl/spec/builder/graph/graph.py | content_id: cc6e401942a48b79ffa4987493ac85811ffd7ed4 | detected_licenses: [] | license_type: no_license | repo_name: prplfoundation/prpl-ssi-api-parser | snapshot_id: c82e72a802de581e6df856c96ecf3b36a1d3e494 | revision_id: f81c189eb7a61a0260e7110b264c2e1823d39258 | branch_name: refs/heads/master | visit_date: 2020-03-26T22:07:02.424175 | revision_date: 2019-02-28T18:36:22 | committer_date: 2019-02-28T18:36:22 | github_id: 145,432,762 | star_events_count: 0 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,442 | extension: py | content:
class Graph:
    """Graph groups multiple nodes into a Graphviz cluster/graph."""

    def __init__(self):
        """Initializes the Graph object."""
        self.nodes = {}

    def append(self, node):
        """Adds the provided node/tree to the cluster.

        Args:
            node (Node): Node/tree to be added to the cluster as root node.
        """
        self.nodes[node.name] = node

    def dot(self, indent_interval=2):
        """Converts the graph to a Graphviz graph.

        Args:
            indent_interval (int): Number of spaces used for indentation.

        Returns:
            str: Graphviz graph.
        """
        indentation = ' ' * indent_interval
        nodes = map(lambda x: '{}\n'.format(x.dot(indent_interval)), self.nodes.values())
        nodes = ''.join(nodes)

        dot = 'graph G {\n'
        dot += indentation + 'graph [font="Calibri Light" fontsize=11 style=dashed penwidth=0.5]\n'
        dot += indentation + 'node [shape=box font="Calibri Light" fontsize=11 style=dashed penwidth=0.5]\n'
        dot += nodes
        dot += '}'
        return dot

    def __str__(self):
        """Formats the node cluster as a string.

        Returns:
            str: Human-friendly textual representation of the sub-tree.
        """
        nodes = list(map(lambda x: str(x), self.nodes.values()))
        return ''.join(nodes)[:-1]
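A hedged usage sketch for Graph; the project's real Node class is not shown in this file, so a minimal stand-in with the same name / dot() / __str__ interface is used:

```python
class FakeNode:
    def __init__(self, name):
        self.name = name

    def dot(self, indent_interval=2):
        return ' ' * indent_interval + '"{0}" [label="{0}"]'.format(self.name)

    def __str__(self):
        return self.name + '\n'


g = Graph()
g.append(FakeNode("Device"))
g.append(FakeNode("Device.WiFi"))
print(g.dot())   # prints a small "graph G { ... }" Graphviz document
```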
authors: ["jrmfreitas@live.com"] | author_id: jrmfreitas@live.com

blob_id: ccfe5211621db29314115cb391d4fdeebba44405 | directory_id: 060722dfbdd53724a2e6444341225c4937ef1fe9 | path: /classes/classes.py | content_id: 674cb4527fb1309c374fb9441aab386377272355 | detected_licenses: [] | license_type: no_license | repo_name: math77/Python3OO-Basico | snapshot_id: aded5407489aab2284338b75abf52212bb919275 | revision_id: 331988f15852ebc724cc3f826d4cd1f92a805fcb | branch_name: refs/heads/master | visit_date: 2020-03-22T01:09:46.548659 | revision_date: 2018-07-01T23:54:42 | committer_date: 2018-07-01T23:54:42 | github_id: 139,286,922 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 510 | extension: py | content:
class Dog:
    # Class attribute
    specie = 'mammal'

    def __init__(self, name, age):
        self.name = name
        self.age = age

    # instance method
    def description(self):
        return "{} is {} years old".format(self.name, self.age)

    # instance method
    def speak(self, sound):
        return "{} says {}".format(self.name, sound)


class RussellTerrier(Dog):
    def run(self, speed):
        return "{} runs {}".format(self.name, speed)


class Bulldog(Dog):
    def run(self, speed):
        return "{} runs {}".format(self.name, speed)
authors: ["matheusemanuel745@gmail.com"] | author_id: matheusemanuel745@gmail.com

blob_id: 047d16ad58473a4eeec83febc0bfece993897984 | directory_id: 801cf912f3c13ac73af9178b482ba2408ac5f102 | path: /questions/migrations/0004_auto_20200227_1859.py | content_id: b0cb9bc47b7f6556a0e67c31a6372b96db86c9c2 | detected_licenses: [] | license_type: no_license | repo_name: didemertens/sei-project-4 | snapshot_id: 62de1c24ad0f5433a95b51e44a6e1430bf39454b | revision_id: dc6b42565698fb1d2b3d411cec6d6c95612a9b20 | branch_name: refs/heads/master | visit_date: 2023-01-12T13:44:43.069791 | revision_date: 2020-03-27T10:31:33 | committer_date: 2020-03-27T10:31:33 | github_id: 243,276,770 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2023-01-05T08:28:49 | gha_created_at: 2020-02-26T14:03:03 | gha_language: JavaScript | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,054 | extension: py | content:
# Generated by Django 2.2.9 on 2020-02-27 18:59

from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):

    dependencies = [
        ('questions', '0003_auto_20200227_1019'),
    ]

    operations = [
        migrations.AddField(
            model_name='answer',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='answer',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='question',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='question',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
authors: ["d.g.j.mertens@gmail.com"] | author_id: d.g.j.mertens@gmail.com

blob_id: f517f22ae79b5ae1cd103c71e08419386bd8eeae | directory_id: 9da129602c7da275c2e7c839c29c044e71d6c9b1 | path: /PYTHON CODE FOR COMPUTER SIDE/NFC/venv/Lib/site-packages/llsmartcard/helper.py | content_id: 9d973b8fee47640b86c7829dd9c4fb2d36468e71 | detected_licenses: [] | license_type: no_license | repo_name: SSTodorov/Passdroid | snapshot_id: 370b428ab82abd1c0bdcdcd7fa97dc67c4e848eb | revision_id: 3afa9578ee27a47b69bb2129ad209001f4952a5b | branch_name: refs/heads/main | visit_date: 2023-08-05T19:51:00.015103 | revision_date: 2021-09-17T11:33:46 | committer_date: 2021-09-17T11:33:46 | github_id: 407,514,695 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 596 | extension: py | content:
def write_binary(data, filename):
    """
    Write binary data to a file on disk
    """
    import struct

    # Create file and write to it
    f = open(filename, "wb+")
    f.write(struct.pack("%dB" % len(data), *data))
    f.close()


def read_binary(filename):
    """
    Read binary data from a file on disk
    """
    data = []
    # Open the file and read it back one byte at a time
    f = open(filename, "rb")
    byte = f.read(1)
    while byte != b"":
        data.append(ord(byte))
        byte = f.read(1)
    f.close()
    return data
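A round-trip sketch for the two helpers above; the file name and payload are arbitrary:

```python
payload = [0x01, 0x02, 0xFF]
write_binary(payload, "demo.bin")
assert read_binary("demo.bin") == payload
```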
authors: ["62951147+SSTodorov@users.noreply.github.com"] | author_id: 62951147+SSTodorov@users.noreply.github.com

blob_id: 2ec24b71ad14bc96972d682f916bd44ff665186f | directory_id: fca8cc40e736521a3cb31c831379264da76a2bf3 | path: /reference/reference/views.py | content_id: d3ab641339a0503282d0b6fb983de7fecab1c4c8 | detected_licenses: [] | license_type: no_license | repo_name: vnikesh/Essens-Website | snapshot_id: 4e3058f345d2e3e0c7a26dd09e8e0fb0e6ccb59f | revision_id: 34d1f7932d9a96e4ac7dfc279e92d6fb2273e3d5 | branch_name: refs/heads/master | visit_date: 2020-03-07T08:22:08.688873 | revision_date: 2018-03-30T03:20:16 | committer_date: 2018-03-30T03:20:16 | github_id: 127,375,920 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 685 | extension: py | content:
from django.shortcuts import render

# Create your views here.
#from django.utils import timezone
#from .models import *
from django.shortcuts import render, get_object_or_404
#from django.shortcuts import redirect
#from django.contrib.auth.decorators import login_required
#from .forms import *
#from django.db.models import Sum


def home(request):
    return render(request, 'essens/home.html',
                  {'essens': home})


def reference(request):
    return render(request, 'essens/reference.html',
                  {'essens': reference})


def core(request):
    return render(request, 'essens/core.html',
                  {'essens': core})

# Create your views here.
authors: ["nikeev1991@gmail.com"] | author_id: nikeev1991@gmail.com

blob_id: db7f00f51f2445ceb5545c73e114f641524ecfc4 | directory_id: 54750412da7d9d16285c9a45a4fb9de6890b1d5b | path: /activity_manager/views/register.py | content_id: 336835eded49f2f18ee03bf46e61fdbd44889384 | detected_licenses: [] | license_type: no_license | repo_name: MasterBlaster479/we_deploy | snapshot_id: 1bc36bb14fc8a01fc8bf0eba1a434dcaa8c319bb | revision_id: 038e369034b603766c692f3de936a93c3ee574cd | branch_name: refs/heads/master | visit_date: 2021-01-25T01:22:20.254456 | revision_date: 2017-06-19T09:13:05 | committer_date: 2017-06-19T09:13:05 | github_id: 94,755,562 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 694 | extension: py | content:
from flask_restful import Api

from ActivityResource import *
from UserResource import *


def register_resources(app, api_prefix):
    api = Api(app)

    # Activity Resource register
    api.add_resource(ActivityResource, api_prefix + ActivityResource.route_base)
    api.add_resource(ActivityResourceList, api_prefix + ActivityResourceList.route_base)
    api.add_resource(ActivityResourceMethod, api_prefix + ActivityResourceMethod.route_base)

    # User Resource register
    api.add_resource(UserResource, api_prefix + UserResource.route_base)
    api.add_resource(UserResourceList, api_prefix + UserResourceList.route_base)
    api.add_resource(UserLogin, api_prefix + UserLogin.route_base)
authors: ["goran.bogic@infokom.hr"] | author_id: goran.bogic@infokom.hr

blob_id: afa655ac8cf52139ac0b35fdacc2cb0bfdc7e870 | directory_id: 88fea51d5d9e9e2e30d81f099fa6093e6d60e6f7 | path: /test.py | content_id: d435266d2e6f0d421f8f35ecb5bcc002217e2dbf | detected_licenses: [] | license_type: no_license | repo_name: dnfcallan/VirtualConductor | snapshot_id: fb80caea70832652e16df02ac246f0bd46aa5e4f | revision_id: 30930a51747599a601e24bb764ae0f984a538390 | branch_name: refs/heads/main | visit_date: 2023-04-20T03:35:18.436315 | revision_date: 2021-04-26T12:01:11 | committer_date: 2021-04-26T12:01:11 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,888 | extension: py | content:
import datetime
import torch
from moviepy.editor import *
from dataset import *
from utils_pose import filter, show_pose
def test(G_high_dir, G_low_dir, testset, video_save_dir=None, vis=True):
dataset = TestDataset(test_samles_dir=testset)
testloader = DataLoader(dataset=dataset, batch_size=1)
mode = []
if video_save_dir is not None:
os.mkdir(video_save_dir)
if G_high_dir is not None:
G_high = torch.load(G_high_dir).cuda()
G_high.eval()
print('high-pass model loaded from', G_high_dir)
mode.append('high')
if G_low_dir is not None:
G_low = torch.load(G_low_dir).cuda()
G_low.eval()
print('low-pass model loaded from', G_low_dir)
mode.append('low')
keypoints_mean = np.load('keypoints_mean.npy', allow_pickle=True)
print('testing mode:', mode)
for step, (music_feature, name) in enumerate(testloader):
name = name[0]
print('\n------ evaluating {}/{} ------'.format(step, len(dataset)))
print('test sample:', name)
music_feature = music_feature.transpose(1, 2)
var_x = music_feature.float().cuda()
noise = torch.randn([1, var_x.size()[1], 18]).cuda()
y_high, hx = G_high(var_x, noise)
y_low, hx = G_low(var_x, noise)
y_high = y_high[0].detach().cpu().numpy()
y_low = y_low[0].detach().cpu().numpy()
y_high_norm = filter(y_high - np.mean(y_high, axis=0), mode='high pass')
y_low_norm = filter(y_low - np.mean(y_low, axis=0), mode='low pass')
y_high_norm *= 0.2 * 0.8
y_low_norm *= 0.8
y = [y_high_norm + keypoints_mean, y_high_norm + y_low_norm + keypoints_mean, y_low_norm + keypoints_mean]
# pad17pose = padding_results(predicted_pose)
# np.save(video_save_dir+name,y)
# np.save(video_save_dir+'【17】'+name,pad17pose)
show_pose(y, name, video_save_dir = video_save_dir, vis = vis)
if video_save_dir is not None:
video = VideoFileClip(video_save_dir + name + '.avi')
video = video.set_audio((AudioFileClip(testset + name)))
video.write_videofile(video_save_dir + name + '.mp4')
os.remove(video_save_dir + name + '.avi')
print('test finished')
if __name__ == '__main__':
G_high_dir = r'checkpoints/high_G_stage2_1adv10per_c_globalstep138000.pt'
G_low_dir = r'checkpoints/low_G_stage2_1adv1per_globalstep42000.pt'
test_description = 'MM_models_test'
time_stamp = datetime.datetime.now().strftime('_%m_%d__%H_%M_%S')
video_save_dir = 'test\\results\\' + test_description + time_stamp + '/'
test(G_high_dir=G_high_dir,
G_low_dir=G_low_dir,
testset='test\\testset\\',
video_save_dir=video_save_dir,
vis=True)
authors: ["noreply@github.com"] | author_id: dnfcallan.noreply@github.com

blob_id: 45d147e228b07cd440f607f9150aae35457d9bc5 | directory_id: 2fe43bb14f9c54ac0d043078b023c48a061c0e70 | path: /api/settings.py | content_id: 6d5016eb2fe941f4f56e3b9b2327e4f43fdf7310 | detected_licenses: [] | license_type: no_license | repo_name: martin-martin/django-music-API | snapshot_id: 5bcad92ccd4855e1a173ebef6757c6c8a0a0f0f6 | revision_id: 6947a1ac558b2deeee795aef8a26c49a3f5a15e9 | branch_name: refs/heads/master | visit_date: 2020-03-30T09:12:04.021342 | revision_date: 2018-10-01T15:02:22 | committer_date: 2018-10-01T15:02:22 | github_id: 151,065,062 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,114 | extension: py | content:
"""
Django settings for api project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'frj&r!n0=2h0gi#w5(&kdtuhrx_qh2ykb%kvl7z%#iz(8tyqfm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'music',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
authors: ["breuss.martin@gmail.com"] | author_id: breuss.martin@gmail.com

blob_id: 02fcc954935e2275f6b7dd5f75950f1634c61ccb | directory_id: 8deae87877b6a814e7deca685b75e5d25ce6af93 | path: /simple/simple/wsgi.py | content_id: 40361f37ece8b58fa1bf5c7129437637d70a6330 | detected_licenses: ["MIT"] | license_type: permissive | repo_name: e4c5/django-react | snapshot_id: a885ef78d22f820017a4c61091088b958be4d098 | revision_id: 88a04915871f5d6544368fc1a91bffc9e02506cc | branch_name: refs/heads/master | visit_date: 2023-04-14T06:24:23.424084 | revision_date: 2023-04-04T12:46:27 | committer_date: 2023-04-04T12:46:27 | github_id: 187,174,419 | star_events_count: 3 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 389 | extension: py | content:
"""
WSGI config for simple project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'simple.settings')
application = get_wsgi_application()
authors: ["raditha.dissanayake@gmail.com"] | author_id: raditha.dissanayake@gmail.com

blob_id: 3c8837220b27445ba7329947de011eeec3235eb2 | directory_id: e4abb5d67c5ba3edcd70592d55ad58b01a4dba1c | path: /fraud_detections/userr/migrations/0001_initial.py | content_id: f65740eaec82c1b3c483a92f3896e5337d5da76c | detected_licenses: [] | license_type: no_license | repo_name: vismaya1999/fraud-detection1 | snapshot_id: 6cd886a2833637e9b602f155665063828f5eeb97 | revision_id: eb65af882a081e080918e8da6e9c6d81981d386f | branch_name: refs/heads/main | visit_date: 2023-08-04T06:42:21.325285 | revision_date: 2021-09-15T18:03:30 | committer_date: 2021-09-15T18:03:30 | github_id: 406,871,028 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,268 | extension: py | content:
# Generated by Django 2.2 on 2021-05-06 07:28

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='csvfile',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('v1', models.FloatField()),
                ('v2', models.FloatField()),
                ('v3', models.FloatField()),
                ('v4', models.FloatField()),
                ('v5', models.FloatField()),
                ('v6', models.FloatField()),
                ('v7', models.FloatField()),
                ('v8', models.FloatField()),
                ('v9', models.FloatField()),
                ('v10', models.FloatField()),
                ('v11', models.FloatField()),
                ('v12', models.FloatField()),
                ('v13', models.FloatField()),
                ('v14', models.FloatField()),
                ('v15', models.FloatField()),
                ('v16', models.FloatField()),
                ('v17', models.FloatField()),
                ('v18', models.FloatField()),
                ('v19', models.FloatField()),
                ('v20', models.FloatField()),
                ('v21', models.FloatField()),
                ('v22', models.FloatField()),
                ('v23', models.FloatField()),
                ('v24', models.FloatField()),
                ('v25', models.FloatField()),
                ('v26', models.FloatField()),
                ('v27', models.FloatField()),
                ('v28', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='us_data',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=100)),
                ('password', models.CharField(max_length=200)),
                ('age', models.IntegerField()),
                ('gentder', models.CharField(max_length=100)),
                ('dob', models.CharField(max_length=250)),
                ('state', models.CharField(max_length=250)),
                ('country', models.CharField(max_length=250)),
            ],
        ),
    ]
|
[
"90727490+vismaya1999@users.noreply.github.com"
] |
90727490+vismaya1999@users.noreply.github.com
|
cf7302c368d59a41fe3b12a16f5001efce0f9cc7
|
d93d0c40b1099961806093d37933530185d6303c
|
/jobs/migrations/0006_auto_20171015_0136.py
|
ff58f6a0a8d6dcc596a639dbf12ad86003af8ca9
|
[] |
no_license
|
mindBenders1/infyjobs-backend
|
52881d38684d0fe2892dde38ff5d8ba223fd226e
|
de52f48358ea260f99799e20bde50f9799e4290f
|
refs/heads/master
| 2021-07-13T08:23:17.451274
| 2017-10-15T06:41:45
| 2017-10-15T06:41:45
| 106,837,666
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-15 01:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0005_auto_20171015_0136'),
]
operations = [
migrations.AlterField(
model_name='createjob',
name='marks_required',
field=models.IntegerField(null=True),
),
]
|
[
"pal.vishal41@gmail.com"
] |
pal.vishal41@gmail.com
|
1d7d27f729c4dd8cd00c766f664b766d6bb795e4
|
473ff17a5b9d6526a653a27a9aaa3f01f05f3d27
|
/workshopsit/manage.py
|
30530222835804f2e8b27052d957b96ef9de474c
|
[] |
no_license
|
anagha-praveen/Django-workshop
|
a6c944de10c5151c0f0cc4b736175cf597de9723
|
acc1719123499a942fa62bf621217a10f12282b8
|
refs/heads/master
| 2020-03-19T15:56:41.575773
| 2018-06-09T11:04:57
| 2018-06-09T11:04:57
| 136,692,761
| 0
| 1
| null | 2019-10-26T06:01:11
| 2018-06-09T05:11:28
|
Python
|
UTF-8
|
Python
| false
| false
| 543
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "workshopsit.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"root@13cpu0168L.amritavidya.edu"
] |
root@13cpu0168L.amritavidya.edu
|
10960bc50ae00a433f9e2abc2c7241c9ec902041
|
a5cf6698654cb9d2af54d81c2195844c98d8acf3
|
/cd/website/settings.py
|
5341535e181b48083e144076acf7a3da3c4034ae
|
[] |
no_license
|
captaintl/lll
|
e721cd8213cdcb7a7676e77aef2429c0d535585d
|
6a30503365caf11c64d4c4ffdeb42fa0f98e14c0
|
refs/heads/master
| 2021-01-10T12:58:57.470321
| 2015-11-15T00:58:41
| 2015-11-15T00:58:41
| 46,198,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,986
|
py
|
"""
Django settings for website project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y$lx+!l8(a__+nm=$682hilv1p&iyxuwj6d#=3ss%pujqw*^z#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
TEMPLATE_DIRS = (
'./addr_book/',
)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'addr_book',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'website.urls'
WSGI_APPLICATION = 'website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
#lala
|
[
"314802708@qq.com"
] |
314802708@qq.com
|
6194798040b0c19cfcc3216d87ba8f8fbab67590
|
4a8dd5bc769f7e7733558d036533520d15c40974
|
/gs_decentralized/Specialized/Maya/gs_assets/gs_scripts/Common/gs_NonDefHistCheck_class.py
|
989de8c289eeaf1756f26c896fddaeb49d562810
|
[] |
no_license
|
sadams115/Grindstone
|
6794cd16940a5c361b93091124558e497bb3aac9
|
cc5809f35bccd15a3008cd0ca5a933375f54902d
|
refs/heads/master
| 2021-01-21T12:12:10.779455
| 2018-05-04T19:19:11
| 2018-05-04T19:19:11
| 102,050,711
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,099
|
py
|
# Grindstone gs_NonDefHistCheck_class.py
# Authors: Sam Carnes and Sean Adams
# This file scans every DAG object in the scene and checks for any non-deformer history.
# The check returns a warning message string when non-deformer history is found (empty string otherwise)
import pymel.core as pm
import maya.cmds as cmds
class NonDefHistCheck:
#********** INIT **********#
def __init__(self):
# identify whether or not the script has an auto-fix function
self.hasFix = True
# identify what this check is called
self.scriptName = "Non-deformer history"
# provides a label for the button that executes the auto-fix
# NO MORE THAN 20 CHARACTERS
self.fixLabel = "Delete history"
#********** DO CHECK **********#
def doCheck(self):
# Select all DAG objects in the scene and set them to an array
sceneSel = pm.ls(dagObjects = True)
# Set up elements for iterating through the selection
objInd = 0
nonDefTag = ''
for someObj in sceneSel:
nonDefChk = [n for n in sceneSel[objInd].history(il=1,pdo=True) if not isinstance(n, pm.nodetypes.GeometryFilter)] #and not cmds.referenceQuery(n, isNodeReferenced=True)
objInd += 1
if nonDefChk:
nonDefTag = 'Non-Deformer history detected.'
break
return nonDefTag
#********** RUN FIX **********#
# deletes non-deformer history
def runFix(self):
try:
# delete non-deformer history
cmds.bakePartialHistory(allShapes=True, prePostDeformers=True)
return "Non-deformer history deleted."
except:
return "There was a problem deleteing non-deformer history."
#********** RETURN INSTANCE OF SCRIPT CLASS **********#
def getObject():
return NonDefHistCheck()
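# A minimal driving sketch (hypothetical; the actual Grindstone runner is not part of this
# file): the framework presumably obtains the check via getObject(), calls doCheck(), and
# exposes runFix() behind the fixLabel button when a non-empty message comes back.
if __name__ == "__main__":
    checker = getObject()
    result = checker.doCheck()
    if result and checker.hasFix:
        print(checker.scriptName + ": " + result)
        print(checker.runFix())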
|
[
"noreply@github.com"
] |
sadams115.noreply@github.com
|
6a5589892bc9da0e9e5a16a04da8e397c39bfd69
|
7d776e7d8211a118f5b666fdfd4b6ac05fedc23f
|
/dqn-LunarLander/ll-v2.py
|
1c46b70a1a64d29712fa3cc18e772572e22a3ca4
|
[] |
no_license
|
srikanth-kilaru/rl-projects
|
12f16bad24c7081eb12055ca7d6bdfc77f31c5a2
|
cf3b5c20e2007545a13eb39d186d22940196ec73
|
refs/heads/master
| 2020-04-11T13:50:23.458816
| 2019-01-07T00:54:21
| 2019-01-07T00:54:21
| 161,831,743
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,063
|
py
|
import gym
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers.embeddings import Embedding
from keras.optimizers import RMSprop, Adam
import random
from collections import deque
import matplotlib.pyplot as plt
from keras import backend as K
from IPython import display
from pylab import rcParams
import warnings
rcParams['figure.figsize'] = 10,5
#%matplotlib inline
#%matplotlib notebook
class DeepQNet:
def __init__(self,
simulator,
model_params,
use_target,
target_update_freq,
gamma,
eps_init,
eps_min,
eps_decay,
batch_size,
min_samples,
memory_size):
self.simulator = simulator
self.state_dim = simulator.observation_space.shape[0]
self.num_actions = simulator.action_space.n
self.model = self.create_model(model_params)
self.use_target = use_target
if self.use_target:
self.target_update_freq = target_update_freq
self.target_model = self.create_model(model_params)
self.eps = eps_init
self.eps_min = eps_min
self.eps_decay = eps_decay
self.gamma = gamma
self.batch_size = batch_size
self.min_samples = min_samples
self.memory = deque(maxlen=memory_size)
self.steps = 0
def create_model(self, params):
layers = params['layers']
hidden_activation = 'relu'
final_activation = 'linear'
model = Sequential()
model.add(Dense(layers[0], input_dim=self.state_dim,
activation=hidden_activation))
for i in layers[1:]:
model.add(Dense(i, activation=hidden_activation))
model.add(Dense(self.num_actions, activation=final_activation))
model.compile(loss=params['loss'], optimizer=params['optimizer'])
model.summary()
return model
def choose_action(self, state, force_random=False):
if force_random or random.random() < self.eps:
action = random.randrange(self.num_actions)
else:
action = np.argmax(self.model.predict(state)[0])
return action
def record(self, state, action, next_state, reward, done):
self.memory.append((state, action, next_state, reward, done))
self.steps += 1
if done:
self.eps = max(self.eps_min, self.eps_decay*self.eps)
if self.use_target and self.steps % self.target_update_freq == 0:
self.target_model.set_weights(self.model.get_weights())
def run_episode(self):
state = self.simulator.reset()
state = state.reshape((1,-1))
done = False
steps = 0
reward = 0
while not done:
steps += 1
action = self.choose_action(state)
next_state, r, done, _ = self.simulator.step(action)
next_state = next_state.reshape((1,-1))
reward += r
self.record(state, action, next_state, r, done)
self.train()
state = next_state
return reward
def train(self):
if len(self.memory) < self.min_samples:
return 0
batch = random.sample(self.memory, self.batch_size)
states, actions, next_states, rewards, done = zip(*batch)
states = np.asarray(states).reshape((self.batch_size, -1))
next_states = np.asarray(next_states).reshape((self.batch_size, -1))
q_model = self.model
nq_model = self.target_model if self.use_target else self.model
q = q_model.predict(states)
nq = nq_model.predict(next_states)
targets = np.asarray(rewards)
for i, d in enumerate(done):
if not d:
targets[i] += self.gamma*np.amax(nq[i])
y = q
for i, a in enumerate(actions):
y[i, a] = targets[i]
X = states
return self.model.fit(X, y, epochs=1, verbose=0).history['loss']
def run_exp(dqn, max_episodes, long=100, short=5, early_stop=200):
fig,ax = plt.subplots()
ax.clear()
rewards = []
avg_rewards_long = []
avg_rewards_short =[]
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
for i in range(max_episodes):
reward = dqn.run_episode()
rewards.append(reward)
avg_rewards_long.append(np.mean(rewards[-long:]))
avg_rewards_short.append(np.mean(rewards[-short:]))
ax.plot(np.arange(len(rewards)), rewards, color='black',
linewidth=0.5)
ax.plot(np.arange(len(avg_rewards_short)),
avg_rewards_short, color='orange')
ax.plot(np.arange(len(avg_rewards_long)),
avg_rewards_long, color='blue')
ax.set_title(f'Ep {i + 1}/{max_episodes}, Rewards = {int(reward)}/{int(avg_rewards_short[-1])}/{int(avg_rewards_long[-1])}')
fig.canvas.draw()
if avg_rewards_long[-1] >= early_stop:
return True
return False
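# Note: despite its name, hubert_loss below is the pseudo-Huber loss,
# mean(sqrt(1 + err^2) - 1), a smooth approximation of the Huber loss that grows
# roughly linearly for large errors and is therefore less outlier-sensitive than MSE.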
def hubert_loss(y_true, y_pred):
err = y_pred - y_true
return K.mean( K.sqrt(1+K.square(err))-1, axis=-1 )
if __name__ == '__main__':
max_episodes = 1000
# success = False
# while not success:
model_params = {
'loss': hubert_loss,
'optimizer' : Adam(lr=0.0005),
'layers': [128, 128]
}
env = gym.make('LunarLander-v2')
dqn = DeepQNet(env,
model_params=model_params,
use_target=True,
target_update_freq=500,
gamma=0.99,
eps_init=1.0,
eps_min=0,
eps_decay=0.98,
batch_size=32,
min_samples=1000,
memory_size=500000)
success = run_exp(dqn, max_episodes, early_stop=200)
test_rewards = []
max_simulations = 1000
print('Running simulations')
for i in range(max_simulations):
print(f'Running test simulation {i} of {max_simulations}...', end='\r')
state = env.reset().reshape((1,-1))
done = False
reward = 0
while not done:
env.render()
action = dqn.choose_action(state)
next_state, r, done, _ = env.step(action)
state = next_state.reshape((1,-1))
reward += r
test_rewards.append(reward)
'''
print()
n, bins, patches = plt.hist(np.asarray(test_rewards), 100)
plt.show()
np.mean(test_rewards)
'''
|
[
"srikilaru@gmail.com"
] |
srikilaru@gmail.com
|
d0de8044c39efd2d7705999f0cdd891d7b72b4ed
|
690491a5adfaccd6be1c9ef54e3b7da779550249
|
/utils/dm_test.py
|
dfb03976b29cd326184bdb2b91f5e53298ace296
|
[] |
no_license
|
lex-koelewijn/Thesis_Asset_Price_Forecasting
|
34aa4742c58bc387a4baf1cf2c9da2ab9c3525b2
|
ce09153316c628ef777fddbab3af48e644368197
|
refs/heads/main
| 2023-04-25T03:44:35.680328
| 2021-04-28T14:06:01
| 2021-04-28T14:06:01
| 344,453,837
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,825
|
py
|
# Author : John Tsang
# Date : December 7th, 2017
# Purpose : Implement the Diebold-Mariano Test (DM test) to compare
# forecast accuracy
# Input : 1) actual_lst: the list of actual values
# 2) pred1_lst : the first list of predicted values
# 3) pred2_lst : the second list of predicted values
# 4) h : the number of steps ahead
# 5) crit : a string specifying the criterion
# i) MSE : the mean squared error
# ii) MAD : the mean absolute deviation
# iii) MAPE : the mean absolute percentage error
# iv) poly : use power function to weigh the errors
# 6) poly : the power for crit power
# (it is only meaningful when crit is "poly")
# Condition: 1) length of actual_lst, pred1_lst and pred2_lst is equal
# 2) h must be an integer and it must be greater than 0 and less than
# the length of actual_lst.
# 3) crit must take the 4 values specified in Input
# 4) Each value of actual_lst, pred1_lst and pred2_lst must
# be numerical values. Missing values will not be accepted.
# 5) power must be a numerical value.
# Return : a named-tuple of 2 elements
# 1) p_value : the p-value of the DM test
# 2) DM : the test statistics of the DM test
##########################################################
# References:
#
# Harvey, D., Leybourne, S., & Newbold, P. (1997). Testing the equality of
# prediction mean squared errors. International Journal of forecasting,
# 13(2), 281-291.
#
# Diebold, F. X. and Mariano, R. S. (1995), Comparing predictive accuracy,
# Journal of business & economic statistics 13(3), 253-264.
#
##########################################################
def dm_test(actual_lst, pred1_lst, pred2_lst, h = 1, crit="MSE", power = 2):
# Routine for checking errors
def error_check():
rt = 0
msg = ""
# Check if h is an integer
if (not isinstance(h, int)):
rt = -1
msg = "The type of the number of steps ahead (h) is not an integer."
return (rt,msg)
# Check the range of h
if (h < 1):
rt = -1
msg = "The number of steps ahead (h) is not large enough."
return (rt,msg)
len_act = len(actual_lst)
len_p1 = len(pred1_lst)
len_p2 = len(pred2_lst)
# Check if lengths of actual values and predicted values are equal
if (len_act != len_p1 or len_p1 != len_p2 or len_act != len_p2):
rt = -1
msg = "Lengths of actual_lst, pred1_lst and pred2_lst do not match."
return (rt,msg)
# Check range of h
if (h >= len_act):
rt = -1
msg = "The number of steps ahead is too large."
return (rt,msg)
# Check if criterion supported
if (crit != "MSE" and crit != "MAPE" and crit != "MAD" and crit != "poly"):
rt = -1
msg = "The criterion is not supported."
return (rt,msg)
# Check if every value of the input lists are numerical values
from re import compile as re_compile
comp = re_compile(r"^\d+?\.\d+?$")
def compiled_regex(s):
""" Returns True is string is a number. """
# if comp.match(s) is None:
# return s.isdigit()
return True
for actual, pred1, pred2 in zip(actual_lst, pred1_lst, pred2_lst):
is_actual_ok = compiled_regex(str(abs(actual)))
is_pred1_ok = compiled_regex(str(abs(pred1)))
is_pred2_ok = compiled_regex(str(abs(pred2)))
if (not (is_actual_ok and is_pred1_ok and is_pred2_ok)):
msg = "An element in the actual_lst, pred1_lst or pred2_lst is not numeric."
rt = -1
return (rt,msg)
return (rt,msg)
# Error check
error_code = error_check()
# Raise error if cannot pass error check
if (error_code[0] == -1):
raise SyntaxError(error_code[1])
return
# Import libraries
from scipy.stats import t
import collections
import pandas as pd
import numpy as np
# Initialise lists
e1_lst = []
e2_lst = []
d_lst = []
# convert every value of the lists into real values
actual_lst = pd.Series(actual_lst).apply(lambda x: float(x)).tolist()
pred1_lst = pd.Series(pred1_lst).apply(lambda x: float(x)).tolist()
pred2_lst = pd.Series(pred2_lst).apply(lambda x: float(x)).tolist()
# Length of lists (as real numbers)
T = float(len(actual_lst))
# construct d according to crit
if (crit == "MSE"):
for actual,p1,p2 in zip(actual_lst,pred1_lst,pred2_lst):
e1_lst.append((actual - p1)**2)
e2_lst.append((actual - p2)**2)
for e1, e2 in zip(e1_lst, e2_lst):
d_lst.append(e1 - e2)
elif (crit == "MAD"):
for actual,p1,p2 in zip(actual_lst,pred1_lst,pred2_lst):
e1_lst.append(abs(actual - p1))
e2_lst.append(abs(actual - p2))
for e1, e2 in zip(e1_lst, e2_lst):
d_lst.append(e1 - e2)
elif (crit == "MAPE"):
for actual,p1,p2 in zip(actual_lst,pred1_lst,pred2_lst):
e1_lst.append(abs((actual - p1)/actual))
e2_lst.append(abs((actual - p2)/actual))
for e1, e2 in zip(e1_lst, e2_lst):
d_lst.append(e1 - e2)
elif (crit == "poly"):
for actual,p1,p2 in zip(actual_lst,pred1_lst,pred2_lst):
e1_lst.append(((actual - p1))**(power))
e2_lst.append(((actual - p2))**(power))
for e1, e2 in zip(e1_lst, e2_lst):
d_lst.append(e1 - e2)
# Mean of d
mean_d = pd.Series(d_lst).mean()
# Find autocovariance and construct DM test statistics
def autocovariance(Xi, N, k, Xs):
autoCov = 0
T = float(N)
for i in np.arange(0, N-k):
autoCov += ((Xi[i+k])-Xs)*(Xi[i]-Xs)
return (1/(T))*autoCov
gamma = []
for lag in range(0,h):
gamma.append(autocovariance(d_lst,len(d_lst),lag,mean_d)) # 0, 1, 2
V_d = (gamma[0] + 2*sum(gamma[1:]))/T
DM_stat=V_d**(-0.5)*mean_d
harvey_adj=((T+1-2*h+h*(h-1)/T)/T)**(0.5)
DM_stat = harvey_adj*DM_stat
# Find p-value
# p_value = 2*t.cdf(-abs(DM_stat), df = T - 1) #Two tailed
p_value = t.cdf(-DM_stat, df = T - 1) #one-tailed
# Construct named tuple for return
dm_return = collections.namedtuple('dm_return', 'DM p_value')
rt = dm_return(DM = DM_stat, p_value = p_value)
return rt
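# A minimal usage sketch, assuming two competing forecast series of equal length; the
# numbers below are made up purely to illustrate the call signature and return value.
if __name__ == "__main__":
    actual = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
    model_a = [1.1, 2.2, 2.8, 4.3, 5.1, 5.8, 7.2, 8.1]  # forecasts from model A
    model_b = [1.5, 2.9, 2.1, 4.9, 5.9, 5.0, 8.0, 9.0]  # forecasts from model B
    result = dm_test(actual, model_a, model_b, h=1, crit="MSE")
    # A negative DM statistic with a small p-value favours model A under squared-error loss.
    print(result.DM, result.p_value)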
|
[
"lex.koelewijn@gmail.com"
] |
lex.koelewijn@gmail.com
|
198983b3fa30c1ea7e5fc7b81f8959731a8ec0e7
|
75bc89befe6d0b9ccb16140b153d8467d4cff56a
|
/survey3_ver0825.py
|
105b7d9d12645f512818e785efa2a16f14a68bf6
|
[] |
no_license
|
seongeun827/OTTRS
|
fbf61a22ecdf4088638ed3ac588e3042ec36f27d
|
c962b3e0108f1df738b0672e281fc8d103c6ab42
|
refs/heads/main
| 2023-08-14T17:24:26.389600
| 2021-09-29T03:22:41
| 2021-09-29T03:22:41
| 411,514,726
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,721
|
py
|
import sys
import cx_Oracle
import csv
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class OttApp(QWidget):
def __init__(self, parent):
super().__init__(parent)
self.pp = parent
self.width = 1200
self.height = 800
self.setStyleSheet("background-color: white;")
# self.pp.setWindowTitle("OTTRS Survey")
self.question = QLabel("선호하는 컨텐츠를 선택하세요! 영화를 추천해드리는 데 도움이 됩니다. (3/3)", self)
#currently genre-based, but this is the slot where posters are placed by the algorithm built on the survey1 answers
#written this way because the previous version was genre-based; can be changed
self.answer1 = QLabel("코미디", self) #slot for a poster chosen by the algorithm based on the survey1 answers
self.answer1.setAlignment(Qt.AlignVCenter|Qt.AlignRight) # center alignment
self.answer2 = QLabel("멜로", self)
self.answer2.setAlignment(Qt.AlignVCenter|Qt.AlignRight)
self.answer3 = QLabel("SF/판타지", self)
self.answer3.setAlignment(Qt.AlignVCenter|Qt.AlignRight)
self.answer4 = QLabel("다큐멘터리", self)
self.answer4.setAlignment(Qt.AlignVCenter|Qt.AlignRight)
self.answer5 = QLabel("공연실황", self)
self.answer5.setAlignment(Qt.AlignVCenter|Qt.AlignRight)
self.gotoResultBtn = QPushButton("완료! OTTRS 시작하고""\n""\n""메인화면에서 영화 추천 받기",self)
self.gotoResultBtn.clicked.connect(self.gotoResult)
#pressing the Next button calls self.gotoResult()
self.img1 = QPixmap("E:\dev\python_workspace\img\survey2_poster\img1.jpg")
self.img2 = QPixmap("E:\dev\python_workspace\img\survey2_poster\img2.jpg")
self.img3 = QPixmap("E:\dev\python_workspace\img\survey2_poster\img3.jpg")
self.img4 = QPixmap("E:\dev\python_workspace\img\survey2_poster\img4.jpg")
self.img5 = QPixmap("E:\dev\python_workspace\img\survey2_poster\img5.jpg")
self.img1=self.img1.scaled(190,273,Qt.KeepAspectRatio, Qt.FastTransformation) # resize the posters
self.img2=self.img2.scaled(190,273,Qt.KeepAspectRatio, Qt.FastTransformation)
self.img3=self.img3.scaled(190,273,Qt.KeepAspectRatio, Qt.FastTransformation)
self.img4=self.img4.scaled(190,273,Qt.KeepAspectRatio, Qt.FastTransformation)
self.img5=self.img5.scaled(190,273,Qt.KeepAspectRatio, Qt.FastTransformation)
self.button1 = QRadioButton("트루먼쇼",self)
self.button1.clicked.connect(self.mvbtnClicked) # when the button is clicked, self.mvbtnClicked is called.
self.button2 = QRadioButton("윤희에게",self)
self.button2.clicked.connect(self.mvbtnClicked)
self.button3 = QRadioButton("신과 함께",self)
self.button3.clicked.connect(self.mvbtnClicked)
self.button4 = QRadioButton("말하는""\n""건축가",self)
self.button4.clicked.connect(self.mvbtnClicked)
self.button5 = QRadioButton("레미제라블""\n""뮤지컬""\n""콘서트",self)
self.button5.clicked.connect(self.mvbtnClicked)
self.initUI()
def initUI(self):
self.answer1.setPixmap(self.img1)
self.answer2.setPixmap(self.img2)
self.answer3.setPixmap(self.img3)
self.answer4.setPixmap(self.img4)
self.answer5.setPixmap(self.img5)
grid=QGridLayout()
grid.addWidget(self.boxQ(),0,0,1,3)
grid.addWidget(self.box1(),1,0,3,1)
grid.addWidget(self.box2(),1,1,3,1)
grid.addWidget(self.box3(),1,2,3,1)
grid.addWidget(self.box4(),4,0,3,1)
grid.addWidget(self.box5(),4,1,3,1)
grid.addWidget(self.gotoResultBtn,5,2)
self.setLayout(grid)
self.gotoResultBtn.setEnabled(False) # the "get recommendations" button is disabled by default
# self.gotoResultBtn.setStyleSheet("background-color:rgb(169,169,169)") # disabled button shown in grey
# self.setWindowTitle("영화 취향 선택!")
# self.setGeometry(50,50,1100,730) # position and size of the main window
self.setWindowTitle("OTTRS Survey") # window title
self.question.setStyleSheet(
"color: black;"
"font-family: 'GMARKETSANSMEDIUM';"
"font-size: 25px;")
self.button1.setStyleSheet("color: black;"
"font-family: 'NEXON LV1 GOTHIC OTF';"
"font-weight:bold;"
"font-size: 23px;")
self.button2.setStyleSheet("color: black;"
"font-family: 'NEXON LV1 GOTHIC OTF';"
"font-weight:bold;"
"font-size: 23px;")
self.button3.setStyleSheet("color: black;"
"font-family: 'NEXON LV1 GOTHIC OTF';"
"font-weight:bold;"
"font-size: 23px;")
self.button4.setStyleSheet("color: black;"
"font-family: 'NEXON LV1 GOTHIC OTF';"
"font-weight:bold;"
"font-size: 23px;")
self.button5.setStyleSheet("color: black;"
"font-family: 'NEXON LV1 GOTHIC OTF';"
"font-weight:bold;"
"font-size: 23px;")
self.gotoResultBtn.setStyleSheet("color: black;"
"font-family: 'GMARKETSANSMEDIUM';"
"font-weight:bold;"
"font-size: 20px;"
"background-color: #6E727D;"
)
self.groupboxQ.setStyleSheet("background-color:#ECC165;")
self.show()
def boxQ(self):
self.groupboxQ= QGroupBox()
#place the poster labels
vbox = QVBoxLayout()
vbox.addWidget(self.question)
self.groupboxQ.setLayout(vbox)
return self.groupboxQ
def box1(self):
groupbox= QGroupBox()
hbox= QHBoxLayout()
hbox.addWidget(self.button1)
hbox.addWidget(self.answer1)
groupbox.setLayout(hbox)
return groupbox
def box2(self):
groupbox= QGroupBox()
hbox= QHBoxLayout()
hbox.addWidget(self.button2)
hbox.addWidget(self.answer2)
groupbox.setLayout(hbox)
return groupbox
def box3(self):
groupbox= QGroupBox()
hbox= QHBoxLayout()
hbox.addWidget(self.button3)
hbox.addWidget(self.answer3)
groupbox.setLayout(hbox)
return groupbox
def box4(self):
groupbox= QGroupBox()
hbox= QHBoxLayout()
hbox.addWidget(self.button4)
hbox.addWidget(self.answer4)
groupbox.setLayout(hbox)
return groupbox
def box5(self):
groupbox= QGroupBox()
hbox= QHBoxLayout()
hbox.addWidget(self.button5)
hbox.addWidget(self.answer5)
groupbox.setLayout(hbox)
return groupbox
def mvbtnClicked(self):
if self.button1.isChecked() or self.button2.isChecked() or self.button3.isChecked() or self.button4.isChecked() or self.button5.isChecked():
#if any one of button1~button5 is checked (joined with or)
#enable the "get recommendations" button!
self.gotoResultBtn.setEnabled(True)
self.gotoResultBtn.setStyleSheet("color: black;"
"font-family: 'GMARKETSANSMEDIUM';"
"font-weight:bold;"
"font-size: 20px;"
"background-color: #9aafda;")
def gotoResult(self):
print("다음으로 버튼이 눌림")
survey3 = ""
if self.button1.isChecked():
print("영화1 선택됨")
survey3 = 1
elif self.button2.isChecked():
print("영화2 선택됨")
survey3 = 3
elif self.button3.isChecked():
print("영화3 선택됨")
survey3 = 5
elif self.button4.isChecked():
print("영화4 선택됨")
survey3 = 7
elif self.button5.isChecked():
print("영화5 선택됨")
survey3 = 9
print(self.pp.serGenreList)
print(self.pp.survey2Genre)
print(survey3)
genreList = self.pp.serGenreList
survey2 = self.pp.survey2Genre
genreList[survey2] += 2
genreList[survey3] += 2
print(genreList)
# Action, Comedy, Drama, Romance, Horror / Thriller, SF / Fantasy, Animation, Documentary
sql = """ INSERT INTO USER_GENRE VALUES (OTTRS_USER_SEQ.NEXTVAL,"""
sql += str(genreList[0]) + ","
sql += str(genreList[1]) + ","
sql += str(genreList[2]) + ","
sql += str(genreList[3]) + ","
sql += str(genreList[4]) + ","
sql += str(genreList[5]) + ","
sql += str(genreList[6]) + ","
sql += str(genreList[7]) + ","
sql += str(self.pp.loginUserSeq)
sql += ")"
print(sql)
conn = cx_Oracle.connect("scott", "tigertiger", "orcl.cgnlgvycsnjd.us-east-2.rds.amazonaws.com:1521/orcl")
cur = conn.cursor()
cur.execute(sql)
conn.commit()
conn.close()
self.pp.initMainWidget()
if __name__ == "__main__":
a = QApplication(sys.argv)
e = OttApp()
sys.exit(a.exec_())
|
[
"noreply@github.com"
] |
seongeun827.noreply@github.com
|
bba2c42a8194c48e55d42961683bf776e1fbdeeb
|
b01a590c216ae1e96b0a28cfe99fb6959b2c4b2c
|
/while_loop.py
|
6750498cdb69306daf14b589f52174103934d114
|
[] |
no_license
|
mshkdm/python_basics
|
f4f5237f9b1036f3a039cf5d3a7ee2be6ab60e63
|
33c739bc4ea5ad2f59b82decfbeccd008b6baad9
|
refs/heads/master
| 2022-12-16T22:55:29.875876
| 2017-07-25T08:38:09
| 2017-07-25T08:38:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
i = 0
numbers = []
while i < 6:
print "At the top i is %d" % i
numbers.append(i)
i += 1
print "Numbers now: ", numbers
print "At hte bottom i is %d" % i
print "The numbers: "
for element in numbers:
print element
# 1. Study Drills_1
def while_loop(i):
i = 0
numbers_1 = []
print "At the top i is %d" % i
numbers_1.append(i)
i += 1
print "Numbers now: ", numbers_1
print "At the bottom i is %d" % i
# 2. Study Drills_2
while_loop(2)
while_loop(5)
# 3. Study Drills_3
def while_loop_3(i, increment):
numbers_3 = []
print "At the top i is %d" % i
numbers_3.append(i)
i += increment
print "Numbers now: ", numbers_3
print "At the bottom i is %d" % i
# 4. Study Drills_4
while_loop_3(1, 2)
while_loop_3(0, 1)
# 5. Study Drills_5
def some_new_loop(i, increase_to, increment):
numbers_5 = []
for element_5 in range(i, increase_to, increment):
numbers_5.append(element_5)
print "There are elements: ", numbers_5
some_new_loop(1, 10, 3)
|
[
"mixandoma@ya.ru"
] |
mixandoma@ya.ru
|
3f2dc467cc4233f609afa83c18e40cb0e854ef1b
|
8d16db57a6d3be686f654e51d287ff03ba2fcf78
|
/scripts/motors2.py
|
e9368a082a33a5224058e99e47afff358def2bac
|
[] |
no_license
|
isapanda/pimouse_ros
|
8db8d222060baca59b7f3b0ff35b9a62a6db3b63
|
fae920085d39683ec4b9c6d7d9e3fe44d3ec575b
|
refs/heads/master
| 2023-02-06T13:16:12.326416
| 2020-12-30T12:45:09
| 2020-12-30T12:45:09
| 324,419,190
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,493
|
py
|
#!/usr/bin/env python
#encoding: utf8
import sys, rospy, math
from pimouse_ros.msg import MotorFreqs
from geometry_msgs.msg import Twist
from std_srvs.srv import Trigger, TriggerResponse
class Motor():
def __init__(self):
if not self.set_power(False): sys.exit(1)
rospy.on_shutdown(self.set_power)
self.sub_raw = rospy.Subscriber ('motor_raw', MotorFreqs, self.callback_raw_freq)
self.sub_cmd_vel = rospy.Subscriber('cmd_vel',Twist, self.callback_cmd_vel)
self.srv_on = rospy.Service('motor_on', Trigger,self.callback_on)
self.srv_off = rospy.Service('motor_off', Trigger, self.callback_off)
self.last_time = rospy.Time.now()
self.using_cmd_vel = False
def set_power(self,onoff=False):
en = "/dev/rtmotoren0"
try:
with open(en,'w') as f:
f.write("1\n" if onoff else "0\n")
self.is_on = onoff
return True
except:
rospy.logerr("cannot write to " + en)
return False
def set_raw_freq(self,left_hz,right_hz):
if not self.is_on:
rospy.logerr("not enpowered")
try:
with open("/dev/rtmotor_raw_l0",'w') as lf,\
open("/dev/rtmotor_raw_r0",'w') as rf:
lf.write(str(int(round(left_hz))) + "\n")
rf.write(str(int(round(right_hz))) + "\n")
except:
rospy.logerr("cannot write to rtmotor_raw_*")
def callback_raw_freq(self,message):
self.set_raw_freq(message.left_hz,message.right_hz)
def callback_cmd_vel(self,message):
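# Convert the commanded linear (m/s) and angular (rad/s) velocities into left/right
# stepper frequencies; the constants fold in the wheel radius and tread width
# (assumed here to match the Raspberry Pi Mouse geometry this package targets).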
forward_hz = 80000.0*message.linear.x/(9*math.pi)
rot_hz = 400.0*message.angular.z/math.pi
self.set_raw_freq(forward_hz-rot_hz, forward_hz+rot_hz)
self.using_cmd_vel = True
self.last_time = rospy.Time.now()
def onoff_response(self,onoff):
d = TriggerResponse()
d.success = self.set_power(onoff)
d.message = "ON" if self.is_on else "OFF"
return d
def callback_on(self,message): return self.onoff_response(True)
def callback_off(self,message): return self.onoff_response(False)
if __name__ =='__main__':
rospy.init_node('motors')
m = Motor()
rate = rospy.Rate(10)
while not rospy.is_shutdown():
if m.using_cmd_vel and rospy.Time.now().to_sec() -m.last_time.to_sec() >= 1.0:
m.set_raw_freq(0,0)
m.using_cmd_vel = False
rate.sleep()
|
[
"koro.koro.panda.dokoeiku@gmail.com"
] |
koro.koro.panda.dokoeiku@gmail.com
|
60025ea38f82f5fba4bcc8381d4ab1d16e25f418
|
b542c85b3bf8c8b512831484bea148821c049aa2
|
/webpersonal/users/views.py
|
018eea55ac188f6215be1b5fde5ff6657e26dd6f
|
[] |
no_license
|
franlop24/FlaskWebApp
|
86030dfe7487fcb3a8ed9505667812192295d72d
|
2e9627f7be946cafb2d48c4dd1a52e081819c848
|
refs/heads/master
| 2023-03-05T03:13:52.430229
| 2021-02-19T18:55:45
| 2021-02-19T18:55:45
| 340,461,750
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,561
|
py
|
from flask import render_template, url_for, flash, redirect, request, Blueprint
from flask_login import login_user, current_user, logout_user, login_required
from webpersonal import db
from webpersonal.models import User
from webpersonal.users.forms import RegistrationForm, LoginForm, UpdateUserForm
from webpersonal.users.picture_handler import add_profile_pic
users = Blueprint('users',__name__, template_folder='templates/users')
@users.route('/')
@login_required
def admin():
return render_template('admin.html')
#register
@users.route('/register', methods=['GET','POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data, username=form.username.data,password=form.password.data)
db.session.add(user)
db.session.commit()
flash('Thanks for registration!')
return redirect(url_for('users.login'))
return render_template('register.html', form=form)
#login
@users.route('/login', methods=['GET','POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None:
if user.check_password(form.password.data):
login_user(user)
flash('Log in Success!')
next = request.args.get('next')
if next == None or not next[0]=='/':
next = url_for('core.index')
return redirect(next)
return render_template('login.html', form=form)
#logout
@users.route('/logout')
def logout():
logout_user()
return redirect(url_for('core.index'))
#account(update UserForm)
@users.route('/account', methods=['GET','POST'])
@login_required
def account():
form = UpdateUserForm()
if form.validate_on_submit():
if form.picture.data:
username = current_user.username
pic = add_profile_pic(form.picture.data,username)
current_user.profile_image = pic
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash('User Account Updated!')
return redirect(url_for('users.account'))
elif request.method == "GET":
form.username.data = current_user.username
form.email.data = current_user.email
profile_image = url_for('static', filename='profile_pics/'+current_user.profile_image)
return render_template('account.html', profile_image=profile_image, form=form)
|
[
"franlopbri@gmail.com"
] |
franlopbri@gmail.com
|
3d28d52207a9bdd435b87599f365610d8f297c19
|
60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24
|
/IronPythonStubs/release/stubs.min/System/Net/__init___parts/AuthenticationSchemeSelector.py
|
6e9918078e9d0b7039aa183589532641d5b137c0
|
[
"MIT"
] |
permissive
|
shnlmn/Rhino-Grasshopper-Scripts
|
a9411098c5d1bbc55feb782def565d535b27b709
|
0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823
|
refs/heads/master
| 2020-04-10T18:59:43.518140
| 2020-04-08T02:49:07
| 2020-04-08T02:49:07
| 161,219,695
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,790
|
py
|
class AuthenticationSchemeSelector(MulticastDelegate,ICloneable,ISerializable):
"""
Selects the authentication scheme for an System.Net.HttpListener instance.
AuthenticationSchemeSelector(object: object,method: IntPtr)
"""
def BeginInvoke(self,httpRequest,callback,object):
""" BeginInvoke(self: AuthenticationSchemeSelector,httpRequest: HttpListenerRequest,callback: AsyncCallback,object: object) -> IAsyncResult """
pass
def CombineImpl(self,*args):
"""
CombineImpl(self: MulticastDelegate,follow: Delegate) -> Delegate
Combines this System.Delegate with the specified System.Delegate to form a new delegate.
follow: The delegate to combine with this delegate.
Returns: A delegate that is the new root of the System.MulticastDelegate invocation list.
"""
pass
def DynamicInvokeImpl(self,*args):
"""
DynamicInvokeImpl(self: Delegate,args: Array[object]) -> object
Dynamically invokes (late-bound) the method represented by the current delegate.
args: An array of objects that are the arguments to pass to the method represented by the current
delegate.-or- null,if the method represented by the current delegate does not require
arguments.
Returns: The object returned by the method represented by the delegate.
"""
pass
def EndInvoke(self,result):
""" EndInvoke(self: AuthenticationSchemeSelector,result: IAsyncResult) -> AuthenticationSchemes """
pass
def GetMethodImpl(self,*args):
"""
GetMethodImpl(self: MulticastDelegate) -> MethodInfo
Returns a static method represented by the current System.MulticastDelegate.
Returns: A static method represented by the current System.MulticastDelegate.
"""
pass
def Invoke(self,httpRequest):
""" Invoke(self: AuthenticationSchemeSelector,httpRequest: HttpListenerRequest) -> AuthenticationSchemes """
pass
def RemoveImpl(self,*args):
"""
RemoveImpl(self: MulticastDelegate,value: Delegate) -> Delegate
Removes an element from the invocation list of this System.MulticastDelegate that is equal to
the specified delegate.
value: The delegate to search for in the invocation list.
Returns: If value is found in the invocation list for this instance,then a new System.Delegate without
value in its invocation list; otherwise,this instance with its original invocation list.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,object,method):
""" __new__(cls: type,object: object,method: IntPtr) """
pass
def __reduce_ex__(self,*args):
pass
|
[
"magnetscoil@gmail.com"
] |
magnetscoil@gmail.com
|
00f52ec703752465b4057e361479299471343355
|
b4d9642fc50f41f9c09890ff00051d7734d9329e
|
/Raster-processing-R-bash-Python/reclassify_ocean_bathymetry.py
|
e54e7978833d3240e28643f6225fda8863010a8a
|
[] |
no_license
|
pumiko/studies-and-training
|
70ab64046060564e3338f3ae82e3405a596947db
|
a820abf9ee2fc37efdc1fd5bccaf12026d89d6b7
|
refs/heads/master
| 2021-01-21T09:56:10.267055
| 2017-03-12T19:33:11
| 2017-03-12T19:33:11
| 83,355,411
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
import arcpy
import numpy as np
from arcpy import env
from arcpy.sa import *
env.workspace = "path/to/folder"
start_ocean = np.arange(-11000,-9,10)
end_ocean = np.arange(-10990,1,10)
labels = np.arange(11000,0,-10)
array = np.column_stack((start_ocean,end_ocean,labels))
list_reclass_values = array.tolist()
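# Each row of the reclass table is [lower_bound, upper_bound, new_value]: depths from
# -11000 m up to 0 m are binned in 10 m steps and relabelled with their positive depth.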
Reclass_etopo_proba = Reclassify("etopo_oceany.tif", "VALUE", RemapRange(list_reclass_values), "NODATA")
|
[
"stec.magda@gmail.com"
] |
stec.magda@gmail.com
|
0176be918eb53c53becc78acdc3cb99103d62b87
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-mrsp.0/mrsp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=35/params.py
|
39b4dc35f69e50caff552d3f745c596e740bc5a0
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
{'cpus': 4,
'duration': 30,
'final_util': '3.618762',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 35,
'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
83d83a4abbfcae98b346764470f7f219f49ae4ef
|
24a56351c1417fbe126c74a22294381e32939006
|
/main.py
|
5660598d45947d591472543190d3a3aa3a438013
|
[] |
no_license
|
BhanuPrakashNani/Gesture-Detection
|
0265be86d09b64cf524d8720367c3c8a7bd7566e
|
0372efbdb54534cf92583b0771b101f584d3b605
|
refs/heads/master
| 2022-09-01T06:08:17.884212
| 2020-05-23T20:05:10
| 2020-05-23T20:05:10
| 267,622,010
| 0
| 0
| null | 2020-05-28T15:09:30
| 2020-05-28T15:09:29
| null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
from flask import Flask, render_template, Response
from recognize import VideoCamera
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
def gen(camera):
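# Generator yielding an MJPEG stream: every frame is wrapped in a multipart section
# whose "--frame" boundary matches the boundary declared in the /video_feed mimetype.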
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@app.route('/video_feed')
def video_feed():
return Response(gen(VideoCamera()),mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host='0.0.0.0',port='5000', debug=True)
|
[
"bvsabhishek@gmail.com"
] |
bvsabhishek@gmail.com
|
f6dee28a3ed4f81785df8e616bcd458485360a96
|
8a7b0e4aea78be10c08fe8e1ac98368f8bfdbae8
|
/EvaMap/Metrics/metric.py
|
3b3ac96e029477693c2e7cb3c0511c7aaa122110
|
[
"MIT"
] |
permissive
|
benj-moreau/EvaMap
|
feebc392e79eaedf0f9f171ef5aeb8f10bf3a243
|
42e616abe9f15925b885797d30496e30615989a0
|
refs/heads/master
| 2022-04-05T19:15:33.909365
| 2020-03-13T10:11:23
| 2020-03-13T10:11:23
| 198,458,252
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
def metric(onto=None, map=None, g_map=None, data=None):
return {
'name': None,
'feedbacks': [],
'score': 0
}
|
[
"benji_moreau@hotmail.fr"
] |
benji_moreau@hotmail.fr
|
81be67e31cb3e6f6890f31444e7794c51ff1f2c2
|
bae7ff2c12358cc053c600841b1f43008445dfa7
|
/rules/cnv.smk
|
6ab64b444de05390eda9df568226890e778b18ca
|
[] |
no_license
|
joys8998/bioinformatics-joy
|
bcd5393b927a59ac0e8365b1d88134f2e75e6d66
|
885dc9a043657186b35e403c2b4f626f329e1fdc
|
refs/heads/master
| 2021-07-23T12:06:10.736193
| 2021-01-14T22:46:17
| 2021-01-14T22:46:17
| 236,940,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,256
|
smk
|
rule process_intervals:
input:
ref="resources/genome.fa",
intervals="resources/calling_regions.hg38.interval_list"
params:
bin_len=0,
merging_rule="OVERLAPPING_ONLY",
tmp_dir=config["tmp_dir"]
output:
interval_preprocessed="sandbox/calling_regions.preprocessed.interval_list"
shell:
"""gatk PreprocessIntervals -L {input.intervals} \
-R {input.ref} --bin-length {params.bin_len} \
--interval-merging-rule {params.merging_rule} -O {output.interval_preprocessed} \
--tmp-dir {params.tmp_dir} && rm -r {params.tmp_dir}*"""
rule collect_read_counts:
input:
bam="recal/{sample}-{unit}-{condition}.bam"
params:
intervals="sandbox/calling_regions.preprocessed.interval_list",
merging_rule="OVERLAPPING_ONLY",
tmp_dir=config["tmp_dir"]
output:
read_count="sandbox/{sample}-{unit}-{condition}.counts.hdf5"
shell:
"""gatk CollectReadCounts -I {input.bam} \
-L {params.intervals} \
--interval-merging-rule {params.merging_rule} -O {output.read_count} \
--tmp-dir {params.tmp_dir} && rm -r {params.tmp_dir}*"""
rule create_read_count_pon:
input:
counts=expand("sandbox/{u.sample}-{u.unit}-normal.counts.hdf5", u=units.itertuples())
params:
i=lambda wildcards, input: ['-I ' + d for d in input.counts],
tmp_dir=config["tmp_dir"],
mimp=5.0
output:
pon_hdf5="sandbox/cnv.pon.hdf5"
shell:
"""gatk CreateReadCountPanelOfNormals --java-options '-Xmx6500m' {params.i} \
--minimum-interval-median-percentile {params.mimp} \
-O {output.pon_hdf5} --tmp-dir {params.tmp_dir} && rm -r {params.tmp_dir}*
"""
rule annotate_intervals:
input:
ref="resources/genome.fa"
params:
intervals="sandbox/calling_regions.preprocessed.interval_list",
merging_rule="OVERLAPPING_ONLY",
tmp_dir=config["tmp_dir"]
output:
annotated_intervals="sandbox/calling_regions_annotated_intervals.tsv"
shell:
"""gatk AnnotateIntervals -R {input.ref} \
-L {params.intervals} --interval-merging-rule {params.merging_rule} \
-O {output.annotated_intervals} \
--tmp-dir {params.tmp_dir} && rm -r {params.tmp_dir}*"""
rule denoise_read_count:
input:
annotated_intervals="sandbox/calling_regions_annotated_intervals.tsv",
pon_hdf5="sandbox/cnv.pon.hdf5",
read_count="sandbox/{sample}-{unit}-{condition}.counts.hdf5"
params:
tmp_dir=config["tmp_dir"]
output:
std_copy_ratios="sandbox/{sample}-{unit}-{condition}.standardizedCR.tsv",
denoised_copy_ratios="sandbox/{sample}-{unit}-{condition}.denoisedCR.tsv"
shell:
"""gatk DenoisereadCounts -I {input.read_count} \
--denoised-copy-ratios {output.denoised_copy_ratios} --standardized-copy-ratios {output.std_copy_ratios} \
--annotated-intervals {input.annotated_intervals} --tmp-dir {params.tmp_dir} --java-options '-Xmx12g' \
&& rm -r {params.tmp_dir}*"""
rule install_r_dependencies:
output:
"end_r.txt"
conda:
"../envs/r_plot.yml"
shell:
"""Rscript scripts/install_R_packages.R && 'r installation ended' > {output}"""
rule plot_denoised_copy_ratios:
input:
end_r="end_r.txt",
std_copy_ratios="sandbox/{sample}-{unit}-{condition}.standardizedCR.tsv",
denoised_copy_ratios="sandbox/{sample}-{unit}-{condition}.denoisedCR.tsv",
dict="resources/genome.dict"
params:
min_contig_len=46709983,
tmp_dir=config["tmp_dir"],
out_prefix="{sample}-{unit}-{condition}"
output:
dir=directory("sandbox/plots"),
png="sandbox/plots/{sample}-{unit}-{condition}.denoised.png"
shell:
"""rm {input.end_r} && gatk PlotDenoisedCopyRatios --standardized-copy-ratios {input.std_copy_ratios} \
--denoised-copy-ratios {input.denoised_copy_ratios} \
--sequence-dictionary {input.dict} --minimum-contig-length {params.min_contig_len} \
--output {output.dir} --output-prefix {params.out_prefix} \
--tmp-dir {params.tmp_dir} && rm -r {params.tmp_dir}*"""
|
[
"bordinijoy@gmail.com"
] |
bordinijoy@gmail.com
|
408acd829bd8489aabb3dea183c99d57896ec2ce
|
efc3bf4f88a2bfc885de5495c87433d345b54429
|
/ZOJ/3191.py
|
a11237f5b2af1c8cc1d724abc0dcc77bd51022aa
|
[] |
no_license
|
calvinxiao/Algorithm-Solution
|
26ff42cc26aaca87a4706b82a325a92829878552
|
afe254a4efa779598be8a82c5c5bcfcc94f80272
|
refs/heads/master
| 2016-09-05T21:08:35.852486
| 2015-08-23T15:13:23
| 2015-08-23T15:13:23
| 20,149,077
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
#Problem ID: 3191
#Submit Time: 2013-04-24 10:15:49
#Run Time: 10
#Run Memory: 320
#ZOJ User: calvinxiao
import sys
def getline():
return sys.stdin.readline()
def getint():
return input()
def getints():
return map(int, raw_input().split())
def getlist():
return raw_input().split()
#sys.stdin = open("0.in", "r")
while 1:
n = getint()
if n == -1:
break
now = 3
while n > 29:
n -= 30
now -= 1
if now < 0:
now += 12
if n == 0:
print "Exactly %d o'clock" % now
else:
before = now - 1
if before < 0:
before += 12
print "Between %d o'clock and %d o'clock" % (before, now)
|
[
"calvin.xiao@scaurugby.com"
] |
calvin.xiao@scaurugby.com
|
4f6f51d8e40d117b7c63ae6a13477f753f0bc66e
|
e970324a60eaa413532cf848b1650361ab7f3702
|
/mopidy_nfcread/nfc/clf/arygon.py
|
5ad9239e161887bb2b3c35e41deb952a42269d70
|
[
"Apache-2.0"
] |
permissive
|
gefangenimnetz/mopidy-nfcread
|
67141238c07c78d8108bb53ef92d76f7594fbcbb
|
268bea060a204f7fb3df266882228c65055481a3
|
refs/heads/master
| 2021-01-10T11:15:16.431108
| 2015-11-08T19:44:17
| 2015-11-08T19:44:17
| 45,315,318
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,682
|
py
|
# -*- coding: latin-1 -*-
# -----------------------------------------------------------------------------
# Copyright 2009-2015 Stephen Tiedemann <stephen.tiedemann@gmail.com>
#
# Licensed under the EUPL, Version 1.1 or - as soon they
# will be approved by the European Commission - subsequent
# versions of the EUPL (the "Licence");
# You may not use this work except in compliance with the
# Licence.
# You may obtain a copy of the Licence at:
#
# http://www.osor.eu/eupl
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
# -----------------------------------------------------------------------------
#
# Driver for the Arygon contactless reader with USB serial interface
#
import logging
log = logging.getLogger(__name__)
import os
import sys
import time
import errno
from . import pn531
from . import pn532
class ChipsetA(pn531.Chipset):
def write_frame(self, frame):
self.transport.write("2" + frame)
class DeviceA(pn531.Device):
def close(self):
self.chipset.transport.tty.write("0au") # device reset
self.chipset.close()
self.chipset = None
class ChipsetB(pn532.Chipset):
def write_frame(self, frame):
self.transport.write("2" + frame)
class DeviceB(pn532.Device):
def close(self):
self.chipset.transport.tty.write("0au") # device reset
self.chipset.close()
self.chipset = None
def init(transport):
transport.open(transport.port, 115200)
transport.tty.write("0av") # read version
response = transport.tty.readline()
if response.startswith("FF00000600V"):
log.debug("Arygon Reader AxxB Version %s", response[11:].strip())
transport.tty.timeout = 0.5
transport.tty.write("0at05")
if transport.tty.readline().startswith("FF0000"):
log.debug("MCU/TAMA communication set to 230400 bps")
transport.tty.write("0ah05")
if transport.tty.readline().startswith("FF0000"):
log.debug("MCU/HOST communication set to 230400 bps")
transport.tty.baudrate = 230400
transport.tty.timeout = 0.1
time.sleep(0.1)
chipset = ChipsetB(transport, logger=log)
device = DeviceB(chipset, logger=log)
device._vendor_name = "Arygon"
device._device_name = "ADRB"
return device
transport.open(transport.port, 9600)
transport.tty.write("0av") # read version
response = transport.tty.readline()
if response.startswith("FF00000600V"):
log.debug("Arygon Reader AxxA Version %s", response[11:].strip())
transport.tty.timeout = 0.5
transport.tty.write("0at05")
if transport.tty.readline().startswith("FF0000"):
log.debug("MCU/TAMA communication set to 230400 bps")
transport.tty.write("0ah05")
if transport.tty.readline().startswith("FF0000"):
log.debug("MCU/HOST communication set to 230400 bps")
transport.tty.baudrate = 230400
transport.tty.timeout = 0.1
time.sleep(0.1)
chipset = ChipsetA(transport, logger=log)
device = DeviceA(chipset, logger=log)
device._vendor_name = "Arygon"
device._device_name = "ADRA"
return device
raise IOError(errno.ENODEV, os.strerror(errno.ENODEV))
|
[
"kontakt@florian-kutschera.de"
] |
kontakt@florian-kutschera.de
|
85523e076c2a9670337e202fd9513822ccf7cf71
|
99f3bbb1b097d640f4518eab940587e7282bcad4
|
/problem7.py
|
de5b2d64b2498d159ddad31b0a5aa7e12d00b0d8
|
[] |
no_license
|
MariinoS/projectEuler
|
f2a5f59602944c21425ebcda3ec4c098ae2ee6de
|
4a9f3bb35ad1ed76d18e077a59cac254e526d3ad
|
refs/heads/master
| 2021-01-10T16:17:31.610231
| 2016-02-20T23:33:50
| 2016-02-20T23:33:50
| 51,226,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 789
|
py
|
# Project Euler: Problem 7 Source Code. By MariinoS. 7th Feb 2016.
# Task: By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13,
# we can see that the 6th prime is 13.
# What is the 10 001st prime number?
#
# My Solution:
def is_prime(x):
if x > 1:
for n in range(2, x):
if x % n == 0:
return False
break
else:
return True
else:
return False
def nth_prime(x):
prime = 1
number = 1
true = 0
while true < x:
if is_prime(number) == True:
prime = number
number += 1
true += 1
else:
number += 1
return prime
print nth_prime(10001)
# This script finishes in 126.795s.
# The answer = 104743
|
[
"MarinoSoro@MacBook-Pro-van-Marino.local"
] |
MarinoSoro@MacBook-Pro-van-Marino.local
|
36b46d502efc50b478c07c3614993b5e52cf6123
|
b5a127b843a6cd3237f44a7e7e24df8d7283068a
|
/hw01.py
|
648118e563a6fc63dcafbb70d930c184b7588428
|
[] |
no_license
|
mmssyy/msy
|
810542e719ee0384eed4de40d2ca396146b80135
|
d25c78a244d3a66061c15959f380cac16b93ebab
|
refs/heads/master
| 2020-04-26T08:27:32.549049
| 2019-11-12T13:57:59
| 2019-11-12T13:57:59
| 173,423,431
| 0
| 0
| null | 2019-05-13T12:26:59
| 2019-03-02T08:33:30
|
HTML
|
UTF-8
|
Python
| false
| false
| 140
|
py
|
for i in range(1, 10):
for j in range(i, 10):
print(f"{i}*{j}={i*j:<2d}", end=' ')
print()
print(" "*i, end='')
|
[
"498232736@qq.com"
] |
498232736@qq.com
|
ffbfd19801d8b95f7cd3475e687a7d9d2ddb7bb7
|
33d83bfd86d17c7182959f2af2974f086f0a900c
|
/ml_glaucoma/cli_options/train/tf_keras.py
|
53654be3a3b0c64bf55883081869091565346d53
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
SamuelMarks/ml-glaucoma
|
5205216bff6daf6b0b7941f0a34539644e2abed8
|
a1dffb106475ab9be9755429899cbfbe6a4a33f9
|
refs/heads/master
| 2021-03-19T16:53:53.354989
| 2020-12-19T08:50:03
| 2020-12-19T08:50:03
| 79,754,884
| 3
| 2
| null | 2019-10-06T13:08:23
| 2017-01-23T00:01:04
|
Python
|
UTF-8
|
Python
| false
| false
| 357
|
py
|
import tensorflow as tf
from ml_glaucoma import callbacks as callbacks_module
from ml_glaucoma.utils.helpers import get_upper_kv
valid_callbacks = get_upper_kv(tf.keras.callbacks)
valid_callbacks.update(get_upper_kv(callbacks_module))
SUPPORTED_CALLBACKS = tuple(sorted(valid_callbacks.keys()))
# Cleanup namespace
del callbacks_module, tf, get_upper_kv
|
[
"807580+SamuelMarks@users.noreply.github.com"
] |
807580+SamuelMarks@users.noreply.github.com
|
1882321dfb7d6c3d25f93fbf056a39ae8686edfc
|
2f2bebd590d02005654cd22953db7681dee764dc
|
/TestModel/wsgi.py
|
195940ed3078f15273928da379a1a491ea24e00e
|
[] |
no_license
|
DanielSamsonraj/College-Tasks
|
6af11882c62b1df75e4f5599b7296718d725c873
|
6964095a7236ae845ac84108f8db84bda8754417
|
refs/heads/master
| 2022-11-11T23:36:25.007332
| 2020-07-10T15:49:06
| 2020-07-10T15:49:06
| 275,524,334
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
WSGI config for TestModel project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TestModel.settings')
application = get_wsgi_application()
|
[
"danielsam458@gmail.com"
] |
danielsam458@gmail.com
|
5738c5b13a19960f4eb3b599e0dd2d595d8c83b4
|
28702e06390fc5b1c39f5945ab8365ed273483c3
|
/iFair.py
|
de703b44191cc273f6e5d0561041c08f41b045c7
|
[
"MIT"
] |
permissive
|
plahoti-lgtm/iFair
|
c4c48120041c5a2627bb86e2f09914e779d2fbdf
|
e504fc5411c2129b443ef77b3f016173cd3fb66e
|
refs/heads/master
| 2021-05-18T00:27:09.313891
| 2020-03-29T12:33:43
| 2020-03-29T12:33:43
| 251,023,153
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,013
|
py
|
"""
Implementation of the ICDE 2019 paper
iFair: Learning Individually Fair Data Representations for Algorithmic Decision Making
url: https://ieeexplore.ieee.org/document/8731591
citation:
@inproceedings{DBLP:conf/icde/LahotiGW19,
author = {Preethi Lahoti and
Krishna P. Gummadi and
Gerhard Weikum},
title = {iFair: Learning Individually Fair Data Representations for Algorithmic
Decision Making},
booktitle = {35th {IEEE} International Conference on Data Engineering, {ICDE} 2019,
Macao, China, April 8-11, 2019},
pages = {1334--1345},
publisher = {{IEEE}},
year = {2019},
url = {https://doi.org/10.1109/ICDE.2019.00121},
doi = {10.1109/ICDE.2019.00121},
timestamp = {Wed, 16 Oct 2019 14:14:56 +0200},
biburl = {https://dblp.org/rec/conf/icde/LahotiGW19.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
__author__: Preethi Lahoti
__email__: plahoti@mpi-inf.mpg.de
"""
import numpy as np
from iFair_impl.lowrank_helpers import iFair as ifair_func
from iFair_impl.lowrank_helpers import predict as ifair_predict
import sklearn.metrics.pairwise as pairwise
from scipy.optimize import minimize
class iFair:
def __init__(self, k=2, A_x=1e-2, A_z=1.0, max_iter=1000, nb_restarts=3):
self.k = k
self.A_x = A_x
self.A_z = A_z
self.max_iter = max_iter
self.nb_restarts = nb_restarts
self.opt_params = None
def fit(self, X_train, dataset=None):
"""
Learn the model using the training data. iFair.py._func
:param X: Training data. Expects last column of the matrix X to be the protected attribute.
"""
print('Fitting iFair...')
##if dataset object is not passed, assume that there is only 1 protected attribute and it is the last column of X
if dataset:
D_X_F = pairwise.euclidean_distances(X_train[:, dataset.nonsensitive_column_indices], X_train[:, dataset.nonsensitive_column_indices])
l = len(dataset.nonsensitive_column_indices)
else:
D_X_F = pairwise.euclidean_distances(X_train[:, :-1],
X_train[:, :-1])
l = X_train.shape[1] - 1
P = X_train.shape[1]
min_obj = None
opt_params = None
for i in range(self.nb_restarts):
x0_init = np.random.uniform(size=P * 2 + self.k + P * self.k)
#setting protected column weights to epsilon
## assumes that the column indices from l through P are protected and appear at the end
for i in range(l, P, 1):
x0_init[i] = 0.0001
bnd = [(None, None) if (i < P * 2) or (i >= P * 2 + self.k) else (0, 1)
for i in range(len(x0_init))]
opt_result = minimize(ifair_func, x0_init,
args=(X_train, D_X_F, self.k, self.A_x, self.A_z, 0),
method='L-BFGS-B',
jac=False,
bounds=bnd,
options={'maxiter': self.max_iter,
'maxfun': self.max_iter,
'eps': 1e-3})
if (min_obj is None) or (opt_result.fun < min_obj):
min_obj = opt_result.fun
opt_params = opt_result.x
self.opt_params = opt_params
def transform(self, X, dataset = None):
X_hat = ifair_predict(self.opt_params, X, k=self.k)
return X_hat
def fit_transform(self, X_train, dataset=None):
"""
Learns the model from the training data and returns the data in the new space.
:param X: Training data.
:return: Training data in the new space.
"""
print('Fitting and transforming...')
self.fit(X_train, dataset)
return self.transform(X_train)
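# A minimal usage sketch, assuming the protected attribute occupies the last column of X
# (the fallback fit() uses when no dataset object is passed); the random matrix below is
# made up purely for illustration.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.rand(50, 6)                        # 5 feature columns + 1 protected column
    X[:, -1] = (X[:, -1] > 0.5).astype(float)  # binary protected attribute
    model = iFair(k=2, max_iter=100, nb_restarts=1)
    X_fair = model.fit_transform(X)            # representations in the individually fair space
    print(X_fair.shape)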
|
[
"noreply@github.com"
] |
plahoti-lgtm.noreply@github.com
|
e86563173531d8b28011339b0d6151a1ad925692
|
6b06010f58958e522992e99efdf5056b15c037f3
|
/learning_journal.py
|
8c942735e81f6369cdc0f9c13e7d1f499db7301d
|
[] |
no_license
|
k1ycee/School_Work
|
139ee1fcff5f294819c2ce0b410b194c3dca34e0
|
9dd3dff4a767d84021e11c57d7a9f3a7345bd474
|
refs/heads/master
| 2023-03-22T20:27:33.410554
| 2021-03-22T08:05:39
| 2021-03-22T08:05:39
| 340,304,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,374
|
py
|
# import PyPDF2
# from gtts import gTTS
# import os
# def countdown(n):
# if n <= 0:
# print('Blastoff!')
# else:
# print(n)
# countdown(n-1)
# # Function that counts up from a negative number
# def countup(n):
# if n >= 0:
# print(n)
# print('BlastUp!')
# else:
# print(n)
# countup(n+1)
# number = int(input("Input a number: "))
# if number < 0:
# countup(number)
# elif number > 0:
# countdown(number)
# elif number == 0:
# print("You can't do a countdown with this number")
# acceptNumberForCountDown()
# def hypoteneus(a,b):
# h = a**2 + b**2
# print(h, "sum of the product of 'a' and 'b'")
# dist = h ** 0.5
# print(dist, "is the length of the hypoteneus")
# return dist
# # print(hypoteneus(3,4))
# # print(hypoteneus(2,3))
# # print(hypoteneus(6,5))
# def open_pdf_file(page):
# pdfFile = open('test_material.pdf', "rb") #hard-coding a specific book into the program
# pdfReader = PyPDF2.PdfFileReader(pdfFile) #reading the pdffile
# pageToRead = pdfReader.getPage(page).extractText() # picking a particular page to read
# # print(pdfReader.getPage(page).extractText(), "Content of the page argument")
# return pageToRead
# def read_text():
# language = 'en-US' #selecting language to use
# speech = gTTS(text = open_pdf_file(303), lang = language, slow = False) #calling the open_pdf_file with 303 as an argument
# speech.save("voice.mp3") #saving the text as an MP3 to the file system
# # os.system("start voice.mp3")
# read_text()
# def traversal():
# prefixes = 'JKLMNOPQ'
# suffix = 'ack'
# for letter in prefixes:
# word = letter + suffix
# if(word == "Oack"):
# word = "Ouack" #re-assigning the generated to a new word
# elif(word == "Qack"):
# word = "Quack" #re-assigning the generated to a new word
# print(word)
# # traversal()
# # name1 = "hannah"
# # print(name1[5:])
# # name2 = "sinzu"
# # print(name2[0:len(name2) -1])
# # name3 = "basketmouth"
# # print(name3[6: len(name3)])
# n = 10
# while n != 1:
# print (n,)
# if n % 2 == 0: # n is even
# n = n // 2
# else: # n is odd
# n = n * 3 + 1
# def subroutine( n ):
# while n > 0:
# print (n,)
# n = n - 1
# subroutine(10)
animal_shellter = {
"Teddy": ["dog",4,"male"],
"Elvis": ["dog",1,"male"],
"Sheyla": ["dog",5,"female"],
"Topic": ["hamster",3,"male"],
"Kuzya": ["cat",10,"male"],
"Misi": ["cat",8,"female"],
}
print(animal_shellter.items())
# def invert(d):
# inverse = dict()
# for key in d:
# val = d[key]
# for item in val:
# if item not in inverse:
# inverse[item] = [key]
# else:
# inverse[item].append(key)
# return inverse
# def invert_file_content():
# with open("dict.txt") as file_content:
# inverted_file = invert(dict(eval(file_content.read())))
# string_inverted_file = str(inverted_file)
# write_to_file(string_inverted_file)
# def write_to_file(file):
# new_file = open("inverted_dict.txt", "w")
# new_file.write(file)
# # invert_file_content()
# fin = open('dict.txt')
# for line in fin:
# print(type(fin))
# print(type(line))
# word = line.strip()
# print(type(word))
# print(word)
try:
fin = open('answer.txt')
fin.write('Yes')
except:
print('No')
print('Maybe')
n = 10
while n != 1:
print (n,end=' ')
if n % 2 == 0: # n is even
n = n // 2
else: # n is odd
n = n * 3 + 1
mylist = ["now", "four", "is", "score", "the", "and seven", "time", "years", "for"]
a=0
while a < 7:
print (mylist[a],)
a += 2
mylist = [ [2,4,1], [1,2,3], [2,3,5] ]
a=0
b=0
total = 0
while a <= 2:
while b < 2:
total += mylist[a][b]
b += 1
a += 1
b = 0
print (total)
index = "Ability is a poor man's wealth".find("w")
print(index)
while True:
while 1 > 0:
break
print("Got it!")
break
# def recurse(a):
# if (a == 0):
# print(a)
# else:
# recurse(a)
# recurse(1)
mylist = [ [2,4,1], [1,2,3], [2,3,5] ]
total = 0
for sublist in mylist:
total += sum(sublist)
print(total)
|
[
"chiagozieani145@gmail.com"
] |
chiagozieani145@gmail.com
|
8b3f5f4ad3fcb11d245e75d80e2e90e86bb035fc
|
31e49b0c2fc73529af1b49724edca11b1ad5bfb1
|
/data/generate_frame.py
|
405c86e8db376f1249bb568d805363db7e1bc66e
|
[
"MIT"
] |
permissive
|
beibuwandeluori/DeeperForensicsChallengeSolution
|
0030fd25b4fa994656e1f58ae1a38e4bda5a55e3
|
d19284d48d386a7409f097ad2755c6a9c31a5729
|
refs/heads/master
| 2023-04-21T09:28:50.963428
| 2021-05-21T14:22:14
| 2021-05-21T14:22:14
| 309,660,404
| 29
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,841
|
py
|
import numpy as np
import cv2
import os
import shutil
from tqdm import tqdm
def extract_frames(videos_path, frame_subsample_count=30, output_path=None):
reader = cv2.VideoCapture(videos_path)
# fps = video.get(cv2.CAP_PROP_FPS)
frame_num = 0
while reader.isOpened():
success, whole_image = reader.read()
if not success:
break
if frame_num % frame_subsample_count == 0:
save_path = os.path.join(output_path, '{:04d}.png'.format(frame_num))
cv2.imwrite(save_path, whole_image)
frame_num += 1
break
reader.release()
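# Minimal usage sketch (paths are illustrative). Note that the trailing `break`
# above exits after the first read, so only one frame per video is written:
# extract_frames('input.mp4', frame_subsample_count=30, output_path='frames')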
def main(vid):
video_path = '/data1/cby/dataset/DeepForensic/videos/manipulated_videos/' + vid
face_path = '/data1/cby/dataset/DeepForensic/frames/manipulated_images/' + vid
if not os.path.isdir(face_path):
os.mkdir(face_path)
print(face_path)
video_file_path = video_path
face_file_path = face_path
if not os.path.isdir(face_file_path):
os.mkdir(face_file_path)
for name in tqdm(os.listdir(video_file_path)):
input_path = os.path.join(video_file_path, name)
if name.find('.mp4') == -1:
try:
shutil.copy(input_path, face_file_path)
continue
except:
continue
output_path = os.path.join(face_file_path, name)
if not os.path.isdir(output_path):
os.mkdir(output_path)
if len(os.listdir(output_path)) != 0:
continue
extract_frames(input_path, frame_subsample_count=20, output_path=output_path)
def getFile(path, format='mp4'):
    files = os.listdir(path)  # list every entry in the folder, including sub-folder names
FileList = []
for name in files:
if os.path.isdir(os.path.join(path, name)):
            FileList.extend(getFile(os.path.join(path, name), format))  # recurse into every sub-folder
elif os.path.isfile(os.path.join(path, name)):
if format.lower() in name.lower():
FileList.append(os.path.join(path, name))
else:
print("未知文件:%s", name)
return FileList
def main_real(vid):
video_path = '/data1/cby/dataset/DeepForensic/videos/source_videos/' + vid
face_path = '/data1/cby/dataset/DeepForensic/frames/source_images/' + vid
if not os.path.isdir(face_path):
os.mkdir(face_path)
print(face_path)
video_file_path = video_path
face_file_path = face_path
if not os.path.isdir(face_file_path):
os.mkdir(face_file_path)
for input_path in tqdm(getFile(video_file_path, format='mp4')):
# output_path = os.path.join(face_file_path, input_path)
output_path = input_path.replace(video_path, face_path)
if not os.path.isdir(output_path):
os.makedirs(output_path)
if len(os.listdir(output_path)) != 0:
continue
extract_frames(input_path, frame_subsample_count=50, output_path=output_path)
if __name__ == "__main__":
# vids = os.listdir('/data1/cby/dataset/DeepForensic/videos/source_videos')
# print('vids total lenght:', len(vids))
# start = 80
# end = start + 20
# print(vids[start:end], start, end)
# for i, vid in enumerate(vids[start:end]):
# print(start + i, 'Start extract frames in', vid)
# main_real(vid)
# print(start + i, 'Extract frames in', vid, 'Finished!')
vids = os.listdir('/data1/cby/dataset/DeepForensic/videos/manipulated_videos')
    print('vids total length:', len(vids))
start = 9
end = start + 3
print(vids[start:end], start, end)
for i, vid in enumerate(vids[start:end]):
print(start + i, 'Start extract frames in', vid)
main(vid)
print(start + i, 'Extract frames in', vid, 'Finished!')
|
[
"1329636999@qq.com"
] |
1329636999@qq.com
|
636c19716c6fe400a8be0ff1adb798143928282e
|
384324291014bc14a83253416923bbb164283a4e
|
/moveZeroes.py
|
182ffe8c245a437c4e5855a772f70bd04c923130
|
[] |
no_license
|
Sammyuel/LeetcodeSolutions
|
b9147717f6693c212ba16eaf0df2bd7a3ee53ea7
|
0864b4f8a52d9463d09def8d54a9b852e4073dcc
|
refs/heads/master
| 2020-03-28T09:33:32.014502
| 2018-11-08T00:57:43
| 2018-11-08T00:57:43
| 148,042,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 341
|
py
|
class Solution(object):
def moveZeroes(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
pos = 0
for i in xrange(len(nums)):
if nums[i]:
nums[i], nums[pos] = nums[pos], nums[i]
pos += 1
|
[
"sam.lee1@bell.ca"
] |
sam.lee1@bell.ca
|
5f0cf6160c0e0b51bb29880c1a50839556e3a902
|
ff3b19b5cc5aac4b856c924dc86af3ffb631948e
|
/Test/getReaction.py
|
5486f02d9878c842fd166793362035bd03277b9b
|
[
"Apache-2.0"
] |
permissive
|
yuxiabuyouren/Test
|
0bc8be254b07e0051f6dcf8c2ab4d6e978559f26
|
f1335c7dd0edb66c6a75fa2a93a16336333782c5
|
refs/heads/master
| 2020-03-20T05:34:53.846340
| 2018-06-05T12:25:32
| 2018-06-05T12:25:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,536
|
py
|
from django.http import HttpResponse
import json
import re
from collections import defaultdict
def getReaction(request):
treatments = []
reactions = defaultdict(dict)
snodes = request.GET['snodes'].strip().split("\t")
#print(len(snodes))
for snode in snodes:
items = snode.rstrip(";").split("/")
treatment = items[0]
drugs = items[1].split(";")
for drug in drugs:
drug_name, elem = drug.split(",")
treatments.append((treatment + "(" + drug_name + ")", elem))
with open("reaction.json", encoding="utf-8") as f:
savedReactions = json.load(f)
for i in range(len(treatments)-1):
for j in range(i+1, len(treatments)):
if treatments[i][1] + "," + treatments[j][1] in savedReactions:
key = treatments[i][0] + "," + treatments[j][0]
elem = treatments[i][1] + "," + treatments[j][1]
props = savedReactions[elem].split("|")
reactions[key]["elem"] = elem
reactions[key]["significance"] = props[0]
reactions[key]["reaction mechanism"] = props[1]
reactions[key]["url"] = props[2]
reactions[key]["to_consumer"] = props[3]
reactions[key]["to_professor"] = props[4]
data = {"reactions": reactions}
if reactions:
data["flag"] = True
else:
data["flag"] = False
# print(reactions)
return HttpResponse(json.dumps(data), content_type='application/json')
|
[
"sangyunxin@gmail.com"
] |
sangyunxin@gmail.com
|
84fece833becbc375cab5cf7c904cf9910960ad5
|
e64f34e6c03297239e252070874b509fcaf09686
|
/vmall-server/app/api_v1/order.py
|
6d78c8daa12e815ba6f7339fcc1c0d25ce78fc3a
|
[] |
no_license
|
goobai/vmall
|
e0205d31a800033c913448ab48446afc0df9572c
|
9b130303cf78e04cbe1d8df2bd30fb774cf65d48
|
refs/heads/master
| 2023-05-12T01:40:13.789619
| 2023-02-02T06:14:26
| 2023-02-02T06:14:26
| 208,969,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,108
|
py
|
from . import bp
from flask import jsonify, request, redirect, url_for
from app.models import *
from flask_jwt_extended import jwt_required, create_refresh_token, create_access_token, get_jwt_identity
from sqlalchemy import desc, func
from app.utils import generate_order_id
"""
Order endpoints:
1. Submit an order
2. Pay the order amount
3. Query the order status
4. Query historical orders
"""
@bp.route('/order/confirm', methods=['POST', 'GET'])
@jwt_required
def order_confirm():
uid = get_jwt_identity()
if request.method == "POST":
"""生成订单 订单生成成功后将商品从购物车移除"""
products = request.json.get('products') # 订单中的商品及其对应数量
address_id = request.json.get('address_id') # 收货地址
# coupon = request.json.get('coupon') # 优惠券
if products:
# 如果存在跨店商品,根据店铺拆分订单,每个店铺所有商品对应一个订单
for shop in products:
order_id = generate_order_id(uid=uid)
if not order_id:
return jsonify(code=0, data={}, msg="订单生成失败!")
new_order = Order(user_id=uid, user_address_id=address_id, shop_id=shop['shop_id'], order_id=order_id)
db.session.add(new_order)
amount = 0
                # Add every item in this shop to the order-product table
for product in shop['products']:
p_sku = ProductSku.query.filter_by(id=product['id']).first()
c_sku = Cart.query.filter_by(sku_id=product['id'], user_id=uid).first()
total_price = product['count'] * p_sku.price
order_product = OrderProduct(order_id=order_id, sku_id=p_sku.id, price=p_sku.price,
count=product['count'], total_price=total_price, shop_id=p_sku.shop_id)
db.session.add(order_product)
db.session.delete(c_sku)
amount = amount + total_price
new_order.total_price = amount
order_payment = OrderPayment(order_id=order_id, amount=amount)
db.session.add(order_payment)
db.session.commit()
data = ""
return jsonify(code=1, data=data, msg="下单成功!")
else:
return jsonify(code=0, data={}, msg="参数错误!")
if request.method == "GET":
"""获取订单信息"""
order_id = request.args.get('order_id')
@bp.route('/order/pay')
def order_pay():
"""
    Split the order by shop after the user's payment succeeds.
:return:
"""
uid = get_jwt_identity()
if request.method == "POST":
"""支付"""
order_id = request.json.get('order_id')
# region
# endregion
@bp.route('/order/confirm/products/', methods=['POST'])
@jwt_required
def product_confirm():
"""获取订单确认商品"""
if request.method == "POST":
"""查询购物车中选中商品,并且返回选中商品名,价格,数量,店铺名"""
uid = get_jwt_identity()
cart_skus = Cart.query.filter_by(user_id=uid).order_by(desc(Cart.modify_time)).all()
data = {'totalCounts': 0, "totalPrice": 0}
if cart_skus:
results = db.session.query(Cart).outerjoin(ProductSku, Cart.sku_id == ProductSku.id).filter(
Cart.user_id == uid, Cart.checked == 1).outerjoin(Shop, ProductSku.shop_id == Shop.id).with_entities(
ProductSku.shop_id, Shop.name).group_by(ProductSku.shop_id).all()
shop_list = [dict(zip(result.keys(), result)) for result in results]
data["shops"] = shop_list
for shop in shop_list:
shop_products = Cart.query.filter_by(user_id=uid, shop_id=shop["shop_id"], checked=1).order_by(
desc(Cart.modify_time)).all()
shop["products"] = [shop_product.to_dict() for shop_product in shop_products]
            # Query the total price of the selected items
results = db.session.query(Cart).outerjoin(ProductSku, Cart.sku_id == ProductSku.id).filter(
Cart.user_id == uid, Cart.checked == 1).with_entities(
func.sum(Cart.count * ProductSku.price).label('totalPrice')).all()
price = [dict(zip(result.keys(), result)) for result in results]
if price[0]['totalPrice']:
data['totalPrice'] = int(price[0]['totalPrice'])
return jsonify({
'code': 1,
'data': data
})
else:
return jsonify({'code': 0, "msg": "你的购物车空空如也!"})
@bp.route('/orders', methods=["POST"])
@jwt_required
def order_info():
"""查询订单信息
返回:订单号 ,商品 ,总价"""
uid = get_jwt_identity()
# 订单状态 0:生成订单,待付款 ;1:付款完成,待发货;2:发货完成,物流中,待确认收货 ;3:确认收货,待评价 4:订单完成
# 5: 已取消
order_status = request.json.get('orderStatus')
offset = request.json.get('offset')
limit = 10
msg = "订单查询成功!"
data=[]
if order_status == 0:
orders = Order.query.filter_by(user_id=uid, order_status=order_status).order_by(desc(Order.create_time)).limit(
limit).offset(offset).all()
data = [order.to_dict() for order in orders]
return jsonify(code=1, data=data, msg=msg)
elif order_status == 1:
orders = Order.query.filter_by(user_id=uid, order_status=order_status).order_by(desc(Order.create_time)).limit(
limit).offset(offset).all()
data = [order.to_dict() for order in orders]
elif order_status == 2:
orders = Order.query.filter_by(user_id=uid, order_status=order_status).order_by(desc(Order.create_time)).limit(
limit).offset(offset).all()
data = [order.to_dict() for order in orders]
elif order_status == 3:
orders = Order.query.filter_by(user_id=uid, order_status=order_status).order_by(desc(Order.create_time)).limit(
limit).offset(offset).all()
data = [order.to_dict() for order in orders]
elif order_status == 4:
orders = Order.query.filter_by(user_id=uid, order_status=order_status).order_by(desc(Order.create_time)).limit(
limit).offset(offset).all()
data = [order.to_dict() for order in orders]
elif order_status == 5:
orders = Order.query.filter_by(user_id=uid, order_status=order_status).order_by(desc(Order.create_time)).limit(
limit).offset(offset).all()
data = [order.to_dict() for order in orders]
elif order_status == 9:
        # Query all orders
orders = Order.query.filter_by(user_id=uid).order_by(desc(Order.create_time)).limit(
limit).offset(offset).all()
data = [order.to_dict() for order in orders]
else:
return jsonify(code=0, msg="查询失败")
return jsonify(code=1, data=data, msg=msg)
|
[
"834207470@qq.com"
] |
834207470@qq.com
|
a40ae26e22d1343dac0a948f1a5dd87ee495e2a2
|
1ac5bc7013898f54c58ffc48aa785f880b0ecbf5
|
/vyper_parser/ast.py
|
3329c4cc4c679ef49df1787e88379279811d609a
|
[
"MIT"
] |
permissive
|
gihyeonsung/vyper-parser
|
c431e7853e03d32d5e194afc83bafde80d3ed718
|
b117cfbe262d0c5f06a7a0d216c210dc28cb514c
|
refs/heads/master
| 2023-02-05T13:23:21.742632
| 2020-12-29T04:07:40
| 2020-12-29T04:07:40
| 325,181,995
| 0
| 0
|
MIT
| 2020-12-29T04:04:21
| 2020-12-29T04:04:20
| null |
UTF-8
|
Python
| false
| false
| 28,245
|
py
|
import ast as python_ast
import typing
from vyper_parser.types import (
SubclassesDict,
)
from vyper_parser.utils import (
Singleton,
get_all_subclasses_dict,
)
constant = typing.Any
identifier = str
singleton = typing.Union[None, bool]
AliasSeq = typing.Sequence['alias']
ArgSeq = typing.Sequence['arg']
CmpOpSeq = typing.Sequence[typing.Type['cmpop']]
ComprehensionSeq = typing.Sequence['comprehension']
ExceptHandlerSeq = typing.Sequence['excepthandler']
ExprSeq = typing.Sequence['expr']
IdentifierSeq = typing.Sequence['identifier']
KeywordSeq = typing.Sequence['keyword']
SliceSeq = typing.Sequence['slice']
StmtSeq = typing.Sequence['stmt']
WithItemSeq = typing.Sequence['withitem']
def translate_parsing_pos(val: typing.Any, line_delta: int, col_delta: int) -> None:
"""
Translates the parsing position of an AST node and all of its child nodes.
"""
if isinstance(val, (list, tuple)):
# Translate each item in sequence
for item in val:
translate_parsing_pos(item, line_delta, col_delta)
elif isinstance(val, VyperAST):
# Translate this node
if isinstance(val, PosAttributes):
val.lineno += line_delta
val.col_offset += col_delta
# Translate all of this node's children
for field in val.__slots__:
child = getattr(val, field)
translate_parsing_pos(child, line_delta, col_delta)
else:
# This isn't a node or a collection of nodes. Do nothing.
return
class VyperAST:
__slots__ = ()
_all_subclasses_dict_cache: typing.Optional[SubclassesDict] = None
@classmethod
def all_subclasses_dict(cls) -> SubclassesDict:
"""
Returns a dictionary of all the subclasses in the ``VyperAST`` class
tree keyed by name.
"""
cache = getattr(cls, '_all_subclasses_dict_cache', None)
if cache is not None:
return cache
class_dict = get_all_subclasses_dict(cls, same_module=True)
cls._all_subclasses_dict_cache = class_dict
return class_dict
@classmethod
def from_python_ast(
cls,
val: typing.Any,
seq_class: typing.Union[typing.Type[list], typing.Type[tuple]] = tuple,
) -> typing.Any:
"""
Convert a python AST into a vyper AST.
"""
if isinstance(val, (list, tuple)):
return seq_class(cls.from_python_ast(v) for v in val)
elif isinstance(val, python_ast.AST):
python_class_name = val.__class__.__name__
vyper_class = cls.all_subclasses_dict()[python_class_name]
node_kwargs = {}
for f in val._fields:
node_kwargs[f] = cls.from_python_ast(getattr(val, f))
for a in val._attributes:
node_kwargs[a] = getattr(val, a)
return vyper_class(**node_kwargs)
else:
return val
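    # Illustrative round-trip (a sketch; assumes a Python version whose ast.Module
    # fields match these node classes, i.e. the pre-3.8 grammar without type_ignores):
    #   py_tree = python_ast.parse("x = 1")
    #   vy_tree = VyperAST.from_python_ast(py_tree)  # -> Module(body=(Assign(...),))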
class mod(VyperAST):
__slots__ = ()
class Module(mod):
__slots__ = ('body',)
def __init__(self,
body: StmtSeq):
self.body = body
class Interactive(mod):
__slots__ = ('body',)
def __init__(self,
body: StmtSeq):
self.body = body
class Expression(mod):
__slots__ = ('body',)
def __init__(self,
body: 'expr'):
self.body = body
class PosAttributes:
__slots__ = ('lineno', 'col_offset')
def __init__(self,
*,
lineno: int = None,
col_offset: int = None):
self.lineno = lineno
self.col_offset = col_offset
class stmt(PosAttributes, VyperAST):
__slots__ = ()
class FunctionDef(stmt):
__slots__ = ('name', 'args', 'body', 'decorator_list', 'returns')
def __init__(self,
name: identifier,
args: 'arguments',
body: StmtSeq,
decorator_list: ExprSeq,
returns: 'expr' = None,
*,
lineno: int = None,
col_offset: int = None):
self.name = name
self.args = args
self.body = body
self.decorator_list = decorator_list
self.returns = returns
super().__init__(lineno=lineno, col_offset=col_offset)
class AsyncFunctionDef(stmt):
__slots__ = ('name', 'args', 'body', 'decorator_list', 'returns')
def __init__(self,
name: identifier,
args: 'arguments',
body: StmtSeq,
decorator_list: ExprSeq,
returns: 'expr' = None,
*,
lineno: int = None,
col_offset: int = None):
self.name = name
self.args = args
self.body = body
self.decorator_list = decorator_list
self.returns = returns
super().__init__(lineno=lineno, col_offset=col_offset)
class ClassDef(stmt):
__slots__ = ('name', 'bases', 'keywords', 'body', 'decorator_list')
def __init__(self,
name: identifier,
bases: ExprSeq,
keywords: KeywordSeq,
body: StmtSeq,
decorator_list: ExprSeq,
*,
lineno: int = None,
col_offset: int = None):
self.name = name
self.bases = bases
self.keywords = keywords
self.body = body
self.decorator_list = decorator_list
super().__init__(lineno=lineno, col_offset=col_offset)
class Return(stmt):
__slots__ = ('value',)
def __init__(self,
value: 'expr' = None,
*,
lineno: int = None,
col_offset: int = None):
self.value = value
super().__init__(lineno=lineno, col_offset=col_offset)
class Delete(stmt):
__slots__ = ('targets',)
def __init__(self,
targets: ExprSeq,
*,
lineno: int = None,
col_offset: int = None):
self.targets = targets
super().__init__(lineno=lineno, col_offset=col_offset)
class Assign(stmt):
__slots__ = ('targets', 'value')
def __init__(self,
targets: ExprSeq,
value: 'expr',
*,
lineno: int = None,
col_offset: int = None):
self.targets = targets
self.value = value
super().__init__(lineno=lineno, col_offset=col_offset)
class AugAssign(stmt):
__slots__ = ('target', 'op', 'value')
def __init__(self,
target: 'expr',
op: typing.Type['operator'],
value: 'expr',
*,
lineno: int = None,
col_offset: int = None):
self.target = target
self.op = op
self.value = value
super().__init__(lineno=lineno, col_offset=col_offset)
class AnnAssign(stmt):
__slots__ = ('target', 'annotation', 'simple', 'value')
def __init__(self,
target: 'expr',
annotation: 'expr',
simple: bool,
value: 'expr' = None,
*,
lineno: int = None,
col_offset: int = None):
self.target = target
self.annotation = annotation
self.simple = simple
self.value = value
super().__init__(lineno=lineno, col_offset=col_offset)
class For(stmt):
__slots__ = ('target', 'iter', 'body', 'orelse')
def __init__(self,
target: 'expr',
iter: 'expr',
body: StmtSeq,
orelse: StmtSeq,
*,
lineno: int = None,
col_offset: int = None):
self.target = target
self.iter = iter
self.body = body
self.orelse = orelse
super().__init__(lineno=lineno, col_offset=col_offset)
class AsyncFor(stmt):
__slots__ = ('target', 'iter', 'body', 'orelse')
def __init__(self,
target: 'expr',
iter: 'expr',
body: StmtSeq,
orelse: StmtSeq,
*,
lineno: int = None,
col_offset: int = None):
self.target = target
self.iter = iter
self.body = body
self.orelse = orelse
super().__init__(lineno=lineno, col_offset=col_offset)
class While(stmt):
__slots__ = ('test', 'body', 'orelse')
def __init__(self,
test: 'expr',
body: StmtSeq,
orelse: StmtSeq,
*,
lineno: int = None,
col_offset: int = None):
self.test = test
self.body = body
self.orelse = orelse
super().__init__(lineno=lineno, col_offset=col_offset)
class If(stmt):
__slots__ = ('test', 'body', 'orelse')
def __init__(self,
test: 'expr',
body: StmtSeq,
orelse: StmtSeq,
*,
lineno: int = None,
col_offset: int = None):
self.test = test
self.body = body
self.orelse = orelse
super().__init__(lineno=lineno, col_offset=col_offset)
class With(stmt):
__slots__ = ('items', 'body')
def __init__(self,
items: WithItemSeq,
body: StmtSeq,
*,
lineno: int = None,
col_offset: int = None):
self.items = items
self.body = body
super().__init__(lineno=lineno, col_offset=col_offset)
class AsyncWith(stmt):
__slots__ = ('items', 'body')
def __init__(self,
items: WithItemSeq,
body: StmtSeq,
*,
lineno: int = None,
col_offset: int = None):
self.items = items
self.body = body
super().__init__(lineno=lineno, col_offset=col_offset)
class Raise(stmt):
__slots__ = ('exc', 'cause')
def __init__(self,
exc: 'expr' = None,
cause: 'expr' = None,
*,
lineno: int = None,
col_offset: int = None):
self.exc = exc
self.cause = cause
super().__init__(lineno=lineno, col_offset=col_offset)
class Try(stmt):
__slots__ = ('body', 'handlers', 'orelse', 'finalbody')
def __init__(self,
body: StmtSeq,
handlers: ExceptHandlerSeq,
orelse: StmtSeq,
finalbody: StmtSeq,
*,
lineno: int = None,
col_offset: int = None):
self.body = body
self.handlers = handlers
self.orelse = orelse
self.finalbody = finalbody
super().__init__(lineno=lineno, col_offset=col_offset)
class Assert(stmt):
__slots__ = ('test', 'msg')
def __init__(self,
test: 'expr',
msg: 'expr' = None,
*,
lineno: int = None,
col_offset: int = None):
self.test = test
self.msg = msg
super().__init__(lineno=lineno, col_offset=col_offset)
class Import(stmt):
__slots__ = ('names',)
def __init__(self,
names: AliasSeq,
*,
lineno: int = None,
col_offset: int = None):
self.names = names
super().__init__(lineno=lineno, col_offset=col_offset)
class ImportFrom(stmt):
__slots__ = ('names', 'module', 'level')
def __init__(self,
names: AliasSeq,
module: identifier = None,
level: int = None,
*,
lineno: int = None,
col_offset: int = None):
self.names = names
self.module = module
self.level = level
super().__init__(lineno=lineno, col_offset=col_offset)
class Global(stmt):
__slots__ = ('names',)
def __init__(self,
names: IdentifierSeq,
*,
lineno: int = None,
col_offset: int = None):
self.names = names
super().__init__(lineno=lineno, col_offset=col_offset)
class Nonlocal(stmt):
__slots__ = ('names',)
def __init__(self,
names: IdentifierSeq,
*,
lineno: int = None,
col_offset: int = None):
self.names = names
super().__init__(lineno=lineno, col_offset=col_offset)
class Expr(stmt):
__slots__ = ('value',)
def __init__(self,
value: 'expr',
*,
lineno: int = None,
col_offset: int = None):
self.value = value
super().__init__(lineno=lineno, col_offset=col_offset)
class Pass(stmt):
__slots__ = ()
class Break(stmt):
__slots__ = ()
class Continue(stmt):
__slots__ = ()
class expr(PosAttributes, VyperAST):
__slots__ = ()
class BoolOp(expr):
__slots__ = ('op', 'values')
def __init__(self,
op: typing.Type['boolop'],
values: ExprSeq,
*,
lineno: int = None,
col_offset: int = None):
self.op = op
self.values = values
super().__init__(lineno=lineno, col_offset=col_offset)
class BinOp(expr):
__slots__ = ('left', 'op', 'right')
def __init__(self,
left: expr,
op: typing.Type['operator'],
right: expr,
*,
lineno: int = None,
col_offset: int = None):
self.left = left
self.op = op
self.right = right
super().__init__(lineno=lineno, col_offset=col_offset)
class UnaryOp(expr):
__slots__ = ('op', 'operand')
def __init__(self,
op: typing.Type['unaryop'],
operand: expr,
*,
lineno: int = None,
col_offset: int = None):
self.op = op
self.operand = operand
super().__init__(lineno=lineno, col_offset=col_offset)
class Lambda(expr):
__slots__ = ('args', 'body')
def __init__(self,
args: 'arguments',
body: expr,
*,
lineno: int = None,
col_offset: int = None):
self.args = args
self.body = body
super().__init__(lineno=lineno, col_offset=col_offset)
class IfExp(expr):
__slots__ = ('test', 'body', 'orelse')
def __init__(self,
test: expr,
body: expr,
orelse: expr,
*,
lineno: int = None,
col_offset: int = None):
self.test = test
self.body = body
self.orelse = orelse
super().__init__(lineno=lineno, col_offset=col_offset)
class Dict(expr):
__slots__ = ('keys', 'values')
def __init__(self,
keys: ExprSeq,
values: ExprSeq,
*,
lineno: int = None,
col_offset: int = None):
self.keys = keys
self.values = values
super().__init__(lineno=lineno, col_offset=col_offset)
class Set(expr):
__slots__ = ('elts',)
def __init__(self,
elts: ExprSeq,
*,
lineno: int = None,
col_offset: int = None):
self.elts = elts
super().__init__(lineno=lineno, col_offset=col_offset)
class ListComp(expr):
__slots__ = ('elt', 'generators')
def __init__(self,
elt: expr,
generators: ComprehensionSeq,
*,
lineno: int = None,
col_offset: int = None):
self.elt = elt
self.generators = generators
super().__init__(lineno=lineno, col_offset=col_offset)
class SetComp(expr):
__slots__ = ('elt', 'generators')
def __init__(self,
elt: expr,
generators: ComprehensionSeq,
*,
lineno: int = None,
col_offset: int = None):
self.elt = elt
self.generators = generators
super().__init__(lineno=lineno, col_offset=col_offset)
class DictComp(expr):
__slots__ = ('key', 'value', 'generators')
def __init__(self,
key: expr,
value: expr,
generators: ComprehensionSeq,
*,
lineno: int = None,
col_offset: int = None):
self.key = key
self.value = value
self.generators = generators
super().__init__(lineno=lineno, col_offset=col_offset)
class GeneratorExp(expr):
__slots__ = ('elt', 'generators')
def __init__(self,
elt: expr,
generators: ComprehensionSeq,
*,
lineno: int = None,
col_offset: int = None):
self.elt = elt
self.generators = generators
super().__init__(lineno=lineno, col_offset=col_offset)
class Await(expr):
__slots__ = ('value',)
def __init__(self,
value: expr,
*,
lineno: int = None,
col_offset: int = None):
self.value = value
super().__init__(lineno=lineno, col_offset=col_offset)
class Yield(expr):
__slots__ = ('value',)
def __init__(self,
value: expr = None,
*,
lineno: int = None,
col_offset: int = None):
self.value = value
super().__init__(lineno=lineno, col_offset=col_offset)
class YieldFrom(expr):
__slots__ = ('value',)
def __init__(self,
value: expr,
*,
lineno: int = None,
col_offset: int = None):
self.value = value
super().__init__(lineno=lineno, col_offset=col_offset)
class Compare(expr):
__slots__ = ('left', 'ops', 'comparators')
def __init__(self,
left: expr,
ops: CmpOpSeq,
comparators: ExprSeq,
*,
lineno: int = None,
col_offset: int = None):
self.left = left
self.ops = ops
self.comparators = comparators
super().__init__(lineno=lineno, col_offset=col_offset)
class Call(expr):
__slots__ = ('func', 'args', 'keywords')
def __init__(self,
func: expr,
args: ExprSeq,
keywords: KeywordSeq,
*,
lineno: int = None,
col_offset: int = None):
self.func = func
self.args = args
self.keywords = keywords
super().__init__(lineno=lineno, col_offset=col_offset)
class Num(expr):
__slots__ = ('n',)
def __init__(self,
n: typing.Union[int, float],
*,
lineno: int = None,
col_offset: int = None):
self.n = n
super().__init__(lineno=lineno, col_offset=col_offset)
class Str(expr):
__slots__ = ('s',)
def __init__(self,
s: str,
*,
lineno: int = None,
col_offset: int = None):
self.s = s
super().__init__(lineno=lineno, col_offset=col_offset)
class FormattedValue(expr):
__slots__ = ('value', 'conversion', 'format_spec')
def __init__(self,
value: expr,
conversion: int = None,
format_spec: expr = None,
*,
lineno: int = None,
col_offset: int = None):
self.value = value
self.conversion = conversion
self.format_spec = format_spec
super().__init__(lineno=lineno, col_offset=col_offset)
class JoinedStr(expr):
__slots__ = ('values',)
def __init__(self,
values: ExprSeq,
*,
lineno: int = None,
col_offset: int = None):
self.values = values
super().__init__(lineno=lineno, col_offset=col_offset)
class Bytes(expr):
__slots__ = ('s',)
def __init__(self,
s: bytes,
*,
lineno: int = None,
col_offset: int = None):
self.s = s
super().__init__(lineno=lineno, col_offset=col_offset)
class NameConstant(expr):
__slots__ = ('value',)
def __init__(self,
value: singleton,
*,
lineno: int = None,
col_offset: int = None):
self.value = value
super().__init__(lineno=lineno, col_offset=col_offset)
class Ellipsis(expr):
__slots__ = ()
class Constant(expr):
__slots__ = ('value',)
def __init__(self,
value: constant,
*,
lineno: int = None,
col_offset: int = None):
self.value = value
super().__init__(lineno=lineno, col_offset=col_offset)
class Attribute(expr):
__slots__ = ('value', 'attr', 'ctx')
def __init__(self,
value: expr,
attr: identifier,
ctx: typing.Type['expr_context'],
*,
lineno: int = None,
col_offset: int = None):
self.value = value
self.attr = attr
self.ctx = ctx
super().__init__(lineno=lineno, col_offset=col_offset)
class Subscript(expr):
__slots__ = ('value', 'slice', 'ctx')
def __init__(self,
value: expr,
                 slice: 'slice',
ctx: typing.Type['expr_context'],
*,
lineno: int = None,
col_offset: int = None):
self.value = value
self.slice = slice
self.ctx = ctx
super().__init__(lineno=lineno, col_offset=col_offset)
class Starred(expr):
__slots__ = ('value', 'ctx')
def __init__(self,
value: expr,
ctx: typing.Type['expr_context'],
*,
lineno: int = None,
col_offset: int = None):
self.value = value
self.ctx = ctx
super().__init__(lineno=lineno, col_offset=col_offset)
class Name(expr):
__slots__ = ('id', 'ctx')
def __init__(self,
id: identifier,
ctx: typing.Type['expr_context'],
*,
lineno: int = None,
col_offset: int = None):
self.id = id
self.ctx = ctx
super().__init__(lineno=lineno, col_offset=col_offset)
class List(expr):
__slots__ = ('elts', 'ctx')
def __init__(self,
elts: ExprSeq,
ctx: typing.Type['expr_context'],
*,
lineno: int = None,
col_offset: int = None):
self.elts = elts
self.ctx = ctx
super().__init__(lineno=lineno, col_offset=col_offset)
class Tuple(expr):
__slots__ = ('elts', 'ctx')
def __init__(self,
elts: ExprSeq,
ctx: typing.Type['expr_context'],
*,
lineno: int = None,
col_offset: int = None):
self.elts = elts
self.ctx = ctx
super().__init__(lineno=lineno, col_offset=col_offset)
class expr_context(Singleton, VyperAST):
pass
class Load(expr_context):
pass
class Store(expr_context):
pass
class Del(expr_context):
pass
class AugLoad(expr_context):
pass
class AugStore(expr_context):
pass
class Param(expr_context):
pass
class slice(VyperAST):
__slots__ = ()
class Slice(slice):
__slots__ = ('lower', 'upper', 'step')
def __init__(self,
lower: 'expr' = None,
upper: 'expr' = None,
step: 'expr' = None):
self.lower = lower
self.upper = upper
self.step = step
class ExtSlice(slice):
__slots__ = ('dims',)
def __init__(self,
dims: SliceSeq):
self.dims = dims
class Index(slice):
__slots__ = ('value',)
def __init__(self,
value: 'expr'):
self.value = value
class boolop(Singleton, VyperAST):
pass
class And(boolop):
pass
class Or(boolop):
pass
class operator(VyperAST):
pass
class Add(operator):
pass
class Sub(operator):
pass
class Mult(operator):
pass
class MatMult(operator):
pass
class Div(operator):
pass
class Mod(operator):
pass
class Pow(operator):
pass
class LShift(operator):
pass
class RShift(operator):
pass
class BitOr(operator):
pass
class BitXor(operator):
pass
class BitAnd(operator):
pass
class FloorDiv(operator):
pass
class unaryop(VyperAST):
pass
class Invert(unaryop):
pass
class Not(unaryop):
pass
class UAdd(unaryop):
pass
class USub(unaryop):
pass
class cmpop(VyperAST):
pass
class Eq(cmpop):
pass
class NotEq(cmpop):
pass
class Lt(cmpop):
pass
class LtE(cmpop):
pass
class Gt(cmpop):
pass
class GtE(cmpop):
pass
class Is(cmpop):
pass
class IsNot(cmpop):
pass
class In(cmpop):
pass
class NotIn(cmpop):
pass
class comprehension(VyperAST):
__slots__ = ('target', 'iter', 'ifs', 'is_async')
def __init__(self,
target: expr,
iter: expr,
ifs: ExprSeq,
is_async: bool):
self.target = target
self.iter = iter
self.ifs = ifs
self.is_async = is_async
class excepthandler(PosAttributes, VyperAST):
__slots__ = ()
class ExceptHandler(excepthandler):
__slots__ = ('body', 'type', 'name')
def __init__(self,
body: StmtSeq,
type: expr = None,
name: identifier = None,
*,
lineno: int = None,
col_offset: int = None):
self.body = body
self.type = type
self.name = name
super().__init__(lineno=lineno, col_offset=col_offset)
class arguments(VyperAST):
__slots__ = ('args', 'kwonlyargs', 'kw_defaults', 'defaults', 'vararg', 'kwarg')
def __init__(self,
args: ArgSeq,
kwonlyargs: ArgSeq,
kw_defaults: ExprSeq,
defaults: ExprSeq,
vararg: 'arg' = None,
kwarg: 'arg' = None):
self.args = args
self.kwonlyargs = kwonlyargs
self.kw_defaults = kw_defaults
self.defaults = defaults
self.vararg = vararg
self.kwarg = kwarg
class arg(PosAttributes, VyperAST):
__slots__ = ('arg', 'annotation')
def __init__(self,
arg: identifier,
annotation: expr = None,
*,
lineno: int = None,
col_offset: int = None):
self.arg = arg
self.annotation = annotation
super().__init__(lineno=lineno, col_offset=col_offset)
class keyword(VyperAST):
__slots__ = ('value', 'arg')
def __init__(self,
value: expr,
arg: identifier = None):
self.value = value
self.arg = arg
class alias(VyperAST):
__slots__ = ('name', 'asname')
def __init__(self,
name: identifier,
asname: identifier = None):
self.name = name
self.asname = asname
class withitem(VyperAST):
__slots__ = ('context_expr', 'optional_vars')
def __init__(self,
context_expr: expr,
optional_vars: expr = None):
self.context_expr = context_expr
self.optional_vars = optional_vars
|
[
"davesque@gmail.com"
] |
davesque@gmail.com
|
a8d03bbcf43bc5c07f17f932b44e7e0dafad0cd3
|
c57b0d390d7bc671060e56dc1fc61fe33943c4bf
|
/record.py
|
b6312406e3b0a20fec0c350b85880415f351c6af
|
[] |
no_license
|
Technically-Tony/SafeFolder
|
010395a201f105c592055135429dcf0bb3d1e38e
|
53ce435df454266f8c5d4434ac3f687319b7f178
|
refs/heads/master
| 2023-04-13T18:18:18.893484
| 2021-04-22T15:51:17
| 2021-04-22T15:51:17
| 360,559,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,204
|
py
|
import sqlite3
from hashlib import sha256
ADMIN_PASSWORD = "123456"
connect = input("Please key in your password?\n")
while connect != ADMIN_PASSWORD:
connect = input("Please key in your password?\n")
if connect == "q":
break
def create_password(pass_key, service, admin_pass):
    return sha256(admin_pass.encode('utf-8') + service.lower().encode('utf-8') + pass_key.encode('utf-8')).hexdigest()[
        :15]  # the 15 is the number of characters in the generated password
def get_hex_key(admin_pass, service):
return sha256(admin_pass.encode('utf-8') + service.lower().encode('utf-8')).hexdigest()
conn = sqlite3.connect('pass_manager.db')
def get_password(admin_pass, service):
secret_key = get_hex_key(admin_pass, service)
cursor = conn.execute("SELECT * FROM KEYS WHERE PASS_KEY=" + '"' + secret_key + '"')
pass_key = ""
for row in cursor:
pass_key = row[0]
return create_password(pass_key, service, admin_pass)
def add_password(service, admin_pass):
secret_key = get_hex_key(admin_pass, service)
command = 'INSERT INTO KEYS (PASS_KEY) VALUES (%s);' %('"' + secret_key + '"')
conn.execute(command)
conn.commit()
return create_password(secret_key, service, admin_pass)
if connect == ADMIN_PASSWORD:
try:
conn.execute('''CREATE TABLE KEYS
(PASS_KEY TEXT PRIMARY KEY NOT NULL);''')
print("Your safe has been created!\nWhat would you like to store in it today?")
except:
print("You have a safe, what would you like to store in it today?")
while True:
print('*' * 15)
print("COMMANDS:")
print("q = quit program")
print("sp = store password")
print("gp = get password")
print('*' * 15)
if input == "q":
break
if input == "sp":
service = input("What is the name of the service?\n")
print("\n" + service.capitalize() + " password created:\n" + add_password(service, ADMIN_PASSWORD))
if input == "gp":
service = input("What is the name of the service?\n")
print("\n" + service.capitalize() + " password:\n" + get_password(ADMIN_PASSWORD, service))
|
[
"tkagete@icloud.com"
] |
tkagete@icloud.com
|
c667a0adc433827a28b8642bbe65c0f86d1a64af
|
486820178701ecb337f72fd00cd2e281c1f3bbb2
|
/teuthology_master/virtualenv/bin/lss3
|
d697c0259f1b300fe60ca594252427be3b35e02a
|
[
"MIT"
] |
permissive
|
hgichon/anycloud-test
|
9e0161bc563a20bd048ecff57ad7bf72dcb1d420
|
0d4cd18d8b6bb4dcf1b59861fea21fefe6a2c922
|
refs/heads/master
| 2016-09-11T09:32:23.832032
| 2015-06-24T00:58:19
| 2015-06-24T00:58:19
| 35,654,249
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,487
|
#!/home/teuthworker/src/teuthology_master/virtualenv/bin/python
import boto
from boto.exception import S3ResponseError
from boto.s3.connection import OrdinaryCallingFormat
def sizeof_fmt(num):
for x in ['b ', 'KB', 'MB', 'GB', 'TB', 'XB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
return "%3.1f %s" % (num, x)
def list_bucket(b, prefix=None, marker=None):
"""List everything in a bucket"""
from boto.s3.prefix import Prefix
from boto.s3.key import Key
total = 0
if prefix:
if not prefix.endswith("/"):
prefix = prefix + "/"
query = b.list(prefix=prefix, delimiter="/", marker=marker)
print("%s" % prefix)
else:
query = b.list(delimiter="/", marker=marker)
num = 0
for k in query:
num += 1
mode = "-rwx---"
if isinstance(k, Prefix):
mode = "drwxr--"
size = 0
else:
size = k.size
for g in k.get_acl().acl.grants:
if g.id == None:
if g.permission == "READ":
mode = "-rwxr--"
elif g.permission == "FULL_CONTROL":
mode = "-rwxrwx"
if isinstance(k, Key):
print("%s\t%s\t%010s\t%s" % (mode, k.last_modified,
sizeof_fmt(size), k.name))
else:
#If it's not a Key object, it doesn't have a last_modified time, so
#print nothing instead
print("%s\t%s\t%010s\t%s" % (mode, ' ' * 24,
sizeof_fmt(size), k.name))
total += size
print ("=" * 80)
print ("\t\tTOTAL: \t%010s \t%i Files" % (sizeof_fmt(total), num))
def list_buckets(s3, display_tags=False):
"""List all the buckets"""
for b in s3.get_all_buckets():
print(b.name)
if display_tags:
try:
tags = b.get_tags()
for tag in tags[0]:
print(" %s:%s" % (tag.key, tag.value))
except S3ResponseError as e:
if e.status != 404:
raise
def main():
import optparse
import sys
usage = "usage: %prog [options] [BUCKET1] [BUCKET2]"
description = "List all S3 buckets OR list keys in the named buckets"
parser = optparse.OptionParser(description=description, usage=usage)
parser.add_option('-m', '--marker',
help='The S3 key where the listing starts after it.')
parser.add_option('-t', '--tags', action='store_true',
help='Display tags when listing all buckets.')
options, buckets = parser.parse_args()
marker = options.marker
if not buckets:
list_buckets(boto.connect_s3(), options.tags)
sys.exit(0)
if options.tags:
print("-t option only works for the overall bucket list")
sys.exit(1)
pairs = []
mixedCase = False
for name in buckets:
if "/" in name:
pairs.append(name.split("/", 1))
else:
pairs.append([name, None])
if pairs[-1][0].lower() != pairs[-1][0]:
mixedCase = True
if mixedCase:
s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())
else:
s3 = boto.connect_s3()
for name, prefix in pairs:
list_bucket(s3.get_bucket(name), prefix, marker=marker)
if __name__ == "__main__":
main()
|
[
"hgichon@gmail.com"
] |
hgichon@gmail.com
|
|
ebb6289f6a70902ad76e593b705064a119d35d6a
|
6727f6f02ef8bf1403f02b0fc11c98f364a621b5
|
/python_basic/bin/easy_install
|
34ce48240a93e7624ef98e31464dab8bd16a422e
|
[] |
no_license
|
doulos76/Python_Study
|
fdd73012ce9f1d61d1805e992a1c2893c5b77a45
|
cd978ec0f60fe16de575122f1dcf37bc5895aac6
|
refs/heads/master
| 2020-05-05T01:43:51.810526
| 2019-11-22T12:53:12
| 2019-11-22T12:53:12
| 179,612,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
#!/Users/minhoigoo/Documents/development/Python_Study/python_basic/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"inojyes@gmail.com"
] |
inojyes@gmail.com
|
|
924abbfe7df5d7cef4621a9d90e78d44bb72dcb0
|
a358ae526370c264d57e0a0f1cc68095c13674af
|
/Day 052/main.py
|
c201cfcdd0eb2fb9e63ed42be08bb9060950f56a
|
[] |
no_license
|
LintaoC/udemy_100DaysOfCode_python
|
ef2d1c792cd8d46f17481150df11da3d866cc5ce
|
a533662934655b8767530bc06d7f68081e5f54c5
|
refs/heads/main
| 2023-08-14T09:11:27.304476
| 2021-09-20T19:54:35
| 2021-09-20T19:54:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,844
|
py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException, ElementClickInterceptedException
import time
import os
SIMILAR_ACCOUNT = "elonofficiall"
USERNAME = "xxxx"
PASSWORD = "xxxx"
#FIREFOX_DRIVER_PATH = "/home/zellkoss/Programme/geckodriver"
GOOGLE_DRIVER_PATH = "C:\dev\chromedriver.exe"
class InstaFollower:
def __init__(self):
self.driver = webdriver.Chrome(executable_path=GOOGLE_DRIVER_PATH)
# self.driver = webdriver.Firefox(executable_path=FIREFOX_DRIVER_PATH)
def login(self):
self.driver.get("https://www.instagram.com/accounts/login/")
# Accept Cookies
cookies_button = self.driver.find_element_by_class_name("bIiDR")
cookies_button.click()
time.sleep(3)
# Login to Instagram
username_input = self.driver.find_element_by_name("username")
psw_input = self.driver.find_element_by_name("password")
username_input.send_keys(USERNAME)
psw_input.send_keys(PASSWORD)
psw_input.send_keys(Keys.ENTER)
time.sleep(4)
# Do not save info
not_saving_button = self.driver.find_element_by_class_name("y3zKF")
not_saving_button.click()
time.sleep(4)
# Do not turn on notification
notification_button = self.driver.find_element_by_class_name("HoLwm")
notification_button.click()
time.sleep(4)
def find_followers(self):
self.driver.get(f"https://www.instagram.com/{SIMILAR_ACCOUNT}/")
follower_button = self.driver.find_element_by_xpath('//*[@id="react-root"]/section/main/div/header/section/ul/li[2]/a')
follower_button.click()
time.sleep(3)
scroll_bar = self.driver.find_element_by_class_name("isgrP")
for n in range(10):
self.driver.execute_script("arguments[0].scrollTop = arguments[0].scrollHeight", scroll_bar)
time.sleep(1)
def follow(self):
# If I selected the buttons with the class y3zKF, only the people who are not followed will be listed !
list_followers = self.driver.find_elements_by_class_name("y3zKF")
# If I selected the buttons with the css selector all the buttons will be listed !
#list_followers = self.driver.find_elements_by_css_selector("li button")
for item in list_followers:
try:
item.click()
except ElementClickInterceptedException:
print("Already followed")
cancel_button = self.driver.find_element_by_class_name("HoLwm")
cancel_button.click()
time.sleep(2)
insta = InstaFollower()
insta.login()
insta.find_followers()
insta.follow()
|
[
"noreply@github.com"
] |
LintaoC.noreply@github.com
|
d8f338e99ff87f248eb0fe86f28f3acc7fd4ed42
|
da6a4c0189ceb34d2503133548842eff1c18264e
|
/games/coop_game.py
|
333071e3d752d9eb171c055bcd6528cddc82fda0
|
[
"Apache-2.0"
] |
permissive
|
idoheinemann/Leverage-Graph
|
5572dea6438dd3496884e1e18e317db2e230949a
|
39f8c04e5b5f3a8af969f2f9d83ddb8c9ce3c028
|
refs/heads/main
| 2023-08-11T12:53:55.037503
| 2021-09-27T09:39:14
| 2021-09-27T09:39:14
| 410,688,015
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
import abc
from _types import Coalition, Value, Player, Payoff
from sympy.utilities.iterables import multiset_permutations
import math
import numpy as np
from tools import normalize_payoff
class CoopGame(abc.ABC):
def __init__(self, players_amount: int):
self.grand_coalition = set(range(players_amount))
self.players_amount = players_amount
@abc.abstractmethod
def value(self, coalition: Coalition) -> Value:
pass
def added_value(self, coalition: Coalition, player: Player) -> Value:
return self.value(coalition | {player}) - self.value(coalition)
def shapely_values(self, coalition: Coalition) -> Payoff:
payoffs = np.zeros(self.players_amount)
for perm in multiset_permutations(coalition):
temp_coalition = set()
for p in perm:
payoffs[p] += self.added_value(temp_coalition, p)
temp_coalition.add(p)
combs = math.factorial(len(coalition))
return payoffs / combs
def shapely_normal(self, coalition: Coalition) -> Payoff:
return normalize_payoff(self.shapely_values(coalition))
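# Minimal usage sketch (hypothetical subclass, not part of the original module);
# "shapely" here is this module's spelling of Shapley values.
# class GloveGame(CoopGame):
#     def value(self, coalition):
#         return 1 if {0, 1} <= set(coalition) or {0, 2} <= set(coalition) else 0
# GloveGame(3).shapely_values({0, 1, 2})  # -> approximately [0.667, 0.167, 0.167]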
|
[
"idohaineman@gmail.com"
] |
idohaineman@gmail.com
|
a4addd7c329bcb221b029d23e293ee477c5a4736
|
b2f02d8716b2021cc4a7f299a52aadadb7d1a07a
|
/Dns.py
|
75b3bf6ab0e3bc27957da98e89b89ff277649430
|
[] |
no_license
|
gostkin/networks-dns-hw
|
47badb4d5a98c665f223a181b87269cd1d54ef45
|
247d5c7d0871fc1f737f4f1b0f97cf8abd2ac78f
|
refs/heads/master
| 2023-05-29T09:46:58.851840
| 2021-03-28T16:28:25
| 2021-03-28T16:28:25
| 352,379,507
| 0
| 0
| null | 2023-05-03T07:47:03
| 2021-03-28T16:27:13
|
Python
|
UTF-8
|
Python
| false
| false
| 3,119
|
py
|
import typing as tp
import socket
from DnsResponse import DnsResponse
from IpRecord import IpRecord
from Trace import Trace
ROOT_SERVERS_DNS = {
"a.root-servers.net": '198.41.0.4',
"b.root-servers.net": '199.9.14.201',
"c.root-servers.net": '192.33.4.12',
"d.root-servers.net": '199.7.91.13',
"e.root-servers.net": '192.203.230.10',
"f.root-servers.net": '192.5.5.241',
"g.root-servers.net": '192.112.36.4',
"h.root-servers.net": '198.97.190.53',
"i.root-servers.net": '192.36.148.17',
"j.root-servers.net": '192.58.128.30',
"k.root-servers.net": '193.0.14.129',
"l.root-servers.net": '199.7.83.42',
"m.root-servers.net": '202.12.27.33',
}
def find_recursive(
domain: str,
dns_servers: tp.Dict[str, str],
trace: Trace
) -> tp.Optional[IpRecord]:
for server_domain, host in dns_servers.items():
trace.add(f"{host} {server_domain}")
response = create_and_send_request(domain, host)
if not response.request_success or not response.parsed_success:
continue
if response.aa:
return response.get_a_ip_record()
domains, servers = response.get_domains_and_servers()
if len(servers) == 0 and len(domains) != 0:
new_domain = domains[0]
result = find_recursive(new_domain, ROOT_SERVERS_DNS, trace)
if result:
dns_servers = {new_domain: ip for _, ip in result.ips}
return find_recursive(domain, dns_servers, trace)
else:
return find_recursive(domain, servers, trace)
return None
def create_and_send_request(domain: str, ip: str) -> DnsResponse:
request = create_request(domain=domain, request_id=228)
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.sendto(request, (ip, 53))
response = DnsResponse(s.recvfrom(4096)[0])
except Exception as e:
print(e)
return DnsResponse()
# print(vars(response))
# for res in response.a_records:
# print(vars(res))
return response
def create_request(domain: str, request_id: int) -> bytes:
parameters = bytearray()
parameters.extend(request_id.to_bytes(2, byteorder='big', signed=False))
zero = 0
parameters.extend(zero.to_bytes(2, byteorder='big', signed=False))
questions = 1
parameters.extend(questions.to_bytes(2, byteorder='big', signed=False))
parameters.extend(zero.to_bytes(6, byteorder='big', signed=False))
parts = domain.split(".")
if len(parts[-1]) == 0:
parts.pop()
for part in parts:
part_len = len(part)
if part_len > 255:
raise OverflowError
parameters.extend(part_len.to_bytes(1, byteorder='big', signed=False))
parameters.extend(bytes(part, encoding="ascii"))
parameters.extend(zero.to_bytes(1, byteorder='big', signed=False))
q_type = 1
parameters.extend(q_type.to_bytes(2, byteorder='big', signed=False))
q_class = 1
parameters.extend(q_class.to_bytes(2, byteorder='big', signed=False))
return bytes(parameters)
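# Minimal usage sketch (illustrative; assumes Trace() needs no constructor arguments):
# trace = Trace()
# record = find_recursive("example.com", ROOT_SERVERS_DNS, trace)
# if record:
#     print(record.ips)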
|
[
"gostkin@chatfuel.com"
] |
gostkin@chatfuel.com
|
57ce9340f31634e6e0a201a76d13ddb85e094906
|
4ba474fe4deba9ccad2f82fac97031720650ccf4
|
/blog/models.py
|
44efded4068e1947a3f1da17ade87deb631242b2
|
[] |
no_license
|
Jimmy9507/rhinoceros
|
0e00dbbdb787412835b57a250e529289fe22a46e
|
eb913a0e71bde8e11cf4789863b7fee4043a384d
|
refs/heads/master
| 2020-04-12T21:38:34.663966
| 2019-01-24T23:11:55
| 2019-01-24T23:11:55
| 162,767,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
from django.db import models
# Create your models here.
class Article(models.Model):
    # title
    title=models.CharField(max_length=32,default='Title')
    # content
content=models.TextField(null=True)
pub_time=models.DateTimeField(null=True )
def __unicode__(self):
return self.title
|
[
"406403730@qq.com"
] |
406403730@qq.com
|
dd093afdea197edc3af544cab2e6bbde1fa450d6
|
93037409a617094334b68d2d93340a8c4304e8a5
|
/cam_tf_alignment/utils/data_utils.py
|
064eb9f4b9012182248e91007fb6981d03f0ec11
|
[] |
no_license
|
DCSaunders/cam-tf-seq2seq
|
ff4fab5f66a9ade7a91614da08d2fe912b46465e
|
b4a7d54562196105c9e163ed6ca2e0834d464821
|
refs/heads/master
| 2021-01-21T12:50:17.301084
| 2019-05-08T13:14:58
| 2019-05-08T13:14:58
| 102,101,143
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,109
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for downloading data from WMT, tokenizing, vocabularies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import tarfile
from six.moves import urllib
import tensorflow as tf
from tensorflow.python.platform import gfile
import logging
# Special vocabulary symbols - we always put them at the start.
_PAD = b"_PAD"
_GO = b"_GO"
_EOS = b"_EOS"
_UNK = b"_UNK"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
# Regular expressions used to tokenize.
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d")
# URLs for WMT data.
_WMT_ENFR_TRAIN_URL = "http://www.statmt.org/wmt10/training-giga-fren.tar"
_WMT_ENFR_DEV_URL = "http://www.statmt.org/wmt15/dev-v2.tgz"
def no_pad_symbol():
global PAD_ID
global UNK_ID
UNK_ID = 0
PAD_ID = -1
def maybe_download(directory, filename, url):
"""Download filename from url unless it's already in directory."""
if not os.path.exists(directory):
print("Creating directory %s" % directory)
os.mkdir(directory)
filepath = os.path.join(directory, filename)
if not os.path.exists(filepath):
print("Downloading %s to %s" % (url, filepath))
filepath, _ = urllib.request.urlretrieve(url, filepath)
statinfo = os.stat(filepath)
print("Succesfully downloaded", filename, statinfo.st_size, "bytes")
return filepath
def gunzip_file(gz_path, new_path):
"""Unzips from gz_path into new_path."""
print("Unpacking %s to %s" % (gz_path, new_path))
with gzip.open(gz_path, "rb") as gz_file:
with open(new_path, "wb") as new_file:
for line in gz_file:
new_file.write(line)
def get_wmt_enfr_train_set(directory):
"""Download the WMT en-fr training corpus to directory unless it's there."""
train_path = os.path.join(directory, "giga-fren.release2.fixed")
if not (gfile.Exists(train_path +".fr") and gfile.Exists(train_path +".en")):
corpus_file = maybe_download(directory, "training-giga-fren.tar",
_WMT_ENFR_TRAIN_URL)
print("Extracting tar file %s" % corpus_file)
with tarfile.open(corpus_file, "r") as corpus_tar:
corpus_tar.extractall(directory)
gunzip_file(train_path + ".fr.gz", train_path + ".fr")
gunzip_file(train_path + ".en.gz", train_path + ".en")
return train_path
def get_wmt_enfr_dev_set(directory):
"""Download the WMT en-fr training corpus to directory unless it's there."""
dev_name = "newstest2013"
dev_path = os.path.join(directory, dev_name)
if not (gfile.Exists(dev_path + ".fr") and gfile.Exists(dev_path + ".en")):
dev_file = maybe_download(directory, "dev-v2.tgz", _WMT_ENFR_DEV_URL)
print("Extracting tgz file %s" % dev_file)
with tarfile.open(dev_file, "r:gz") as dev_tar:
fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr")
en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en")
fr_dev_file.name = dev_name + ".fr" # Extract without "dev/" prefix.
en_dev_file.name = dev_name + ".en"
dev_tar.extract(fr_dev_file, directory)
dev_tar.extract(en_dev_file, directory)
return dev_path
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens."""
words = []
for space_separated_fragment in sentence.strip().split():
words.extend(_WORD_SPLIT.split(space_separated_fragment))
return [w for w in words if w]
def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,
tokenizer=None, normalize_digits=True):
"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
We write it to vocabulary_path in a one-token-per-line format, so that later
token in the first line gets id=0, second line gets id=1, and so on.
Args:
vocabulary_path: path where the vocabulary will be created.
data_path: data file that will be used to create vocabulary.
max_vocabulary_size: limit on the size of the created vocabulary.
tokenizer: a function to use to tokenize each data sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not gfile.Exists(vocabulary_path):
print("Creating vocabulary %s from data %s" % (vocabulary_path, data_path))
vocab = {}
with gfile.GFile(data_path, mode="rb") as f:
counter = 0
for line in f:
counter += 1
if counter % 100000 == 0:
print(" processing line %d" % counter)
line = tf.compat.as_bytes(line)
tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)
for w in tokens:
word = _DIGIT_RE.sub(b"0", w) if normalize_digits else w
if word in vocab:
vocab[word] += 1
else:
vocab[word] = 1
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
if len(vocab_list) > max_vocabulary_size:
vocab_list = vocab_list[:max_vocabulary_size]
with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
for w in vocab_list:
vocab_file.write(w + b"\n")
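# Hedged usage sketch (not part of the original module):
#   create_vocabulary("vocab10.en", "corpus.en", 10)
# would write a plain-text file with one token per line, starting with the
# _START_VOCAB entries (ids 0, 1, ...) followed by the most frequent corpus tokens,
# truncated to 10 lines in total.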
def initialize_vocabulary(vocabulary_path):
"""Initialize vocabulary from file.
We assume the vocabulary is stored one-item-per-line, so a file:
dog
cat
will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
also return the reversed-vocabulary ["dog", "cat"].
Args:
vocabulary_path: path to the file containing the vocabulary.
Returns:
a pair: the vocabulary (a dictionary mapping string to integers), and
the reversed vocabulary (a list, which reverses the vocabulary mapping).
Raises:
ValueError: if the provided vocabulary_path does not exist.
"""
if gfile.Exists(vocabulary_path):
rev_vocab = []
with gfile.GFile(vocabulary_path, mode="rb") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [line.strip() for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
raise ValueError("Vocabulary file %s not found.", vocabulary_path)
def sentence_to_token_ids(sentence, vocabulary,
tokenizer=None, normalize_digits=True):
"""Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Args:
sentence: the sentence in bytes format to convert to token-ids.
vocabulary: a dictionary mapping tokens to integers.
tokenizer: a function to use to tokenize each sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
Returns:
a list of integers, the token-ids for the sentence.
"""
if tokenizer:
words = tokenizer(sentence)
else:
words = basic_tokenizer(sentence)
if not normalize_digits:
return [vocabulary.get(w, UNK_ID) for w in words]
# Normalize digits by 0 before looking words up in the vocabulary.
return [vocabulary.get(_DIGIT_RE.sub(b"0", w), UNK_ID) for w in words]
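# Hedged example (assuming _DIGIT_RE substitutes individual digits, as in the stock
# TensorFlow data_utils): with normalize_digits=True the sentence b"room 1234" is looked
# up as the tokens b"room" and b"0000", so all 4-digit numbers share one id, and any
# token missing from the vocabulary maps to UNK_ID.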
def data_to_token_ids(data_path, target_path, vocabulary_path,
tokenizer=None, normalize_digits=True):
"""Tokenize data file and turn into token-ids using given vocabulary file.
This function loads data line-by-line from data_path, calls the above
sentence_to_token_ids, and saves the result to target_path. See comment
for sentence_to_token_ids on the details of token-ids format.
Args:
data_path: path to the data file in one-sentence-per-line format.
target_path: path where the file with token-ids will be created.
vocabulary_path: path to the vocabulary file.
tokenizer: a function to use to tokenize each sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not gfile.Exists(target_path):
print("Tokenizing data in %s" % data_path)
vocab, _ = initialize_vocabulary(vocabulary_path)
with gfile.GFile(data_path, mode="rb") as data_file:
with gfile.GFile(target_path, mode="w") as tokens_file:
counter = 0
for line in data_file:
counter += 1
if counter % 100000 == 0:
print(" tokenizing line %d" % counter)
token_ids = sentence_to_token_ids(line, vocab, tokenizer,
normalize_digits)
tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n")
def prepare_wmt_data(data_dir, en_vocabulary_size, fr_vocabulary_size, tokenizer=None):
"""Get WMT data into data_dir, create vocabularies and tokenize data.
Args:
data_dir: directory in which the data sets will be stored.
en_vocabulary_size: size of the English vocabulary to create and use.
fr_vocabulary_size: size of the French vocabulary to create and use.
tokenizer: a function to use to tokenize each data sentence;
if None, basic_tokenizer will be used.
Returns:
A tuple of 6 elements:
(1) path to the token-ids for English training data-set,
(2) path to the token-ids for French training data-set,
(3) path to the token-ids for English development data-set,
(4) path to the token-ids for French development data-set,
(5) path to the English vocabulary file,
(6) path to the French vocabulary file.
"""
# Get wmt data to the specified directory.
train_path = get_wmt_enfr_train_set(data_dir)
dev_path = get_wmt_enfr_dev_set(data_dir)
# Create vocabularies of the appropriate sizes.
fr_vocab_path = os.path.join(data_dir, "vocab%d.fr" % fr_vocabulary_size)
en_vocab_path = os.path.join(data_dir, "vocab%d.en" % en_vocabulary_size)
create_vocabulary(fr_vocab_path, train_path + ".fr", fr_vocabulary_size, tokenizer)
create_vocabulary(en_vocab_path, train_path + ".en", en_vocabulary_size, tokenizer)
# Create token ids for the training data.
fr_train_ids_path = train_path + (".ids%d.fr" % fr_vocabulary_size)
en_train_ids_path = train_path + (".ids%d.en" % en_vocabulary_size)
data_to_token_ids(train_path + ".fr", fr_train_ids_path, fr_vocab_path, tokenizer)
data_to_token_ids(train_path + ".en", en_train_ids_path, en_vocab_path, tokenizer)
# Create token ids for the development data.
fr_dev_ids_path = dev_path + (".ids%d.fr" % fr_vocabulary_size)
en_dev_ids_path = dev_path + (".ids%d.en" % en_vocabulary_size)
data_to_token_ids(dev_path + ".fr", fr_dev_ids_path, fr_vocab_path, tokenizer)
data_to_token_ids(dev_path + ".en", en_dev_ids_path, en_vocab_path, tokenizer)
return (en_train_ids_path, fr_train_ids_path,
en_dev_ids_path, fr_dev_ids_path,
en_vocab_path, fr_vocab_path)
def get_training_data(config):
if config['use_default_data']:
"""Train a en->fr translation model using WMT data."""
logging.info("Preparing data in dir=%s" % config['data_dir'])
src_train, trg_train, src_dev, trg_dev, _, _ = prepare_wmt_data(
config['data_dir'], config['src_vocab_size'], config['trg_vocab_size'], tokenizer=None)
elif config['save_npz']:
# do not need data
return None, None, None, None
else:
if config['train_src_idx'] != None and config['train_trg_idx'] != None and \
config['dev_src_idx'] != None and config['dev_trg_idx'] != None:
logging.info("Get indexed training and dev data")
src_train, trg_train, src_dev, trg_dev = config['train_src_idx'], config['train_trg_idx'], \
config['dev_src_idx'], config['dev_trg_idx']
elif config['train_src'] != None and config['train_trg'] != None and \
config['dev_src'] != None and config['dev_trg'] != None:
logging.info("Index tokenized training and dev data and write to dir=%s" % config['data_dir'])
src_train, trg_train, src_dev, trg_dev, _, _ = prepare_data(
config['data_dir'], config['src_vocab_size'], config['trg_vocab_size'],
config['train_src'], config['train_trg'], config['dev_src'], config['dev_trg'],
config['src_lang'], config['trg_lang'])
else:
logging.error("You have to provide either tokenized or integer-mapped training and dev data usinig " \
"--train_src, --train_trg, --dev_src, --dev_trg or --train_src_idx, --train_trg_idx, --dev_src_idx, --dev_trg_idx")
exit(1)
return src_train, trg_train, src_dev, trg_dev
def prepare_data(data_dir, src_vocabulary_size, trg_vocabulary_size,
train_src, train_trg, dev_src, dev_trg, src_lang, trg_lang):
"""Create vocabularies and index data, data assumed to be tokenized.
Args:
data_dir: directory in which the data will be stored.
src_vocabulary_size: size of the source vocabulary to create and use.
trg_vocabulary_size: size of the target vocabulary to create and use.
train_src: Tokenized source training data
train_trg: Tokenized target training data
dev_src: Tokenized source dev data
dev_trg: Tokenized target dev data
src_lang: Source language
trg_lang: Target language
Returns:
A tuple of 6 elements:
(1) path to the token-ids for source training data-set,
(2) path to the token-ids for target training data-set,
(3) path to the token-ids for source development data-set,
(4) path to the token-ids for target development data-set,
(5) path to the source vocabulary file,
(6) path to the target vocabulary file.
"""
# Output paths
train_path = os.path.join(data_dir, "train")
dev_path = os.path.join(data_dir, "dev")
# Create vocabularies of the appropriate sizes.
src_vocab_path = os.path.join(data_dir, "vocab%d" % src_vocabulary_size + "." + src_lang)
trg_vocab_path = os.path.join(data_dir, "vocab%d" % trg_vocabulary_size + "." + trg_lang)
create_vocabulary(src_vocab_path, train_src, src_vocabulary_size)
create_vocabulary(trg_vocab_path, train_trg, trg_vocabulary_size)
# Create token ids for the training data.
src_train_ids_path = train_path + (".ids%d" % src_vocabulary_size + "." + src_lang)
trg_train_ids_path = train_path + (".ids%d" % trg_vocabulary_size + "." + trg_lang)
data_to_token_ids(train_path + "." + src_lang, src_train_ids_path, src_vocab_path)
data_to_token_ids(train_path + "." + trg_lang, trg_train_ids_path, trg_vocab_path)
# Create token ids for the development data.
src_dev_ids_path = dev_path + (".ids%d" % src_vocabulary_size + "." + src_lang)
trg_dev_ids_path = dev_path + (".ids%d" % trg_vocabulary_size + "." + trg_lang)
data_to_token_ids(dev_path + "." + src_lang, src_dev_ids_path, src_vocab_path)
data_to_token_ids(dev_path + "." + trg_lang, trg_dev_ids_path, trg_vocab_path)
return (src_train_ids_path, trg_train_ids_path,
src_dev_ids_path, trg_dev_ids_path,
src_vocab_path, trg_vocab_path)
def read_data(buckets, source_path, target_path, max_size=None, src_vcb_size=None, trg_vcb_size=None, add_src_eos=True, align_file=None, align_delimits=None):
"""Read data from source and target files and put into buckets.
Args:
source_path: path to the files with token-ids for the source language.
target_path: path to the file with token-ids for the target language;
it must be aligned with the source file: n-th line contains the desired
output for n-th line from the source_path.
max_size: maximum number of lines to read, all other will be ignored;
if 0 or None, data files will be read completely (no limit).
Returns:
data_set: a list of length len(_buckets); data_set[n] contains a list of
(source, target) pairs read from the provided data files that fit
into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
len(target) < _buckets[n][1]; source and target are lists of token-ids.
"""
if add_src_eos:
logging.info("Add EOS symbol to all source sentences")
if src_vcb_size:
logging.info("Replace OOV words with id={} for src_vocab_size={}".format(UNK_ID, src_vcb_size))
if trg_vcb_size:
logging.info("Replace OOV words with id={} for trg_vocab_size={}".format(UNK_ID, trg_vcb_size))
data_set = [[] for _ in buckets]
with tf.gfile.GFile(source_path, mode="r") as source_file:
with tf.gfile.GFile(target_path, mode="r") as target_file:
source, target = source_file.readline(), target_file.readline()
counter = 0
while source and target and (not max_size or counter < max_size):
counter += 1
if counter % 100000 == 0:
logging.info(" reading data line %d" % counter)
source_ids = [int(x) for x in source.split()]
if add_src_eos:
source_ids.append(EOS_ID)
target_ids = [int(x) for x in target.split()]
target_ids.append(EOS_ID)
if src_vcb_size:
# Replace source OOV words with unk (in case this has not been done on the source side)
source_ids = [ wid if wid < src_vcb_size else UNK_ID for wid in source_ids ]
if trg_vcb_size:
# Replace target OOV words with unk (in case this has not been done on the target side)
target_ids = [ wid if wid < trg_vcb_size else UNK_ID for wid in target_ids ]
alignment = None
if align_file is not None:
align_line = align_file.readline()
val_delimit, triple_delimit = align_delimits
alignment = []
for triple in align_line.split(triple_delimit):
for val in triple.strip().split(val_delimit):
alignment.append(float(val.strip()))
for bucket_id, (source_size, target_size) in enumerate(buckets):
# Target will get additional GO symbol
if len(source_ids) <= source_size and len(target_ids) < target_size:
data_set[bucket_id].append([source_ids, target_ids])
if alignment:
data_set[bucket_id][-1].append(alignment)
break # skips training example if it fits in no bucket
source, target = source_file.readline(), target_file.readline()
return data_set
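# Hedged illustration (bucket sizes below are examples only, not from this project):
#   buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
#   data_set = read_data(buckets, "train.ids.en", "train.ids.fr", max_size=100000)
# A pair with 8 source ids and 12 target ids (plus the appended EOS symbols) is too
# long for the (5, 10) bucket and is stored in data_set[1], the (10, 15) bucket;
# pairs that fit no bucket are silently dropped.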
|
[
"ds636@cam.ac.uk"
] |
ds636@cam.ac.uk
|
a6ba0e0999eee8ffcb1eba0dc7edeb8373fa5a61
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_noisy1660.py
|
d3db3c4cd504b1ed00c9774adc0fa6e92a169529
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,553
|
py
|
# qubit number=5
# total number=59
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
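    # Worked example (comment added for clarity): for n = 5 qubits this gives
    # repeat = floor(sqrt(32) * pi / 4) = floor(4.44...) = 4 Grover iterations,
    # the usual ~(pi/4)*sqrt(N) estimate for a single marked item.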
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=31
prog.cz(input_qubit[1],input_qubit[0]) # number=32
prog.h(input_qubit[1]) # number=52
prog.h(input_qubit[0]) # number=33
prog.h(input_qubit[1]) # number=44
prog.cz(input_qubit[0],input_qubit[1]) # number=45
prog.h(input_qubit[1]) # number=46
prog.h(input_qubit[1]) # number=56
prog.cz(input_qubit[0],input_qubit[1]) # number=57
prog.h(input_qubit[1]) # number=58
prog.x(input_qubit[1]) # number=54
prog.cx(input_qubit[0],input_qubit[1]) # number=55
prog.h(input_qubit[1]) # number=48
prog.cz(input_qubit[0],input_qubit[1]) # number=49
prog.h(input_qubit[1]) # number=50
prog.x(input_qubit[0]) # number=26
prog.cx(input_qubit[1],input_qubit[0]) # number=27
prog.h(input_qubit[1]) # number=37
prog.cz(input_qubit[0],input_qubit[1]) # number=38
prog.h(input_qubit[1]) # number=39
prog.x(input_qubit[1]) # number=35
prog.cx(input_qubit[0],input_qubit[1]) # number=36
prog.x(input_qubit[2]) # number=11
prog.x(input_qubit[3]) # number=12
prog.cx(input_qubit[3],input_qubit[2]) # number=43
prog.cx(input_qubit[3],input_qubit[2]) # number=47
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.cx(input_qubit[0],input_qubit[1]) # number=22
prog.x(input_qubit[1]) # number=23
prog.cx(input_qubit[0],input_qubit[1]) # number=24
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[1]) # number=29
prog.y(input_qubit[4]) # number=28
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[3]) # number=51
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = FakeVigo()
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy1660.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
fa37545dd00bea660a0e4bb47a3b7a8064cedc48
|
99c159a19be8a206ab7b118e98b7b2f44f57bceb
|
/test/functional/wallet_scriptaddress2.py
|
c8fe4bf5b3fc509154565ff4f96607b69a24be8f
|
[
"MIT"
] |
permissive
|
hypothesize-coin/hypothesize
|
69c299b17d78d2e173eb7be16554ebe8d8d5a736
|
e69e0f2a45ec957783b6c58bebf4028e17c8e7d8
|
refs/heads/master
| 2020-03-30T17:46:16.971179
| 2018-11-09T10:07:53
| 2018-11-09T10:07:53
| 151,400,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,924
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test new Hypothesize multisig prefix functionality.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import decimal
class ScriptAddress2Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.setup_clean_chain = False
self.extra_args = [['-addresstype=legacy'], [], []]
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.sync_all()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some blocks
self.nodes[1].generate(101)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 101):
            raise AssertionError("Failed to mine 101 blocks")
addr = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
multisig_addr = self.nodes[0].addmultisigaddress(2, [addr, addr2], "multisigaccount")['address']
assert_equal(multisig_addr[0], 'Q')
# Send to a new multisig address
txid = self.nodes[1].sendtoaddress(multisig_addr, 1)
block = self.nodes[1].generate(3)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr in dest_addrs)
# Spend from the new multisig address
addr3 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendfrom("multisigaccount", addr3, 0.8)
block = self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("multisigaccount", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr3)
# Send to an old multisig address. The api addmultisigaddress
# can only generate a new address so we manually compute
# multisig_addr_old beforehand using an old client.
priv_keys = ["cU7eeLPKzXeKMeZvnEJhvZZ3tLqVF3XGeo1BbM8dnbmV7pP3Qg89",
"cTw7mRhSvTfzqCt6MFgBoTBqwBpYu2rWugisXcwjv4cAASh3iqPt"]
addrs = ["mj6gNGRXPXrD69R5ApjcsDerZGrYKSfb6v",
"mqET4JA3L7P7FoUjUP3F6m6YsLpCkyzzou"]
self.nodes[0].importprivkey(priv_keys[0])
self.nodes[0].importprivkey(priv_keys[1])
multisig_addr_new = self.nodes[0].addmultisigaddress(2, addrs, "multisigaccount2")['address']
assert_equal(multisig_addr_new, 'QZ974ZrPrmqMmm1PSVp4m8YEgo3bCQZBbe')
multisig_addr_old = "2N5nLwYz9qfnGdaFLpPn3gS6oYQbmLTWPjq"
## Let's send to the old address. We can then find it in the
## new address with the new client. So basically the old
## address and the new one are the same thing.
txid = self.nodes[1].sendtoaddress(multisig_addr_old, 1)
block = self.nodes[1].generate(1)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr_new in dest_addrs)
assert(multisig_addr_old not in dest_addrs)
# Spend from the new multisig address
addr4 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendfrom("multisigaccount2", addr4, 0.8)
block = self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("multisigaccount2", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr4)
if __name__ == '__main__':
ScriptAddress2Test().main()
|
[
"denys@thegcccoin.com"
] |
denys@thegcccoin.com
|
1faf96d3804d561f0d6c470ac72a3a3adb19f2a4
|
ee974d693ca4c4156121f8cb385328b52eaac07c
|
/env/lib/python3.6/site-packages/werkzeug/contrib/__init__.py
|
e3fce34e0fdb049b8508969345b2adda40d8a861
|
[] |
no_license
|
ngonhi/Attendance_Check_System_with_Face_Recognition
|
f4531cc4dee565d0e45c02217f73f3eda412b414
|
92ff88cbc0c740ad48e149033efd38137c9be88d
|
refs/heads/main
| 2023-03-12T07:03:25.302649
| 2021-02-26T15:37:33
| 2021-02-26T15:37:33
| 341,493,686
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:fc6588df0d88c081452380af988e88ddda126f5a77c22ebd4aa8efdd62636da3
size 569
|
[
"Nqk180998!"
] |
Nqk180998!
|
0b8c1b384a192f26f71db6bdaf59854d0cab4854
|
189d36a9798ae2459d8605267021e76aa4f2640f
|
/testcases/rhsm/rhsm_level_2/tc_ID536782_check_stackable_ability_of_product.py
|
075507beb8e545bef3849c21a3baddb05a913a60
|
[] |
no_license
|
shihliu/entitlement-ci
|
967a0e51de39763bbc077844d2266203223c48a2
|
313e6d03eb7448387767f5d9f3b376d120c4d2f0
|
refs/heads/master
| 2021-01-16T21:51:56.426491
| 2017-12-21T07:46:58
| 2017-12-21T07:46:58
| 38,414,026
| 0
| 0
| null | 2015-07-13T08:47:23
| 2015-07-02T05:53:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,250
|
py
|
from utils import *
from testcases.rhsm.rhsmbase import RHSMBase
from utils.exception.failexception import FailException
class tc_ID536782_check_stackable_ability_of_product(RHSMBase):
def test_run(self):
case_name = self.__class__.__name__
logger.info("========== Begin of Running Test Case %s ==========" % case_name)
if self.test_server == "STAGE":
try:
# Register and auto-attach
username = self.get_rhsm_cons("username_socket")
password = self.get_rhsm_cons("password_socket")
self.sub_register(username, password)
facts_value = "echo \'{\"virt.is_guest\": \"True\"}' > /etc/rhsm/facts/custom.facts;subscription-manager facts --update"
self.set_facts(facts_value)
autosubprod = self.get_rhsm_cons("autosubprod")
self.sub_autosubscribe(autosubprod)
# Get entitlement certs
cmd = 'ls /etc/pki/entitlement/ | grep -v key'
(ret, output) = self.runcmd(cmd, "get ent cert")
if ret == 0:
entcert = output.strip().split('\n')[0]
logger.info("It's successful to get entitlement cert")
else:
raise FailException("Test Failed - Failed to get entitlement cert")
# check stacking id in ent cert
cmd = "rct cat-cert /etc/pki/entitlement/%s | grep tacking"%entcert
(ret, output) = self.runcmd(cmd, "check stacking id")
if ret == 0 and 'Stacking ID:' in output:
logger.info("It's successful to check stacking id")
else:
raise FailException("Test Failed - Failed to check stacking id")
self.assert_(True, case_name)
            except Exception as e:
logger.error("Test Failed - ERROR Message:" + str(e))
self.assert_(False, case_name)
finally:
self.remove_facts_value()
self.restore_environment()
logger.info("========== End of Running Test Case: %s ==========" % case_name)
if __name__ == "__main__":
unittest.main()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
c35ac964893d13ea8f1ba0eaf4ee0629c8976d60
|
24980684279c29269e62a304e9be49dbda091736
|
/Prime.py
|
ebc4bb41c59d3143688e7b299509cb0ff00b4fd1
|
[] |
no_license
|
ManiNTR/python
|
98dec01afbbabd38e93e7970a575a347e5a1a749
|
559053b82b3f7b944849b21e29787122418f83b8
|
refs/heads/master
| 2020-06-29T10:58:23.795841
| 2019-08-13T14:59:25
| 2019-08-13T14:59:25
| 200,515,938
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
# Program to check whether a number is prime
num=int(input("Enter the number:"))
prime=True
if num<2:
    prime=False  # 0 and 1 are not prime
for i in range(2,num):
    if num%i==0:
        prime=False
        break  # a divisor was found, no need to keep checking
if(prime==True):
    print("The given number is prime")
else:
    print("The given number is not a prime")
|
[
"noreply@github.com"
] |
ManiNTR.noreply@github.com
|
5b1e5fe965dc759005fb7eb065fad20dffb8b619
|
40a04920dea94179878e25a0804ce4a6b459aca9
|
/PheonixTS/AdvancedPython/PythonAdv if/Student Files/testing-debugging/Exercises/test_string_functions.py
|
87a66cd4edad6b6fd7b46a2a8d0a6dd326670c23
|
[] |
no_license
|
Kryptonian92/pythonAssignments
|
5c7dd9140d07c94b19816ebbcaba579338479136
|
06355e0481307a77e5acd53b86b1fc144e98302a
|
refs/heads/master
| 2021-01-20T15:50:46.355224
| 2017-10-28T19:02:52
| 2017-10-28T19:02:52
| 90,771,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 853
|
py
|
import unittest
from string_functions import *
class TestStringFunctions(unittest.TestCase):
def test_prepend(self):
self.assertEqual(prepend('bar','foo'), 'foobar')
def test_append(self):
self.assertEqual(append('bar','foo'), 'barfoo')
def test_insert(self):
self.assertEqual(insert('wetor','buca',2), 'webucator')
def test_remove_non_ascii_letters(self):
test = remove_non_ascii_letters('HO g+)JH*cM_EQZ<JzG')
self.assertEqual(test,'HOgJHcMEQZJzG')
def test_discover_email(self):
test = discover_email('bill-at-example-dot-com')
self.assertEqual(test, 'bill@example.com')
def test_inits(self):
self.assertEqual(inits('Monty Hall Python'), 'M.H.P.')
if __name__ == '__main__':
unittest.main()
|
[
"ausar_mcgruder@yahoo.com"
] |
ausar_mcgruder@yahoo.com
|
a50d0be5c5aaa0943dd230d84b252248b5367e3e
|
db73f626b4b4fdb9095a5f9ccaa2091a79caf06b
|
/tenant_manage_plat/migrations/0008_auto_20160204_2144.py
|
141b4816b74f7505ac048671cd81e2f8f865931f
|
[] |
no_license
|
Evaxtt/weixin
|
f3b3f8b4adbab9689795684f0a8d25a8e1ed2097
|
bce09d08e456057ea7e67aaaeba7a16ae6eb6ea1
|
refs/heads/master
| 2021-01-17T15:45:38.620631
| 2016-10-24T09:22:03
| 2016-10-24T09:22:03
| 65,252,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-04 13:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tenant_manage_plat', '0007_auto_20160204_2118'),
]
operations = [
migrations.AlterField(
model_name='activityapplicant',
name='apply_time',
field=models.DateTimeField(auto_now_add=True),
),
]
|
[
"394012641@qq.com"
] |
394012641@qq.com
|
9711632a4d0ef32757ea0b00c0c7b901214e65de
|
5a7295c74f5cf8c81a9c4faff4ac73b462e80532
|
/aws-cdk-java/step_three/python_lambdas/errors.py
|
f94e42bfacc0fcdcf86d065c72c80cdcb31cd580
|
[] |
no_license
|
hermanlintvelt/aws-java-serverless
|
840a0b1ed0b7b60d791a4e273c940aea743eb71e
|
8205e0d8961f4ea639c669c58a854bfa7a06dc88
|
refs/heads/main
| 2023-08-25T19:30:27.392857
| 2023-08-15T11:59:47
| 2023-08-15T11:59:47
| 206,782,368
| 4
| 2
| null | 2023-08-15T11:59:48
| 2019-09-06T11:46:36
|
Java
|
UTF-8
|
Python
| false
| false
| 1,470
|
py
|
import json
def any_error(message: str, code: int) -> dict:
return {
'statusCode': code,
'body': json.dumps({'message': message}),
'headers': {
"Access-Control-Allow-Origin": "*", "Access-Control-Allow-Credentials": True,
"Access-Control-Allow-Headers": "Origin,Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,locale",
"Access-Control-Allow-Methods": "POST, OPTIONS, GET",
}
}
def bad_request(exc: ValueError) -> dict:
return {
"statusCode": 400,
"body": json.dumps({'message': f'Request is missing property: {exc.args[0]}'}),
'headers': {
"Access-Control-Allow-Origin": "*", "Access-Control-Allow-Credentials": True,
"Access-Control-Allow-Headers": "Origin,Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,locale",
"Access-Control-Allow-Methods": "POST, OPTIONS, GET",
}
}
def not_found(message: str) -> dict:
return {
"statusCode": 404,
# Dump camelCase for API
"body": json.dumps(
{'message': message}
),
'headers': {
"Access-Control-Allow-Origin": "*", "Access-Control-Allow-Credentials": True,
"Access-Control-Allow-Headers": "Origin,Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,locale",
"Access-Control-Allow-Methods": "POST, OPTIONS, GET",
}
}
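# Hedged usage sketch: the handler below is illustrative only and not part of the
# original module; it shows how a Lambda entry point would typically funnel failures
# through these helpers so every response carries the same CORS headers.
def _example_handler(event, context):
    try:
        body = json.loads(event.get('body') or '{}')
        if 'id' not in body:
            raise ValueError('id')
    except ValueError as exc:
        return bad_request(exc)  # 400 response with CORS headers
    # pretend the database lookup found nothing
    return not_found('no item with id %s' % body['id'])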
|
[
"herman.lintvelt@gmail.com"
] |
herman.lintvelt@gmail.com
|
5595e1e3cf4fc22afb0d5ead6ebd3f7b56e3bd8f
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/B/bilalr/aje-url_2.py
|
abdec9dd0fe9367167f95964d44ddddc69c81e21
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,684
|
py
|
import scraperwiki
from bs4 import BeautifulSoup
search_page = "http://www.contractsfinder.businesslink.gov.uk/Search%20Contracts/Search%20Contracts%20Results.aspx?site=1000&lang=en&sc=3fc5e794-0cb4-4c10-be10-557f169c4c92&osc=db8f6f68-72d4-4204-8efb-57ceb4df1372&rb=1&ctlPageSize_pagesize=200&ctlPaging_page="
html = scraperwiki.scrape(search_page + "1")
soup = BeautifulSoup(html)
max = soup.find(id="resultsfound")
num = int(max.get_text().strip()[9:13])
if num % 200 != 0:
    last_page = (num // 200) + 1
else:
    last_page = num // 200
for n in range(1,last_page + 1):
html_all = scraperwiki.scrape(search_page + str(n))
soup_all = BeautifulSoup(html_all)
    links = soup_all.find_all("a", "notice-title")
for link in links:
url = link["href"]
data = {"URL": url}
scraperwiki.sqlite.save(["URL"], data)import scraperwiki
from bs4 import BeautifulSoup
search_page = "http://www.contractsfinder.businesslink.gov.uk/Search%20Contracts/Search%20Contracts%20Results.aspx?site=1000&lang=en&sc=3fc5e794-0cb4-4c10-be10-557f169c4c92&osc=db8f6f68-72d4-4204-8efb-57ceb4df1372&rb=1&ctlPageSize_pagesize=200&ctlPaging_page="
html = scraperwiki.scrape(search_page + "1")
soup = BeautifulSoup(html)
max = soup.find(id="resultsfound")
num = int(max.get_text().strip()[9:13])
if num % 200 != 0:
last_page = (num/200) + 1
else:
last_page = num/200
for n in range(1,last_page + 1):
html_all = scraperwiki.scrape(search_page + str(n))
soup_all = BeautifulSoup(html_all)
links = soup.find_all("a", "notice-title")
for link in links:
url = link["href"]
data = {"URL": url}
scraperwiki.sqlite.save(["URL"], data)
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
aac3d35e784873fac9577160a091d457835358dd
|
33a50bb13812090a36257078522b798762978c66
|
/aliyun/api/rest/Ecs20130110DescribeImagesRequest.py
|
c9f3b5ab13260b722e7f491d8f89bb91cb26ef5e
|
[] |
no_license
|
aa3632840/quanlin
|
52ac862073608cd5b977769c14a7f6dcfb556678
|
2890d35fa87367d77e295009f2d911d4b9b56761
|
refs/heads/master
| 2021-01-10T22:05:14.076949
| 2014-10-25T02:28:15
| 2014-10-25T02:28:15
| 23,178,087
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
'''
Created by auto_sdk on 2014-09-08 16:48:02
'''
from aliyun.api.base import RestApi
class Ecs20130110DescribeImagesRequest(RestApi):
def __init__(self,domain='ecs.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.ImageId = None
self.ImageOwnerAlias = None
self.PageNumber = None
self.PageSize = None
self.RegionId = None
def getapiname(self):
return 'ecs.aliyuncs.com.DescribeImages.2013-01-10'
|
[
"262708239@qq.com"
] |
262708239@qq.com
|
5bfd032db6f096244473d05becfd8c7c24e371cc
|
53e29bb608afabf536fda6395113c5dece2a953b
|
/ArrangementThree.py
|
51e2d9c9b5f2e372fa259228f522882b4c7d6249
|
[] |
no_license
|
StrawberryFlavor/SmartContract
|
bcb7705243bd7603a25374f9498722450e05c3e2
|
dc1dc4e43ca07f17737b505ad9cad3e351e86965
|
refs/heads/master
| 2021-07-22T02:13:28.406874
| 2020-08-29T16:07:48
| 2020-08-29T16:07:48
| 211,664,030
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,961
|
py
|
Cversion = '1.0.0'
from hashgard.interop.System.Storage import Get, Put, PutArray, GetArray
from hashgard.interop.System.Runtime import GetTxSender, GetTime, GetRand, TimeFormat
from hashgard.vmop.Builtins import concat
from hashgard.libop.String import join, split, int, str
from hashgard.interop.System.Account import IsValid
from hashgard.interop.System.Bank import ContractAccAddressGet, ContractBalanceSend, ContractBalanceInject, BalanceOf
GARD_DENOM = 'agard'
GARD_FACTOR = 1000000000000000000
STAKE_AMOUNT = 200 * GARD_FACTOR # amount of a single bet
OWNER = "gard1457qyf93ljsh2ep00uc66djd3n95fljk7y492k"
KEY_GAME_PERIOD = "period" # current period (issue) number
KEY_GAME_TIME = "game_time" # timestamp of each period
KEY_USER_ISSUE_MULTIPLE = "user_issue_multipe" # multiple a user bought for a given period
KEY_GAME_LIST = "period_list" # list of periods
GMAE_START_TIME = 1572969600
KEY_GAME_TIME_FOR_A_ROUND = 300 # a number is drawn every 5 minutes
KEY_USER_STAKE = "user_stake" # a user's bet for a period
KEY_USER_ISSUE = "user_issue" # periods a user has bet on
KEY_USER_WITHDRAW = "user_withdraw" # a user's prize withdrawal info
KEY_GAME_WITHDRAW_INFO = "game_withdraw_info" # withdrawal info of a period
KEY_GAME_PRIZE = "game_prize" # winning number
KEY_OWNER = OWNER
KEY_SYSTEM_POOL = "system_pool" # system prize pool
KEY_STAKE_POOL = "stake_pool" # user stake pool
def main(operation, args):
if operation == "owner":
return owner()
if operation == "contractAccount":
return contractAccount()
if operation == 'contractKeys':
return contract_keys()
if operation == "inject_system_pool":
if len(args) != 1:
raise Exception("缺少参数")
return inject_system_pool(args[0])
if operation == "withdraw_system_pool":
if len(args) != 1:
raise Exception("缺少参数")
return withdraw_system_pool(args[0])
if operation == "issue":
return issue()
if operation == "init":
return init()
if operation == "query_user_stake":
if len(args) != 2:
raise Exception("缺少参数")
return query_user_stake(args[0], args[1])
if operation == "query_user_stake_list":
if len(args) != 1:
raise Exception("缺少参数")
return query_user_stake_list(args[0])
if operation == "stake":
if len(args) != 3:
raise Exception("缺少参数")
return stake(args[0], args[1], args[2])
if operation == "if_stake":
return if_stake()
if operation == "query_period_prize_number":
if len(args) != 1:
raise Exception("缺少参数")
return query_period_prize_number(args[0])
if operation == "query_user_if_prize":
if len(args) != 2:
raise Exception("缺少参数")
return query_user_if_prize(args[0], args[1])
if operation == "query_user_withdarw":
if len(args) != 1:
raise Exception("缺少参数")
return query_user_withdraw(args[0])
if operation == "query_user_withdraw_issue":
if len(args) != 2:
raise Exception("缺少参数")
return query_user_withdraw_issue(args[0], args[1])
if operation == "withdraw":
if len(args) != 1:
raise Exception("缺少参数")
return withdraw(args[0])
if operation == "query_withdraw_info":
if len(args) != 1:
raise Exception("缺少参数")
return query_withdraw_info(args[0])
if operation == "syspool":
return syspool()
if operation == "syspool":
return syspool()
if operation == "stakepool":
return stakepool()
if operation == "get_period_generation":
return get_period_generation()
if operation == "query_period_list":
return query_period_list()
if operation == "query_user_stake_amount":
if len(args) != 1:
raise Exception("缺少参数")
return query_user_stake_amount(args[0])
if operation == "query_period_time":
if len(args) != 1:
raise Exception("缺少参数")
return query_period_time(args[0])
return False
def contract_keys():
return ["owner:string", "issue:string", "syspool:integer", "stakepool:integer", "contractAccount:string"]
def contractAccount():
return ContractAccAddressGet()
def owner():
return Get(KEY_OWNER)
def stakepool():
    return Get(KEY_STAKE_POOL) # query the user stake pool
def syspool():
    return Get(KEY_SYSTEM_POOL) # query the system prize pool
def inject_system_pool(amount): # inject funds into the system prize pool
sender = GetTxSender()
system_amount = syspool()
if not system_amount:
Put(KEY_SYSTEM_POOL, amount)
else:
system_amount = system_amount + amount
Put(KEY_SYSTEM_POOL, system_amount)
ContractBalanceInject(sender, GARD_DENOM, amount)
return True
def withdraw_system_pool(amount): # withdraw funds from the system prize pool
if GetTxSender() != Get(KEY_OWNER):
raise Exception("请使用合约 owner 地址调用")
if amount < 0:
raise Exception("请输入正确的金额")
balance_amount = syspool() - amount
if balance_amount < 0:
raise Exception("系统奖池余额不足")
ContractBalanceSend(Get(KEY_OWNER), GARD_DENOM, amount)
Put(KEY_SYSTEM_POOL, balance_amount)
return True
def inject_stake_pool(amount): # inject funds into the user stake pool
sender = GetTxSender()
stake_amount = stakepool()
if not stake_amount:
Put(KEY_STAKE_POOL, amount)
else:
stake_amount = stake_amount + amount
Put(KEY_STAKE_POOL, stake_amount)
ContractBalanceInject(sender, GARD_DENOM, amount)
return True
def query_period_time(period): # get the timestamp of the given period
key = concat(KEY_GAME_TIME, period)
return Get(key)
def get_period_generation(): # period number generator
    now_time = GetTime()
    now_time = TimeFormat(now_time) # format: 2019-09-25 17:15:30
    time = split(now_time, " ") # split into two elements
    time_01 = time[0] # year-month-day
    time_02 = time[1] # hour:minute:second
    time_01 = split(time_01, "-")
    time_02 = split(time_02, ":")
    a = join("", time_01)
    b = concat(time_02[0], time_02[1])
    period = concat(a, b) # concatenate
    return period
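# Worked example (comment added for clarity): if TimeFormat returns
# "2019-11-06 09:05:30", the pieces above become "20191106" and "0905", so the
# period id is the string "201911060905" -- one period per calendar minute.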
def issue(): # current period number
return Get(KEY_GAME_PERIOD)
def init():
if Get(KEY_OWNER):
raise Exception("已经初始化过")
time = GetTime()
    Put(KEY_OWNER, OWNER) # Put the contract owner
    now_period = get_period_generation()
    Put(KEY_GAME_PERIOD, now_period) # Put the very first period number
    time_key = concat(KEY_GAME_TIME, now_period)
    Put(time_key, time) # Put the timestamp of the first period
    period_ls = [now_period]
    PutArray(KEY_GAME_LIST, period_ls) # Put the list of periods
def query_period_list():
return GetArray(KEY_GAME_LIST)
def if_stake():
now_time = GetTime()
    now_period = issue() # current period
old_time = query_period_time(now_period)
if now_time - old_time > KEY_GAME_TIME_FOR_A_ROUND:
return True
return False
def query_user_stake(address, period): # return the user's bet details for this period: [bet mode (scatter/sum), number, amount]
if not IsValid(address):
raise Exception("地址格式错误")
key = concat(concat(KEY_USER_STAKE, address), period)
    return GetArray(key) # return the user's bet info as a list
def query_user_stake_list(address): # return the list of periods the user has bet on
if not IsValid(address):
raise Exception("地址格式错误")
key = concat(KEY_USER_ISSUE, address)
return GetArray(key)
def stake(mode, multipe, number): # place a user bet
ls = []
now_time = GetTime()
for i in range(len(number)):
ls.append(int(number[i]))
    if mode == 0: # scatter-value bet
for i in range(len(ls)):
if ls[i] > 6:
raise Exception("每个数字在1到6之间")
    if mode == 1: # sum-value bet
if int(number) < 3 or int(number) > 18:
raise Exception("最大的和值为18")
if multipe > 99 or multipe < 1:
raise Exception("不支持的倍数")
if if_stake():
        num = draw_number() # before opening the next period, publish the winning number of the previous one
        draw_num_key = concat(KEY_GAME_PRIZE, issue())
        Put(draw_num_key, num) # Put the winning number of that period
        update_period = get_period_generation()
        Put(KEY_GAME_PERIOD, update_period) # Put the new period number
        time_key = concat(KEY_GAME_TIME, update_period)
        Put(time_key, now_time) # Put the timestamp of the new period
period_list = []
ls = GetArray(KEY_GAME_LIST)
for i in range(len(ls)):
if len(ls) < 24:
period_list.append(ls[i])
if len(ls) >= 24 and i < 23:
period_list.append(ls[i + 1])
period_list.append(update_period)
        PutArray(KEY_GAME_LIST, period_list) # Put the list of the latest 24 periods
sender = GetTxSender()
period = issue()
amount = multipe * STAKE_AMOUNT
if BalanceOf(sender, [GARD_DENOM])[0] < amount:
raise Exception("余额不足")
if len(query_user_stake(sender, period)) > 0:
raise Exception("当期已经购买过")
user_stake_key = concat(concat(KEY_USER_STAKE, sender), period)
    if mode == 0: # 0 means a scatter bet
        PutArray(user_stake_key, [str(mode), str(multipe), number, str(amount)])
    if mode == 1: # 1 means a sum bet
PutArray(user_stake_key, [str(mode), str(multipe), number, str(amount)])
user_stake_list = []
ls = query_user_stake_list(sender)
for i in range(len(ls)):
if len(ls) < 24:
user_stake_list.append(ls[i])
if len(ls) >= 24 and i < 23:
user_stake_list.append(ls[i + 1])
user_stake_list.append(period)
    user_stake_list_key = concat(KEY_USER_ISSUE, sender) # Put the list of the user's latest 24 bet periods
PutArray(user_stake_list_key, user_stake_list)
stake_all_amount = query_user_stake_amount(sender)
stake_amount_key = concat(KEY_USER_STAKE, sender)
if not stake_all_amount:
Put(stake_amount_key, amount)
else:
stake_all_amount = stake_all_amount + amount
        Put(stake_amount_key, stake_all_amount) # Put the user's total bet amount
inject_stake_pool(amount)
return True
def draw_number(): # generate the winning number
number = []
for i in range(1, 7):
number.append(i)
result = []
rd = GetRand(3)
for i in range(len(rd)):
index = int(rd[i]) % len(number)
result.append(str(number[index]))
res = concat(concat(result[0], result[1]), result[2])
    return res # returns a string
def query_period_prize_number(period): # query the winning number of the given period
key = concat(KEY_GAME_PRIZE, period)
return Get(key)
def sum_value_match(number): # payout rules for sum-value bets
ls = []
for i in range(len(number)):
ls.append(int(number[i]))
sum = 0
for i in range(len(ls)):
sum = ls[i] + sum
    amount = 0 # reward that should be paid out
if sum == 4:
amount = 8000 * GARD_FACTOR
if sum == 5:
amount = 4000 * GARD_FACTOR
if sum == 6:
amount = 2500 * GARD_FACTOR
if sum == 7:
amount = 1600 * GARD_FACTOR
if sum == 8:
amount = 1200 * GARD_FACTOR
if sum == 9:
amount = 1000 * GARD_FACTOR
if sum == 10:
amount = 900 * GARD_FACTOR
if sum == 11:
amount = 900 * GARD_FACTOR
if sum == 12:
amount = 1000 * GARD_FACTOR
if sum == 13:
amount = 1200 * GARD_FACTOR
if sum == 14:
amount = 1600 * GARD_FACTOR
if sum == 15:
amount = 2500 * GARD_FACTOR
if sum == 16:
amount = 4000 * GARD_FACTOR
if sum == 17:
amount = 8000 * GARD_FACTOR
if sum == 18 or sum == 3:
amount = 24000 * GARD_FACTOR
return amount
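# Worked example (comment added for clarity): a draw of "446" has sum 4+4+6 = 14, so a
# 1x sum-value bet on 14 pays 1600 * GARD_FACTOR; the payout table is symmetric around
# the most likely sums 10/11 and is largest for the rare sums 3 and 18.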
def scatter_value_match(number): # payout rules for scatter-value bets
ls = []
for i in range(len(number)):
ls.append(int(number[i]))
for i in range(len(ls)):
same_frequency = 0
different_frequency = 0
for l in range(len(ls)):
if ls[i] == ls[l]:
same_frequency = same_frequency + 1
if ls[i] != ls[l]:
different_frequency = different_frequency + 1
        if same_frequency == 3: # three identical digits (triple)
            amount = 24000 * GARD_FACTOR
            return amount
        if same_frequency == 2: # two identical digits (pair)
            amount = 8000 * GARD_FACTOR
            return amount
        if different_frequency == 2: # three different digits
amount = 4000 * GARD_FACTOR
return amount
def query_user_if_prize(address, period): # check whether the user's bet for the given period won
    if not IsValid(address):
        raise Exception("地址格式错误")
    prize_number = query_period_prize_number(period) # query the winning number
    user_info = query_user_stake(address, period) # query the user's bet info
    user_numbers = user_info[2] # the numbers the user bet on
    mul = user_info[1] # the user's multiple
    if user_info[0] == "0" and prize_number == user_numbers: # scatter bet
        amount = scatter_value_match(prize_number)
        return amount * int(mul)
    if user_info[0] == "1": # sum bet
prize_sum = 0
for i in range(len(prize_number)):
prize_sum = int(prize_number[i]) + prize_sum
if prize_sum == int(user_numbers):
amount = sum_value_match(prize_number)
return amount * int(mul)
amount = 0
return amount
def query_user_withdraw(address): # query the total rewards the user has received
if not IsValid(address):
raise Exception("地址格式错误")
key = concat(KEY_USER_WITHDRAW, address)
return Get(key)
def query_user_withdraw_issue(address, period): # query the reward the user received for the given period
if not IsValid(address):
raise Exception("地址格式错误")
key = concat(concat(KEY_USER_WITHDRAW, address), period)
return Get(key)
def withdraw(period): # claim the prize
sender = GetTxSender()
amount = query_user_if_prize(sender, period)
if amount <= 0:
raise Exception("用户没有中奖")
withdraw_amount = query_user_withdraw_issue(sender, period)
if withdraw_amount > 0:
raise Exception("用户已经兑过奖")
withdraw_user_amount = query_user_withdraw(sender)
user_withdraw_key = concat(KEY_USER_WITHDRAW, sender)
if not withdraw_user_amount:
Put(user_withdraw_key, amount)
else:
withdraw_user_amount = withdraw_user_amount + amount
        Put(user_withdraw_key, withdraw_user_amount) # Put the user's total rewards
    user_withdraw_period_key = concat(concat(KEY_USER_WITHDRAW, sender), period)
    Put(user_withdraw_period_key, amount) # Put the reward the user received for this period
    sys_amount = syspool() - amount
    Put(KEY_SYSTEM_POOL, sys_amount) # update the system prize pool
withdraw_info_key = concat(KEY_GAME_WITHDRAW_INFO, period)
withdraw_list_info = query_withdraw_info(period)
withdraw_ls = []
if len(withdraw_list_info) > 0:
for i in range(len(withdraw_list_info)):
withdraw_ls.append(withdraw_list_info[i])
withdraw_ls.append(sender)
    PutArray(withdraw_info_key, withdraw_ls) # record everyone who has claimed a prize for this period
    ContractBalanceSend(sender, GARD_DENOM, amount) # transfer the funds to the user
return True
def query_withdraw_info(period): # query who has claimed prizes for the given period
withdraw_info_key = concat(KEY_GAME_WITHDRAW_INFO, period)
return GetArray(withdraw_info_key)
def query_user_stake_amount(address): # query the total amount the user has bet
if not IsValid(address):
raise Exception("地址格式错误")
key = concat(KEY_USER_STAKE, address)
return Get(key)
|
[
"w.he@hashgard.com"
] |
w.he@hashgard.com
|
bdf8366f56067eeaa3cb91a176054b81a24d653e
|
de4b0eca24faec44bb03b66c53770e417ad84410
|
/custom_LSTM.py
|
4b656b23c27ff051803574f88450937e7ff6e06a
|
[] |
no_license
|
MarySherry/AML
|
5718dfe4a0c876dcd3bafd19e2e0fc3ddd73fd5e
|
2909dfe6230230a2035a5e5d7140ee23470c236d
|
refs/heads/master
| 2020-09-29T20:47:07.611669
| 2019-12-10T13:01:42
| 2019-12-10T13:01:42
| 227,119,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,246
|
py
|
import tensorflow as tf
import pandas
import numpy as np
from sklearn.metrics import accuracy_score
import sys
# Set parameters
letter_embedding_size = 5
lstm_hidden_size = 38
epochs = 100
minibatch_size = 256
# Load data
p_train_data = pandas.read_csv("train_eng.csv")
p_test_data = pandas.read_csv("test_eng.csv")
# Convert data to numpy arrays
train = p_train_data.to_numpy()
test = p_test_data.to_numpy()
# Sort by name length
# np.random.shuffle(train)
train = np.stack(sorted(list(train), key=lambda x: len(x[0])))
def transform_data(data, max_len):
"""
Transform the data into machine readable format. Substitute character with
letter ids, replace gender according to the mapping M->0, F->1
:param data: ndarray where first column is names, and the second is gender
:param max_len: maximum length of a name
:return: names, labels, vocab
where
- names: ndarray with shape [?,max_len]
- labels: ndarray with shape [?,1]
- vocab: dictionary with mapping from letters to integer IDs
"""
unique = list(set("".join(data[:,0])))
unique.sort()
vocab = dict(zip(unique, range(1,len(unique)+1))) # start from 1 for zero padding
classes = list(set(data[:,1]))
classes.sort()
    class_map = dict(zip(classes, range(len(classes))))
names = list(data[:,0])
labels = list(data[:,1])
def transform_name(name):
point = np.zeros((1, max_len), dtype=int)
name_mapped = np.array(list(map(lambda l: vocab[l], name)))
point[0,0: len(name_mapped)] = name_mapped
return point
transform_label = lambda lbl: np.array([[class_map[lbl]]])
names = list(map(transform_name, names))
labels = list(map(transform_label, labels))
names = np.concatenate(names, axis=0)
labels = np.concatenate(labels, axis=0)
return names, labels, vocab
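# Hedged illustration (toy values, not from the dataset): for
#   data = np.array([["Ann", "F"], ["Bob", "M"]]) and max_len = 4
# transform_data returns names of shape (2, 4) padded with zeros on the right, labels
# of shape (2, 1) with class ids assigned in sorted class order, and a letter -> id
# vocab that starts at 1 so that 0 stays reserved for padding.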
def get_minibatches(names, labels, mb_size):
"""
Split data in minibatches
:param names: ndarray of shape [?, max_name_len]
:param labels: ndarray of shape [?, 1]
:param mb_size: minibatch size
:return: list of batches
"""
batches = []
position = 0
while position + mb_size < len(labels):
batches.append((names[position: position + mb_size], labels[position: position + mb_size]))
position += mb_size
batches.append((names[position:], labels[position:]))
return batches
# Find longest name length
max_len = p_train_data['Name'].str.len().max()
train_data, train_labels, voc = transform_data(train, max_len)
test_data, test_labels, _ = transform_data(test, max_len)
batches = get_minibatches(train_data, train_labels, minibatch_size)
def LSTM_model(emb_size, vocab_size, lstm_hidden_size, T, learning_rate=0.001):
with tf.name_scope('LSTM_model'):
pad_vector = tf.zeros(shape=(1, emb_size), dtype=tf.float32, name="zero_padding")
symbol_embedding = tf.get_variable('symbol_embeddings_lstm_custom', shape=(vocab_size, emb_size), dtype=tf.float32)
symbol_embedding = tf.concat([pad_vector, symbol_embedding], axis=0)
input_ = tf.placeholder(shape=[None, T], dtype=tf.int32)
labels_ = tf.placeholder(shape=[None, 1], dtype=tf.float32)
embedded = tf.nn.embedding_lookup(symbol_embedding, input_)
lstm = tf.nn.rnn_cell.LSTMCell(lstm_hidden_size)
outputs, _ = tf.nn.dynamic_rnn(cell=lstm, inputs=embedded, dtype=tf.float32)
output = outputs[:, -1, :]
output = tf.layers.dropout(output, 0.2)
logits = tf.layers.dense(output, 1)
classify = tf.nn.sigmoid(logits)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels_), axis=0)
#train = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(loss)
train = tf.contrib.opt.LazyAdamOptimizer(learning_rate).minimize(loss)
print("trainable parameters:", np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
return {
'train': train,
'input': input_,
'labels': labels_,
'loss': loss,
'classify': classify
}
accuracies_lstm_custom = []
def run_LSTM_model():
terminals = LSTM_model(letter_embedding_size, len(voc), lstm_hidden_size, max_len)
train_ = terminals['train']
input_ = terminals['input']
labels_ = terminals['labels']
loss_ = terminals['loss']
classify_ = terminals['classify']
def evaluate(tf_session, tf_loss, tf_classify, data, labels):
"""
Evaluate loss and accuracy on a single minibatch
:param tf_session: current opened session
:param tf_loss: tensor for calculating loss
:param tf_classify: tensor for calculating sigmoid activations
:param data: data from the current batch
:param labels: labels from the current batch
:return: loss_value, accuracy_value
"""
loss_val, predict = tf_session.run([tf_loss, tf_classify], {
input_: data,
labels_: labels
})
acc_val = accuracy_score(labels, np.where(predict > 0.5, 1, 0))
return loss_val, acc_val
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for batch in batches:
names, labels = batch
sess.run([train_], {
input_: names,
labels_: labels
})
            # Performance on the first training batch.
            # Note that the first batch contains only the shortest names;
            # comparing different batches shows how zero padding affects the performance.
names, labels = batches[0]
train_loss, train_acc = evaluate(sess, loss_, classify_, names, labels)
# Performance on the test set
test_loss, test_acc = evaluate(sess, loss_, classify_, test_data, test_labels)
accuracies_lstm_custom.append(test_acc)
print("Epoch %d, train loss %.5f, train acc %.5f, test loss %.5f, test accuracy %.5f" % (
e, train_loss, train_acc, test_loss, test_acc))
run_LSTM_model()
|
[
"noreply@github.com"
] |
MarySherry.noreply@github.com
|
86e46441a8a60ee107f08dd91c85d958dd07d7dc
|
8ff2c3bf95ad793680854e012c1b2a0a47c6b053
|
/.ipynb_checkpoints/game_main_train-checkpoint.py
|
bc7aebbdccbcfb8102f5e912c89bc57a095691de
|
[] |
no_license
|
zlxzgtc/majiangAI
|
9fa61484b0fb1414418b587a9b7b38ee8bb8d57b
|
df73484ba43fa6365c5f3dab4a3923ac46468491
|
refs/heads/main
| 2023-02-15T09:26:26.201169
| 2021-01-17T15:27:38
| 2021-01-17T15:27:38
| 329,600,513
| 4
| 1
| null | 2021-01-14T13:43:14
| 2021-01-14T11:53:09
|
Python
|
UTF-8
|
Python
| false
| false
| 7,592
|
py
|
from random import shuffle
from copy import deepcopy
import numpy as np
import utils
import player
import game_table
import hu_judge
class Game():
def __init__(self, players=['ai', 'computer', 'computer', 'computer'], banker=0, round=1):
self.finished = False
self.players = []
for i in range(4):
self.players.append(player.Player(i, players[i]))
        self.banker = banker # dealer
        self.round = round # number of rounds to play
        self.hu_score = 10 # score for winning a hand; tripled for the dealer
        self.hu_id = -1 # -1 means nobody has won
self.now_turn = banker
self.no_hu = False
def start(self):
for i in range(self.round):
s = ""
s = s + "--------------------第" + str(i + 1) + "局--------------------\n"
            self.game_table = game_table.Gametable(False) # create the tile pile (wall)
self.now_turn = (self.banker + i) % 4
self.banker = (self.banker + i) % 4
self.no_hu = False
            for k in range(4): # clear each player's hand from the previous round
self.players[k].game_init()
for k in range(0, 16):
for j in range(4):
self.players[j].add_tiles(self.game_table.give_pile())
# for k in range(4):
# print(k, ' 的牌:', utils.get_Tiles_names(self.players[k].tiles))
self.play()
# s += "--------------------游戏结束--------------------\n"
s += self.print_score() + "\n"
# print(s)
f = "train.txt"
with open(f, "a") as file:
file.write(s)
def play(self, banker=0):
k = 0
self.finished = False
        while not self.finished: # while the game is not over, the four players take turns playing tiles
k += 1
# print("当前----第", k, "轮")
            if k != 1: # except for the very first discard, check whether the tile can be eaten (chow)
c = self.player_think_eat(last_tile)
if c != 3:
last_tile = self.player_think_out()
self.next_player()
continue
            if self.mo(self.now_turn) == -1: # draw a tile
self.no_hu = True
# print("流局")
break
if self.player_think_hu():
break
last_tile = self.player_think_out()
self.game_table.put_pile(last_tile)
self.next_player()
if self.others_think_hu(last_tile):
break
pong_id = self.others_think_pong(last_tile)
if pong_id != -1:
self.next_player(pong_id % 4)
last_tile = self.player_think_out()
self.next_player()
# print("当前打出的牌:", utils.get_Cnt_names(self.gametable.out_pile))
        # the game is over; compute the scores
self.count_score()
self.print_score()
ai_player = self.players[0]
ai_player.train(ai_player.old_out_env, ai_player.last_act, self.env(0), True, 0)
if ai_player.last_act_eat != -1:
ai_player.train(ai_player.old_eat_env, ai_player.last_act_eat, self.env(0), True, 1)
ai_player.last_act_eat = -1
if ai_player.last_act_pong != -1:
ai_player.train(ai_player.old_pong_env, ai_player.last_act_pong, self.env(0), True, 2)
ai_player.last_act_pong = -1
def env(self, id):
my_tiles = utils.get_cnt(self.players[id].tiles)
out_tiles = self.game_table.out_pile
my_eat_pong = np.add(utils.get_cnt(self.players[id].eat_tiles), utils.get_cnt(self.players[id].pong_tiles))
others_eat_pong = []
        other_info = [] # hand-tile counts of all 4 players, remaining tile count (1), distance to a winning hand (1)
for i in range(4):
other_info.append(len(self.players[i].tiles))
if i != id:
others_eat_pong = np.hstack((others_eat_pong, np.add(utils.get_cnt(self.players[i].eat_tiles),
utils.get_cnt(self.players[i].pong_tiles))))
other_info.append(len(self.game_table.Tiles))
other_info.append(self.players[id].hu_dis)
return np.hstack((my_tiles, out_tiles, my_eat_pong, others_eat_pong, other_info))
def count_score(self):
        if self.no_hu: # no score is computed for a drawn game
return
for player in self.players:
if player.id == self.hu_id:
player.score += self.hu_score * 3
else:
player.score -= self.hu_score
def print_score(self):
s = ""
for player in self.players:
s += " 玩家" + str(player.id) + " " + str(player.score)
return s
def next_player(self, next_id=-1):
if next_id == -1:
self.now_turn = (self.now_turn + 1) % 4
else:
self.now_turn = next_id % 4
def mo(self, player_id):
t = self.game_table.give_pile()
        if t == -1: # no tiles left to draw, the game is a draw
self.finished = True
else:
self.players[player_id].add_tiles(t)
if self.players[player_id].type == 'ai':
self.players[player_id].last_act = 0
return t
def player_think_eat(self, last_tile):
if self.players[self.now_turn].type == 'ai':
t = self.players[self.now_turn].think_eat(last_tile, self.env(self.now_turn))
if t != 3:
self.players[self.now_turn].last_act = 1
else:
t = self.players[self.now_turn].think_eat(last_tile)
return t
def player_think_out(self):
if self.players[self.now_turn].type == 'ai':
t = self.players[self.now_turn].out_tiles(env=self.env(self.now_turn))
else:
t = self.players[self.now_turn].out_tiles()
return t
def player_think_hu(self): # 玩家是否自摸胡
self.players[self.now_turn].hu_dis = hu_judge.hu_distance(self.players[self.now_turn].tiles)
if self.players[self.now_turn].hu_dis == 0: # 判断是否胡
# print("玩家" + str(self.now_turn) + "自摸胡了:" + utils.get_Tiles_names(self.players[self.now_turn].tiles))
self.hu_id = self.now_turn
self.finished = True
return self.finished
def others_think_hu(self, last_tile):
for j in range(3):
            # first check whether anyone can win (hu) on this tile
if hu_judge.hu_distance(self.players[(self.now_turn + j) % 4].tiles, last_tile) == 0:
# print("玩家" + str((self.now_turn + j) % 4) + "胡了:" + utils.get_Tiles_names(
# self.players[(self.now_turn + j) % 4].tiles) + utils.get_tile_name(last_tile))
self.finished = True
self.hu_id = (self.now_turn + j) % 4
return True
return False
    # then check whether anyone pongs; return the pong player's id, or -1 if nobody does
def others_think_pong(self, last_tile):
pong_id = -1
for j in range(3):
player = self.players[(self.now_turn + j) % 4]
if player.type == 'ai':
c = player.think_pong(last_tile, env=self.env(self.now_turn))
if c == 1:
self.players[self.now_turn].last_act = 2
else:
c = player.think_pong(last_tile)
if c != 0:
pong_id = (self.now_turn + j) % 4
return pong_id
game = Game(round=1000)
game.start()
|
[
"2780311834@qq.com"
] |
2780311834@qq.com
|
8b59d398d6fc73d704bba06861f961b7c8dcd1d7
|
d051f3fe9fda31b72fa0ddce67aa1f4293c7c37c
|
/models/statespace/transient.py
|
c7f1cd6b2d9d15a35f8331140a415813ecf897ea
|
[
"BSD-3-Clause"
] |
permissive
|
davmre/sigvisa
|
4e535215b6623310d8f5da64258f6fa9a378f9fd
|
91a1f163b8f3a258dfb78d88a07f2a11da41bd04
|
refs/heads/master
| 2021-03-24T10:24:52.307389
| 2018-01-05T19:33:23
| 2018-01-05T19:33:23
| 2,321,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,891
|
py
|
import numpy as np
import scipy.stats
import copy
from sigvisa.models.statespace import StateSpaceModel
class TransientCombinedSSM(StateSpaceModel):
"""
State space model consisting of a bunch of submodels that come and go, each model
being active for a given interval of time. The observation is taken to be
a (scalar) linear combination of the outputs of whatever submodels are currently
active.
"""
def __init__(self, components, obs_noise=0.0):
"""
components: list of tuples (ssm, start_idx, npts, scale), where
ssm: StateSpaceModel object
start_idx: the first timestep at which this ssm should be active.
npts: number of timesteps for which this ssm should be active.
            scale: either None, or an array of length >= npts containing a scale factor for each timestep.
"""
# save basic info
self.n_ssms = len(components)
self.obs_noise = obs_noise
self.ssms = []
self.ssm_starts = []
self.ssm_ends = []
self.scales = []
for (ssm, start_idx, npts, scale) in components:
if scale is not None:
assert(len(scale) >= npts)
self.ssms.append(ssm)
self.ssm_starts.append(start_idx)
self.ssm_ends.append(start_idx+npts)
self.scales.append(scale)
# compute a list of changepoints, with the set of ssms active at each point
self.changepoints = []
self.active_sets = []
starts = [(st, i, True) for (i, st) in enumerate(self.ssm_starts)]
ends = [(et, i, False) for (i, et) in enumerate(self.ssm_ends)]
events = sorted(starts+ends)
active_set = [ ]
t_prev = events[0][0]
for (t, i_ssm, start) in events:
if t != t_prev:
self.changepoints.append(t_prev)
self.active_sets.append(copy.copy(active_set))
t_prev = t
if start:
active_set.append(i_ssm)
else:
active_set.remove(i_ssm)
self.changepoints.append(t)
self.active_sets.append(active_set)
self.changepoints = np.array(self.changepoints, dtype=int)
# compute dimension of each active set to find the max dimension
self.max_dimension = np.max([int(np.sum([self.ssms[i].max_dimension for i in s])) for s in self.active_sets])
# set up caches and tmp arrays
self.active_ssm_cache1_k = -1
self.active_ssm_cache1_v = []
self.active_ssm_cache2_k = -1
self.active_ssm_cache2_v = []
self.ssm_tmp = np.empty((self.n_ssms,), dtype=int)
# storage for arbitrary temp arrays. Keys are array sizes (so
# you can just get a tmp array of the required size).
self.tmp_arrays = dict()
def active_ssms(self, k):
"""Return the (integer) indices of the ssms active at the given
timestep. It caches the previous two calls, so assuming the
filter is running in sequence, timesteps k and k-1 should
almost always be in cache.
When the cache misses, we perform a binary search on the list
of changepoints, so this is approximately O(log n) in the total
number of ssms. It's probably possible to do better than this...
"""
if k == self.active_ssm_cache1_k:
return self.active_ssm_cache1_v
elif k == self.active_ssm_cache2_k:
return self.active_ssm_cache2_v
i = np.searchsorted(self.changepoints, k, side="right")-1
active_ssms = self.active_sets[i]
self.active_ssm_cache2_k = self.active_ssm_cache1_k
self.active_ssm_cache2_v = self.active_ssm_cache1_v
self.active_ssm_cache1_k = k
self.active_ssm_cache1_v = active_ssms
return active_ssms
def apply_transition_matrix(self, x, k, x_new):
# first, loop over ssms active at the *previous*
# timestep in order to cache the location of each
# ssm in the previous state space.
j = 0
old_ssm_indices = self.active_ssms(k-1)
for i_ssm in old_ssm_indices:
ssm = self.ssms[i_ssm]
state_size = ssm.max_dimension
self.ssm_tmp[i_ssm] = j
j += state_size
# now apply the transition to the current time
i = 0
ssm_indices = self.active_ssms(k)
for i_ssm in ssm_indices:
ssm = self.ssms[i_ssm]
state_size = ssm.max_dimension
if self.ssm_starts[i_ssm] == k:
# new ssms just get filled in as zero
# (prior means will be added by the
# transition_bias operator)
x_new[i:i+state_size] = 0.0
#print " new ssm %d active from %d to %d" % (i_ssm, i, i+state_size)
else:
# this ssm is persisting from the
# previous timestep, so just run the
# transition
j = self.ssm_tmp[i_ssm]
ssm.apply_transition_matrix(x[j:j+state_size], k-self.ssm_starts[i_ssm], x_new[i:i+state_size])
#print " transitioning ssm %d, prev %d, in state %d to %d (sidx %d eidx %d)" % (i_ssm, j, i, i+state_size, self.ssm_starts[i_ssm], self.ssm_ends[i_ssm])
i += state_size
return i
def transition_bias(self, k, x):
i = 0
ssm_indices = self.active_ssms(k)
for j in ssm_indices:
ssm = self.ssms[j]
state_size = ssm.max_dimension
if self.ssm_starts[j] == k:
x[i:i+state_size] += ssm.prior_mean()
else:
ssm.transition_bias(k-self.ssm_starts[j], x[i:i+state_size])
i += state_size
def transition_noise_diag(self, k, noise):
i = 0
ssm_indices = self.active_ssms(k)
for j in ssm_indices:
ssm = self.ssms[j]
state_size = ssm.max_dimension
if self.ssm_starts[j] == k:
noise[i:i+state_size] = ssm.prior_vars()
else:
ssm.transition_noise_diag(k-self.ssm_starts[j], noise[i:i+state_size])
i += state_size
def apply_observation_matrix(self, x, k, result=None):
"""
We define the observation for the combined SSM as a (weighted) sum of the
observations from the currently active components.
"""
i = 0
# vector case
ssm_indices = self.active_ssms(k)
if len(x.shape)==1:
r = 0
for j in ssm_indices:
ssm, scale = self.ssms[j], self.scales[j]
state_size = ssm.max_dimension
ri = ssm.apply_observation_matrix(x[i:i+state_size], k-self.ssm_starts[j])
if scale is not None:
r += ri * scale[k-self.ssm_starts[j]]
else:
r += ri
i += state_size
return r
# matrix case
else:
assert(len(x.shape)==2)
try:
rr = self.tmp_arrays[len(result)]
except KeyError:
rr = np.empty((len(result),))
self.tmp_arrays[len(result)] = rr
result[:] = 0
for j in ssm_indices:
ssm, scale = self.ssms[j], self.scales[j]
state_size = ssm.max_dimension
ssm.apply_observation_matrix(x[i:i+state_size,:], k-self.ssm_starts[j], rr)
#print "TSSM step %d applying obs matrix on ssm %d state_size %d n %d scale %f result[0] %f\n" % (k, j, state_size, len(result), scale[k-self.ssm_starts[j]] if scale is not None else 1.0, rr[0])
if scale is not None:
rr *= scale[k-self.ssm_starts[j]]
result += rr
i += state_size
def obs_vector_debug(self, k):
H = np.zeros((self.max_dimension,))
ssm_indices = self.active_ssms(k)
i = 0
for j in ssm_indices:
v = self.ssms[j].obs_vector_debug(k-self.ssm_starts[j])
if self.scales[j] is not None:
v *= self.scales[j][k-self.ssm_starts[j]]
H[i:i+len(v)] = v
i += len(v)
return H
def observation_bias(self, k):
bias = 0.0
ssm_indices = self.active_ssms(k)
for j in ssm_indices:
kk = k-self.ssm_starts[j]
b = self.ssms[j].observation_bias(kk)
if self.scales[j] is not None:
b *= self.scales[j][kk]
bias += b
return bias
def observation_noise(self, k):
return self.obs_noise
def stationary(self, k):
"""
The combined model is stationary as long as *all* active
models are stationary, the set of active models hasn't changed,
and there are no scaling factors active (since scaling factors
are nonstationary in general).
"""
s1 = self.active_ssms(k)
if k > 0:
s2 = self.active_ssms(k-1)
if s2 != s1:
return False
for j in s1:
if self.scales[j] is not None:
return False
if not self.ssms[j].stationary(k-self.ssm_starts[j]):
return False
return True
def component_means(self, z):
"""
Given an observed signal, decompose it into component signals
corresponding to the state space models. The return value
"means" is a list, indexed in the same way as the component
ssms, in which each entry is an array, of length equal to the
activation time for that component, containing the mean
observations from the (filtered) mean states of that
component.
"""
# pre-allocate space by initializing each component to its prior mean.
# This is only really relevant if a component starts before time 0, in
# which case those unobserved timesteps will never be updated and so
# will remain at the prior mean.
means = []
for i in range(self.n_ssms):
means.append(self.ssms[i].mean_obs(self.ssm_ends[i]-self.ssm_starts[i]))
# run the Kalman filter, and compute the observation generated by each
# ssm at each timestep.
for k, (x, U, d) in enumerate(self.filtered_states(z)):
ssm_indices = self.active_ssms(k)
i=0
for j in ssm_indices:
ssm = self.ssms[j]
state_size = ssm.max_dimension
ix = k-self.ssm_starts[j]
means[j][ix] = ssm.apply_observation_matrix(x[i:i+state_size], ix)
means[j][ix] += ssm.observation_bias(ix)
i += state_size
return means
def prior_mean(self):
"""
The prior mean of the combined model is just the concatenation
of prior mean for all submodels active at step 0. In the
special case that a model's start time is negative, we
propagate its prior mean through the transition model for
the appropriate number of steps to get the (exact) prior mean at step 0.
"""
priors = []
for i in self.active_ssms(0):
ssm = self.ssms[i]
prior = ssm.prior_mean()
if self.ssm_starts[i] < 0:
p2 = prior.copy()
for k in range(-self.ssm_starts[i]):
state_size = ssm.apply_transition_matrix(p2, k+1, prior)
ssm.transition_bias(k, prior)
p2 = prior
priors.append(prior)
return np.concatenate(priors)
def prior_vars(self):
"""
The prior variance of the combined model is just the concatenation
of prior variances for all submodels active at step 0. In the
special case that a model's start time is negative, we
propagate its prior variance through the transition model for
the appropriate number of steps to get an (approximate)
diagonal variance at step 0.
"""
priors = []
for i in self.active_ssms(0):
ssm = self.ssms[i]
prior = ssm.prior_vars()
if self.ssm_starts[i] < 0:
P = np.diag(prior)
P2 = P.copy()
for k in range(-self.ssm_starts[i]):
ssm.transition_covariance(P2, k+1, P)
ssm.transition_noise_diag(k+1, prior)
np.fill_diagonal(P, np.diag(P) + prior)
P2 = P
# since the interface only supports independent
# priors, return a diagonal approximation of the true
# prior
prior = np.diag(P)
priors.append(prior)
return np.concatenate(priors)
def component_state_indices(self, k, component_idx):
ssms = self.active_ssms(k)
i = 0
for ssm in ssms:
next_i = i + self.ssms[ssm].max_dimension
if ssm == component_idx:
return i, next_i
i = next_i
raise ValueError("component %d is not active at timestep %d" % (component_idx, k))
def filtered_cssm_coef_marginals(self, z, component_idx):
# return the marginal means and variances on the basis
# coefficients for a CompactSupportSSM component.
ssm = self.ssms[component_idx]
start = self.ssm_starts[component_idx]
end = self.ssm_ends[component_idx]
coef_means = np.empty((ssm.n_basis,))
coef_vars = np.empty((ssm.n_basis,))
for k, (x, U, d) in enumerate(self.filtered_states(z)):
if k < start: continue
if k >= end: break
i1, i2 = self.component_state_indices(k, component_idx)
P = np.dot(d*U, U.T)
ssm.extract_coefs(x[i1:i2], P[i1:i2,i1:i2], k-start, coef_means, coef_vars)
return coef_means, coef_vars
def all_filtered_cssm_coef_marginals(self, z):
cssms = []
marginals = dict()
for i, ssm in enumerate(self.ssms):
if "extract_coefs" in dir(ssm):
cssms.append(i)
# initialize marginals to the prior
marginals[i] = (ssm.coef_means.copy(), ssm.coef_vars.copy() )
for k, (x, U, d) in enumerate(self.filtered_states(z)):
for i in cssms:
ssm = self.ssms[i]
start = self.ssm_starts[i]
end = self.ssm_ends[i]
if k < start or k >= end: continue
i1, i2 = self.component_state_indices(k, i)
P = np.dot(d*U, U.T)
ssm.extract_coefs(x[i1:i2], P[i1:i2,i1:i2], k-start, marginals[i][0], marginals[i][1])
return marginals
|
[
"dmoore@cs.berkeley.edu"
] |
dmoore@cs.berkeley.edu
|
04a05adbe948f7e5ed39e3b098955fa05f5a068e
|
c33ec6f52ef4ecb174c07765b6e8a8548f29daec
|
/Code/manage.py
|
4a49a92ed28826b19bfd75f1b85c31ab5f46e1ea
|
[] |
no_license
|
mayerll/design
|
ebfc449fc787dfcef19bd29d1857411ec888a48e
|
1b489827c7e917d6e0e6be96cf59a0b2ae4a3d32
|
refs/heads/main
| 2023-08-04T02:18:05.799487
| 2021-10-01T01:23:31
| 2021-10-01T01:23:31
| 412,280,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
print(sys.path)
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cloud.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"root@iMac-od-Bopeng.local"
] |
root@iMac-od-Bopeng.local
|
f832a1d9ccc528d16cb002adc2534143cc56f5bc
|
c839961aeab22795200d9edef9ba043fe42eeb9c
|
/data/script1191.py
|
176b0a9cf6075a6a1703f257277e6e864d61d8c6
|
[] |
no_license
|
StevenLOL/kaggleScape
|
ad2bb1e2ed31794f1ae3c4310713ead1482ffd52
|
18bede8420ab8d2e4e7c1eaf6f63280e20cccb97
|
refs/heads/master
| 2020-03-17T05:12:13.459603
| 2018-05-02T19:35:55
| 2018-05-02T19:35:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,614
|
py
|
# coding: utf-8
# I found kagglegym_emulation to be very helpful (https://www.kaggle.com/slothouber/two-sigma-financial-modeling/kagglegym-emulation). What this script does is validate it against the actual kagglegym. I used some snippets from this script: https://www.kaggle.com/sankhamukherjee/two-sigma-financial-modeling/prediction-model-elastic-net.
#
# Vote up if you find it meaningful :)
# In[ ]:
import pandas as pd
import numpy as np
from sklearn.metrics import r2_score
from sklearn.linear_model import ElasticNetCV
import kagglegym
import math
# In[ ]:
# kagglegym_emulation code
def r_score(y_true, y_pred):
r2 = r2_score(y_true, y_pred)
r = np.sign(r2) * np.sqrt(np.abs(r2))
return max(-1, r)
class Observation(object):
def __init__(self, train, target, features):
self.train = train
self.target = target
self.features = features
class Environment(object):
def __init__(self):
with pd.HDFStore("../input/train.h5", "r") as hfdata:
self.timestamp = 0
fullset = hfdata.get("train")
self.unique_timestamp = fullset["timestamp"].unique()
# Get a list of unique timestamps
# use the first half for training and
# the second half for the test set
n = len(self.unique_timestamp)
i = int(n/2)
timesplit = self.unique_timestamp[i]
self.n = n
self.unique_idx = i
self.train = fullset[fullset.timestamp < timesplit]
self.test = fullset[fullset.timestamp >= timesplit]
# Needed to compute final score
self.full = self.test.loc[:, ['timestamp', 'y']]
self.full['y_hat'] = 0.0
self.temp_test_y = None
def reset(self):
timesplit = self.unique_timestamp[self.unique_idx]
self.unique_idx = int(self.n / 2)
self.unique_idx += 1
subset = self.test[self.test.timestamp == timesplit]
# reset index to conform to how kagglegym works
target = subset.loc[:, ['id', 'y']].reset_index(drop=True)
self.temp_test_y = target['y']
target.loc[:, 'y'] = 0.0 # set the prediction column to zero
# changed bounds to 0:110 from 1:111 to mimic the behavior
# of api for feature
features = subset.iloc[:, :110].reset_index(drop=True)
observation = Observation(self.train, target, features)
return observation
def step(self, target):
timesplit = self.unique_timestamp[self.unique_idx-1]
# Since full and target have a different index we need
# to do a _values trick here to get the assignment working
y_hat = target.loc[:, ['y']]
self.full.loc[self.full.timestamp == timesplit, ['y_hat']] = y_hat._values
if self.unique_idx == self.n:
done = True
observation = None
reward = r_score(self.temp_test_y, target.loc[:, 'y'])
score = r_score(self.full['y'], self.full['y_hat'])
info = {'public_score': score}
else:
reward = r_score(self.temp_test_y, target.loc[:, 'y'])
done = False
info = {}
timesplit = self.unique_timestamp[self.unique_idx]
self.unique_idx += 1
subset = self.test[self.test.timestamp == timesplit]
# reset index to conform to how kagglegym works
target = subset.loc[:, ['id', 'y']].reset_index(drop=True)
self.temp_test_y = target['y']
# set the prediction column to zero
target.loc[:, 'y'] = 0
# column bound change on the subset
# reset index to conform to how kagglegym works
features = subset.iloc[:, 0:110].reset_index(drop=True)
observation = Observation(self.train, target, features)
return observation, reward, done, info
def __str__(self):
return "Environment()"
def make():
return Environment()
# In[ ]:
# predictive model wrapper, also see https://www.kaggle.com/sankhamukherjee/two-sigma-financial-modeling/prediction-model-elastic-net
class fitModel():
def __init__(self, model, train, columns):
# first save the model ...
self.model = model
self.columns = columns
# Get the X, and y values,
y = np.array(train.y)
X = train[columns]
self.xMeans = X.mean(axis=0) # Remember to save this value
self.xStd = X.std(axis=0) # Remember to save this value
X = np.array(X.fillna( self.xMeans ))
X = (X - np.array(self.xMeans))/np.array(self.xStd)
# fit the model
self.model.fit(X, y)
return
def predict(self, features):
X = features[self.columns]
X = np.array(X.fillna( self.xMeans ))
X = (X - np.array(self.xMeans))/np.array(self.xStd)
return self.model.predict(X)
# In[ ]:
def list_match(list_a, list_b):
for i, j in zip(list_a, list_b):
if i != j:
return False
return True
# In[ ]:
# Validation of kagglegym_emulation
env = kagglegym.make()
env_test = make()
# Check observations
observation = env.reset()
observation_test = env_test.reset()
assert list_match(observation.train.id.values, observation_test.train.id.values)
elastic_net = ElasticNetCV()
columns = ['technical_30', 'technical_20', 'fundamental_11', 'technical_19']
model = fitModel(elastic_net, observation.train.copy(), columns)
model_test = fitModel(elastic_net, observation_test.train.copy(), columns)
while True:
prediction = model.predict(observation.features.copy())
prediction_test = model_test.predict(observation_test.features.copy())
assert list_match(prediction, prediction_test)
target = observation.target
target_test = observation_test.target
target['y'] = prediction
target_test['y'] = prediction_test
timestamp = observation.features["timestamp"][0]
if timestamp % 100 == 0:
print(timestamp)
observation, reward, done, info = env.step(target)
observation_test, reward_test, done_test, info_test = env_test.step(target)
assert done == done_test
assert math.isclose(reward, reward_test, abs_tol=5e-05)
if done:
assert math.isclose(info['public_score'],info_test['public_score'], abs_tol=1e-07)
print('Info:',info['public_score'],'Info-test:',info_test['public_score'])
break
# **VALIDATED SUCCESSFULLY !!!**
|
[
"adithyagirish@berkeley.edu"
] |
adithyagirish@berkeley.edu
|
761cfa12dd0b5e6365b02d4330959c319cd91793
|
09f2850b1fc2d2572a9619a0bcb2f9ac63f7c4a5
|
/stock/common/monkey_patch.py
|
428f450a29a67bc4b0c3aef6c7a3f9ed6f403224
|
[] |
no_license
|
lijielife/stock-59
|
5cf34ae090997dfe211b9b4764a44fc009baf735
|
2e87f8636784b079fd236dcc3c7f5a0f937e259a
|
refs/heads/master
| 2020-07-12T15:09:31.063580
| 2015-10-03T14:37:35
| 2015-10-03T14:37:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : lhr (airhenry@gmail.com)
# @Link : http://about.me/air.henry
def as_method_of(cls):
def as_method_of_cls(func):
setattr(cls, func.__name__, func)
# return func so the decorated name still refers to the function
return func
return as_method_of_cls
def as_staticmethod_of(cls):
def as_staticmethod_of_cls(func):
setattr(cls, func.__name__, staticmethod(func))
return func
return as_staticmethod_of_cls
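# Usage sketch (Foo and greet are hypothetical names): attach a free function
# to an existing class as an instance method.
#   class Foo:
#       pass
#   @as_method_of(Foo)
#   def greet(self):
#       return "hello"
#   Foo().greet()  # -> "hello"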
if __name__ == '__main__':
pass
|
[
"airhenry@gmail.com"
] |
airhenry@gmail.com
|
a609024657e14b3862f7977456f4092abd104bdc
|
a6b954681dea98853ad38b21193b6ab3cbb06c30
|
/ticTacToe/migrations/0001_initial.py
|
443393eb77eed7894e0682c2ee1eabbc54b6a68f
|
[] |
no_license
|
alexdeathway/GameZilla
|
267d3eb723b5bc8804269332caa35e6529a7cd17
|
5110dc1ce3f9053790a0688a68caf350903a68a6
|
refs/heads/master
| 2023-03-13T02:09:49.784423
| 2021-03-04T10:47:14
| 2021-03-04T10:47:14
| 334,889,720
| 0
| 0
| null | 2021-03-04T10:47:15
| 2021-02-01T09:03:35
|
Python
|
UTF-8
|
Python
| false
| false
| 4,000
|
py
|
# Generated by Django 3.0.5 on 2020-11-29 14:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
("game", "0001_initial"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="TAC",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("game_id", models.SlugField()),
("started", models.BooleanField(default=False)),
("max_player", models.IntegerField(default=0)),
(
"board",
jsonfield.fields.JSONField(
default=[[0, 0, 0], [0, 0, 0], [0, 0, 0]]
),
),
("current", models.IntegerField(blank=True, null=True)),
("players_entered", models.IntegerField(default=0)),
("time_stamp", models.FloatField(blank=True, null=True)),
("round", models.IntegerField(blank=True, null=True)),
("zero_active", models.BooleanField(default=False)),
("zero_entered", models.BooleanField(default=False)),
("cross_active", models.BooleanField(default=False)),
("cross_entered", models.BooleanField(default=False)),
("winning_chances", models.IntegerField(default=8)),
(
"cross",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="cross",
to=settings.AUTH_USER_MODEL,
),
),
(
"current_player",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
(
"room",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE, to="game.Room"
),
),
(
"zero",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="zero",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="TACMessage",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("text", models.TextField()),
(
"game",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="message",
to="ticTacToe.TAC",
),
),
(
"user",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
]
|
[
"niteshsinha1707@gmail.com"
] |
niteshsinha1707@gmail.com
|
1e67b3bef014e1357c1e2730c2f1e476dadccd06
|
3d3cd15c67bae669d644a15343d66f66038bb89b
|
/backend/app/routes.py
|
962c3f40a27773884bdf2f21475dc6c3ab6f73da
|
[] |
no_license
|
ni3ol/the-pizza-project
|
0e771778ade80f663cbfbd038652830d246aaa52
|
f025b220b185258cb2b38a3533ffd98f8706e514
|
refs/heads/master
| 2023-01-13T10:05:46.452189
| 2020-05-30T11:23:11
| 2020-05-30T11:23:11
| 188,875,990
| 1
| 0
| null | 2023-01-03T22:46:59
| 2019-05-27T16:22:50
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,638
|
py
|
import requests
from flask import render_template, request, jsonify
from main import app
from config import (
API_CREDENTIALS,
TRELLO_API_BASE_URL,
PIZZA_PROJECT_BOARD_ID,
TO_DO_LIST_ID
)
import cards
@app.route('/api/boardlists')
def get_lists():
payload = {**API_CREDENTIALS}
response = requests.get(
f'{TRELLO_API_BASE_URL}/boards/{PIZZA_PROJECT_BOARD_ID}/lists',
params=payload)
board_lists = response.json()
list_names = [board_list['name'] for board_list in board_lists]
flask_response = jsonify(list_names)
return flask_response
@app.route('/api/cards', methods=['POST'])
def post_card():
if request.method == 'POST':
card = request.get_json()['card']
print(card)
payload = {'name': card,
'idList': TO_DO_LIST_ID, **API_CREDENTIALS}
requests.post(
f'{TRELLO_API_BASE_URL}/cards', params=payload
)
return 'Card posted in TODO list'
@app.route('/webhooks/cards', methods=['HEAD', 'POST'])
def on_card_update():
print(request.get_json())
response = request.get_json()
if response['action']['type'] == 'updateCard':
entities = response['action']['display']['entities']
card_name = entities['card']['text']
list_before = entities['listBefore']['text']
list_after = entities['listAfter']['text']
card = cards.update_card(card_name, list_before, list_after)
print(card)
cards.add_card_to_history(card)
return 'Ok', 200
@app.route('/cards', methods=['GET'])
def get_cards():
return jsonify(cards.get_card_history())
|
[
"nvojacek@gmail.com"
] |
nvojacek@gmail.com
|
c9972195d1fbff55b1cdf1078d3338786bc38d76
|
a49731805f9b30a1294d67fc078b780fd2f2a91d
|
/fitmodel/test.py
|
44f46913ad4fa76bbc6b779faa1ef6893ce4ab2b
|
[] |
no_license
|
yw5aj/HS2014
|
cf74dd18d5902799da1aacf6f8c875afd98a84f9
|
9901aa37e3801f7654489100e2fc3cbf63b9ccaa
|
refs/heads/master
| 2021-06-06T08:56:26.294778
| 2015-09-29T15:17:49
| 2015-09-29T15:17:49
| 25,499,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 34
|
py
|
import numpy as np
print(range(5))
|
[
"yw5aj@virginia.edu"
] |
yw5aj@virginia.edu
|
aada8e89982dca7cc3a4f46040ae3b6001ad380f
|
d7967d1a9b2f7ca8eba10ba2cf7cac94f203ab75
|
/explanation_util.py
|
61308c1603115743e1cf00fb0ff02b39f476fdfa
|
[] |
no_license
|
sayanbanerjee32/Local-explainability-of-NER-task-Masters-thesis
|
183a2c85962a9c8531ca95b5f70abd14f920b699
|
006634b41d594b5723ab3e6adae947e7ee1c570c
|
refs/heads/master
| 2023-06-12T08:10:10.898774
| 2021-07-08T16:07:29
| 2021-07-08T16:07:29
| 307,369,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,415
|
py
|
import numpy as np
import pandas as pd
from keras.preprocessing.sequence import pad_sequences
import re
import copy
from lime.lime_text import LimeTextExplainer
import shap
from ner_util import format_aux_input, \
pred2label, \
sent2features
def get_entity_prediction(validate_pred, idx2tag, root_tags = ['PER','LOC','ORG','MISC']):
pred_df_col_names = [idx2tag.get(id) for id in sorted(idx2tag.keys())]
entity_prob_arr_list = []
entity_full_list = root_tags.copy()
entity_full_list.append('O')
for val_sent_pred in validate_pred:
val_sent_pred_df = pd.DataFrame(val_sent_pred, columns= pred_df_col_names)
#print(val_sent_pred_df.shape)
val_sent_pred_df['pred_entity'] = val_sent_pred_df.idxmax(axis=1)
entity_prob_dict = {}
avg_entity_prob_dict = {}
# for each row:
for _, row in val_sent_pred_df.iterrows():
# if an entity prob is highest:
if row['pred_entity'] != 'O':
for root_tag in root_tags:
b_tag = 'B-' + root_tag
i_tag = 'I-' + root_tag
#if B tag prob is higher:
if row[b_tag] > row[i_tag]:
# first check whether the avg dict has any elements left to be added to the main dict
if avg_entity_prob_dict:
avg_list = avg_entity_prob_dict.get(root_tag, [])
if len(avg_list) > 0:
entity_avg = sum(avg_list)/len(avg_list)
# extend the original list by the number of probabilities being averaged
entity_prob_dict.setdefault(root_tag, []).extend([entity_avg] * len(avg_list))
# reset the averaging list to an empty list
avg_entity_prob_dict[root_tag] = []
# then add the b-tag prob in the list for averaging
avg_entity_prob_dict.setdefault(root_tag, []).append(row[b_tag])
else:
# if the i-tag is higher, keep the i-tag prob for averaging
avg_entity_prob_dict.setdefault(root_tag, []).append(row[i_tag])
# need to retain probability of O for averaging
avg_entity_prob_dict.setdefault('O', []).append(row['O'])
else: # if row['pred_entity'] is 'O'
# first check whether the avg dict has any elements left to be added to the main dict
if avg_entity_prob_dict:
# for every root tag calculate separately
for root_tag in root_tags:
avg_list = avg_entity_prob_dict.get(root_tag, [])
if len(avg_list) > 0:
entity_avg = sum(avg_list)/len(avg_list)
# extend the original list by the number of probabilities being averaged
entity_prob_dict.setdefault(root_tag, []).extend([entity_avg] * len(avg_list))
# reset the averaging list to an empty list
avg_entity_prob_dict[root_tag] = []
# similarly averaging will need to happen for O tag
avg_list = avg_entity_prob_dict.get('O', [])
if len(avg_list) > 0:
entity_avg = sum(avg_list)/len(avg_list)
# extend the original list by the number of probabilities being averaged
entity_prob_dict.setdefault('O', []).extend([entity_avg] * len(avg_list))
# reset the averaging list to an empty list
avg_entity_prob_dict['O'] = []
# as O is encountered reset avg dict
avg_entity_prob_dict = {}
# for root tags keep the max of B and I tags - no avg
for root_tag in root_tags:
b_tag = 'B-' + root_tag
i_tag = 'I-' + root_tag
tag_prob = max(row[b_tag],row[i_tag])
# if the dict does not have a list for this tag yet, initialise it with an empty list
entity_prob_dict.setdefault(root_tag, []).append(tag_prob)
# keep single probability for O tag
entity_prob_dict.setdefault('O', []).append(row['O'])
#prev_entity = row['pred_entity']
# if the avg dict still has some residual left after processing the last row
if avg_entity_prob_dict:
# for every root tag calculate separately
for root_tag in root_tags:
avg_list = avg_entity_prob_dict.get(root_tag, [])
if len(avg_list) > 0:
#if len(avg_list) > 1: ready_to_break = True
entity_avg = sum(avg_list)/len(avg_list)
# extend the original list by the number of probabilities being averaged
entity_prob_dict.setdefault(root_tag, []).extend([entity_avg] * len(avg_list))
# reset the averaging list to an empty list
avg_entity_prob_dict[root_tag] = []
# similarly averaging will need to happen for O tag
avg_list = avg_entity_prob_dict.get('O', [])
if len(avg_list) > 0:
entity_avg = sum(avg_list)/len(avg_list)
# extend the original list by the number of probabilities being averaged
entity_prob_dict.setdefault('O', []).extend([entity_avg] * len(avg_list))
# reset the averaging list to an empty list
avg_entity_prob_dict['O'] = []
# convert to dataframe
entity_prob_df = pd.DataFrame(entity_prob_dict)
entity_prob_df = entity_prob_df[entity_full_list]
#print(entity_prob_df.columns)
entity_prob_arr_list.append(entity_prob_df.to_numpy())
# finally return numpy nd array
entity_prob_mat = np.array(entity_prob_arr_list)
# new id to entity
idx2ent = {i: t for i, t in enumerate(entity_full_list)}
return entity_prob_mat, idx2ent
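# Rough shape sketch (hypothetical dimensions): for predictions over S sentences
# of length L with B-/I- tags for the four default entity types plus O, the
# returned entity_prob_mat has shape (S, L, 5) and idx2ent maps column indices
# to ['PER', 'LOC', 'ORG', 'MISC', 'O'].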
def get_explanation_instances(validate_entity_prob_mat,validate_true_entity_prob_mat,
idx2ent, idx2ent_true,
select_tags = ['LOC','MISC'],
selection_prob = 0.95):
expl_selected_dict ={}
for sent_indx, (pred_i, true_pred_i) in enumerate(zip(validate_entity_prob_mat,validate_true_entity_prob_mat)):
for word_indx, (p, true_p) in enumerate(zip(pred_i, true_pred_i)):
# predicted
p_i = np.argmax(p)
max_p = max(p)
pred_tag = idx2ent[p_i].replace("PAD", "O")
# actual
actual_p_i = np.argmax(true_p)
actual_tag = idx2ent_true[actual_p_i].replace("PAD", "O")
if pred_tag in select_tags and max_p >= selection_prob:
p_i_next = np.argsort(-p)[1] #bn.argpartition(-p, 0)[1]
p_next = p[p_i_next]
pred_tag_next = idx2ent[p_i_next].replace("PAD", "O")
if expl_selected_dict.get((sent_indx, pred_tag,max_p)) is None:
expl_selected_dict[(sent_indx, pred_tag,max_p)] = {'word_indx':[word_indx],
'p_i':p_i,
'p_i_next':p_i_next,
'pred_tag_next':pred_tag_next,
'p_next':p_next,
'is_accurate': actual_tag == pred_tag,
'actual_tag': [actual_tag]}
else:
expl_selected_dict[(sent_indx, pred_tag,max_p)]['word_indx'].append(word_indx)
expl_selected_dict[(sent_indx, pred_tag,max_p)]['actual_tag'].append(actual_tag)
expl_selected_dict[(sent_indx, pred_tag,max_p)]['is_accurate'] = bool(expl_selected_dict[(sent_indx,
pred_tag,max_p)]['is_accurate']* (actual_tag == pred_tag))
return expl_selected_dict
class NER_KSHAPExplainerGenerator(object):
def __init__(self, model, word2idx, tag2idx, max_len,
sent_getter_id_dict, sent_word_getter_id_dict, sentences,
trained_preprocess_transform, num_word_next, num_word_prev,
root_tags = ['PER','LOC','ORG','MISC']):
self.model = model
self.word2idx = word2idx
self.idx2word = {v: k for k,v in word2idx.items()}
self.tag2idx = tag2idx
self.idx2tag = {v: k for k,v in tag2idx.items()}
self.max_len = max_len
self.sent_getter_id_dict = sent_getter_id_dict
self.sent_word_getter_id_dict = sent_word_getter_id_dict
self.sentences = sentences
self.trained_preprocess_transform = trained_preprocess_transform
self.root_tags = root_tags
self.num_word_prev = num_word_prev
self.num_word_next = num_word_next
def preprocess(self, texts):
#print(texts)
X = [[self.word2idx.get(w, self.word2idx["<UNK>"]) for w in t.split()]
for t in texts]
X = pad_sequences(maxlen=self.max_len, sequences=X,
padding="post", value=self.word2idx["<PAD>"])
# trying to find out the text from validation sentences,
# assumption: we are using validation sentences for explanation
X_sents_idx = [self.sent_getter_id_dict.get(text) for text in texts]
# ideally it should match with only one sentence
# others are perturbed versions of the original sentence
X_sent_idx = [x for x in X_sents_idx if x is not None]
if len(X_sent_idx) > 0: # this means the sentence is from validation set
# get the tuple of POS and chunk for the words in that sentence
original_sent_word_dict = self.sent_word_getter_id_dict.get(X_sent_idx[0],{})
else: # generating reference / random data
original_sent_word_dict = {}
# even for a perturbed sentence, use the POS and CHUNK for the words that are still unchanged in the sentence
X_sents = [[original_sent_word_dict.get((i,word),('','unk','unk','unk')) for i,
word in enumerate(text.split())]for text in texts]
X_aux = [sent2features(X_sent, self.num_word_prev, self.num_word_next, self.max_len) for X_sent in X_sents]
X_aux_input, _ = format_aux_input(X_aux, max_len = self.max_len,
preproc_transform = self.trained_preprocess_transform)
#oh_encoder = trained_oh_encoder, standard_transform = trained_standard_transform)
flat_input = self._flatten_processed_input((X, X_aux_input))
# prediction for feature enrichment, so that the predicted tag can be used for context words
pred_for_feature_enrichment = self.model.predict([X, X_aux_input], verbose=1)
entity_prob_mat, idx2ent = get_entity_prediction(pred_for_feature_enrichment, self.idx2tag)
entity_tags_list = pred2label(entity_prob_mat, idx2ent)
# creating flat_feature_list (list of lists): this will contain the words and the CRF features, with the CRF features repeated
# for each word; the aim is to include the words in the CRF feature names
flat_feature_list = self._flatten_feature_list(X, X_sents, texts, entity_tags_list)
return flat_input, flat_feature_list
def _flatten_crf_features(self,word_list):
crf_feature_list_all_words = []
# for every word add CRF feature list
for i, word in enumerate(word_list):
if word == '': word = 'UNK'
# crf_feature_list = [str(word) +'_'+ str(feature) for feature in \
# self.trained_preprocess_transform.get('transformed_feature_list')]
crf_feature_list = []
for feature in self.trained_preprocess_transform.get('transformed_feature_list'):
# split based on the ':' between the relative word index and the feature inside CRF feature names
split_feature = re.findall("^(\+\d+|^-\d+)\:(.*)",feature)
if len(split_feature) > 0:
# extract main word and context word
context_word_relative_indx = int(split_feature[0][0])
# check for an out-of-range index
#print(i, context_word_relative_indx)
if (i + context_word_relative_indx) >= 0 and (i + context_word_relative_indx) < len(word_list):
context_word = word_list[i + context_word_relative_indx]
else:
context_word = 'OutOfIndex'
processed_feature_name = str(word) +':'+ split_feature[0][0] + \
':' + context_word + '__' + split_feature[0][1]
crf_feature_list.append(processed_feature_name)
else:
crf_feature_list.append(str(word) +'__'+ str(feature))
crf_feature_list_all_words.extend(crf_feature_list)
return crf_feature_list_all_words
def _enrich_word_features(self,word_feature_list,sent_pos_chunk, entity_tags, seperator_char = '>'):
enriched_word_feature_list = []
for word_position, word in enumerate(word_feature_list):
# enrich feature with word position
enriched_feature = str(word) + seperator_char + str(word_position)+ seperator_char
# enrich feature with POS tag
if word_position < len(sent_pos_chunk):
enriched_feature += sent_pos_chunk[word_position][1]
enriched_feature += seperator_char
# enrich feature with predicted NER tag
if word_position < len(entity_tags):
enriched_feature += entity_tags[word_position]
enriched_word_feature_list.append(enriched_feature)
return enriched_word_feature_list
def _flatten_feature_list(self, word_indx_nd_array, sentences_pos_chunk, texts, entity_tags_list):
feature_nd_list = []
for word_indx, single_sent_pos_chunk, single_sentence, entity_tags in \
zip(word_indx_nd_array, sentences_pos_chunk, texts, entity_tags_list):
# get the original words as well
org_text_words_list = [t for t in single_sentence.split()]
# get the words from word indices
embd_word_list = [self.idx2word.get(indx,'NOT_EXPECTED') for indx in word_indx]
# adding the original word as well, so that we can distinguish between known and unknown embedding words
word_list = []
for emb_indx, word in enumerate(embd_word_list):
if emb_indx < len(org_text_words_list):
if word != org_text_words_list[emb_indx]:
# for unknown words with respect to embedding, keep original version as well
word_list.append(str(org_text_words_list[emb_indx]) + '|' + str(word))
else:
word_list.append(word)
else:
word_list.append(word)
feature_list = word_list.copy()
enriched_word_feature_list = self._enrich_word_features(feature_list, single_sent_pos_chunk, entity_tags)
crf_feature_list = self._flatten_crf_features(word_list)
enriched_word_feature_list.extend(crf_feature_list)
feature_nd_list.append(enriched_word_feature_list)
return feature_nd_list
def _flatten_processed_input(self, input_tup):
X = input_tup[0]
X_aux_input = input_tup[1]
X_aux_input_flat = X_aux_input.transpose(0,1,2).reshape(X_aux_input.shape[0],-1)
X_all_array = np.column_stack((X,X_aux_input_flat))
return X_all_array
def _unflatten_flat_input(self, flat_input):
X = flat_input[:,0:self.max_len]
X_aux_input = flat_input[:, self.max_len:].reshape(flat_input.shape[0],self.max_len, -1)
return X, X_aux_input
def _get_entity_i_prediction(self, validate_pred, entity_word_indx_list):
pred_df_col_names = [self.idx2tag.get(id) for id in sorted(self.idx2tag.keys())]
entity_prob_arr_list = []
entity_full_list = self.root_tags.copy()
entity_full_list.append('O')
for val_sent_pred in validate_pred:
val_sent_pred_df = pd.DataFrame(val_sent_pred, columns= pred_df_col_names)
entity_prob_dict = {}
avg_entity_prob_dict = {}
# for each row:
for row_indx, row in val_sent_pred_df.iterrows():
# row index in word index list
if row_indx in entity_word_indx_list:
for root_tag in self.root_tags:
b_tag = 'B-' + root_tag
i_tag = 'I-' + root_tag
# row index is the first of the entity word indices
if row_indx == entity_word_indx_list[0]:
# then add the b-tag prob in the list for averaging
avg_entity_prob_dict.setdefault(root_tag, []).append(row[b_tag])
else:
# for subsequent rows use the i-tag probability
avg_entity_prob_dict.setdefault(root_tag, []).append(row[i_tag])
# need to retain probability of O for averaging
avg_entity_prob_dict.setdefault('O', []).append(row['O'])
# if the avg dict still has some residual left after processing the last row
if avg_entity_prob_dict:
# for every root tag calculate separately
for root_tag in self.root_tags:
avg_list = avg_entity_prob_dict.get(root_tag, [])
if len(avg_list) > 0:
#if len(avg_list) > 1: ready_to_break = True
entity_avg = sum(avg_list)/len(avg_list)
# extend the original list by the number of probabilities being averaged
entity_prob_dict.setdefault(root_tag, []).extend([entity_avg] * len(avg_list))
# reset the averaging list to an empty list
avg_entity_prob_dict[root_tag] = []
# similarly averaging will need to happen for O tag
avg_list = avg_entity_prob_dict.get('O', [])
if len(avg_list) > 0:
entity_avg = sum(avg_list)/len(avg_list)
# extend the original list by the number of probabilities being averaged
entity_prob_dict.setdefault('O', []).extend([entity_avg] * len(avg_list))
# reset the averaging list to an empty list
avg_entity_prob_dict['O'] = []
# convert to dataframe
entity_prob_df = pd.DataFrame(entity_prob_dict)
entity_prob_df = entity_prob_df[entity_full_list]
#print(entity_prob_df.columns)
entity_prob_arr_list.append(entity_prob_df.to_numpy())
# finally return numpy nd array
entity_prob_mat = np.array(entity_prob_arr_list)
#print(entity_prob_mat)
return entity_prob_mat
def get_predict_function(self, word_index_list):
def predict_func(flat_input):
#print(flat_input)
X, X_aux_input = self._unflatten_flat_input(flat_input)
#print(X.shape)
#print(X_aux_input.shape)
p = self.model.predict([X,X_aux_input])
# revise predicted probability vector for entity probabilities
p_entity = self._get_entity_i_prediction(p, word_index_list)
return p_entity[:,0,:]
return predict_func
class NER_LIMEExplainerGenerator(object):
def __init__(self, model, word2idx, tag2idx, max_len,
sent_getter_id_dict, sent_word_getter_id_dict, sentences,
trained_preprocess_transform, num_word_next,num_word_prev,
root_tags = ['PER','LOC','ORG','MISC']):
self.model = model
self.word2idx = word2idx
self.tag2idx = tag2idx
self.idx2tag = {v: k for k,v in tag2idx.items()}
self.max_len = max_len
self.sent_getter_id_dict = sent_getter_id_dict
self.sent_word_getter_id_dict = sent_word_getter_id_dict
self.sentences = sentences
self.trained_preprocess_transform = trained_preprocess_transform
self.root_tags = root_tags
self.num_word_next = num_word_next
self.num_word_prev = num_word_prev
def _preprocess(self, texts):
#print(texts)
X = [[self.word2idx.get(w, self.word2idx["<UNK>"]) for w in t.split()]
for t in texts]
X = pad_sequences(maxlen=self.max_len, sequences=X,
padding="post", value=self.word2idx["<PAD>"])
X_sents_idx = [self.sent_getter_id_dict.get(text) for text in texts]
X_sent_idx = [x for x in X_sents_idx if x is not None]
original_sent_word_dict = self.sent_word_getter_id_dict[X_sent_idx[0]]
X_sents = [[original_sent_word_dict.get((i,word),('','unk','unk','unk')) for i,
word in enumerate(text.split())]for text in texts]
X_aux = [sent2features(X_sent, self.num_word_prev, self.num_word_next, self.max_len) for X_sent in X_sents]
X_aux_input, _ = format_aux_input(X_aux, max_len = self.max_len,
preproc_transform = self.trained_preprocess_transform)
#oh_encoder = trained_oh_encoder, standard_transform = trained_standard_transform)
return X, X_aux_input
def get_entity_i_prediction(self, validate_pred, entity_word_indx_list):
pred_df_col_names = [self.idx2tag.get(id) for id in sorted(self.idx2tag.keys())]
entity_prob_arr_list = []
entity_full_list = self.root_tags.copy()
entity_full_list.append('O')
for val_sent_pred in validate_pred:
val_sent_pred_df = pd.DataFrame(val_sent_pred, columns= pred_df_col_names)
entity_prob_dict = {}
avg_entity_prob_dict = {}
# for each row:
for row_indx, row in val_sent_pred_df.iterrows():
# row index in word index list
if row_indx in entity_word_indx_list:
for root_tag in self.root_tags:
b_tag = 'B-' + root_tag
i_tag = 'I-' + root_tag
# row index is the first of the entity word indices
if row_indx == entity_word_indx_list[0]:
# then add the b-tag prob in the list for averaging
avg_entity_prob_dict.setdefault(root_tag, []).append(row[b_tag])
else:
# for subsequent rows use the i-tag probability
avg_entity_prob_dict.setdefault(root_tag, []).append(row[i_tag])
# need to retain probability of O for averaging
avg_entity_prob_dict.setdefault('O', []).append(row['O'])
# if the avg dict still has some residual left after processing the last row
if avg_entity_prob_dict:
# for every root tag calculate separately
for root_tag in self.root_tags:
avg_list = avg_entity_prob_dict.get(root_tag, [])
if len(avg_list) > 0:
#if len(avg_list) > 1: ready_to_break = True
entity_avg = sum(avg_list)/len(avg_list)
# extend the original list by the number of probabilities being averaged
entity_prob_dict.setdefault(root_tag, []).extend([entity_avg] * len(avg_list))
# reset the averaging list to an empty list
avg_entity_prob_dict[root_tag] = []
# similarly averaging will need to happen for O tag
avg_list = avg_entity_prob_dict.get('O', [])
if len(avg_list) > 0:
entity_avg = sum(avg_list)/len(avg_list)
# extend the original list by the number of probabilities being averaged
entity_prob_dict.setdefault('O', []).extend([entity_avg] * len(avg_list))
# reset the averaging list to an empty list
avg_entity_prob_dict['O'] = []
# convert to dataframe
entity_prob_df = pd.DataFrame(entity_prob_dict)
entity_prob_df = entity_prob_df[entity_full_list]
#print(entity_prob_df.columns)
entity_prob_arr_list.append(entity_prob_df.to_numpy())
# finally return numpy nd array
entity_prob_mat = np.array(entity_prob_arr_list)
#print(entity_prob_mat)
return entity_prob_mat
def get_predict_function(self, word_index_list):
def predict_func(texts):
X, X_aux_input = self._preprocess(texts)
# print(X.shape)
# print(X_aux_input.shape)
p = self.model.predict([X,X_aux_input])
# revise predicted probability vector for entity probabilities
p_entity = self.get_entity_i_prediction(p, word_index_list)
return p_entity[:,0,:]
return predict_func
|
[
"sayanbanerjee32@gmail.com"
] |
sayanbanerjee32@gmail.com
|
4177ee205902616799bc9b611c0e7e18be7c78b3
|
85d812a90b74069e751cc2cd661bcd29f1573b78
|
/app/core/models.py
|
6fec4ba55624bc8af035617f897dd27d1a087a7e
|
[] |
no_license
|
olumide95/Recipe-API-Python
|
6b524bb6ff910a2b3d3d2d3bad8fa274963b4d37
|
bd7959ce8fd8d23d11b36a9e64e3648acffd5a2a
|
refs/heads/master
| 2022-11-22T21:44:46.507778
| 2020-06-20T11:18:06
| 2020-06-20T11:18:06
| 273,131,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,204
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager,\
PermissionsMixin
class UserManager(BaseUserManager):
# create new user
def create_user(self, email, password=None, **extra_fields):
if not email:
raise ValueError('Email is Required')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
# create new super user
def create_super_user(self, email, password=None, **extra_fields):
if not email:
raise ValueError('Email is Required')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.is_admin = True
user.is_super = True
user.set_password(password)
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=255, unique=True)
name = models.TextField(max_length=255)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
|
[
"olugbemiro.olumide@gmail.com"
] |
olugbemiro.olumide@gmail.com
|
7373b189067a4772f1283cf415a3978c55fa4fe9
|
00af03827c29c507211141facc8eb2317337a1ef
|
/tests/test_usage.py
|
e2c3c2de241a70789e7e30b42c4b20183127bb6d
|
[
"ISC"
] |
permissive
|
gcross/paycheck
|
ed2fcf3719d454f2f0d20f18674c729875e83245
|
3a98d1a94a3c499087104142d5ee71f09529e633
|
refs/heads/master
| 2021-01-16T22:48:32.421571
| 2018-10-18T14:39:53
| 2018-10-18T14:39:53
| 271,126
| 4
| 2
|
ISC
| 2018-10-18T14:39:55
| 2009-08-06T22:41:13
|
Python
|
UTF-8
|
Python
| false
| false
| 629
|
py
|
import unittest
from paycheck import with_checker
class TestUsage(unittest.TestCase):
@with_checker()
def test_defaults(self, i=int, f=float):
self.assertTrue(isinstance(i, int))
self.assertTrue(isinstance(f, float))
@with_checker(int)
def test_mixed(self, i, f=float):
self.assertTrue(isinstance(i, int))
self.assertTrue(isinstance(f, float))
@with_checker
def test_without_parentheses(self, i=int, f=float):
self.assertTrue(isinstance(i, int))
self.assertTrue(isinstance(f, float))
tests = [TestUsage]
if __name__ == '__main__':
unittest.main()
|
[
"gcross@phys.washington.edu"
] |
gcross@phys.washington.edu
|
adf34968726ce7db596b72abdb414288e7fcf7b8
|
f2d0ce02cb7b7632a726a006a58cd5fc3d6f3733
|
/osb11/wsgi.py
|
3cad70ff04f1272ace459e1835fe031f77bab8ae
|
[] |
no_license
|
Daviddager/mavengui
|
8a5a03a8f0b438060fba508f4cd13bb6b67488e8
|
c2496c6e8a8a989917cf03c1521e4e1ce4166bca
|
refs/heads/master
| 2020-12-02T17:47:46.710639
| 2017-08-31T02:47:56
| 2017-08-31T02:47:56
| 96,428,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
"""
WSGI config for osb11 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "osb11.settings")
application = get_wsgi_application()
|
[
"drdagerm@gmail.com"
] |
drdagerm@gmail.com
|
af7609702b76b1b0c3f412b0749b333f5f3b9e71
|
5f6425e9d83b57b864e48f227e1dc58356a555c0
|
/utils/palettes/3rd-party/adapted_solarized_dark.py
|
72cf00d4c363e0ff3e9595a1ff5f99db611cde79
|
[
"MIT"
] |
permissive
|
jan-warchol/selenized
|
b374fa7822f281b16aa8b52e34bd1e585db75904
|
df1c7f1f94f22e2c717f8224158f6f4097c5ecbe
|
refs/heads/master
| 2023-06-22T09:37:02.962677
| 2022-09-12T20:24:40
| 2022-09-12T20:24:40
| 45,570,283
| 663
| 58
|
MIT
| 2023-04-18T09:33:22
| 2015-11-04T22:00:52
|
Emacs Lisp
|
UTF-8
|
Python
| false
| false
| 790
|
py
|
# Solarized dark with monotones and accents mapped to ANSI codes according to
# selenized convention
name = 'Selenized solarized dark'
palette = {
"bg_0": "#002b36",
"fg_0": "#839496",
"bg_1": "#073642",
"red": "#dc322f",
"green": "#859900",
"yellow": "#b58900",
"blue": "#268bd2",
"magenta": "#d33682",
"cyan": "#2aa198",
"dim_0": "#586e75",
"orange": "#cb4b16",
"violet": "#6c71c4",
"bg_2": "#073642",
"br_red": "#dc322f",
"br_green": "#859900",
"br_yellow": "#b58900",
"br_blue": "#268bd2",
"br_magenta": "#d33682",
"br_cyan": "#2aa198",
"fg_1": "#93a1a1",
"br_orange": "#cb4b16",
"br_violet": "#6c71c4",
}
|
[
"jan.warchol@gmail.com"
] |
jan.warchol@gmail.com
|
c4de5d0f4cf4ce49b52eeea7efa02da2e5c7df6b
|
310bafd1ce54303618c3f3c2955694fab2233efd
|
/day-15/testdemocalc/mus/TestBankAddUser.py
|
8c8017cbaaaa3ef9db03dfcc8de14865fc205d23
|
[] |
no_license
|
caodongxue/python_all-caodongxue
|
73495b68817857178336bc0d86b26f6634027cb9
|
b065cb60907125f15b99618f69ea0d612baff4fe
|
refs/heads/main
| 2023-01-31T00:23:25.374626
| 2020-12-17T12:33:30
| 2020-12-17T12:33:30
| 322,277,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,371
|
py
|
import unittest
from demo.bank import bank_addUser
'''
1. Work out which parts of the business logic need to be tested.
2. Write the core code.
3. Business rules:
user limit reached: 3
user already exists: 2
account created successfully: 1
username,password,country,province,street,door,money
'''
class TestBankAddUser(unittest.TestCase):
def testAddUser(self):
username = "jason"
password = "admin"
country = "中国"
province = "安徽省"
street = "幸福大道"
door = "s001"
money = 4515
status = 1 # expected result
# actual result
s = bank_addUser(username,password,country,province,street,door,money)
# assert
self.assertEqual(status,s)
def testAddUser1(self):
status = 3 # expected result
password = "admin"
country = "中国"
province = "安徽省"
street = "幸福大道"
door = "s001"
money = 4515
# first add 100 users
for i in range(100):
username = "jason" + str(i)
# actual result
bank_addUser(username, password, country, province, street, door, money)
# actual test
s = bank_addUser("李四",password,country,province,street,door,money)
# assert
self.assertEqual(status, s)
|
[
"noreply@github.com"
] |
caodongxue.noreply@github.com
|
8a7d25c0eabce992dca705a7ec51147f50c82e07
|
ea94ae6b63391e148e816c867de38326d76fe14a
|
/apps/user/migrations/0002_alter_user_phone_number.py
|
367b1e2cbc972db4171d2152efa3fa4424cd86b7
|
[] |
no_license
|
divitrao/oscar_api
|
7d8a254411a500468cd488bd69f21393d3de8f42
|
c98f85239319490e55e4bb538768747c39fcc06a
|
refs/heads/master
| 2023-08-25T01:21:14.540602
| 2021-11-03T16:48:27
| 2021-11-03T16:48:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
# Generated by Django 3.2.9 on 2021-11-03 05:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='phone_number',
field=models.IntegerField(unique=True),
),
]
|
[
"divit@yodaplus.com"
] |
divit@yodaplus.com
|
a3106dfa1242dd7e9828cfb745691ddf06217f5f
|
cc0f22e0078b777e1781216ab03d9af4a7d9c1ad
|
/corpusholder.py
|
4dbc5e1d2cfcd4b2a3853a6c023b528ac5921a93
|
[] |
no_license
|
aazarov/NLP_SentimentAnalysis_HyperparametersSearch
|
4432af0e922d7aa0dd742637b67316d2e33397c2
|
05a351717654a2a15b5d88679226e3a0c297520e
|
refs/heads/main
| 2023-01-31T01:33:22.180352
| 2020-12-17T09:13:40
| 2020-12-17T09:13:40
| 315,881,037
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,641
|
py
|
from itertools import chain
from collections import defaultdict
from collections import Counter
from numpy import asarray
from sklearn.utils import shuffle
import re
from nltk.corpus import stopwords
import torch
from torch.nn.utils.rnn import PackedSequence, pack_sequence
from torch.utils.data import Dataset, DataLoader
# holds the language i.e. the vocabulary and words to index mappings
# max_vocab_size controls how many most popular words are kept; rest are assigned to UNK_IDX
class Lang:
def __init__(self, texts, max_vocab_size=None):
tokenized_texts = [[word for word in text.split()] for text in texts]
counts = Counter(chain(*tokenized_texts))
max_vocab_size = max_vocab_size or len(counts)
common_pairs = counts.most_common(max_vocab_size)
self.UNK_IDX = 0
self.EOS_IDX = 1
self.itos = ["<UNK>", "<EOS>"] + [pair[0] for pair in common_pairs]
self.stoi = {token: i for i, token in enumerate(self.itos)}
print("Lang=%i" % len(self))
def __iter__(self):
return iter(self.itos)
def __len__(self):
return len(self.itos)
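# A small illustration (hypothetical sentences): Lang(["a b a", "b c"],
# max_vocab_size=2) keeps only the two most frequent tokens plus the special
# "<UNK>"/"<EOS>" entries, and any out-of-vocabulary word maps to UNK_IDX.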
# the data holder class to be wrapped into torch Dataloader
class Dataset:
def __init__(self, texts, labels, lang):
self.texts = texts
self.labels = labels
self.lang = lang
def __getitem__(self, item):
sentence = self.texts[item]
indexes = [self.lang.stoi.get(word, self.lang.UNK_IDX) for word in sentence.split()]
return indexes + [self.lang.EOS_IDX], self.labels[item]
def __len__(self):
return len(self.texts)
def collate_fn(self, batch):
return pack_sequence([torch.tensor(pair[0]) for pair in batch], enforce_sorted=False), torch.tensor(
[pair[1] for pair in batch])
# holds the corpus and its corresponding labels
# splits for train, validation and test as per defined fractions
# provides the DataLoader-s for training and evaluation
class CorpusHolder:
def __init__(self, texts, labels, batch_size=128, max_vocab_size=30000, max_length=1000, val_size=0.1, test_size=0.1):
self.max_vocab_size = max_vocab_size
self.max_length = max_length
self.batch_size=batch_size
self.lang, self.texts, self.labels = self.prepare_data(texts, labels)
train_last_index = int(len(self.texts) * (1 - val_size - test_size) )
val_last_index = int(len(self.texts) * (1 - test_size) )
self.train_texts, self.train_labels = self.texts[:train_last_index], self.labels[:train_last_index]
self.val_texts, self.val_labels = self.texts[train_last_index:val_last_index], self.labels[train_last_index:val_last_index]
self.train_dataset = Dataset(self.train_texts, self.train_labels, self.lang)
self.val_dataset = Dataset(self.val_texts, self.val_labels, self.lang)
self.test_dataset = Dataset(self.texts[val_last_index:], self.labels[val_last_index:], self.lang)
print('CorpusHolder train_dataset=%i val_dataset=%i test_dataset=%i' % (len(self.train_dataset), len(self.val_dataset), len(self.test_dataset)))
self.train_dataloader = DataLoader(self.train_dataset, batch_size=batch_size,
shuffle=True, num_workers=8, collate_fn=self.train_dataset.collate_fn)
self.val_dataloader = DataLoader(self.val_dataset, batch_size=batch_size,
shuffle=True, num_workers=8, collate_fn=self.val_dataset.collate_fn)
self.test_dataloader = DataLoader(self.test_dataset, batch_size=batch_size,
shuffle=True, num_workers=8, collate_fn=self.test_dataset.collate_fn)
self.budgeted_train_dataloaders = {}
self.budgeted_val_dataloaders = {}
def get_budgeted_dataloader(self, texts, labels, storage, budget, storagetype):
if not budget in storage:
budgeted_texts, budgeted_labels = shuffle(texts, labels)
budget_last_index = int(len(texts) * (budget / 100.0) )
# let's avoid extremely small sample counts, if possible
if len(texts) > 1000 and budget_last_index < 1000:
budget_last_index = 1000
budgeted_texts, budgeted_labels = budgeted_texts[:budget_last_index], budgeted_labels[:budget_last_index]
budgeted_dataset = Dataset(budgeted_texts[:budget_last_index], budgeted_labels[:budget_last_index], self.lang)
storage[budget] = DataLoader(budgeted_dataset, batch_size=self.batch_size,
shuffle=True, num_workers=8, collate_fn=budgeted_dataset.collate_fn)
print('produced budgeted=%.2f %s dataset=%i' % (budget, storagetype, budget_last_index))
return storage[budget]
def get_budgeted_train_dataloader(self, budget):
if budget == 100.0:
return self.train_dataloader
return self.get_budgeted_dataloader(self.train_texts, self.train_labels,
self.budgeted_train_dataloaders, budget, 'train')
def get_budgeted_val_dataloader(self, budget):
if budget == 100.0:
return self.val_dataloader
return self.get_budgeted_dataloader(self.val_texts, self.val_labels,
self.budgeted_val_dataloaders, budget, 'val')
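# For instance (hypothetical budget value), get_budgeted_train_dataloader(10.0)
# trains on a random 10% slice of the training texts (bumped up to at least
# 1000 samples when the full set has more than 1000), and the loader is cached
# per budget so repeated calls reuse the same split.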
def clean_text(self, text):
text = text.lower()
# Replace contractions with their longer forms
contractions = { "ain't": "am not", "aren't": "are not", "can't": "cannot", "can't've": "cannot have",
"'cause": "because", "could've": "could have", "couldn't": "could not", "couldn't've": "could not have",
"doesn't": "does not", "don't": "do not", "hadn't": "had not", "hadn't've": "had not have",
"hasn't": "has not", "haven't": "have not", "he'd": "he would", "he'd've": "he would have",
"he'll": "he will", "he's": "he is", "how'd": "how did", "how'll": "how will",
"how's": "how is", "i'd": "i would", "i'll": "i will", "i'm": "i am", "didn't": "did not",
"i've": "i have", "isn't": "is not", "it'd": "it would", "it'll": "it will",
"it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not",
"might've": "might have", "mightn't": "might not", "must've": "must have", "mustn't": "must not",
"needn't": "need not", "oughtn't": "ought not", "shan't": "shall not", "sha'n't": "shall not",
"she'd": "she would", "she'll": "she will", "she's": "she is", "should've": "should have",
"shouldn't": "should not", "that'd": "that would", "that's": "that is", "there'd": "there had",
"there's": "there is", "they'd": "they would", "they'll": "they will", "they're": "they are",
"they've": "they have", "wasn't": "was not", "we'd": "we would", "we'll": "we will",
"we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will",
"what're": "what are", "what's": "what is", "what've": "what have", "where'd": "where did",
"where's": "where is", "who'll": "who will", "who's": "who is", "won't": "will not",
"wouldn't": "would not", "you'd": "you would", "you'll": "you will", "you're": "you are"}
text = text.split()
new_text = []
for word in text:
if word in contractions:
new_text.append(contractions[word])
else:
new_text.append(word)
text = " ".join(new_text)
# format & remove punctuation
        word_pattern = re.compile(r"[\w']+")
words = word_pattern.findall(text)
# deal with stopwords
stops = set(stopwords.words("english"))
words = [w for w in words if not w in stops]
text = " ".join(words)
return text
def prepare_data(self, texts, labels):
texts = [self.clean_text(text) for text in texts]
# now validate texts length
new_texts = []
new_labels = []
for i in range(len(texts)):
text = texts[i]
words = text.split()
if len(text) > 0 and len(words) > 0 and len(words[0]) > 0 and len(words) < self.max_length:
new_texts.append(text)
new_labels.append(labels[i])
self.lang = Lang(new_texts, max_vocab_size=self.max_vocab_size)
return self.lang, new_texts, new_labels
|
[
"azarov.alexey@gmail.com"
] |
azarov.alexey@gmail.com
|
7088906d9e21682ec196b3de383756e59736e76e
|
2061d5ccea97aee9b92e267cf6f47a9a7700c67f
|
/exec/catkin_ws/src/sub2/sub2/path_pub.py
|
7b16ca445825f936bbbda9177882bdd896d5873f
|
[] |
no_license
|
lovelyunsh/ggobugi
|
23630286453b00d3200b2eee547b8447ede449de
|
19de6b0070f533917f1a665b69fe0eec0749ba0b
|
refs/heads/master
| 2023-06-13T04:02:05.245161
| 2021-07-11T09:25:30
| 2021-07-11T09:25:30
| 384,886,343
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,932
|
py
|
import rclpy
from rclpy.node import Node
from geometry_msgs.msg import Twist,PoseStamped
from squaternion import Quaternion
from nav_msgs.msg import Odometry,Path
from math import pi,cos,sin,sqrt
import tf2_ros
import os
# The path_pub node reads the text file produced by the make_path node and publishes it as the global path (/global_path).
# Starting from the point on the global path closest to the robot, it also builds the local path (/local_path) that the robot actually follows.
# Node logic order
# 1. Create the publisher and subscriber
# 2. Open the previously recorded path data in read mode
# 3. Read the path data and fill a Path message
# 4. Create a timer callback that runs every period and set local_path_size
# 5. Find the point on global_path closest to the robot
# 6. Handle the local_path edge case near the end of the path
# 7. Throttle the global_path publishing rate
class pathPub(Node):
def __init__(self):
super().__init__('path_pub')
        # Logic 1. Create the publisher and subscriber
self.global_path_pub = self.create_publisher(Path, 'global_path', 10)
self.local_path_pub = self.create_publisher(Path, 'local_path', 10)
self.subscription = self.create_subscription(Odometry,'/odom',self.listener_callback,10)
self.odom_msg=Odometry()
self.is_odom=False
        ## Global path message
self.global_path_msg=Path()
self.global_path_msg.header.frame_id='/map'
        '''
        Logic 2. Open the previously recorded path data in read mode
        The absolute path is built the same way as in make_path.
        Since this file name is one letter shorter than make_path, slice the string at -47 instead of -48.
        '''
now_path = os.path.abspath(__file__)
full_path = now_path[:-47] + 'src/sub2/path/path.txt'
self.f = open(full_path, 'r')
        '''
        Logic 3. Read the path data and fill the Path message
        Read x and y from each line of the file and append them as poses of the global path.
        '''
lines = self.f.readlines()
for line in lines :
tmp = line.split()
read_pose = PoseStamped()
read_pose.pose.position.x = float(tmp[0])
read_pose.pose.position.y = float(tmp[1])
read_pose.pose.orientation.w= 1.0
self.global_path_msg.poses.append(read_pose)
self.f.close()
        # Logic 4. Create the periodic timer callback and set local_path_size
time_period=0.02
self.timer = self.create_timer(time_period, self.timer_callback)
self.local_path_size=30
self.count=0
def listener_callback(self,msg):
self.is_odom=True
self.odom_msg=msg
def timer_callback(self):
if self.is_odom ==True:
local_path_msg=Path()
local_path_msg.header.frame_id='/map'
x=self.odom_msg.pose.pose.position.x
y=self.odom_msg.pose.pose.position.y
print(x,y)
current_waypoint=-1
            '''
            Logic 5. Find the point on global_path closest to the robot
            Compute the distance to each waypoint, keep the minimum,
            and store the corresponding index as current_waypoint.
            '''
min_dis= float('inf')
for i,waypoint in enumerate(self.global_path_msg.poses) :
distance = sqrt(pow(x-waypoint.pose.position.x,2)+pow(y-waypoint.pose.position.y,2))
if distance < min_dis :
min_dis= distance
current_waypoint = i
            '''
            Logic 6. Handle the local_path edge case
            Copy the x, y coordinates of local_path_size waypoints starting at current_waypoint.
            If fewer than local_path_size waypoints remain before the end of global_path_msg,
            copy waypoints up to the end of global_path_msg into local_path_msg instead.
            '''
if current_waypoint != -1 :
if current_waypoint + self.local_path_size < len(self.global_path_msg.poses):
for num in range(current_waypoint, current_waypoint + self.local_path_size):
tmp_pose = PoseStamped()
tmp_pose.pose.position.x = self.global_path_msg.poses[num].pose.position.x
tmp_pose.pose.position.y = self.global_path_msg.poses[num].pose.position.y
tmp_pose.pose.orientation.w = 1.0
local_path_msg.poses.append(tmp_pose)
else :
for num in range(current_waypoint, len(self.global_path_msg.poses)):
tmp_pose = PoseStamped()
tmp_pose.pose.position.x = self.global_path_msg.poses[num].pose.position.x
tmp_pose.pose.position.y = self.global_path_msg.poses[num].pose.position.y
tmp_pose.pose.orientation.w = 1.0
local_path_msg.poses.append(tmp_pose)
self.local_path_pub.publish(local_path_msg)
        # Logic 7. Throttle the global_path publishing rate
        # Publish global_path only once every 10 timer callbacks.
if self.count%10==0 :
self.global_path_pub.publish(self.global_path_msg)
self.count+=1
def main(args=None):
rclpy.init(args=args)
path_pub = pathPub()
rclpy.spin(path_pub)
path_pub.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
[
"lovelyunsh@naver.com"
] |
lovelyunsh@naver.com
|
8d528dbb4e558b13890b46dbb917d8bf39391ff8
|
74047f69c5ebd83f6807978ccbed1fa7bdaaa3ca
|
/hackerrank/16Whats'sYouar_name.py
|
dce3f30ebdcf12c478643ebd2c2e581d66b655af
|
[] |
no_license
|
Ashwinbicholiya/Python
|
9b9e6731e3dcb0ccd298cf502f0a82962a52c7f0
|
11e748f105d77f2830a3dd48e3b72f80f3d8d18d
|
refs/heads/master
| 2023-01-02T22:29:53.079325
| 2020-10-30T08:49:17
| 2020-10-30T08:49:17
| 261,976,363
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
def print_full_name(a, b):
print("Hello",a,b,end="! You just delved into python.")
if __name__ == '__main__':
first_name = input()
last_name = input()
print_full_name(first_name, last_name)
|
[
"ashwinbicholiya@gmail.com"
] |
ashwinbicholiya@gmail.com
|
44581f682eadb599a6020821c63ad5255dc830d6
|
1a0501254cf9ae6edc6b21bc2acada59f2949f9d
|
/dev_algorithms/sensfunc/pca_utils.py
|
0c16300dc55dc0b51c0a9f77fc08d9e058830933
|
[] |
no_license
|
pypeit/PypeIt-development-suite
|
50e73b27f59910dbcd33c365b33a05cf2bbaddc1
|
38cd140fd11f84169e0ec81197a1363d218717cd
|
refs/heads/main
| 2023-08-30T15:40:15.912065
| 2023-07-20T13:36:47
| 2023-07-20T13:36:47
| 57,143,048
| 7
| 10
| null | 2023-09-13T20:05:22
| 2016-04-26T16:11:32
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,322
|
py
|
import numpy as np
import pickle
import scipy.interpolate
from sklearn import mixture
def init_pca(filename,wave_grid,redshift):
# Read in the pickle file from coarse_pca.create_coarse_pca
# The relevant pieces are the wavelengths (wave_pca_c), the PCA components (pca_comp_c),
# and the Gaussian mixture model prior (mix_fit)
    (wave_pca_c, cont_all_c, pca_comp_c, coeffs_c,
     mean_pca, covar_pca, diff_pca, mix_fit, chi2, dof) = pickle.load(open(filename, 'rb'))
num_comp = pca_comp_c.shape[0] # number of PCA components
# Interpolate PCA components onto wave_grid
pca_interp = scipy.interpolate.interp1d(wave_pca_c*(1+redshift),pca_comp_c,
bounds_error=False, axis=1)
nord = wave_grid.shape[1]
pca_comp_new = np.zeros(wave_grid.shape)
for ii in range(nord):
pca_comp_new[ii] = pca_interp(wave_grid[:,ii])
# Construct the PCA dict
pca_dict = {'n_components': num_comp, 'components': pca_comp_new,
'prior': mix_fit, 'coeffs': coeffs_c}
return pca_dict
def eval_pca(theta,pca_dict):
C = pca_dict['components']
norm = theta[0]
A = theta[1:]
return norm*np.exp(np.dot(np.append(1.0,A),C))
def eval_pca_prior(theta,pca_dict):
gmm = pca_dict['prior']
A = theta[1:]
return gmm.score_samples(A.reshape(1,-1))
|
[
"davies@physics.ucsb.edu"
] |
davies@physics.ucsb.edu
|
bf3cf876db7defb907e789131f7c20ac13e5f551
|
522abc504683433e903fccfdd16b039cad5d8c8c
|
/suite-api-tool/metrics_table.py
|
e5fc498e08c02eda42638c30d05d0610ff23e30a
|
[] |
no_license
|
lucid281/python-vrops-api-tool
|
f8f73647d512ab08d292adc4e58c926aa63df348
|
37a22f7215a4c82006bca5d1a79af4053e334411
|
refs/heads/master
| 2021-03-19T16:50:28.890764
| 2016-12-07T14:33:22
| 2016-12-07T14:33:22
| 76,059,427
| 0
| 0
| null | 2016-12-09T18:15:56
| 2016-12-09T18:15:56
| null |
UTF-8
|
Python
| false
| false
| 1,766
|
py
|
from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem
from easy_table import EasyTable
import time
class MetricsTable(EasyTable):
def __init__(self, clipboard):
super(MetricsTable, self).__init__(clipboard)
self.reInit()
def reInit(self):
self.setSelectionBehavior(QTableWidget.SelectRows)
self.setColumnCount(2)
headers = list()
headers.append("Name")
headers.append("Value")
for i, header in enumerate(headers):
header_item = QTableWidgetItem()
header_item.setText(header)
self.setHorizontalHeaderItem(i, header_item)
def addMetrics(self, metrics):
for metric in metrics:
self.addMetric(metric)
self.resizeColumnsToContents()
def addMetric(self, metric):
row_index = self.rowCount()
self.insertRow(self.rowCount())
metric_name = QTableWidgetItem()
metric_value = QTableWidgetItem()
metric_timestamp = QTableWidgetItem()
metric_name.setText(metric['key'])
metric_value.setText(str(metric.get('value', '')))
self.setItem(row_index, 0, metric_name)
self.setItem(row_index, 1, metric_value)
if(metric.get('timestamp', None) is not None):
if self.columnCount() < 3:
self.setColumnCount(3)
timestamp_header = QTableWidgetItem()
timestamp_header.setText("Timestamp")
self.setHorizontalHeaderItem(2, timestamp_header)
timestamp = time.strftime(
'%Y-%m-%d %H:%M:%S',
time.localtime(metric['timestamp'] / 1000))
metric_timestamp.setText(timestamp)
self.setItem(row_index, 2, metric_timestamp)
|
[
"keith.stouffer@bluemedora.com"
] |
keith.stouffer@bluemedora.com
|
116eda60e59049793ffcbcd9626a083b67de10f2
|
77a2a754ed5f120b05082a41926214352c92397e
|
/Basic18_function_2.py
|
1b82683b287f262e1850eb7aaf83d58c21f3c1a3
|
[] |
no_license
|
apotree/Python
|
dd3bfd91d6f1efa2248cc565ac02912d5203c732
|
de370f4f56dd5954650fb1b52558c7b4b82315cd
|
refs/heads/master
| 2020-11-26T13:25:05.698994
| 2020-01-13T15:02:51
| 2020-01-13T15:02:51
| 229,085,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
import re
import sys
from urllib.request import urlopen
def urlopenRead():
f = urlopen("https://www.hanbit.co.kr/store/books/full_book_list.html")
bytes_content = f.read()
return bytes_content
def urlprocess(bytes_content):
scanned_text = bytes_content[:1024].decode("ascii", errors="replace")
match = re.search(r'charset=["\']?([\w-]+)', scanned_text)
if match:
encoding = match.group(1)
else:
encoding = "utf-8"
return encoding
def urlPrint(encoding, bytes_content):
print("encoding : ", encoding, file=sys.stderr)
text = bytes_content.decode(encoding)
print(text)
def main():
bytes_content = urlopenRead()
encoding = urlprocess(bytes_content)
urlPrint(encoding, bytes_content)
main()
|
[
"noreply@github.com"
] |
apotree.noreply@github.com
|
116c19a853a43a404640ba3646429f4fd9880b0a
|
86f39c676edc656988317149b9ac2d28f0337462
|
/OceanColor/utils.py
|
f5c7bcac31626cc5b0b2e67fd7b0552b55c97e60
|
[
"BSD-3-Clause"
] |
permissive
|
castelao/OceanColor
|
5813f22de58a49688c2d0a64c6a0c3d35c8dec98
|
343de1c537f326d90f350dbaef3712781316b33b
|
refs/heads/main
| 2023-05-24T00:29:18.119622
| 2022-07-07T15:04:54
| 2022-07-07T15:04:54
| 318,619,654
| 13
| 1
|
NOASSERTION
| 2022-11-21T18:11:03
| 2020-12-04T19:46:09
|
Python
|
UTF-8
|
Python
| false
| false
| 3,079
|
py
|
"""Miscellaneous utils such as flag mask decoding
"""
import logging
import os
module_logger = logging.getLogger("OceanColor.utils")
def oceancolorrc():
"""Path to custom configuration
Define the path to the user custom configuration, such as EarthData's
username to be used.
The default path is at the user's home directory .config/oceancolor, but
that can be modified by defining an environment variable OCEANCOLOR_DIR.
Example
-------
>>> import os.path
>>> print(os.path.join(oceancolorrc(), 'main.ini'))
/Users/guilherme/.config/oceancolor/main.ini
"""
path = os.path.expanduser(os.getenv("OCEANCOLOR_DIR", "~/.config/oceancolor"))
return path
def decode_L2_flagmask(flag_mask: int):
"""Decode Ocean Color flag mask
Some Ocean Color products use bitwise quality flags. This function converts
those bits parsed as an integer into a list of flag labels. For instance,
    the binary 0010 equals 2 in decimal and means that the second flag (LAND)
is active.
Parameters
----------
flag_mask : int
The bitwise flag parsed as uint
Returns
-------
list of str
        List of flags activated by the given `flag_mask`
References
----------
Flags reference:
https://oceancolor.gsfc.nasa.gov/atbd/ocl2flags/
Examples
--------
>>> decode_L2_flagmask(1073741828)
['PRODWARN', 'PRODFAIL']
Notes
-----
Some key flags used for L3 products:
- ATMFAIL: Atmospheric correction failure
- LAND: Pixel is over land
- HIGLINT: Sunglint: reflectance exceeds threshold
- HILT: Observed radiance very high or saturated
- HISATZEN: Sensor view zenith angle exceeds threshold
- STRAYLIGHT: Probable stray light contamination
- CLDICE: Probable cloud or ice contamination
- COCCOLITH: Coccolithophores detected
- HISOLZEN: Solar zenith exceeds threshold
- LOWLW: Very low water-leaving radiance
- CHLFAIL: Chlorophyll algorithm failure
- NAVWARN: Navigation quality is suspect
- MAXAERITER: Maximum iterations reached for NIR iteration
- CHLWARN: Chlorophyll out-of-bounds
- ATMWARN: Atmospheric correction is suspect
- NAVFAIL: Navigation failure
- HIPOL: High degree of polarization determined
"""
# Full labels list and values
# flag_masks = 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, 67108864, 134217728, 268435456, 536870912, 1073741824, -2147483648 ;
    flag_labels = "ATMFAIL LAND PRODWARN HIGLINT HILT HISATZEN COASTZ SPARE STRAYLIGHT CLDICE COCCOLITH TURBIDW HISOLZEN SPARE LOWLW CHLFAIL NAVWARN ABSAER SPARE MAXAERITER MODGLINT CHLWARN ATMWARN SPARE SEAICE NAVFAIL FILTER SPARE BOWTIEDEL HIPOL PRODFAIL SPARE"
flag_labels = flag_labels.split()
flags = []
for i, b in enumerate(bin(flag_mask)[:1:-1]):
if b == '1':
flags.append(flag_labels[i])
return flags
|
[
"noreply@github.com"
] |
castelao.noreply@github.com
|
08eeac8944557c1ac603baffeb311e2264f589bc
|
20cb496059c08c2cf657a96079367c18bc3d1f17
|
/Exrop.py
|
8e7e545e6a553a35dab27af87cf893c6307a1794
|
[] |
no_license
|
idkwim/exrop
|
3310b5d16a76a64dbf8314364895c3568d876f60
|
aec69a955a4a419bb25c3708ee96813d6f427560
|
refs/heads/master
| 2020-12-22T18:22:53.332438
| 2020-01-27T00:03:07
| 2020-01-27T00:03:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,557
|
py
|
from ChainBuilder import ChainBuilder
from RopChain import RopChain
from os import popen
def parseRopGadget(filename):
cmd = 'ROPgadget --binary {} --only "pop|xchg|add|sub|xor|mov|ret|jmp|call|leave" --dump | tail -n +3 | head -n -2'.format(filename)
with popen(cmd) as fp:
sample_gadgets = dict()
datas = fp.read().strip().split("\n")
datas.sort(key=len) # sort by length
for data in datas:
addr,insns = data.split(" : ")
insstr,opcode_hex = insns.split(" // ")
opcode = bytes.fromhex(opcode_hex)
addr = int(addr, 16)
sample_gadgets[addr] = (insstr,opcode)
return sample_gadgets
class Exrop(object):
def __init__(self, binary):
self.binary = binary
self.chain_builder = ChainBuilder()
def find_gadgets(self, cache=False):
if cache:
fcname = "./{}.exrop_cache".format(self.binary.replace("/", "_"))
try:
with open(fcname, "rb") as fc:
objpic = fc.read()
self.chain_builder.load_analyzed_gadgets(objpic)
return
except FileNotFoundError:
fc = open(fcname, "wb")
gadgets = parseRopGadget(self.binary)
self.chain_builder.load_list_gadget_string(gadgets)
self.chain_builder.analyzeAll()
if cache:
objpic = self.chain_builder.save_analyzed_gadgets()
fc.write(objpic)
fc.close()
def load_raw_gadgets(self, gadgets):
pass
def stack_pivot(self, addr, avoid_char=None):
self.chain_builder.solve_pivot(addr, avoid_char)
ropchain = self.chain_builder.build_chain()
return ropchain
def set_regs(self, regs, next_call=None, avoid_char=None):
self.chain_builder.set_regs(regs)
self.chain_builder.solve_chain(avoid_char)
ropchain = self.chain_builder.build_chain(next_call)
return ropchain
def set_writes(self, writes, next_call=None, avoid_char=None):
self.chain_builder.set_writes(writes)
self.chain_builder.solve_chain_write(avoid_char=avoid_char)
ropchain = self.chain_builder.build_chain(next_call)
return ropchain
def set_string(self, strs, next_call=None):
BSIZE = 8
writes = dict()
for addr,sstr in strs.items():
tmpaddr = 0
for i in range(0, len(sstr), BSIZE):
tmpstr = int.from_bytes(bytes(sstr[i:i+BSIZE]+"\x00", 'utf-8'), 'little')
writes[addr+tmpaddr] = tmpstr
tmpaddr += BSIZE
return self.set_writes(writes, next_call)
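    # A quick worked example of the packing above (hypothetical address, for
    # illustration only): set_string({0x601000: "/bin/sh"}) appends a NUL,
    # splits the string into 8-byte chunks and stores each chunk as a
    # little-endian qword, i.e. writes == {0x601000: 0x0068732f6e69622f},
    # which set_writes then turns into a write-what-where rop chain.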
def func_call(self, func_addr, args, rwaddr=None, convention="sysv"):
order_reg = ["rdi", "rsi", "rdx", "rcx", "r8", "r9"]
regsx86_64 = ["rax", "rbx", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"]
regs = dict()
ropchain = RopChain()
for i in range(len(args)):
arg = args[i]
if isinstance(arg, str) and arg not in regsx86_64:
assert rwaddr, "Please define read write addr"
arg += "\x00"
chain = self.set_string({rwaddr:arg})
ropchain.merge_ropchain(chain)
regs[order_reg[i]] = rwaddr
rwaddr += len(arg)
continue
regs[order_reg[i]] = arg
chain = self.set_regs(regs, func_addr)
ropchain.merge_ropchain(chain)
return ropchain
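    # Hypothetical end-to-end sketch (addresses are placeholders, not from the
    # original project): to call system("/bin/sh") one might write
    #   rop = Exrop("./target"); rop.find_gadgets(cache=True)
    #   chain = rop.func_call(system_addr, ("/bin/sh",), rwaddr=bss_addr)
    # which stores the string at rwaddr, loads its address into rdi and then
    # jumps to system_addr.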
|
[
"n0psledbyte@gmail.com"
] |
n0psledbyte@gmail.com
|
b4b84e70e368be74afd3f48be1eb91d8890fec44
|
f5da6740463adf52e847c99976099509120b3c1e
|
/write_funcs.py
|
d85ce2b5c4d830b526e7f0f77bd7150b065113c4
|
[] |
no_license
|
Jamespage6978/Coffee_Log
|
17a1c5718438db809936d360711924d3f48da651
|
eea80227822a46e1306d0854929c4b604cde1e18
|
refs/heads/master
| 2020-12-14T23:00:41.239201
| 2020-01-24T14:17:16
| 2020-01-24T14:17:16
| 234,901,823
| 1
| 0
| null | 2020-01-24T14:05:28
| 2020-01-19T13:06:12
|
Python
|
UTF-8
|
Python
| false
| false
| 756
|
py
|
from config import configuration as config
from read_funcs import log_ToDict
#
import os, json, pandas
def write_html():
Log_df = log_ToDict()
pandas.set_option('colheader_justify', 'center')
if config['Switches']['location'] == 'local':
path = "Coffee_Log.html"#f"{config['Local']['location']}Coffe_Log.html"
html_string = '''<html>
<head><title>HTML Pandas Dataframe with CSS</title></head>
<link rel="stylesheet" type="text/css" href="static\df_style.css"/>
<body>
{table}
</body>
    </html>
'''
with open(path,'w') as outfile:
outfile.write(html_string.format(table=Log_df.to_html(classes='mystyle')))
#outfile.write(Log_df.to_html())
if __name__ == "__main__":
write_html()
|
[
"JPAGE8@jaguarlandrover.com"
] |
JPAGE8@jaguarlandrover.com
|
4d391c601c627eb4ae4a91d2a90a2777a56b3591
|
96ce221c32910f0c0ea327fa33c36f2e5472f33b
|
/chrombpnet/evaluation/invivo_footprints/run_tfmodisco.py
|
e0d5da4557c51043cd29edc7f7644353da297d3b
|
[
"MIT"
] |
permissive
|
kundajelab/chrombpnet
|
ba6eaac10ccb63bdf4831dfb96b10fd9220a5ec7
|
d3684388512ad2248ac1c2d1dfd36167b634e816
|
refs/heads/master
| 2023-08-22T01:47:11.764016
| 2023-07-25T18:15:34
| 2023-07-25T18:15:34
| 388,665,623
| 48
| 14
|
MIT
| 2023-08-28T06:21:21
| 2021-07-23T03:28:33
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 9,973
|
py
|
import h5py
import os
import numpy as np
import tqdm
from collections import OrderedDict
import modisco
import click
import pickle as pkl
import modisco.visualization
import deepdish
def import_shap_scores_part2(
shap_scores_hdf5, peak_table, center_cut_size=None, chrom_set=None,
remove_non_acgt=True
):
scores = deepdish.io.load(shap_scores_hdf5)
#open_file = open(shap_scores_hdf5, 'rb')
#score_dict = pkl.load(open_file)
flank=center_cut_size
hyp_impscores = []
impscores = []
onehot_seqs = []
coords = []
score_type="profile_shap"
center = scores['shap']['seq'].shape[-1]//2
start = center - flank
end = center + flank
for i in scores['shap']['seq']:
hyp_impscores.append(i[:,start:end].transpose())
for i in scores['projected_shap']['seq']:
impscores.append(i[:,start:end].transpose())
for i in scores['raw']['seq']:
onehot_seqs.append(i[:,start:end].transpose())
assert(scores['shap']['seq'].shape[0]==peak_table.shape[0])
#print(peak_table.head())
for i in range(peak_table.shape[0]):
coords.append(np.array([peak_table.loc[i,"peak_chrom"], int(peak_table.loc[i,"peak_start"].item()), int(peak_table.loc[i,"peak_end"].item())], dtype=object))
#print(coords[-1])
hyp_impscores = np.array(hyp_impscores)
onehot_seqs = np.array(onehot_seqs)
impscores = np.array(impscores)
coords=np.array(coords)
return hyp_impscores, impscores, onehot_seqs, coords
def import_shap_scores(
shap_scores_hdf5, hyp_score_key, center_cut_size=None, chrom_set=None,
remove_non_acgt=True
):
"""
Imports the SHAP scores generated/saved by `make_shap_scores.py`, and
returns the hypothetical importance scores, actual importance scores, and
one-hot encoded sequences.
Arguments:
`shap_scores_hdf5`: path to HDF5 of SHAP scores generated by
`make_shap_scores.py`
`hyp_score_key`: the key that specifies hypothetical importance scores
in `shap_scores_hdf5`
`center_cut_size`: if specified, keeps only scores/sequences of this
centered length; by default uses the entire length given in the
SHAP scores
`chrom_set`: list of chromosomes to restrict to; if None, use all
chromosomes available in the SHAP scores
`remove_non_acgt`: if True, remove any sequences (after being cut down
to size) which have a base other than ACGT (e.g. N)
    Returns the hypothetical importance scores, actual importance scores,
    corresponding one-hot encoded input sequences, and coordinates. The first
    three are N x L x 4 arrays, where L is the cut size (or default size), and
    the last is an N x 3 object array.
"""
score_reader = h5py.File(shap_scores_hdf5, "r")
# For defining shapes
num_seqs, input_length, _ = score_reader[hyp_score_key].shape
if not center_cut_size:
center_cut_size = input_length
cut_start = (input_length // 2) - (center_cut_size // 2)
cut_end = cut_start + center_cut_size
# For batching up data loading
batch_size = min(1000, num_seqs)
num_batches = int(np.ceil(num_seqs / batch_size))
# Read in hypothetical scores and input sequences in batches
hyp_scores = np.empty((num_seqs, center_cut_size, 4))
act_scores = np.empty((num_seqs, center_cut_size, 4))
one_hot_seqs = np.empty((num_seqs, center_cut_size, 4))
coords = np.empty((num_seqs, 3), dtype=object)
for i in tqdm.trange(num_batches, desc="Importing SHAP scores"):
batch_slice = slice(i * batch_size, (i + 1) * batch_size)
hyp_score_batch = score_reader[hyp_score_key][
batch_slice, cut_start:cut_end
]
one_hot_seq_batch = score_reader["input_seqs"][
batch_slice, cut_start:cut_end
]
chrom_batch = score_reader["coords_chrom"][batch_slice].astype(str)
start_batch = score_reader["coords_start"][batch_slice]
end_batch = score_reader["coords_end"][batch_slice]
hyp_scores[batch_slice] = hyp_score_batch
one_hot_seqs[batch_slice] = one_hot_seq_batch
act_scores[batch_slice] = hyp_score_batch * one_hot_seq_batch
coords[batch_slice, 0] = chrom_batch
coords[batch_slice, 1] = start_batch
coords[batch_slice, 2] = end_batch
score_reader.close()
if chrom_set:
mask = np.isin(coords[:, 0], chrom_set)
hyp_scores, act_scores, one_hot_seqs, coords = \
hyp_scores[mask], act_scores[mask], one_hot_seqs[mask], coords[mask]
if remove_non_acgt:
# Remove any examples in which the input sequence is not all ACGT
mask = np.sum(one_hot_seqs, axis=(1, 2)) == center_cut_size
hyp_scores, act_scores, one_hot_seqs, coords = \
hyp_scores[mask], act_scores[mask], one_hot_seqs[mask], coords[mask]
return hyp_scores, act_scores, one_hot_seqs, coords
def import_tfmodisco_results(
tfm_results_path, hyp_scores, one_hot_seqs, center_cut_size
):
"""
Imports the TF-MoDISco results object.
Arguments:
`tfm_results_path`: path to HDF5 containing TF-MoDISco results
`hyp_scores`: hypothetical importance scores used for this run, an
N x L x 4 array
`one_hot_seqs`: input sequences used for this run, an N x L x 4 array
`center_cut_size`: centered cut size of SHAP scores used; the input
sequences may already have length L equal to this, or if they are
longer the sequences will be cut
Although this function is not used to run TF-MoDISco in this script, it can
be useful for importing the results later (in conjuction with
`import_shap_scores`).
"""
assert hyp_scores.shape == one_hot_seqs.shape
input_length = hyp_scores.shape[1]
if input_length != center_cut_size:
# Everything not cut to `center_cut_size`
assert input_length > center_cut_size
cut_start = (input_length // 2) - (center_cut_size // 2)
cut_end = cut_start + center_cut_size
hyp_scores = hyp_scores[:, cut_start:cut_end]
one_hot_seqs = one_hot_seqs[:, cut_start:cut_end]
act_scores = hyp_scores * one_hot_seqs
track_set = modisco.tfmodisco_workflow.workflow.prep_track_set(
task_names=["task0"],
contrib_scores={"task0": act_scores},
hypothetical_contribs={"task0": hyp_scores},
one_hot=one_hot_seqs
)
with h5py.File(tfm_results_path,"r") as f:
return modisco.tfmodisco_workflow.workflow.TfModiscoResults.from_hdf5(f, track_set=track_set)
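# A minimal, hypothetical usage sketch for the helper above (not executed by
# this script; file paths and the score key are placeholders):
#
#   hyp_scores, act_scores, one_hot_seqs, coords = import_shap_scores(
#       "shap_scores.h5", "hyp_scores", center_cut_size=400)
#   tfm_results = import_tfmodisco_results(
#       "tfmodisco_results.h5", hyp_scores, one_hot_seqs, center_cut_size=400)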
@click.command()
@click.argument("shap_scores_hdf5", nargs=1)
@click.option(
"--hyp-score-key", "-k", default="hyp_scores",
help="Key in `shap_scores_hdf5` that corresponds to the hypothetical importance scores; defaults to 'hyp_scores'"
)
@click.option(
"--outfile", "-o", required=True,
help="Where to store the HDF5 with TF-MoDISco results"
)
@click.option(
"--seqlet-outfile", "-s", default=None,
help="If specified, save the seqlets here in a FASTA file"
)
@click.option(
"--plot-save-dir", "-p", default=None,
help="If specified, save the plots here instead of CWD/figures"
)
@click.option(
"--center-cut-size", "-c", default=400,
help="Size of input sequences to compute explanations for; defaults to 400"
)
@click.option(
"--chrom-set", "-r", default=None,
help="A comma-separated list of chromosomes to limit TF-MoDISco to; by default uses all available in the SHAP scores"
)
def main(
shap_scores_hdf5, hyp_score_key, outfile, seqlet_outfile, plot_save_dir,
center_cut_size, chrom_set
):
"""
Takes the set of importance scores generated by `make_shap_scores.py` and
runs TF-MoDISco on them.
"""
if chrom_set:
chrom_set = chrom_set.split(",")
hyp_scores, act_scores, input_seqs, _ = import_shap_scores(
shap_scores_hdf5, hyp_score_key, center_cut_size, chrom_set
)
task_to_hyp_scores, task_to_act_scores = OrderedDict(), OrderedDict()
task_to_hyp_scores["task0"] = hyp_scores
task_to_act_scores["task0"] = act_scores
# Construct workflow pipeline
tfm_workflow = modisco.tfmodisco_workflow.workflow.TfModiscoWorkflow(
sliding_window_size=21,
flank_size=10,
target_seqlet_fdr=0.05,
seqlets_to_patterns_factory=modisco.tfmodisco_workflow.seqlets_to_patterns.TfModiscoSeqletsToPatternsFactory(
embedder_factory=modisco.seqlet_embedding.advanced_gapped_kmer.AdvancedGappedKmerEmbedderFactory(),
trim_to_window_size=30,
initial_flank_to_add=10,
final_min_cluster_size=30
)
)
# Move to output directory to do work
cwd = os.getcwd()
os.makedirs(os.path.dirname(outfile), exist_ok=True)
os.chdir(os.path.dirname(outfile))
tfm_results = tfm_workflow(
task_names=list(task_to_act_scores.keys()),
contrib_scores=task_to_act_scores,
hypothetical_contribs=task_to_hyp_scores,
one_hot=input_seqs,
plot_save_dir=plot_save_dir
)
os.chdir(cwd)
print("Saving results to %s" % outfile)
with h5py.File(outfile, "w") as f:
tfm_results.save_hdf5(f)
if seqlet_outfile:
print("Saving seqlets to %s" % seqlet_outfile)
seqlets = \
tfm_results.metacluster_idx_to_submetacluster_results[0].seqlets
bases = np.array(["A", "C", "G", "T"])
with open(seqlet_outfile, "w") as f:
for seqlet in seqlets:
sequence = "".join(
bases[np.argmax(seqlet["sequence"].fwd, axis=-1)]
)
example_index = seqlet.coor.example_idx
start, end = seqlet.coor.start, seqlet.coor.end
f.write(">example%d:%d-%d\n" % (example_index, start, end))
f.write(sequence + "\n")
if __name__ == "__main__":
main()
|
[
"annashcherbina@gmail.com"
] |
annashcherbina@gmail.com
|
dd75fec5a435ab11787e0058c7914dcc869756a6
|
4e8876d7b29cf9fb05849da77553b8a7e3783bdc
|
/src/plugins/processing/algs/gdal/aspect.py
|
3296c51fcd427c594057708043cbd596f6888b7f
|
[] |
no_license
|
hydrology-tep/hep-qgis-plugin-lite
|
48477f504b6fc1a9a9446c7c7f5666f4b2ccfee7
|
781cbaa1b3e9331de6741dd44a22322048ab176c
|
refs/heads/master
| 2021-03-27T17:01:18.284421
| 2018-06-27T12:09:58
| 2018-06-27T12:09:58
| 70,825,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,875
|
py
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
aspect.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputRaster
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class aspect(GdalAlgorithm):
INPUT = 'INPUT'
BAND = 'BAND'
COMPUTE_EDGES = 'COMPUTE_EDGES'
ZEVENBERGEN = 'ZEVENBERGEN'
TRIG_ANGLE = 'TRIG_ANGLE'
ZERO_FLAT = 'ZERO_FLAT'
OUTPUT = 'OUTPUT'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Aspect')
self.group, self.i18n_group = self.trAlgorithm('[GDAL] Analysis')
self.addParameter(ParameterRaster(self.INPUT, self.tr('Input layer')))
self.addParameter(ParameterNumber(
self.BAND, self.tr('Band number'), 1, 99, 1))
self.addParameter(ParameterBoolean(
self.COMPUTE_EDGES, self.tr('Compute edges'), False))
self.addParameter(ParameterBoolean(self.ZEVENBERGEN,
self.tr("Use Zevenbergen&Thorne formula (instead of the Horn's one)"),
False))
self.addParameter(ParameterBoolean(self.TRIG_ANGLE,
self.tr('Return trigonometric angle (instead of azimuth)'), False))
self.addParameter(ParameterBoolean(self.ZERO_FLAT,
self.tr('Return 0 for flat (instead of -9999)'), False))
self.addOutput(OutputRaster(self.OUTPUT, self.tr('Aspect')))
def getConsoleCommands(self):
arguments = ['aspect']
arguments.append(unicode(self.getParameterValue(self.INPUT)))
output = unicode(self.getOutputValue(self.OUTPUT))
arguments.append(output)
arguments.append('-of')
arguments.append(GdalUtils.getFormatShortNameFromFilename(output))
arguments.append('-b')
arguments.append(unicode(self.getParameterValue(self.BAND)))
if self.getParameterValue(self.COMPUTE_EDGES):
arguments.append('-compute_edges')
if self.getParameterValue(self.ZEVENBERGEN):
arguments.append('-alg')
arguments.append('ZevenbergenThorne')
if self.getParameterValue(self.TRIG_ANGLE):
arguments.append('-trigonometric')
if self.getParameterValue(self.ZERO_FLAT):
arguments.append('-zero_for_flat')
return ['gdaldem', GdalUtils.escapeAndJoin(arguments)]
|
[
"joaa@localhost.localdomain"
] |
joaa@localhost.localdomain
|
65d996dee76c2c05d9fbf3c120fa1a74bde4c2e1
|
9805edf2b923c74cf72a3cfb4c2c712255256f15
|
/python/075_sort_colors.py
|
73cf05fef89358bf173c0d3d70846bc4b36c98cd
|
[
"MIT"
] |
permissive
|
jixinfeng/leetcode-soln
|
5b28e49c2879cdff41c608fc03628498939b0e99
|
24cf8d5f1831e838ea99f50ce4d8f048bd46c136
|
refs/heads/master
| 2022-10-12T17:02:53.329565
| 2022-10-06T03:21:56
| 2022-10-06T03:21:56
| 69,371,757
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,058
|
py
|
"""
Given an array with n objects colored red, white or blue, sort them so that
objects of the same color are adjacent, with the colors in the order red, white
and blue.
Here, we will use the integers 0, 1, and 2 to represent the color red, white,
and blue respectively.
Note:
You are not supposed to use the library's sort function for this problem.
Follow up:
A rather straightforward solution is a two-pass algorithm using counting
sort. First, iterate the array counting the number of 0's, 1's, and 2's, then
overwrite the array with the total number of 0's, then 1's, followed by 2's.
Could you come up with a one-pass algorithm using only constant space?
"""
import collections


class Solution(object):
def sortColors(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
        count = collections.Counter(nums)
        loc = 0
        # iterate over the colors in sorted order (0, 1, 2) so the overwrite is correct
        for color in sorted(count):
            for i in range(count[color]):
                nums[loc] = color
                loc += 1
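
# The follow-up above asks for a one-pass, constant-space algorithm; the sketch
# below (classic Dutch national flag partitioning) is one way to do it, added
# here for illustration and not part of the original submission.
def sort_colors_one_pass(nums):
    """Sort 0/1/2 values in place in a single pass with O(1) extra space."""
    low, mid, high = 0, 0, len(nums) - 1
    while mid <= high:
        if nums[mid] == 0:
            nums[low], nums[mid] = nums[mid], nums[low]
            low += 1
            mid += 1
        elif nums[mid] == 1:
            mid += 1
        else:  # nums[mid] == 2
            nums[mid], nums[high] = nums[high], nums[mid]
            high -= 1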
|
[
"ufjfeng@users.noreply.github.com"
] |
ufjfeng@users.noreply.github.com
|
9fa62a43186e25880e889b1f0f912ed7aa4dd2c1
|
21d5b313142910c7e00e97f99cf8257fe370007c
|
/client_raspberrypi/test_only/test.py
|
8c95084bf4dce29115f97583575b4a87d648b1a5
|
[] |
no_license
|
pym7857/self-driving-rc-car
|
1d08aa8c3da9723577ac5d7ac1cab94d5bade748
|
05f022552d729712dc9df762079d0a1f5105acf9
|
refs/heads/master
| 2022-12-16T15:38:27.487287
| 2020-08-27T03:02:53
| 2020-08-27T03:02:53
| 274,446,475
| 1
| 0
| null | 2020-06-23T15:48:19
| 2020-06-23T15:48:18
| null |
UTF-8
|
Python
| false
| false
| 3,783
|
py
|
import socket
import cv2
import numpy
from queue import Queue
from _thread import *
from time import *
import RPi.GPIO as GPIO
from time import sleep
# ====================== dist sensor ======================
GPIO.setmode(GPIO.BCM)
GPIO_TRIGGER = 19
GPIO_ECHO = 26
print("Ultrasonic Distance Measurement")
# Set the trigger pin (which emits the ultrasonic pulse) to output mode and the echo pin (which receives the reflection) to input mode.
GPIO.setup(GPIO_TRIGGER,GPIO.OUT)
GPIO.setup(GPIO_ECHO,GPIO.IN)
# ====================== dist sensor ======================
# Motor state
STOP = 0
FORWARD = 1
# Motor channel
CHLU = 0
CHLD = 1
CHRU = 2
CHRD = 3
# Drive state
S = 0
F = 1
B = 2
FR = 3
FL = 4
FS = 5
# PIN input output setting
OUTPUT = 1
INPUT = 0
# PIN setting
HIGH = 1
LOW = 0
# Real PIN define
# PWM PIN(BCM PIN)
ENLD = 5
ENRU = 24
ENRD = 25
ENLU = 6
# GPIO PIN
IN1 = 16
IN2 = 12# Left Down
IN3 = 4
IN4 = 17 # Right Up
IN5 = 21
IN6 = 20 # Right Down
IN7 = 27
IN8 = 22 # Left Up
# GPIO Library Setting
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# PIN setting algorithm
def setPinConfig(EN, INF, INO): # EN, OFF, ON
GPIO.setup(EN, GPIO.OUT)
GPIO.setup(INF, GPIO.OUT)
GPIO.setup(INO, GPIO.OUT)
    # Activate PWM at 100 Hz
pwm = GPIO.PWM(EN, 100)
# First, PWM is stop
pwm.start(0)
return pwm
# Motor control algorithm
def setMotorControl(pwm, INO, INF, speed, stat):
# Motor speed control to PWM
pwm.ChangeDutyCycle(speed)
# Forward
if stat == FORWARD:
GPIO.output(INO, HIGH)
GPIO.output(INF, LOW)
# STOP
elif stat == STOP:
GPIO.output(INO, LOW)
GPIO.output(INF, LOW)
# Motor control easily
def setMotor(ch, speed, stat):
if ch == CHLD:
setMotorControl(pwmLD, IN1, IN2, speed, stat)
elif ch == CHRU:
setMotorControl(pwmRU, IN3, IN4, speed, stat)
elif ch == CHRD:
setMotorControl(pwmRD, IN5, IN6, speed, stat)
elif ch == CHLU:
setMotorControl(pwmLU, IN7, IN8, speed, stat)
# Motor Pin Setting(global var)
pwmLD = setPinConfig(ENLD, IN1, IN2) #in 100Hz
pwmRU = setPinConfig(ENRU, IN3, IN4) #in 100Hz
pwmRD = setPinConfig(ENRD, IN5, IN6) #in 100Hz
pwmLU = setPinConfig(ENLU, IN7, IN8) #in 100Hz
#print('ENLU, ENLD, ENRU, ENRD = ',ENLU, ENLD, ENRU, ENRD)
# Drive algorithm
def setdrive(drv,T):
if drv == S:
setMotor(CHLU, 80, STOP) #setSpeed=80
setMotor(CHLD, 80, STOP)
setMotor(CHRU, 80, STOP)
setMotor(CHRD, 80, STOP)
sleep(T)
elif drv == F:
setMotor(CHLU, 60, FORWARD)
setMotor(CHLD, 60, FORWARD)
setMotor(CHRU, 60, FORWARD)
setMotor(CHRD, 60, FORWARD)
sleep(T)
elif drv == FL:
setMotor(CHLU, 50, STOP)
setMotor(CHLD, 30, FORWARD)
setMotor(CHRU, 65, FORWARD)
setMotor(CHRD, 65, FORWARD)
sleep(T)
elif drv == FR:
setMotor(CHLU, 65, FORWARD)
setMotor(CHLD, 65, FORWARD)
setMotor(CHRU, 50, STOP)
setMotor(CHRD, 30, FORWARD)
sleep(T)
elif drv == FS:
setMotor(CHLU, 45, FORWARD)
setMotor(CHLD, 45, FORWARD)
setMotor(CHRU, 45, FORWARD)
setMotor(CHRD, 45, FORWARD)
sleep(T)
cap = cv2.VideoCapture(0) # 960x540
#cap = cv2.VideoCapture('testvideo.mp4') # 1920x1080 -> error
def threaded():
setdrive(F,5)
setdrive(S,3)
setdrive(F,5)
start = time()
start_new_thread(threaded,())
while(cap.isOpened()):
if(time() - start > 5 and time() - start < 8):
print('human')
else:
print('None')
ret,image = cap.read()
    cv2.imshow('image',image) # show the result image
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
[
"49298852+yudazzi@users.noreply.github.com"
] |
49298852+yudazzi@users.noreply.github.com
|
1dd28a6a0e85a2ae3e21d709c6bfd648c22531c2
|
8cfc0164a94a4beae3add41a28b0eb1dcbbd6217
|
/tests/test_update_status.py
|
84b578271c5667921aa86fceb47b9fbf3d5fa5ab
|
[
"MIT"
] |
permissive
|
voxbone-workshop/voxbone-workshop-cachet-uptime-robot
|
f2fa202ffa23626d337908f033e7ad9d3c9a389e
|
bbc414d2393f2f5c5733478d887b5e57b8106577
|
refs/heads/master
| 2022-12-17T04:44:55.714904
| 2020-09-24T11:15:00
| 2020-09-24T11:15:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,734
|
py
|
import unittest.mock as mock
import pytest
import update_status
class TestMonitor(object):
def test_send_data_to_cachet_updates_the_component_status(self, monitor, uptimerobot_monitor):
website_config = monitor.monitor_list[uptimerobot_monitor['id']]
with mock.patch('update_status.CachetHq') as cachet:
monitor.sync_metric = lambda x, y: None
monitor.send_data_to_cachet(uptimerobot_monitor)
cachet().update_component.assert_called_with(
website_config['component_id'],
int(uptimerobot_monitor['status'])
)
@pytest.mark.skip
def test_send_data_to_cachet_updates_data_metrics(self, monitor, uptimerobot_monitor):
website_config = monitor.monitor_list[uptimerobot_monitor['id']]
with mock.patch('update_status.CachetHq') as cachet:
monitor.sync_metric = lambda x, y: None
monitor.send_data_to_cachet(uptimerobot_monitor)
cachet().set_data_metrics.assert_called_with(
uptimerobot_monitor['custom_uptime_ratio'],
mock.ANY,
website_config['metric_id']
)
def test_sync_metric(self, monitor, cachet, uptimerobot_monitor, cachet_metric):
future_date = 999999999999
cachet_metric['created_at'] = '2017-01-01 00:00:00'
cachet_metric_unixtime = 1483228800
cachet_mock = mock.create_autospec(cachet)
cachet_mock.get_last_metric_point.return_value = cachet_metric
assert len(uptimerobot_monitor['response_times']) >= 3, \
'We need at least 3 response times to run the tests'
uptimerobot_monitor['response_times'][-1]['datetime'] = future_date
uptimerobot_monitor['response_times'][-2]['datetime'] = future_date
monitor.sync_metric(uptimerobot_monitor, cachet_mock)
expected_response_times = [
x for x in uptimerobot_monitor['response_times']
if x['datetime'] > cachet_metric_unixtime
]
assert cachet_mock.set_data_metrics.call_count == len(expected_response_times)
for response_time in expected_response_times:
cachet_mock.set_data_metrics.assert_any_call(
response_time['value'],
response_time['datetime'],
mock.ANY
)
@pytest.fixture
def monitor_list():
return {
'6516846': {
'cachet_api_key': 'CACHET_API_KEY',
'cachet_url': 'http://status.example.org',
'metric_id': '1',
'component_id': '1',
},
}
@pytest.fixture
def cachet_metric():
return {
'id': 1,
'metric_id': 1,
'value': 100,
'created_at': '2017-08-25 17:17:14',
'updated_at': '2017-08-25 17:17:14',
'counter': 1,
'calculated_value': 100,
}
@pytest.fixture
def uptimerobot_monitor(monitor_list):
monitors_ids = [m for m in monitor_list.keys()]
id = monitors_ids[0]
return {
'url': 'monitor_url',
'friendly_name': 'friendly_name',
'id': id,
'status': '2', # UP,
'custom_uptime_ratio': '100',
'response_times': [
{'datetime': 1, 'value': 609},
{'datetime': 2, 'value': 625},
{'datetime': 3, 'value': 687},
{'datetime': 4, 'value': 750},
{'datetime': 5, 'value': 750},
{'datetime': 6, 'value': 922},
]
}
@pytest.fixture
def monitor(monitor_list):
api_key = 'UPTIME_ROBOT_API_KEY'
return update_status.Monitor(monitor_list, api_key)
@pytest.fixture
def cachet():
return update_status.CachetHq(
cachet_api_key='CACHET_API_KEY',
cachet_url='CACHET_URL'
)
|
[
"info@dutchmasterserver.nl"
] |
info@dutchmasterserver.nl
|
a9e138243d1bba91d7e0b6494adc04abef165626
|
9d5ae8cc5f53f5aee7247be69142d9118769d395
|
/409. Longest Palindrome.py
|
d0591fb2730737228ef30913c8ada08dc22b0d14
|
[] |
no_license
|
BITMystery/leetcode-journey
|
d4c93319bb555a7e47e62b8b974a2f77578bc760
|
616939d1599b5a135747b0c4dd1f989974835f40
|
refs/heads/master
| 2020-05-24T08:15:30.207996
| 2017-10-21T06:33:17
| 2017-10-21T06:33:17
| 84,839,304
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
import collections
class Solution(object):
def longestPalindrome(self, s):
"""
:type s: str
:rtype: int
"""
c = collections.Counter(s)
flag = any([c[letter] % 2 == 1 for letter in c]) # there is a letter appears odd times
res = 0
for letter in c:
res += c[letter] if c[letter] % 2 == 0 else c[letter] - 1
return res + 1 if flag else res
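
# Worked example (illustrative, not part of the original file): for
# s = "abccccdd" the counts are {a: 1, b: 1, c: 4, d: 2}; the even
# contributions give 4 + 2 = 6, at least one letter appears an odd number of
# times so one character can sit in the middle, and the answer is 6 + 1 = 7
# ("dccaccd" is one such palindrome).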
|
[
"noreply@github.com"
] |
BITMystery.noreply@github.com
|
0f446d9f7a66f9e41e806f1ef156a722ba7f5b9d
|
80ec8e8134ce879cb8b90211f14022de722d303a
|
/data_load_util.py
|
3305877806ce45456f1e9d8cce1b604241e21130
|
[] |
no_license
|
riturajkush/ImageCaptionGenerator
|
a4ed34fe75ba118dea8d91bae6ae669a49429edb
|
bb343fe0c1cb53e65d48617d0adeb474f0559536
|
refs/heads/master
| 2021-09-18T14:25:52.054496
| 2018-07-14T23:17:07
| 2018-07-14T23:17:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,379
|
py
|
from numpy import array
from pickle import load
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input, Dense, LSTM, Embedding, Dropout
from keras.layers.merge import add
from nltk.translate.bleu_score import corpus_bleu
from numpy import argmax
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.vgg16 import preprocess_input
# Load document
def load_doc(filename):
# Read only mode
file = open(filename, 'r')
text = file.read()
file.close()
return text
# Load Dataset
def load_set(filename):
doc = load_doc(filename)
dataset = list()
for line in doc.split('\n'):
# skip empty lines
if len(line) < 1:
continue
# get the image identifier
identifier = line.split('.')[0]
dataset.append(identifier)
return set(dataset)
# Load Cleaned Descriptions
def load_clean_descriptions(filename, dataset):
doc = load_doc(filename)
descriptions = dict()
for line in doc.split('\n'):
# Split by whitespace
tokens = line.split()
# Split ID and Description
image_id, image_desc = tokens[0], tokens[1:]
# Skip images if they don't belong to the dataset
if image_id in dataset:
# Create list
if image_id not in descriptions:
descriptions[image_id] = list()
# Wrap description in tokens
desc = 'startseq ' + ' '.join(image_desc) + ' endseq'
# Store
descriptions[image_id].append(desc)
return descriptions
# Load Photo Features
def load_photo_features(filename, dataset):
# Load All
all_features = load(open(filename, 'rb'))
# Filter
features = {k: all_features[k] for k in dataset}
return features
# Description dictionary to List
def to_lines(descriptions):
all_desc = list()
for key in descriptions.keys():
[all_desc.append(d) for d in descriptions[key]]
return all_desc
# Fit KERAS tokenizer
def create_tokenizer(descriptions):
lines = to_lines(descriptions)
tokenizer = Tokenizer()
tokenizer.fit_on_texts(lines)
return tokenizer
# Max Length of Description with most words
def get_max_length(descriptions):
lines = to_lines(descriptions)
return max(len(d.split()) for d in lines)
# Create sequences of images, input sequences and output words for an image
def create_sequences(tokenizer, max_length, descriptions, photos, vocab_size):
X1, X2, y = list(), list(), list()
# Iterate through every image identifier
for key, desc_list in descriptions.items():
# Iterate through each description for the image
for desc in desc_list:
# Encode
seq = tokenizer.texts_to_sequences([desc])[0]
# Split one sequence into multiple X,y pairs
for i in range(1, len(seq)):
# Split into I/O pair
in_seq, out_seq = seq[:i], seq[i]
# Pad input sequence
in_seq = pad_sequences([in_seq], maxlen=max_length)[0]
# Encode
out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]
# Store
X1.append(photos[key][0])
X2.append(in_seq)
y.append(out_seq)
return array(X1), array(X2), array(y)
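
# Illustrative example of the expansion above (the caption text is made up):
# the caption "startseq a dog runs endseq" yields four training samples for
# its photo -- ("startseq" -> "a"), ("startseq a" -> "dog"),
# ("startseq a dog" -> "runs") and ("startseq a dog runs" -> "endseq") --
# with each input sequence left-padded to max_length and each output word
# one-hot encoded over the vocabulary.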
# Define Model
def define_model(vocab_size, max_length):
# Feature Extractor
inputs1 = Input(shape=(4096,))
fe1 = Dropout(0.5)(inputs1)
fe2 = Dense(256, activation='relu')(fe1)
# Sequence Model
inputs2 = Input(shape=(max_length,))
se1 = Embedding(vocab_size, 256, mask_zero=True)(inputs2)
se2 = Dropout(0.5)(se1)
se3 = LSTM(256)(se2)
# Decoder model
decoder1 = add([fe2, se3])
decoder2 = Dense(256, activation='relu')(decoder1)
outputs = Dense(vocab_size, activation='softmax')(decoder2)
# Combine [image, seq] [word]
model = Model(inputs=[inputs1, inputs2], outputs=outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# summarize model
print(model.summary())
plot_model(model, to_file='model.png', show_shapes=True)
return model
# Integer -> Word Mapping
def word_for_id(integer, tokenizer):
for word, index in tokenizer.word_index.items():
if index == integer:
return word
return None
# Generate Image Description
def generate_desc(model, tokenizer, photo, max_length):
# seed the generation process
in_text = 'startseq'
# Iterate over whole sequence length
for i in range(max_length):
# integer encode input sequence
sequence = tokenizer.texts_to_sequences([in_text])[0]
# pad input
sequence = pad_sequences([sequence], maxlen=max_length)
# predict next word
yhat = model.predict([photo, sequence], verbose=0)
# convert probability to integer
yhat = argmax(yhat)
# map integer to word
word = word_for_id(yhat, tokenizer)
# stop if we cannot map the word
if word is None:
break
# append as input for generating the next word
in_text += ' ' + word
# stop if we predict the end of the sequence
if word == 'endseq':
break
return in_text
# Evaluate model performance
def evaluate_model(model, descriptions, photos, tokenizer, max_length):
actual, predicted = list(), list()
# step over the whole set
for key, desc_list in descriptions.items():
# generate description
yhat = generate_desc(model, tokenizer, photos[key], max_length)
# store actual and predicted
references = [d.split() for d in desc_list]
actual.append(references)
predicted.append(yhat.split())
# calculate BLEU score
print('BLEU-1: %f' % corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0)))
print('BLEU-2: %f' % corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0)))
print('BLEU-3: %f' % corpus_bleu(actual, predicted, weights=(0.3, 0.3, 0.3, 0)))
print('BLEU-4: %f' % corpus_bleu(actual, predicted, weights=(0.25, 0.25, 0.25, 0.25)))
# Load Training Set
def load_training_set():
print('\nLoading Train Set\n')
# load training dataset (6K)
filename = 'data/Flickr8k_text/Flickr_8k.trainImages.txt'
train = load_set(filename)
print('Dataset:\t' + str(len(train)))
# Descriptions
train_descriptions = load_clean_descriptions('data/descriptions.txt', train)
print('Descriptions (Train):\t' + str(len(train_descriptions)))
# Photo features
train_features = load_photo_features('data/features.pkl', train)
print('Photos (Train):\t' + str(len(train_features)))
# Prepare tokenizer
tokenizer = create_tokenizer(train_descriptions)
vocab_size = len(tokenizer.word_index) + 1
print('Vocabulary Size:\t' + str(vocab_size))
# Get maximum sequence length
max_length = get_max_length(train_descriptions)
print('Description Length:\t' + str(max_length))
# Prepare sequences
X1train, X2train, ytrain = create_sequences(tokenizer, max_length, train_descriptions, train_features,
vocab_size=vocab_size)
return X1train, X2train, ytrain, vocab_size, max_length, tokenizer
# Load Test Set
def load_test_set(vocab_size, max_length, tokenizer):
print('\nLoading Test Set\n')
# Load Test set
filename = 'data/Flickr8k_text/Flickr_8k.devImages.txt'
test = load_set(filename)
print('Dataset:\t' + str(len(test)))
# Descriptions
test_descriptions = load_clean_descriptions('data/descriptions.txt', test)
print('Descriptions (Test):\t' + str(len(test_descriptions)))
# Photo features
test_features = load_photo_features('data/features.pkl', test)
print('Photos (Test):\t' + str(len(test_features)))
# Prepare sequences
X1test, X2test, ytest = create_sequences(tokenizer, max_length, test_descriptions, test_features,
vocab_size=vocab_size)
return X1test, X2test, ytest, test_descriptions, test_features
# Extract photo feature
def extract_features(filename):
model = VGG16()
model.layers.pop()
model = Model(inputs=model.inputs, outputs=model.layers[-1].output)
# Load photo
image = load_img(filename, target_size=(224, 224))
image = img_to_array(image)
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
image = preprocess_input(image)
feature = model.predict(image, verbose=0)
return feature
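
# Hypothetical single-image inference sketch (the model, tokenizer and
# max_length would come from a trained run; the file name is a placeholder):
#
#   photo = extract_features('example.jpg')
#   caption = generate_desc(model, tokenizer, photo, max_length)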
# Init Data Function
def init_data_load():
print('\nData Load Initialized\n')
X1train, X2train, ytrain, vocab_size, max_length, tokenizer = load_training_set()
X1test, X2test, ytest, test_descriptions, test_features = load_test_set(vocab_size, max_length, tokenizer)
print('\nData Load Ended\n')
return X1train, X2train, ytrain, vocab_size, max_length, tokenizer,\
X1test, X2test, ytest, test_descriptions, test_features
if __name__ == "__main__":
init_data_load()
|
[
"athite@uncc.edu"
] |
athite@uncc.edu
|
fb91b13645150b24c04ab1b24ee854b199663fe0
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_101/ch19_2020_03_18_01_37_43_330625.py
|
0877da9fb9fd60863c40c5ca83a1ee479aa9f09e
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
def classifica_triangulo(x, y, z):
    if x == y == z:
        return "equilátero"
    elif x != y and y != z and x != z:
        return "escaleno"
    else:
        return "isósceles"
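
# Example calls (illustrative): classifica_triangulo(2, 2, 2) -> "equilátero",
# classifica_triangulo(2, 3, 4) -> "escaleno", and
# classifica_triangulo(2, 3, 2) -> "isósceles"; the original chained
# comparison x != y != z only checks x != y and y != z, so it would have
# mislabeled this last case as "escaleno".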
|
[
"you@example.com"
] |
you@example.com
|