blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2-616) | content_id (string, len 40) | detected_licenses (list, len 0-69) | license_type (2 classes) | repo_name (string, len 5-118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, len 2-10.3M) | authors (list, len 1) | author_id (string, len 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
39d3c65e4d93784ae062ff36808bf9d14da2a29c
|
3133bae8bc6cd35e81cba6a7695ee9a3831035e3
|
/scenario_util.py
|
f282589c221ae71718a00b400aba36ffb55e0d07
|
[
"Apache-2.0"
] |
permissive
|
chrisgarcia001/Jepson-2014-2015
|
79e93f18fd5de107e852119ebd24274ae77947c6
|
d257d5f66d82c31d884bf8be4513ff70420aa04b
|
refs/heads/master
| 2021-01-17T07:33:53.862615
| 2019-06-18T20:35:43
| 2019-06-18T20:35:43
| 42,882,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,580
|
py
|
# --------------------------------------------------------------------------------------
# Author: cgarcia@umw.edu
# About: This file contains utility functions and classes used specifically in
# running scenarios and generating result reports
# --------------------------------------------------------------------------------------
import util as ut
from scipy import stats
import math
import random as rd
from knn import *
#-------------------------- STATISTICAL FUNCTIONS ------------------------
# Performs 2-sample proportion test of form:
# H0: p1 = p2, H1: p1 != p2
# Sample 1 and sample 2 are lists of 0's and 1's
# Returns a p-value
def proportion_test(sample_1, sample_2):
n1 = float(len(sample_1))
n2 = float(len(sample_2))
p1 = float(sum(sample_1)) / n1
p2 = float(sum(sample_2)) / n2
z = (p1 - p2) / math.sqrt(((p1 * (1.0 - p1)) / n1) + ((p2 * (1.0 - p2)) / n2))
    return 2.0 * stats.norm.sf(abs(z))  # two-sided p-value
# Get simple mean of the values.
def mean(vals):
return float(sum(vals)) / float(len(vals))
#-------------------------- UTILITY CLASSES ------------------------------
# This is a basic logger which prints output to the command line and
# writes the log file to the specified output file.
class BasicLogger(object):
def __init__(self):
self.lines = []
def log(self, line, level='standard'):
if level.lower() == 'report':
self.lines.append(str(line))
print(line)
def write(self, output_file):
ut.write_file("\n".join(self.lines), output_file)
# This is a simple class to record and accumulate artifacts
# generated in a scenario
class ScenarioRecorder(object):
def __init__(self):
self.records = {}
# Add a new value to the specified key's value list
def add(self, key, val):
        if key not in self.records:
self.records[key] = []
self.records[key].append(val)
# Set a key's value
def set(self, key, val):
self.records[key] = val
# Get whatever is corresponding to the key
def get(self, key):
        if key in self.records:
return self.records[key]
return 'NA'
    # If the key holds a list of lists, join them all together
    # into one master list before returning.
    def get_flatten(self, key):
        try:
            return [x for sub in self.records[key] for x in sub]
        except (KeyError, TypeError):
            return self.get(key)
    # Get the keys for this recorder. If a prefix is specified,
    # get only the keys which start with the prefix.
    def keys(self, prefix=None):
        if prefix is not None:
            return [k for k in self.records.keys() if k.startswith(prefix)]
        return list(self.records.keys())
#-------------------------- UTILITY FUNCTIONS ----------------------------
# A solver is a function f: user -> msg
# Each element in solvers is a (solver, solver name) pair
def execute_trial(train_data, test_users, data_gen, solvers, recorder,
trial_name = None, measures_per_user = 1,
logger = None):
results = []
if trial_name == None:
trial_name = ''
else:
trial_name = ': ' + trial_name
    logger_f = logger.log if logger is not None else (lambda x, y: None)
logger_f('Executing comparison trial' + str(trial_name), 'standard')
for (f, solver_name) in solvers:
logger_f(" Starting solver: " + solver_name, 'standard')
start_time = ut.curr_time()
        msgs = list(map(f, test_users))
elapsed = ut.curr_time() - start_time
resps = []
for i in range(measures_per_user):
resps += data_gen.gen_responses(test_users, msgs)
        correct_frac = float(sum(resps)) / float(len(resps))  # resps already spans all measures
results.append((solver_name, correct_frac, elapsed, resps))
add = lambda att, val: recorder.add(solver_name + '.' + str(att), val)
add('correct_frac', correct_frac)
add('responses', resps)
recorder.add('elapsed_time', elapsed)
logger_f(" Results (correct%, elapsed time): " + str((correct_frac, elapsed)), 'standard')
# A trial_initializer_f is a function which takes a recorder and logger as input and returns a tuple:
# (train_data, test_users, data_generator, [(solver_f, name)])
# An analyzer_f is a procedure which takes these args (in order):
# 1) a recorder
# 2) a logger,
# 3) a list of solver names with the following convention:
# Control solvers start with control_ and treatment solvers start with solver_
def run_trials(trial_initializer_f, analyzer_f, num_trials, recorder, logger):
recorder.set('num_trials', num_trials)
main_start_time = ut.curr_time()
for t in range(1, num_trials + 1):
trial_start = ut.curr_time()
logger.log('Starting new trial, initializing...', 'standard')
train_data, test_users, data_generator, solvers = trial_initializer_f(recorder, logger)
logger.log(' Time initializing: ' + str(ut.curr_time() - trial_start) + ' sec.', 'standard')
execute_trial(train_data, test_users, data_generator, solvers, recorder,
trial_name = 'Trial ' + str(t), logger = logger)
main_elapsed = ut.curr_time() - main_start_time
recorder.set('main.elapsed_time', main_elapsed)
    analyzer_f(recorder, logger, [name for (_, name) in solvers])
# For a list of test users and test messages, return the n best-performing.
# Used for a control case to compare other algorithms to.
# **NOTE: param msgs can be either 1) an integer, or 2) a list of pre-made messages
# If it is an integer, the specified number of random messages will be generated.
def n_best_messages(users, data_gen, msgs, n):
    if isinstance(msgs, int):
        msgs = data_gen.gen_random_inters(msgs)
    rows = list(zip(*data_gen.gen_crossprod_rows(users, msgs)))
    mcount = lambda m: sum(x[2] for x in rows if x[1] == m)
    results = [(msg, mcount(msg)) for msg in msgs]
    return [msg for (msg, _) in ut.top_n(results, n, lambda y: y[1])]
# Build (solver, name) pairs for each of the 3 standard controls
# which can go into execute_trial.
# **NOTE: param msgs can be either 1) an integer, or 2) a list of pre-made messages
# If it is an integer, the specified number of random messages will be generated.
def build_std_control_solvers(calibration_users, data_gen, msgs = 100, top_n = 15):
b = data_gen
    if isinstance(msgs, int):
msgs = n_best_messages(calibration_users, b, msgs, msgs)
best_msgs = n_best_messages(calibration_users, b, msgs, top_n)
# Control 1: select a random message each time
ctrl_1 = lambda u: rd.sample(msgs, 1)[0]
# Control 2: Always give the best performing out of the 100
ctrl_2 = lambda u: best_msgs[0]
# Control 3: randomly select one of the top 15 messages for each user
ctrl_3 = lambda u: rd.sample(best_msgs, 1)[0]
solvers = [(ctrl_1, 'control_1'),
(ctrl_2, 'control_2'),
(ctrl_3, 'control_3')]
return solvers
# Builds all KNN solvers in (solver, name) pairs, which can
# go into execute_trial.
def build_all_knn_optims(train_data, calibration_users, data_gen, recorder,
min_k = 1, max_k = 15):
b = data_gen
op = KNNOptimizer()
op.set_data_rows(train_data)
op.set_similarity_f(match_count)
asf_1 = build_weighted_mode_selector(lambda x: 1)
asf_2 = build_weighted_mode_selector(lambda x: 10**x)
asf_3 = build_weighted_max_pos_proportion_selector(lambda x: 1)
asf_4 = build_weighted_max_pos_proportion_selector(lambda x: 10**x)
response_f = lambda u, m: b.gen_response(u, m)
k1 = op.find_best_k(calibration_users, min_k, max_k, asf_1, response_f)
k2 = op.find_best_k(calibration_users, min_k, max_k, asf_2, response_f)
k3 = op.find_best_k(calibration_users, min_k, max_k, asf_3, response_f)
k4 = op.find_best_k(calibration_users, min_k, max_k, asf_4, response_f)
recorder.add('solver_1.k', k1)
recorder.add('solver_2.k', k2)
recorder.add('solver_3.k', k3)
recorder.add('solver_4.k', k4)
    print('k1, k2, k3, k4: ' + str((k1, k2, k3, k4)))
f_1 = lambda u: op.optimize(u, k1, asf_1)
f_2 = lambda u: op.optimize(u, k2, asf_2)
f_3 = lambda u: op.optimize(u, k3, asf_3)
f_4 = lambda u: op.optimize(u, k4, asf_4)
solvers = [(f_1, 'solver_1'),
(f_2, 'solver_2'),
(f_3, 'solver_3'),
(f_4, 'solver_4')
]
return solvers
# Builds standard (mode-based) KNN solvers in (solver, name) pairs,
# which can go into execute_trial.
def build_std_knn_optims(train_data, calibration_users, data_gen, recorder,
min_k = 1, max_k = 15):
b = data_gen
op = KNNOptimizer()
op.set_data_rows(train_data)
op.set_similarity_f(match_count)
asf_1 = build_weighted_mode_selector(lambda x: 1)
asf_2 = build_weighted_mode_selector(lambda x: 10**x)
response_f = lambda u, m: b.gen_response(u, m)
k1 = op.find_best_k(calibration_users, min_k, max_k, asf_1, response_f)
k2 = op.find_best_k(calibration_users, min_k, max_k, asf_2, response_f)
recorder.add('solver_1.k', k1)
recorder.add('solver_2.k', k2)
print('k1, k2: ' + str((k1, k2)))
f_1 = lambda u: op.optimize(u, k1, asf_1)
f_2 = lambda u: op.optimize(u, k2, asf_2)
solvers = [(f_1, 'solver_1'),
(f_2, 'solver_2')
]
return solvers
def standard_analyzer_f(recdr, logr, solver_names):
    log = lambda *x: logr.log(' '.join(str(y) for y in x), 'report')
    key = lambda x, y=None: str(x) + '.' + str(y) if y is not None else str(x)
    get = lambda prefix, att=None: recdr.get(key(prefix, att))
    fget = lambda prefix, att=None: recdr.get_flatten(key(prefix, att))
    pt = lambda s1, s2: proportion_test(fget(s1), fget(s2))
    ctrls = [x for x in solver_names if x.startswith('control')]
    tmts = [x for x in solver_names if x.startswith('solver')]
log('-------------------- RESULTS ------------------------')
log('Number of trials: ', get('num_trials'))
for s in tmts:
log(s + ' avg. k: ', mean(get(s, 'k')))
for s in ctrls:
log(s + ' avg. success %: ', mean(get(s, 'correct_frac')),
', (min, max) success %: ', (min(get(s, 'correct_frac')), max(get(s, 'correct_frac'))))
for s in tmts:
log(s + ' avg. success %: ', mean(get(s, 'correct_frac')),
', (min, max) success %: ', (min(get(s, 'correct_frac')), max(get(s, 'correct_frac'))))
for c in ctrls:
for s in tmts:
log(s + ' vs. ' + c + ' (p-val): ', pt(s + '.responses', c + '.responses'))
    for i in range(len(tmts) - 1):
        for j in range(i + 1, len(tmts)):
            log(tmts[i] + ' vs. ' + tmts[j] + ' (p-val): ',
                pt(tmts[i] + '.responses', tmts[j] + '.responses'))
for s in tmts:
for c in ctrls:
            log('Avg ' + s + '/ ' + c + ' ratio: ', mean(get(s, 'correct_frac')) / mean(get(c, 'correct_frac')))
log('-------------------- TOTAL ELAPSED TIME: ', get('main', 'elapsed_time'), ' sec.')
|
[
"chrisgarcia001@gmail.com"
] |
chrisgarcia001@gmail.com
|
15d7ea48d653c4315a963b1fbdfb91c15ea276e2
|
b41e47e8cfad55bb7af75bc603b96968058e0163
|
/logloader.py
|
55eb63f504ef1613d9737db5555f1e154e735fa6
|
[
"Apache-2.0"
] |
permissive
|
xaviercallens/xLogReducer
|
bb4d2c59b648570741bd98a5107c83e3e748e5ba
|
40005419022c02454eca7b027d76b99b3dfad543
|
refs/heads/main
| 2023-08-30T15:09:46.591898
| 2021-11-11T02:22:57
| 2021-11-11T02:22:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,656
|
py
|
import re
import time
import pandas as pd
class LogLoader(object):
def __init__(self, headLength, isMulti, headRegex, maxLength):
self.headLength = headLength
self.isMulti = isMulti
if (headRegex):
self.headRegex = re.compile(headRegex)
self.maxLength = maxLength
self.splitregex = re.compile(r'(\s+|\|)')
def formalize_message(self, lines):
def get_content(line):
count = 0
in_head = False
for idx, i in enumerate(line):
#print(i)
if not self.splitregex.search(i):
if not (in_head):
count += 1
in_head = True
else:
in_head = False
if (self.headLength + 1 == count):
return line[idx:].strip()
return line.strip()
#print("{}: count -> {}".format(line, count))
def get_head(line_seg, headers, delimer):
head_count = 0
for idx, se in enumerate(line_seg):
if (head_count >= self.headLength):
break
if (idx % 2 == 0):
headers[head_count].append(se)
else:
delimer[head_count].append(se)
head_count += 1
def get_segment(line):
temp_seg = []
spliter = ""
for i in self.splitregex.split(line):
if i == "":
continue
if (self.splitregex.search(i)):
spliter += i
else:
temp_seg.append(spliter)
spliter = ""
return temp_seg
log_messages = []
count = 0
fail_count = 0
headers = dict()
header_delimer = dict()
for i in range(0, self.headLength):
headers[i] = []
header_delimer[i] = []
if (self.isMulti):
start = True
now_res = ""
for line in lines:
if not line.strip():
fail_count += 1
continue
line_seg = self.splitregex.split(line.strip())
match = self.headRegex.search(line_seg[0])
content_line = get_content(line)
if match: #New start
get_head(line_seg, headers, header_delimer)
if(start):
start = False
now_res = content_line
else:
if (len(now_res) > self.maxLength):
fail_count += 1
continue
log_messages.append(now_res)
now_res = content_line
count += 1
else: #Continue
if(start):
fail_count += 1
continue
else:
now_res += "\n" + line.strip()
else:
for line in lines:
line_seg = self.splitregex.split(line.strip())
if not line.strip():
fail_count += 1
continue
get_head(line_seg, headers, header_delimer)
content_line = get_content(line)
if (len(content_line) > self.maxLength):
fail_count += 1
continue
log_messages.append(content_line)
count += 1
return log_messages, fail_count, headers, header_delimer
def load_to_dataframe(self, log_filepath):
""" Function to transform log file to dataframe
"""
print('Loading log messages to dataframe...')
t1 = time.time()
lines = []
with open(log_filepath, 'r', encoding="utf-8", errors="ignore") as fid:
lines = fid.readlines()
print("Total lines {}".format(len(lines)))
log_messages = []
log_messages, failed_size, headers, head_delimer = self.formalize_message(lines)
log_dataframe = pd.DataFrame(log_messages, columns=['Content'])
print("Success load logs#: {}, Failed load lines#: {}".format(len(log_messages), failed_size))
t2 = time.time()
print('Time taken {:.2f}s'.format(t2-t1))
return log_dataframe, headers, head_delimer
|
[
"theweijy@gmail.com"
] |
theweijy@gmail.com
|
bb9d408626f63725098b97755d18a10ce482a59a
|
162ef444a06fa20293f177411b9c04359eda7c87
|
/azure_sensor.py
|
1169564cb761570d6283b6465332857a8af224cb
|
[] |
no_license
|
tonggehuang/Wavetronix_raw_info_pull
|
ffccfda7555f9c2008dfa3aa0a27f0ac620ef628
|
2dbe77b2e11d9c5b7c52e080db0e728c5ae03f9b
|
refs/heads/master
| 2021-04-30T05:52:52.166997
| 2018-05-31T16:14:52
| 2018-05-31T16:14:52
| 121,428,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
import os
import urllib.request
import xml.etree.ElementTree as ET
import pandas as pd
url = "http://reactorfeeds.org/feeds/stations"
request = urllib.request.Request(url, headers={"Accept" : "text/xml"})
contents = urllib.request.urlopen(request).read()
root = ET.fromstring(contents)
try:
os.remove("streamlist.csv")
except OSError:
pass
stream = []
# Iterate over the child elements directly (Element.getchildren() is deprecated).
for x in root:
try:
Station = x[0].text
Detectorid = x[1].text
detectorName = x[2].text
Direction = x[6].text
except IndexError:
pass
else:
info = (Station+','+Detectorid+','+detectorName+','+Direction)
stream.append(info)
def streamlist(rows):
    columns = ['stationName', 'detectorId', 'detectorName', 'direction']
    df_rows = pd.DataFrame(rows)
    df = pd.DataFrame(df_rows[0].str.split(',').tolist(), columns=columns)
    return df
df = streamlist(stream)
df.to_csv('streamlist.csv', index = None)
|
[
"tongge@iastate.edu"
] |
tongge@iastate.edu
|
1bc81292000ed7c0f0c42230c96b7d67c221d340
|
d66daf048f77c6c1e1afda219d7ae7535d1e304d
|
/regex.py
|
346d9e456c92b276b8b07f0acd1f6893e2b93641
|
[] |
no_license
|
a373690629/aid2008new
|
3693ebf8add5194981a6a9a2589538f8398e5b41
|
362cc064def239a22e4b1f67f8f92d8dc554dc77
|
refs/heads/main
| 2023-01-23T15:57:20.369765
| 2020-11-27T12:15:14
| 2020-11-27T12:15:14
| 316,488,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
"""
regex.py re模块 功能函数演示1
"""
import re
# Target string
s = "Alex:1994,Sunny:1996"
pattern = r"(\w+):(\d+)" # regular expression
# Call findall through the re module
l = re.findall(pattern,s)
print(l)
# Call findall through a compiled pattern object
regex = re.compile(pattern)
l = regex.findall(s,0,12)
print(l)
# Split the string wherever the regex matches
# l = re.split(r'[:,]',s)
l = re.split(r'[^\w]',s)
print(l)
# Replace in the target string (subn also returns the substitution count)
s = re.subn(r':','-',s)
print(s)
|
[
"pengpeng@tedu.cn"
] |
pengpeng@tedu.cn
|
1b64fccb8e1ce49bc1d6132d58eb7f7fe9f2f8f4
|
a85350f29dc8606a2618aa4d25da0a5399b7b860
|
/app/app.py
|
8251bf117b5a48430660e1a11e9327bf3e6e73ff
|
[
"MIT"
] |
permissive
|
cvkumar/flask-ml-app
|
0c57c8824712e912e72583891e97352d93e26203
|
8b00802b436b3429498fbbfdec9e4ed68d18f721
|
refs/heads/master
| 2020-03-27T00:11:38.799060
| 2018-08-23T19:55:11
| 2018-08-23T19:55:11
| 145,601,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,020
|
py
|
import os
from flask import Flask
from flask import request
from flask import jsonify
from sklearn import svm
from sklearn import datasets
import joblib  # sklearn.externals.joblib is deprecated/removed in newer scikit-learn
from sklearn.model_selection import train_test_split
from uuid import uuid4
from datetime import datetime
import pony.orm as pny
import database
import config
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
@app.route('/train')
@pny.db_session
def train_model():
iris_dataset = datasets.load_iris()
X, y = iris_dataset.data, iris_dataset.target
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.33, stratify=y)
# print(iris_dataset['DESCR'])
# Fit Model
svm_model = svm.SVC(
C=1.0,
probability=True,
random_state=1)
svm_model.fit(x_train, y_train)
joblib.dump(svm_model, 'model.pkl')
pny.delete(prediction for prediction in database.Prediction)
model_accuracy = svm_model.score(x_test, y_test)
return jsonify({'success': True, 'model_accuracy': model_accuracy})
@app.route('/predict', methods=['POST'])
@pny.db_session
def predict():
# print(request.json)
svm_model = joblib.load('model.pkl')
sepal_length = float(request.json['sepal_length'])
sepal_width = float(request.json['sepal_width'])
petal_length = float(request.json['petal_length'])
petal_width = float(request.json['petal_width'])
flower = [[sepal_length, sepal_width, petal_length, petal_width]]
prediction = __make_prediction(flower, svm_model)
    database.Prediction(id=str(uuid4()), date=datetime.now(), sepal_length=sepal_length,
sepal_width=sepal_width, petal_length=petal_length, petal_width=petal_width,
prediction=prediction)
response = jsonify({"prediction": prediction})
return response
@app.route('/predictions', methods=['GET'])
@pny.db_session
def get_predictions():
predictions = database.Prediction.select().order_by(pny.desc(database.Prediction.date))[:]
response = jsonify({'predictions': [prediction.to_dict() for prediction in predictions]})
return response
@app.route('/predictions', methods=['DELETE'])
@pny.db_session
def clear_predictions():
pny.delete(prediction for prediction in database.Prediction)
return jsonify({'success': True})
def __make_prediction(flower, svm_model):
predictions = svm_model.predict_proba(flower)[0]
result = {'Setosa': predictions[0], 'Versicolour': predictions[1], 'Virginica': predictions[2]}
    if result['Setosa'] >= result['Versicolour'] and result['Setosa'] > result['Virginica']:
return 'Setosa'
elif result['Versicolour'] >= result['Setosa'] and result['Versicolour'] > result['Virginica']:
return 'Versicolour'
else:
return 'Virginica'
if __name__ == "__main__":
if os.environ["LOCAL"] == "True":
config.set_private_environment_variables()
database.connect_database()
app.run(debug=True, host='0.0.0.0')
|
[
"kumarcal@grinnell.edu"
] |
kumarcal@grinnell.edu
|
2ce28c9741f1e29848d62927caff39bfc99158dc
|
71a8a118b6389376805d290f2c3826f4802e40ef
|
/Macys/macys_main.py
|
4d2fab5623bab4679ed2cc997b52b3c9a49b4eb6
|
[] |
no_license
|
nayanatharap/Forever21-Macy-s
|
515489b872a21e3c0a7b7846bd253e012914f78e
|
46b5dc426c445c7ce61cf83fcb052db922926db1
|
refs/heads/master
| 2020-03-19T14:44:13.239176
| 2018-07-06T02:26:31
| 2018-07-06T02:26:31
| 136,637,228
| 0
| 0
| null | 2018-06-16T19:47:07
| 2018-06-08T15:36:15
|
Python
|
UTF-8
|
Python
| false
| false
| 5,241
|
py
|
import bs4 as bs
from urllib.request import Request, urlopen
import macys_parse, json
import pandas as pd
import tqdm, time, os
import logging
import requests
exp_name = 'Macys_' + time.time().__str__()
logging.basicConfig(filename=exp_name + ".log", level=logging.INFO)
output_path = "./" + exp_name + "_data"
os.makedirs(output_path, exist_ok=False)
url = "https://www.macys.com/"
headers = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}
data = requests.get(url, headers=headers)
sauce=data.text
soup = bs.BeautifulSoup(sauce, "lxml")
woman_product_type_list=[]
woman_product_type_list2=[]
for item in soup.find_all("li"):
for productPage in item.find_all("a"):
if "women" in productPage.get("href"):
woman_product_type_list.append(productPage.get("href"))
for elem in woman_product_type_list:
if elem[0] is "/":
woman_product_type_list2.append("http://www.macys.com"+elem)
else:
woman_product_type_list2.append(elem)
# Hard coded filtering for woman products
# view_all_list = set(t[0] for t in filter(lambda x: x[1].lower() == "view all", woman_product_type_list2))
# woman_product_type_set = set([k[0] for k in woman_product_type_list2]) - view_all_list
woman_product_type_set=set(woman_product_type_list2)
woman_product_type_set_copy=set()
excluded_terms = ("shoes", "accessories", "spray", "perfume",
                  "watch", "ring", "earrings", "jewelery")
for elem in woman_product_type_set:
    if "women" in elem and not any(term in elem for term in excluded_terms):
        woman_product_type_set_copy.add(elem)
print(woman_product_type_set)
# Variable initializations
total_items_need, items_downloaded, num_multiple_category_items = 100, 0, 0
massive_json = {}
color, price, title, description, attributes, categories, composition, url_list, brand = [], [], [], [], [], [], [], [], []
for product_type_link in tqdm.tqdm(woman_product_type_set_copy):
soup_product_type_page = bs.BeautifulSoup(requests.get(product_type_link, headers=headers).text, "lxml")
for item in soup_product_type_page.find_all("a", {'class':"productDescLink"}):
product_link = item.get("href")
product_link="https://macys.com"+product_link
print (product_link)
try:
final_object, id, first_download_of_item = macys_parse.main(product_link, output_path, massive_json)
if not first_download_of_item:
num_multiple_category_items += 1
print("Item_clash_for", item)
continue
except Exception as e:
print("Parse failed for ", item, e)
continue
massive_json[id] = final_object
color.append(final_object["annotation"]["color"])
price.append(final_object["annotation"]["price"])
title.append(final_object["annotation"]["title"])
brand.append(final_object["annotation"]["brand"])
description.append(final_object["annotation"]["description"])
attributes.append(final_object["annotation"]["attributes"])
composition.append(final_object["annotation"]["composition"])
categories.append(final_object["annotation"]["categories"])
url_list.append(final_object['info']['product_url'])
items_downloaded += 1
print("items_downloaded", items_downloaded)
if items_downloaded > total_items_need:
break
json.dump(massive_json, open(exp_name + "_details.json", "w"))
data_frame = pd.DataFrame()
data_frame["color"] = color
data_frame["price"] = price
data_frame['title'] = title
data_frame['brand'] = brand
data_frame['description'] = description
data_frame["attributes"] = attributes
data_frame["composition"] = composition
data_frame["categories"] = categories
data_frame["url_list"] = url_list
data_frame.to_excel(exp_name + "_statistics.xlsx")
logging.info(product_type_link + " finished and total number till now - " + str(items_downloaded))
logging.info("Total items_in_multiple_categories till now - " + str(num_multiple_category_items))
if items_downloaded > total_items_need:
break
|
[
"noreply@github.com"
] |
nayanatharap.noreply@github.com
|
4f764adf3e826852a0a68cb380add6a7e7217da9
|
1b5184494d625dade6195eae0c46160077029108
|
/lab2/suffix_tree_fast.py
|
68a1abb696a7bd93a950427b601706bd1519d601
|
[] |
no_license
|
MatiXOfficial/Text-Algorithms
|
c5e40dd30b07bdc087527da037b883a3b048dabb
|
ed12a797f2ace962c1a67f8e04fcece36cbf648d
|
refs/heads/master
| 2021-02-09T04:36:18.402412
| 2020-06-13T20:25:51
| 2020-06-13T20:25:51
| 244,047,935
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,249
|
py
|
class Node:
text = ''
def __init__(self, start=None, end=None, parent=None, depth=0, children=None):
self.start = start
self.end = end
self.link = None
if children is None:
self.children = []
else:
self.children = children
self.parent = parent
self.depth = depth
def __str__(self, indent=0):
res = indent * ' ' + str(indent) + '-' + str(self.start) + ':' + str(self.end)
for child in self.children:
res += '\n' + child.__str__(indent + 1)
return res
def __repr__(self):
return "<class Node>\n" + self.__str__()
def __len__(self):
return self.end - self.start
def get_label(self, i=0):
return self.text[self.start + i:self.end]
def add_child(self, start, end):
new_node = Node(start, end, self, self.depth + self.end - self.start)
self.children.append(new_node)
return new_node
def find_child_by_first(self, val):
for child in self.children:
if self.text[child.start] == val:
return child
return None
def break_path(self, length):
new_node = Node(start=self.start + length, end=self.end, parent=self,
depth=self.depth + length, children=self.children)
self.end = new_node.start
self.children = [new_node]
def slow_find(self, suffix):
child = self.find_child_by_first(suffix[0])
if child is None:
return self
for i in range(child.start + 1, child.end):
if self.text[i] != suffix[i - child.start]:
child.break_path(i - child.start)
return child
return child.slow_find(suffix[len(child):])
def fast_find(self, suffix):
child = self.find_child_by_first(suffix[0])
if child is None:
return self
if len(suffix) > len(child):
return child.fast_find(suffix[len(child):])
if len(suffix) == len(child):
return child
child.break_path(len(suffix))
return child
class SuffixTree:
def __init__(self, text=None, root=None):
self.root = root
if text is not None:
self.mccreight(text)
def __str__(self, indent=0):
if self.root is None:
return 'Empty tree'
else:
return self.root.__str__()
def __repr__(self):
return "<class SuffixTree>\n" + self.__str__()
def mccreight(self, text):
self.text = text
Node.text = text
n = len(text)
self.root = Node(0, 0)
self.root.add_child(0, n)
last_head = self.root
leaf = self.root.children[0]
for i in range(1, n):
suffix = text[i:]
if last_head == self.root:
head = self.root.slow_find(suffix)
leaf = head.add_child(i + head.depth + len(head), n)
last_head = head
else:
parent = last_head.parent
if parent == self.root:
if len(last_head) == 1:
node = self.root
else:
node = self.root.fast_find(last_head.get_label(1))
else:
node = parent.link.fast_find(last_head.get_label())
if len(node.children) == 1:
head = node
else:
head = node.slow_find(leaf.get_label())
leaf = head.add_child(i + head.depth + len(head), n)
last_head.link = node
last_head = head
def factor_in(self, word):
node = self.root.find_child_by_first(word[0])
while node is not None:
for i in range(node.start + 1, node.end):
if node.depth + i - node.start == len(word):
return True
if self.text[i] != word[node.depth + i - node.start]:
return False
if node.depth + node.end - node.start == len(word):
return True
node = node.find_child_by_first(word[node.depth + node.end - node.start])
return False
|
[
"mateuszkocot99@gmail.com"
] |
mateuszkocot99@gmail.com
|
7ce054927b3fc0f78d68e85c4844a342a9585343
|
c49e8d7a1c0c245fd0edc604f5d7f7b1ce6f2ca6
|
/Ciclo1/Unidad 1/Clases/Unidad 2/for.py
|
9c823093a90b63f62094d6f0c345c7f774f25da0
|
[] |
no_license
|
mmedinar/FundamentosProgramacion
|
71e7706e0218cba461f9d1f9306b3ff9a1f9d11e
|
c8539678b943b55f00b7877e9e1cd04eaa0cd271
|
refs/heads/master
| 2023-05-31T05:25:47.845896
| 2021-07-02T00:03:40
| 2021-07-02T00:03:40
| 382,183,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,044
|
py
|
# for " variable" in "elemento iterable"
# pass
# for numeros in range(0,11):
# print(numeros, end= f'\n')
# x = ["manzanas", "peras", "bananas", "melones", "fresas"]
# for i in x:
# print(i)
# for pares in range(2,101,2):
# print(pares, end=' ')
# print()
# for i in range(65,91):
# print(f"{i}{i:c} ", end=' ')
# #i:c character (ASCII code point)
# #i:o octal
# #i:b binary
# print()
# print(ord('Z'))
# print(chr(90))
#***********************************************
# abcdario = []
# for letra in range(ord('a'), ord('z')+1):
# #print(letra, sep=' ')
# abcdario.append(chr(letra))
# print(abcdario)
# vocales = []
# consonantes = []
# for letra in abcdario:
# if letra in 'aeiou':
# vocales.append(letra)
# else:
# consonantes.append(letra)
# print(vocales)
# print(consonantes)
#***************************************************************************************
# nombres = ['alex','edwin','heynar','galaxy', 'juan']
# profesiones = ['soporte', 'administrador','tecnologo','electricista','ingeniero','ingeniero']
# print(nombres)
# print(profesiones)
# trabajos = {} #{profesion: persona}
# for i in range(len(nombres)):
# profesion = profesiones[i]
# persona = nombres[i]
# if profesion not in trabajos:
# trabajos[profesion] = [persona]
# else:
# trabajos[profesion].append(persona)
# print(trabajos)
# input()
# test fizz buzz *********************************************************************
"""
Write a program that prints the numbers from 1 to 100, replacing
multiples of 3 with the word "fizz",
multiples of 5 with "buzz",
and multiples of both, i.e. multiples of 3 and 5 (or of 15), with the word "fizzbuzz".
"""
numeros = []
for i in range(1,101):
if i % 15 == 0:
valor = "fizzbuzz"
elif i % 5 == 0:
valor = "buzz"
elif i % 3 == 0:
valor = "fizz"
else:
valor = i
print(valor, end=' ')
print()
|
[
"mmedinar@gmail.com"
] |
mmedinar@gmail.com
|
b3078452785160519520e6a33bf24e1caf6d1245
|
b4cb46c13705dad47531a898adacd95da3c36df6
|
/polls/models.py
|
cf9f6631aa8e84711cc0cc6650914887ddfc93b3
|
[
"MIT"
] |
permissive
|
zhaogp/oxygen
|
3912a8d8a6dc1494cfea60e117e8feff860c10ab
|
2e11cb7c7ea2d533afbed51f9bb2de7ba19cfd31
|
refs/heads/master
| 2021-01-03T13:21:43.632290
| 2018-06-03T15:36:13
| 2018-06-03T15:36:13
| 100,777,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
from django.db import models
from django.utils import timezone
import datetime
class Question(models.Model):
question_text = models.CharField(max_length=120)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def is_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
|
[
"zhaoguoping@jd.com"
] |
zhaoguoping@jd.com
|
9e520d57b80b500c66ebe24c19c9c823c4b24120
|
e93fac3fdf589e7ba0ffa0a43b0b883ee4aa33b5
|
/code/test2.py
|
39048053d641464ba4c9f1ebe2bdf958f20a75d6
|
[] |
no_license
|
Alexanderkorn/A3-project
|
6d1c2d66b5ab34f65781029d9e75c2eadd1927ff
|
e3a0c7c47a88cec3e3f21590ca46483199af05a5
|
refs/heads/master
| 2021-01-13T00:49:21.565076
| 2015-10-30T15:07:32
| 2015-10-30T15:07:32
| 44,971,434
| 1
| 1
| null | 2015-10-28T11:36:55
| 2015-10-26T13:52:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,789
|
py
|
__author__ = 'alexander'
#import mmap
#f = open('passwords.txt')
username=input("gebruikers naam:")
password=input("Wachtwoord:")
#s = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
if username and password in open('passwords.txt').read():
print("true")
else:
print("false")
""""
while password == 'lol':
if s.find(str(username)) != -1:
print("True")
if s.find(str(password)) != 1:
print("True")
import hashlib ,os
resource_file = "passwords.txt"
def encode(username,password):
return username,hashlib.sha1(password).hexdigest()
def add_user(username,password):
if os.path.exists(resource_file):
with open(resource_file) as f:
if username in f.read():
raise Exception("user already exists")
with open(resource_file,"a") as f:
print(f, encode(username,password))
return username
def check_login(username,password):
with open(resource_file) as f:
if encode(username,password) in f.read():
return username
def create_username():
try:
username = add_user(input("enter username:"),input("enter password:"))
print("Added User! %s" + username)
except Exception as e:
print("Failed to add user "+username,"! ... user already exists??" + username)
def login():
if check_login(input("enter username:"),input("enter password:")):
print("Login Success!!")
else:
print("there was a problem logging in")
while True:
try:
{'c':create_username,'l':login}.get(input("(c)reate user\n(l)login\n------------\n>").lower(),login)()
except:
break
print("Login Script")
import getpass
import csv
userbase="Usernames.csv"
CorrectUsername = "Test"
CorrectPassword = "TestPW"
loop = 'true'
while (loop == 'true'):
username = input("Please enter your username: ")
credentials = {}
# with open('Usernames.csv', 'r') as f:
# for line in f:
# user, pwd = line.strip().split(';')
# credentials[user] = pwd
if (username == CorrectUsername):
loop1 = 'true'
while (loop1 == 'true'):
password = getpass.getpass("Please enter your password: ")
code = int(input("uw code alstublieft"))
f=open(userbase,'r')
reader=csv.reader(f, delimiter=';')
for i in reader:
if int(i[1]) == int(code):
print(i[0])
print("Succes")
f.close()
if (password == CorrectPassword):
print("Logged in successfully as " + username)
loop = 'false'
loop1 = 'false'
else:
print("Password incorrect!")
else:
print("Username incorrect!")
"""""
|
[
"alexanderkorn7@gmail.com"
] |
alexanderkorn7@gmail.com
|
e90e6e54ec62d2abead5decf5e250852cfd2bbac
|
0496f51d9d67eaa54945c47f5078c313e6ee5506
|
/Unit Automation/YAML/test.py
|
dc747e2da1f05058ff8ae8d19b5265048b0305c9
|
[] |
no_license
|
jhearn85/automation_wip
|
2d98ca5fb2de49fd29004d150710ba7c47a438a3
|
47c7e66427c74f00171045973af14fbbee37277c
|
refs/heads/master
| 2023-04-15T03:37:42.187091
| 2021-04-07T04:57:02
| 2021-04-07T04:57:02
| 331,192,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
import yaml
def user_input():
description = input("Give a Description: ")
IP_Address = input("Give your IP: ")
user_input.desc = description
user_input.ip = IP_Address
user_input()
Description = user_input.desc
IP_Address = user_input.ip
with open('data.yaml', 'w') as outfile:
outfile.write(yaml.dump(
{"Interfaces": {
'Description' : Description,
'ip' : IP_Address}
},
default_flow_style=False))
|
[
"hearnjameson@gmail.com"
] |
hearnjameson@gmail.com
|
77f57daeac7db77db887d0574452a3a1e5356270
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adverbs/_kindlier.py
|
8f1161ba660b3d1a9e7bc4e58b5918fb23e3990e
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
from xai.brain.wordbase.adverbs._kindly import _KINDLY
# class header
class _KINDLIER(_KINDLY, ):
def __init__(self,):
_KINDLY.__init__(self)
self.name = "KINDLIER"
self.specie = 'adverbs'
self.basic = "kindly"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
e85845f60ab0a44155867e1e7a3ef9de3a93c75f
|
2161b5699f4d6ea3d5c420b69aa01259ebb5cb82
|
/my_app.py
|
29e5a66e026b35b718cfc1ab45418090741785a8
|
[
"MIT"
] |
permissive
|
evature/webhooks
|
e6683765b3b8a9b8e794c0bdde07ae0fb56f9726
|
28e473968ba1b3eb61b5e60f1ddf8e2c4bb31e6b
|
refs/heads/master
| 2021-01-20T20:48:26.870143
| 2016-09-29T11:22:42
| 2016-09-29T11:22:42
| 64,929,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,815
|
py
|
# encoding: utf-8
'''
Created on Jul 12, 2016
@author: Tal
Demo implementation of applicative webhooks for the Evature BotKit = http://www.evature.com/docs/botkit.html
It is meant to be as simple as possible.
To achieve max simplicity, it is based on Zappa + Flask, deployed to AWS Lambda.
This is Zappa - https://github.com/Miserlou/Zappa
Assuming you have an AWS account you can have these webhooks running, "serverless", in 5 minutes.
'''
from __future__ import unicode_literals, division
import string
import random
import json
from random import sample
from flask import Flask, request, redirect, render_template, jsonify, make_response
import requests
APP = Flask(__name__)
BOTKIT_API_LATEST_VERSION = "0.4.0"
class DataMessageSubType(object):
"""Sub Types of DataMessage JSON data"""
airline_itinerary = "airline_itinerary"
airline_checkin = "airline_checkin"
airline_boardingpass = "airline_boardingpass"
airline_update = "airline_update"
class BotWebhookTypes(object):
"""The applicative webhooks"""
search_flight = 'search_flight'
search_car = 'search_car'
search_hotel = 'search_hotel' #
search_cruise = 'search_cruise'
chat_greeting = 'chat_greeting'
flight_gate_number = 'flight_gate_number'
flight_departure_time = 'flight_departure_time'
flight_arrival_time = 'flight_arrival_time'
flight_boarding_time = 'flight_boarding_time'
flight_boarding_pass = 'flight_boarding_pass'
flight_itinerary = 'flight_itinerary'
reservation_show = 'reservation_show'
reservation_cancel = 'reservation_cancel'
message_logger = 'message_logger' # is activated for every send message used for logging
flight_status = 'flight_status'
identify_user = 'identify_user' # activated when the login form is complete - given the form answers and returns the loginData
identify_user_questions = 'identify_user_questions' # returns custom questions for login - result will be passed to identify_user webhook
contact_support = 'contact_support'
airport_navigation = 'airport_navigation'
change_booking = 'change_booking'
logout = 'logout'
arrivals = 'arrivals'
departures = 'departures'
show_help = "show_help"
show_reservation = 'show_reservation'
ask_time = 'ask_time'
ask_weather = 'ask_weather'
FLIGHT_STATUS_MESSAGE_EXAMPLE = dict(
_type='DataMessage',
subType='airline_update',
asAttachment=False,
introMessage='Here is an example of a Flight Status',
jsonData=dict(
flight_number='UAL123',
number=123,
airline_name='United',
departure_airport={
"airport_code": 'LHR',
"city":'London Heathrow',
"gate":'232',
"terminal":''
},
arrival_airport={
"airport_code": 'IAD',
"city": 'Washington Dulles Intl',
"gate": 'C2',
"terminal": 'B'
},
flight_schedule={
"departure_time_actual": "2016-08-09T08:16:00",
"arrival_time": "2016-08-09T10:51:00",
"departure_time": "2016-08-09T07:30:00",
"boarding_time": "",
}
),
)
BOARDING_PASS_MESSAGE_EXAMPLE = dict(
_type='DataMessage',
subType='airline_boardingpass',
asAttachment=True,
introMessage='Here is an example of a Boarding Pass',
jsonData={'auxiliary_fields': [{'label': 'Terminal', 'value': 'T1'},
{'label': 'Departure', 'value': '30OCT 19:05'}],
'flight_info': {'arrival_airport': {'airport_code': 'AMS', 'city': 'Amsterdam'},
'departure_airport': {'airport_code': 'JFK', 'city': 'New York', 'gate': 'D57', 'terminal': 'T1'},
'flight_number': 'KL0642',
'flight_schedule': {'arrival_time': '2016-01-05T17:30', 'departure_time': '2016-01-02T19:05'}},
'header_image_url': 'https://d1hz6cg1a1lrv6.cloudfront.net/media/images/evature/logo4-19b0ca62fbf2b08e3bbc9d25298523ea4600422e.jpg',
'logo_image_url': 'https://d2hbukybm05hyt.cloudfront.net/images/airline_logos/logo_JB.png',
'passenger_name': 'TAL WEISS',
'pnr_number': 'CG4X7U',
'qr_code': 'M1WEISS\\/TAL CG4X7U nawouehgawgnapwi3jfa0wfh',
'seat': '75A',
'secondary_fields': [{'label': 'Boarding', 'value': '18:30'},
{'label': 'Gate', 'value': 'D57'},
{'label': 'Seat', 'value': '75A'},
{'label': 'Sec.Nr.', 'value': '003'}],
'travel_class': 'business'},
)
@APP.route('/simple', methods=['POST'])
def simple():
"""Simple view function"""
response = dict(messages=[
dict(_type="TextMessage", text="Here is a text message"),
dict(_type="TextMessage", text="and a picture of a fish"),
dict(_type="ImageMessage",
imageUrl="http://pngimg.com/upload/fish_PNG10538.png")
],
botkitVersion=BOTKIT_API_LATEST_VERSION)
return jsonify(response)
@APP.route('/human', methods=['POST'])
def human():
"""Transfer to Human function"""
response = dict(messages=[
dict(_type="TextMessage", text="I will try to transfer you to an agent!"),
dict(_type="HandoffToHumanEvent")
],
botkitVersion=BOTKIT_API_LATEST_VERSION)
return jsonify(response)
@APP.route('/locked', methods=['POST'])
def locked():
"""Simple view function that needs login"""
body = request.get_json(force=True)
if body and isinstance(body, dict) and body.get('loginData'):
response = dict(messages=[
dict(_type="TextMessage", text="I guess you logged in"),
dict(_type="TextMessage", text="But you still get a picture of a lock"),
dict(_type="ImageMessage",
imageUrl="http://www.fortresslockandsecurity.com/wp-content/uploads/2014/04/Austin-Locksmith.png")
],
botkitVersion=BOTKIT_API_LATEST_VERSION)
else:
response = dict(botkitVersion=BOTKIT_API_LATEST_VERSION,
messages=[dict(_type='LoginOAuthEvent',
loginSuccessHook={'webhook': 'flight_boarding_pass'},
text='Please Login in first',
webLoginUrl='https://chat.evature.com/demo_login')])
return jsonify(response)
@APP.route('/bp', methods=['POST'])
def boarding_pass():
"""Return a boarding pass"""
response = dict(messages=[BOARDING_PASS_MESSAGE_EXAMPLE],
botkitVersion=BOTKIT_API_LATEST_VERSION)
return jsonify(response)
def random_string(length_of_string):
"""Generate a random string"""
return ''.join(random.choice(string.ascii_uppercase + string.digits)
for _ in range(length_of_string))
@APP.route('/dl', methods=['GET', 'POST'])
def demo_login():
"""Implements a simple page for OAuth login
# example of URL:
# https://chat.evature.com/demo_login?
# account_linking_token=ARREbGIbGD7PQhwWcUt2b5n6yomzPaL6yr_fGAVoFBEADGssklmardZMcnJv9fLsLmpnQ4QuzDhhxg65Ewzq3ObOoUe_aMoDCl5LUS4O_qEumg
# &redirect_uri=https%3A%2F%2Ffacebook.com%2Fmessenger_platform%2Faccount_linking%2F%3Faccount_linking_token%3DARREbGIbGD7PQhwWcUt2b5n6yomzPaL6yr_fGAVoFBEADGssklmardZMcnJv9fLsLmpnQ4QuzDhhxg65Ewzq3ObOoUe_aMoDCl5LUS4O_qEumg
"""
messages = []
context = {}
# context.update(csrf(request))
redirect_uri = request.args.get('redirect_uri', '')
account_linking_token = request.args.get('account_linking_token', '')
if not redirect_uri:
messages.append("Expected to find 'redirect_uri' in the query parameters")
if not account_linking_token:
messages.append("Expected to find 'account_linking_token' in the query parameters")
if request.method == 'POST':
if 'canceled' in request.args:
# canceled
messages.append("Canceled!")
if redirect_uri:
return redirect(redirect_uri)
else:
username = request.form['username']
password = request.form['password']
if username.lower().strip() == 'username' and password.lower().strip() == 'password':
# success
messages.append("Success!")
if redirect_uri:
return redirect('{}&authorization_code={}'.format(redirect_uri, random_string(5)))
else:
# fail
messages.append("Invalid Username/Password<br>Use “username” and “password”"
" for succesful login, or click 'cancel'")
context['message'] = "<br>".join(messages)
return render_template('demo_login.html', **context)
@APP.route('/bplogin', methods=['POST'])
def flight_boarding_pass_webhook():
body = request.get_json(force=True)
if body and isinstance(body, dict) and body.get('loginData'):
response = dict(messages=[BOARDING_PASS_MESSAGE_EXAMPLE],
botkitVersion=BOTKIT_API_LATEST_VERSION)
else:
response = dict(botkitVersion=BOTKIT_API_LATEST_VERSION,
messages=[dict(_type='LoginOAuthEvent',
loginSuccessHook={'webhook': 'flight_boarding_pass'},
text='Please Login in first',
webLoginUrl='https://chat.evature.com/demo_login')])
return jsonify(response)
@APP.route('/roadside', methods=['POST'])
def roadside():
"""Simple roadside assistance function"""
response = dict(messages=[
dict(_type="TextMessage", text="If you need roadside assistance with your Avis vehicle, please call 877-485-5295"),
dict(_type="ImageMessage",
imageUrl="http://www.whatafuture.com/wp-content/uploads/2015/03/Google-roadside-assistance-1024x683.jpg")
],
botkitVersion=BOTKIT_API_LATEST_VERSION)
return jsonify(response)
@APP.route('/flightstat', methods=['POST'])
def flight_status():
"""Simple flight status reply"""
response = dict(messages=[FLIGHT_STATUS_MESSAGE_EXAMPLE],
botkitVersion=BOTKIT_API_LATEST_VERSION)
return jsonify(response)
@APP.route('/taltesting', methods=['POST'])
def tal_testing():
"""Playground for testing stuff"""
response = """{
"botkitVersion": "0.3.0",
"messages": [
{
"_type": "RichMessage",
"imageUrl": "https://www.travelexinsurance.com/images/default-album/mainimg_flightinsurance.jpg",
"title": "LHR /u21d2 SVO Option # 1: $1842.24",
"subtitle": " : 2016-08-31,c: 2016-09-01,one stop at SVO",
"buttons": [
{"_type": "ButtonMessage", "text": "Reserve Seat", "url": "https://www.google.com/search?q=flight%20LHR%20to%20SVO"}
]
}
]
}"""
return jsonify(json.loads(response))
@APP.route('/roshan', methods=['POST'])
def for_roshan():
"""Trying to fix the response for Amadeus"""
response = r"""
{"botkitVersion":"0.3.0","messages":[{"_type":"TextMessage","text":"Here are the the top 3 results:"},{"_type":"MultiRichMessage","messages":[{"_type":"RichMessage","title":"BLR (2016-08-24 18:25) -> NCE (2016-08-24 09:40)","imageUrl":"http://tomcat.www.1aipp.com/sandboxrestservice_chatbot/flight.jpg","buttons":[{"_type":"ButtonMessage","text":"$ 1204.46","url":"https://www.amadeus.net/home/"},{"_type":"ButtonMessage","text":"More Details","url":"https://www.amadeus.net/home/"},{"_type":"ButtonMessage","text":"Book this flight","url":"https://www.amadeus.net/home/"},{"_type":"ButtonMessage","text":"Show similar flights","url":"https://www.amadeus.net/home/"}],"url":"https://www.amadeus.net/home/"},{"_type":"RichMessage","title":"BLR (2016-08-24 18:25) -> NCE (2016-08-24 09:40)","imageUrl":"http://tomcat.www.1aipp.com/sandboxrestservice_chatbot/flight.jpg","buttons":[{"_type":"ButtonMessage","text":"$ 1219.24","url":"https://www.amadeus.net/home/"},{"_type":"ButtonMessage","text":"More Details","url":"https://www.amadeus.net/home/"},{"_type":"ButtonMessage","text":"Book this flight","url":"https://www.amadeus.net/home/"},{"_type":"ButtonMessage","text":"Show similar flights","url":"https://www.amadeus.net/home/"}],"url":"https://www.amadeus.net/home/"},{"_type":"RichMessage","title":"BLR (2016-08-24 17:00) -> NCE (2016-08-24 06:40)","imageUrl":"http://tomcat.www.1aipp.com/sandboxrestservice_chatbot/flight.jpg","buttons":[{"_type":"ButtonMessage","text":"$ 1444.75","url":"https://www.amadeus.net/home/"},{"_type":"ButtonMessage","text":"More Details","url":"https://www.amadeus.net/home/"},{"_type":"ButtonMessage","text":"Book this flight","url":"https://www.amadeus.net/home/"},{"_type":"ButtonMessage","text":"Show similar flights","url":"https://www.amadeus.net/home/"}],"url":"https://www.amadeus.net/home/"}]}]}
"""
return jsonify(json.loads(response))
@APP.route('/sudhanwa', methods=['POST'])
def for_sudhanwa():
"""Trying to fix the response for Amadeus"""
response = """
{
"botkitVersion": "0.3.0",
"messages": [
{
"_type": "TextMessage",
"text": "Here are the the top 3 results:"
},
{
"_type": "RichMessage",
"title": "LHR /u21d2 SVO Option # 1: $1842.24",
"imageUrl": "https://www.travelexinsurance.com/images/default-album/mainimg_flightinsurance.jpg",
"subtitle": " : 2016-08-31,c: 2016-09-01,one stop at SVO",
"buttons": [
{
"_type": "ButtonMessage",
"text": "Reserve Seat",
"payload": null,
"url": "https://www.google.com/search?q=flight%20LHR%20to%20SVO"
}
]
},
{
"_type": "TextMessage",
"text": "Outbound Flight"
},
{
"_type": "HtmlMessage",
"height": "200",
"width": "350",
"html": "<h3>Arrives at</h3> :2016-08-31T23:35<br><h3>Departs at</h3> :2016-08-31T18:40<br><h3>Fly with</h3> :BA<h3>Airways</h3><br><h3>Origin Airport</h3> :LHR<br><h3>Destination Airport</h3> :HEL<br><h3>Flight Number</h3> :5908<br>"
},
{
"_type": "HtmlMessage",
"height": "200",
"width": "350",
"html": "<h3>Arrives at</h3> :2016-09-01T11:05<br><h3>Departs at</h3> :2016-09-01T09:25<br><h3>Fly with</h3> :AY<h3>Airways</h3><br><h3>Origin Airport</h3> :HEL<br><h3>Destination Airport</h3> :SVO<br><h3>Flight Number</h3> :153<br>"
},
{
"_type": "TextMessage",
"text": "Inbound Flight"
},
{
"_type": "HtmlMessage",
"height": "200",
"width": "350",
"html": "<h3>Arrives at</h3> :2016-09-03T20:05<br><h3>Departs at</h3> :2016-09-03T18:20<br><h3>Fly with</h3> :SU<h3>Airways</h3><br><h3>Origin Airport</h3> :SVO<br><h3>Destination Airport</h3> :HEL<br><h3>Flight Number</h3> :6844<br>"
},
{
"_type": "HtmlMessage",
"height": "200",
"width": "350",
"html": "<h3>Arrives at</h3> :2016-09-04T09:00<br><h3>Departs at</h3> :2016-09-04T07:45<br><h3>Fly with</h3> :BA<h3>Airways</h3><br><h3>Origin Airport</h3> :HEL<br><h3>Destination Airport</h3> :LHR<br><h3>Flight Number</h3> :5905<br>"
},
{
"_type": "HtmlMessage",
"height": "200",
"width": "350",
"html": "<h3>Arrives at</h3> :2016-08-31T23:35<br><h3>Departs at</h3> :2016-08-31T18:40<br><h3>Fly with</h3> :BA<h3>Airways</h3><br><h3>Origin Airport</h3> :LHR<br><h3>Destination Airport</h3> :HEL<br><h3>Flight Number</h3> :5908<br>"
},
{
"_type": "RichMessage",
"title": "LHR /u21d2 SVO Option # 2: $2110.83",
"imageUrl": "https://www.travelexinsurance.com/images/default-album/mainimg_flightinsurance.jpg",
"subtitle": ": 2016-08-31,c: 2016-09-01,one stop at SVO",
"buttons": [
{
"_type": "ButtonMessage",
"text": "Reserve Seat",
"payload": null,
"url": "https://www.google.com/search?q=flight%20LHR%20to%20SVO"
}
]
},
{
"_type": "TextMessage",
"text": "Outbound Flight"
},
{
"_type": "HtmlMessage",
"height": "200",
"width": "350",
"html": "<h3>Arrives at</h3> :2016-08-31T18:05<br><h3>Departs at</h3> :2016-08-31T15:50<br><h3>Fly with</h3> :AF<h3>Airways</h3><br><h3>Origin Airport</h3> :LHR<br><h3>Destination Airport</h3> :CDG<br><h3>Flight Number</h3> :1781<br>"
},
{
"_type": "HtmlMessage",
"height": "200",
"width": "350",
"html": "<h3>Arrives at</h3> :2016-09-01T00:10<br><h3>Departs at</h3> :2016-08-31T19:30<br><h3>Fly with</h3> :AF<h3>Airways</h3><br><h3>Origin Airport</h3> :CDG<br><h3>Destination Airport</h3> :SVO<br><h3>Flight Number</h3> :1144<br>"
},
{
"_type": "TextMessage",
"text": "Inbound Flight"
},
{
"_type": "HtmlMessage",
"height": "200",
"width": "350",
"html": "<h3>Arrives at</h3> :2016-09-03T17:00<br><h3>Departs at</h3> :2016-09-03T14:05<br><h3>Fly with</h3> :SU<h3>Airways</h3><br><h3>Origin Airport</h3> :SVO<br><h3>Destination Airport</h3> :CDG<br><h3>Flight Number</h3> :4921<br>"
},
{
"_type": "HtmlMessage",
"height": "200",
"width": "350",
"html": "<h3>Arrives at</h3> :2016-09-03T19:20<br><h3>Departs at</h3> :2016-09-03T19:05<br><h3>Fly with</h3> :AF<h3>Airways</h3><br><h3>Origin Airport</h3> :CDG<br><h3>Destination Airport</h3> :LHR<br><h3>Flight Number</h3> :1180<br>"
},
{
"_type": "HtmlMessage",
"height": "200",
"width": "350",
"html": "<h3>Arrives at</h3> :2016-08-31T18:05<br><h3>Departs at</h3> :2016-08-31T15:50<br><h3>Fly with</h3> :AF<h3>Airways</h3><br><h3>Origin Airport</h3> :LHR<br><h3>Destination Airport</h3> :CDG<br><h3>Flight Number</h3> :1781<br>"
},
{
"_type": "RichMessage",
"title": "LHR /u21d2 SVO Option # 3: $2699.13",
"imageUrl": "https://www.travelexinsurance.com/images/default-album/mainimg_flightinsurance.jpg",
"subtitle": " : 2016-08-31,c: 2016-09-01,non stop ",
"buttons": [
{
"_type": "ButtonMessage",
"text": "Reserve Seat",
"payload": null,
"url": "https://www.google.com/search?q=flight%20LHR%20to%20SVO"
}
]
},
{
"_type": "TextMessage",
"text": "Outbound Flight"
},
{
"_type": "HtmlMessage",
"height": "200",
"width": "350",
"html": "<h3>Arrives at</h3> :2016-09-01T04:25<br><h3>Departs at</h3> :2016-08-31T22:45<br><h3>Fly with</h3> :SU<h3>Airways</h3><br><h3>Origin Airport</h3> :LHR<br><h3>Destination Airport</h3> :SVO<br><h3>Flight Number</h3> :2585<br>"
},
{
"_type": "TextMessage",
"text": "Inbound Flight"
},
{
"_type": "HtmlMessage",
"height": "200",
"width": "350",
"html": "<h3>Arrives at</h3> :2016-09-03T08:00<br><h3>Departs at</h3> :2016-09-03T06:00<br><h3>Fly with</h3> :SU<h3>Airways</h3><br><h3>Origin Airport</h3> :SVO<br><h3>Destination Airport</h3> :LHR<br><h3>Flight Number</h3> :2570<br>"
},
{
"_type": "HtmlMessage",
"height": "200",
"width": "350",
"html": "<h3>Arrives at</h3> :2016-09-01T04:25<br><h3>Departs at</h3> :2016-08-31T22:45<br><h3>Fly with</h3> :SU<h3>Airways</h3><br><h3>Origin Airport</h3> :LHR<br><h3>Destination Airport</h3> :SVO<br><h3>Flight Number</h3> :2585<br>"
}
]
}
"""
return jsonify(json.loads(response))
@APP.route('/questions', methods=['POST'])
def questions():
"""Playing with questions"""
response = """
{
"botkitVersion":"0.4.0",
"messages":[
{
"_type":"QuestionnaireEvent",
"questionnaireAnsweredHook":{
"webhook":"roadside_assistance",
"payload":{
"more_info_to_attach_to_answers":123
}
},
"questionnaireAbortedHook":{
"webhook":"roadside_assistance",
"payload":{
"validation error?":321
}
},
"questions":[
{
"_type":"EmailQuestion",
"name":"email",
"text":"I need to identify you, what is your email?"
},
{
"_type":"MultiChoiceQuestion",
"text":"What happened?",
"name":"what_happened",
"choices":[
"Accident",
"Mechanical problem",
"Other"
]
},
{
"_type":"OpenQuestion",
"name":"details",
"text":"I need a string that starts with 'a' and is 3 or more letters",
"validationRegex":"a.{2}"
}
]
}
]
}
"""
return jsonify(json.loads(response))
@APP.route('/greeting', methods=['POST'])
def greeting():
"""Greeting webhook demo implementation"""
messages = []
body = request.get_json(force=True)
first_name = None
bot_or_agent_key = "bot_or_agent"
bot_please_reply = "YatraBot Please!"
if body and isinstance(body, dict):
bot_or_agent = body.get(bot_or_agent_key)
if bot_or_agent:
if bot_or_agent == bot_please_reply:
messages.append(dict(_type="TextMessage", text="bot requested - how may I help?"))
else:
messages.append(dict(_type="TextMessage", text="human requested"))
messages.append(dict(_type="HandoffToHumanEvent"))
else:
user = body.get('user')
if user and isinstance(user, dict):
first_name = user.get('firstName')
if first_name:
messages.append(dict(_type="TextMessage", text="Hello there {}!".format(first_name)))
if not first_name:
messages.append(dict(_type="TextMessage", text="Hello there!"))
messages.append(dict(_type="QuestionnaireEvent",
questionnaireAnsweredHook=dict(webhook="chat_greeting", payload=dict()),
questions=[dict(_type="MultiChoiceQuestion",
text="Would you like to talk to YatraBot or wait for an agent?",
name=bot_or_agent_key,
choices=["YatraBot Please!",
"Wait for an agent"])]))
response = dict(messages=messages, botkitVersion=BOTKIT_API_LATEST_VERSION)
return jsonify(response)
@APP.route('/https_proxy', methods=['GET'])
def https_proxy():
"""Trying to fix the response for Amadeus"""
url = request.args.get('url')
if url:
unquoted_url = requests.utils.unquote(url)
try:
res = requests.get(unquoted_url)
except requests.exceptions.RequestException:
pass
else:
response = make_response(res.content)
for key, value in res.headers.iteritems():
response.headers[key] = value
return response
return "No URL"
AIRPORT_SUGGESTIONS = [
("Flight Status:",
["My flight status",
"status of ua-123",
"arrivals",
"display arrivals",
"departures",
"list arriving flight",
"departure list",
"departures flights",
]),
("General questions:",
["Time in Rome",
"the weather in paris",
"Who are you?",
"What are you?",
"Who made you?",
"What do you eat?",
"What's new?",
"Who am I?",
"Where are you from?",
"What is your name?",
]),
("Hotel searches:",
["hotel tonight",
"cheap hotel nyc",
"3-4 stars for Monday",
]),
("Reach out for some help:",
["customer service",
"call support",
"talk to a human?",
"help",
'information',
'help me',
'can you help me?',
"can u show me info?",
"I need assistance",
]),
("Request personal information:",
["departure time?",
"boarding pass",
"When do I depart?",
"Show arrival time",
"When do I arrive?",
"When are we boarding?",
"Display my itinerary",
"Trip details",
"Number of my gate",
# "12345678901234567890",
]),
]
@APP.route('/capabilities_evature_airports', methods=['POST'])
def capabilities_evature_airports():
"""Capabilities view function"""
messages = [dict(_type="TextMessage", text="I can do many things! Here are a few options:")]
categories = sample(AIRPORT_SUGGESTIONS, 3)
multi_rich_messages = []
for category in categories:
buttons = [dict(_type="ButtonMessage", text=text, action=dict(_type="InputTextAction", inputText=text))
for text in sample(category[1], 3)] # pylint:disable=unsubscriptable-object
message = dict(_type="RichMessage", title=category[0], buttons=buttons) # pylint:disable=unsubscriptable-object
multi_rich_messages.append(message)
messages.append(dict(_type="MultiRichMessage", messages=multi_rich_messages))
response = dict(botkitVersion=BOTKIT_API_LATEST_VERSION, messages=messages)
return jsonify(response)
# We only need this for local development.
if __name__ == '__main__':
APP.run()
|
[
"tal@evature.com"
] |
tal@evature.com
|
061f704f78dd82afc0cfadd3da7489a155b9c579
|
2b21a7423f31163f0571161501477e6262a22b55
|
/jackdaw/nest/api/session.py
|
9fefb7c25d34ee248c3b282c7a2054f868cd55bd
|
[] |
no_license
|
hartl3y94/jackdaw
|
e2ae9e98cb97a7f1b3c0545042b0985220316720
|
3876298e1568fe8d811e86668e428a5fd937cd5a
|
refs/heads/master
| 2023-04-03T06:36:04.850552
| 2021-03-30T20:26:43
| 2021-03-30T20:26:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,417
|
py
|
from jackdaw.dbmodel.netsession import NetSession
from jackdaw.dbmodel.adcomp import Machine
from jackdaw.dbmodel.aduser import ADUser
from flask import current_app
def session_list(domainid):
db = current_app.db
sessions = {}
for mid, mname, session in db.session.query(Machine.id, Machine.sAMAccountName, NetSession).filter(Machine.ad_id == domainid).filter(NetSession.machine_id == Machine.id).distinct(NetSession.username):
if mid not in sessions:
sessions[mid] = {}
sessions[mid]['sessions'] = []
sessions[mid]['machinename'] = mname
sessions[mid]['sessions'].append(session.username)
return sessions
def session_add(domainid, session):
db = current_app.db
print(session)
cname = session['hostname']
if cname[-1] != '$':
cname = session['hostname'] + '$'
comp = db.session.query(Machine.id, Machine.sAMAccountName).filter_by(ad_id = domainid).filter(Machine.sAMAccountName == cname).first()
if comp is None:
return 'Machine not found!', 404
uname = session['username']
user = db.session.query(ADUser.sAMAccountName).filter_by(ad_id = domainid).filter(ADUser.sAMAccountName == uname).first()
if user is None:
return 'User not found!', 404
sess = NetSession()
sess.machine_id = comp.id
sess.source = comp.sAMAccountName
sess.username = user.sAMAccountName
try:
db.session.add(sess)
db.session.commit()
except:
db.session.rollback()
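# NOTE: the bare except swallows commit errors; the transaction is rolled
# back, but the success response below is returned regardless.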
return 'Session created!', 200
|
[
"info@skelsec.com"
] |
info@skelsec.com
|
01c96ced74400e481508f74bf645b0675e074697
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_stilettos.py
|
4433d008834892f1cf17787b6c5aa487efa15f64
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
# class header
class _STILETTOS():
def __init__(self,):
self.name = "STILETTOS"
self.definitions = "stiletto"
self.parents = []
self.children = []
self.properties = []
self.jsondata = {}
self.basic = ['stiletto']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
3bb1a6919eadc7c1ba52345e456c4e94b78f4016
|
1b3fc35ada474601a76de3c2908524336d6ca420
|
/design/design/spiders/artop.py
|
ee2aa5b7a63a36f995205766d85258cf1ae5f7c3
|
[] |
no_license
|
dqsdatalabs/Internet-worm
|
db3677e65d11542887adcde7719b7652757a3e32
|
62f38f58b4fa7643c482077f5ae18fff6fd81915
|
refs/heads/master
| 2022-01-16T14:29:52.184528
| 2018-12-25T08:46:08
| 2018-12-25T08:46:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,089
|
py
|
import scrapy
from design.items import DesignItem
import json
import re
data = {
'channel': 'artop',
'evt': 3,
'company': '上海浪尖工业设计有限公司'
}
class DesignCaseSpider(scrapy.Spider):
name = 'artop'
allowed_domains = ['www.artop-sh.com']
category = {'2cf03': '智能科技', '01e9b': '家居家电', '65962': '交通出行', '7e2c0': '机器人', '147e1': '机械自动化', '27ff8': '健康医疗','55fe6':'设计研究','99efd':'其他'}
category_list = ['2cf03', '01e9b', '65962', '7e2c0', '147e1', '27ff8','55fe6','99efd']
url = 'http://www.artop-sh.com/industrial#_case'
start_urls = [url]
def parse(self, response):
for j in self.category_list:
x = '//div[@class="row list-show"]/a[contains(@class,"%s")]' %j
detail_list = response.xpath(x)
for i in detail_list:
item = DesignItem()
url = 'http://www.artop-sh.com'+i.xpath('./@href').extract()[0]
tags = self.category[j]
title = i.xpath('./p/text()').extract()[0]
img_url = i.xpath('./span/i/@data-src').extract()[0]
if not img_url.startswith('http'):
img_url = 'http://www.artop-sh.com' + img_url
item['title'] = title
item['img_url'] = img_url
item['url'] = url
item['tags'] = tags
for key, value in data.items():
item[key] = value
yield scrapy.Request(url=url,callback=self.parse_detail,meta={"item":item})
def parse_detail(self,response):
item = response.meta['item']
url = response.url
item['url'] = url
remark = response.xpath('//div[@class="padding-md"]//text()').extract()
remark = [''.join(i.split()) for i in remark]
remark = ''.join(remark).strip()
if len(remark) > 480:
remark = remark[:480]
item['remark'] = remark
yield item
|
[
"noreply@github.com"
] |
dqsdatalabs.noreply@github.com
|
1717915a7b2b26e15950547533e5f928c10aac90
|
a7e150089cc4d29da5247feccb6c8b7e578c5027
|
/fileconvert/word2txt.py
|
9d464cb9cbfe4f941ae551c3080194c624b44363
|
[] |
no_license
|
ligangyuan/pyreggmail
|
8cf5d9000ca486a79a1b3de6e65fb5fe8ecfb4c6
|
b442a73bd21ee4d7a399d1559d13612db104e591
|
refs/heads/master
| 2022-12-28T07:08:00.422405
| 2020-10-10T06:10:38
| 2020-10-10T06:10:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,470
|
py
|
# coding=utf-8
"""
Description: convert Word documents to plain-text TXT files
Author: 伏草惟存
Prompt: code in Python3 env
Install package: pip install pypiwin32
"""
import fnmatch
import os
from win32com import client as wc
'''
Function: convert a Word file to a txt file, saved in the current directory by default;
the user may also specify a save path.
Parameters: 1 filePath: path of the source file 2 savePath: optional save directory
'''
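# Usage sketch (hypothetical path): Word2Txt(r'C:\docs\report.docx') writes
# 'report.txt' next to the source; the SaveAs format code 4 used below is
# Word's wdFormatDOSText constant.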
def Word2Txt(filePath, savePath=''):
# 1. Split the path into parent directory and filename
dirs, filename = os.path.split(filePath)
# print(dirs,'\n',filename)
# 2. Derive the converted filename
new_name = ''
if fnmatch.fnmatch(filename, '*.doc'):
new_name = filename[:-4] + '.txt'
elif fnmatch.fnmatch(filename, '*.docx'):
new_name = filename[:-5] + '.txt'
else:
return
print('->', new_name)
# 3. Save path for the converted file (defaults to the source directory)
if savePath == '':
savePath = dirs
word_to_txt = os.path.join(savePath, new_name)
print('->', word_to_txt)
# 4. Launch the Word COM application and save the document as txt
wordapp = wc.Dispatch('Word.Application')
mytxt = wordapp.Documents.Open(filePath)
mytxt.SaveAs(word_to_txt, 4)
mytxt.Close()
if __name__ == '__main__':
filepath = os.path.abspath(r'./dataSet/建行收单应用第三方应用调用接口v2.0.5.docx')
# savepath = ''
Word2Txt(filepath)
|
[
"xiongxiangquan@gmail.com"
] |
xiongxiangquan@gmail.com
|
49865944dba7af70ad85c560b04fc695073ae8cf
|
a32c8f71664d55e397aac4c7e1dc2835994c5995
|
/workspace/algolrithm_1/12-loop_PLUS_MINUS.py
|
3bc979f84ff7cbd3f3f2b0f0a19de7bffa60f3f0
|
[] |
no_license
|
slowlove729/test
|
3789385999da0ff1f2a2660b9fb6acc29a510d71
|
9d1ac9a5aab328f0ff547e22e3a06a5d687e8844
|
refs/heads/main
| 2023-06-19T01:47:01.485870
| 2021-07-17T08:35:16
| 2021-07-17T08:35:16
| 386,877,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
print("+ - 기호를 반복 출력하는 프로그램")
for i in range(int(input("몇개를 출력 할까요 : "))):
if i % 2 == 0:
print("+", end="")
else:
print("-", end="")
print()
|
[
"slowlove729@naver.com"
] |
slowlove729@naver.com
|
932020f19572143afa24a2dc21ee02a5656912fc
|
bb72621c10dd6a3cee04c8b75e60e4e88786f791
|
/chp15/color_dict.py
|
007d5af9ddb25a4b9d04c5402255b29fe07120da
|
[] |
no_license
|
BenU/thinkPython
|
8fcb0ad63ab62dd3dbf54db8acf2124a3b2ee666
|
325bb9827b071d78494e06819db67e23428ff6e7
|
refs/heads/master
| 2021-01-01T06:05:08.684841
| 2012-04-22T14:05:53
| 2012-04-22T14:05:53
| 3,540,918
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,523
|
py
|
""" Code example from Complexity and Computation, a book about
exploring complexity science with Python. Available free from
http://greenteapress.com/complexity
Copyright 2011 Allen B. Downey.
Distributed under the GNU General Public License at gnu.org/licenses/gpl.html.
"""
import re
# the following is the contents of /etc/X11/rgb.txt
COLORS = """
! $Xorg: rgb.txt,v 1.3 2000/08/17 19:54:00 cpqbld Exp $
255 250 250 snow
248 248 255 ghost white
248 248 255 GhostWhite
245 245 245 white smoke
245 245 245 WhiteSmoke
220 220 220 gainsboro
255 250 240 floral white
255 250 240 FloralWhite
253 245 230 old lace
253 245 230 OldLace
250 240 230 linen
250 235 215 antique white
250 235 215 AntiqueWhite
255 239 213 papaya whip
255 239 213 PapayaWhip
255 235 205 blanched almond
255 235 205 BlanchedAlmond
255 228 196 bisque
255 218 185 peach puff
255 218 185 PeachPuff
255 222 173 navajo white
255 222 173 NavajoWhite
255 228 181 moccasin
255 248 220 cornsilk
255 255 240 ivory
255 250 205 lemon chiffon
255 250 205 LemonChiffon
255 245 238 seashell
240 255 240 honeydew
245 255 250 mint cream
245 255 250 MintCream
240 255 255 azure
240 248 255 alice blue
240 248 255 AliceBlue
230 230 250 lavender
255 240 245 lavender blush
255 240 245 LavenderBlush
255 228 225 misty rose
255 228 225 MistyRose
255 255 255 white
0 0 0 black
47 79 79 dark slate gray
47 79 79 DarkSlateGray
47 79 79 dark slate grey
47 79 79 DarkSlateGrey
105 105 105 dim gray
105 105 105 DimGray
105 105 105 dim grey
105 105 105 DimGrey
112 128 144 slate gray
112 128 144 SlateGray
112 128 144 slate grey
112 128 144 SlateGrey
119 136 153 light slate gray
119 136 153 LightSlateGray
119 136 153 light slate grey
119 136 153 LightSlateGrey
190 190 190 gray
190 190 190 grey
211 211 211 light grey
211 211 211 LightGrey
211 211 211 light gray
211 211 211 LightGray
25 25 112 midnight blue
25 25 112 MidnightBlue
0 0 128 navy
0 0 128 navy blue
0 0 128 NavyBlue
100 149 237 cornflower blue
100 149 237 CornflowerBlue
72 61 139 dark slate blue
72 61 139 DarkSlateBlue
106 90 205 slate blue
106 90 205 SlateBlue
123 104 238 medium slate blue
123 104 238 MediumSlateBlue
132 112 255 light slate blue
132 112 255 LightSlateBlue
0 0 205 medium blue
0 0 205 MediumBlue
65 105 225 royal blue
65 105 225 RoyalBlue
0 0 255 blue
30 144 255 dodger blue
30 144 255 DodgerBlue
0 191 255 deep sky blue
0 191 255 DeepSkyBlue
135 206 235 sky blue
135 206 235 SkyBlue
135 206 250 light sky blue
135 206 250 LightSkyBlue
70 130 180 steel blue
70 130 180 SteelBlue
176 196 222 light steel blue
176 196 222 LightSteelBlue
173 216 230 light blue
173 216 230 LightBlue
176 224 230 powder blue
176 224 230 PowderBlue
175 238 238 pale turquoise
175 238 238 PaleTurquoise
0 206 209 dark turquoise
0 206 209 DarkTurquoise
72 209 204 medium turquoise
72 209 204 MediumTurquoise
64 224 208 turquoise
0 255 255 cyan
224 255 255 light cyan
224 255 255 LightCyan
95 158 160 cadet blue
95 158 160 CadetBlue
102 205 170 medium aquamarine
102 205 170 MediumAquamarine
127 255 212 aquamarine
0 100 0 dark green
0 100 0 DarkGreen
85 107 47 dark olive green
85 107 47 DarkOliveGreen
143 188 143 dark sea green
143 188 143 DarkSeaGreen
46 139 87 sea green
46 139 87 SeaGreen
60 179 113 medium sea green
60 179 113 MediumSeaGreen
32 178 170 light sea green
32 178 170 LightSeaGreen
152 251 152 pale green
152 251 152 PaleGreen
0 255 127 spring green
0 255 127 SpringGreen
124 252 0 lawn green
124 252 0 LawnGreen
0 255 0 green
127 255 0 chartreuse
0 250 154 medium spring green
0 250 154 MediumSpringGreen
173 255 47 green yellow
173 255 47 GreenYellow
50 205 50 lime green
50 205 50 LimeGreen
154 205 50 yellow green
154 205 50 YellowGreen
34 139 34 forest green
34 139 34 ForestGreen
107 142 35 olive drab
107 142 35 OliveDrab
189 183 107 dark khaki
189 183 107 DarkKhaki
240 230 140 khaki
238 232 170 pale goldenrod
238 232 170 PaleGoldenrod
250 250 210 light goldenrod yellow
250 250 210 LightGoldenrodYellow
255 255 224 light yellow
255 255 224 LightYellow
255 255 0 yellow
255 215 0 gold
238 221 130 light goldenrod
238 221 130 LightGoldenrod
218 165 32 goldenrod
184 134 11 dark goldenrod
184 134 11 DarkGoldenrod
188 143 143 rosy brown
188 143 143 RosyBrown
205 92 92 indian red
205 92 92 IndianRed
139 69 19 saddle brown
139 69 19 SaddleBrown
160 82 45 sienna
205 133 63 peru
222 184 135 burlywood
245 245 220 beige
245 222 179 wheat
244 164 96 sandy brown
244 164 96 SandyBrown
210 180 140 tan
210 105 30 chocolate
178 34 34 firebrick
165 42 42 brown
233 150 122 dark salmon
233 150 122 DarkSalmon
250 128 114 salmon
255 160 122 light salmon
255 160 122 LightSalmon
255 165 0 orange
255 140 0 dark orange
255 140 0 DarkOrange
255 127 80 coral
240 128 128 light coral
240 128 128 LightCoral
255 99 71 tomato
255 69 0 orange red
255 69 0 OrangeRed
255 0 0 red
255 105 180 hot pink
255 105 180 HotPink
255 20 147 deep pink
255 20 147 DeepPink
255 192 203 pink
255 182 193 light pink
255 182 193 LightPink
219 112 147 pale violet red
219 112 147 PaleVioletRed
176 48 96 maroon
199 21 133 medium violet red
199 21 133 MediumVioletRed
208 32 144 violet red
208 32 144 VioletRed
255 0 255 magenta
238 130 238 violet
221 160 221 plum
218 112 214 orchid
186 85 211 medium orchid
186 85 211 MediumOrchid
153 50 204 dark orchid
153 50 204 DarkOrchid
148 0 211 dark violet
148 0 211 DarkViolet
138 43 226 blue violet
138 43 226 BlueViolet
160 32 240 purple
147 112 219 medium purple
147 112 219 MediumPurple
216 191 216 thistle
255 250 250 snow1
238 233 233 snow2
205 201 201 snow3
139 137 137 snow4
255 245 238 seashell1
238 229 222 seashell2
205 197 191 seashell3
139 134 130 seashell4
255 239 219 AntiqueWhite1
238 223 204 AntiqueWhite2
205 192 176 AntiqueWhite3
139 131 120 AntiqueWhite4
255 228 196 bisque1
238 213 183 bisque2
205 183 158 bisque3
139 125 107 bisque4
255 218 185 PeachPuff1
238 203 173 PeachPuff2
205 175 149 PeachPuff3
139 119 101 PeachPuff4
255 222 173 NavajoWhite1
238 207 161 NavajoWhite2
205 179 139 NavajoWhite3
139 121 94 NavajoWhite4
255 250 205 LemonChiffon1
238 233 191 LemonChiffon2
205 201 165 LemonChiffon3
139 137 112 LemonChiffon4
255 248 220 cornsilk1
238 232 205 cornsilk2
205 200 177 cornsilk3
139 136 120 cornsilk4
255 255 240 ivory1
238 238 224 ivory2
205 205 193 ivory3
139 139 131 ivory4
240 255 240 honeydew1
224 238 224 honeydew2
193 205 193 honeydew3
131 139 131 honeydew4
255 240 245 LavenderBlush1
238 224 229 LavenderBlush2
205 193 197 LavenderBlush3
139 131 134 LavenderBlush4
255 228 225 MistyRose1
238 213 210 MistyRose2
205 183 181 MistyRose3
139 125 123 MistyRose4
240 255 255 azure1
224 238 238 azure2
193 205 205 azure3
131 139 139 azure4
131 111 255 SlateBlue1
122 103 238 SlateBlue2
105 89 205 SlateBlue3
71 60 139 SlateBlue4
72 118 255 RoyalBlue1
67 110 238 RoyalBlue2
58 95 205 RoyalBlue3
39 64 139 RoyalBlue4
0 0 255 blue1
0 0 238 blue2
0 0 205 blue3
0 0 139 blue4
30 144 255 DodgerBlue1
28 134 238 DodgerBlue2
24 116 205 DodgerBlue3
16 78 139 DodgerBlue4
99 184 255 SteelBlue1
92 172 238 SteelBlue2
79 148 205 SteelBlue3
54 100 139 SteelBlue4
0 191 255 DeepSkyBlue1
0 178 238 DeepSkyBlue2
0 154 205 DeepSkyBlue3
0 104 139 DeepSkyBlue4
135 206 255 SkyBlue1
126 192 238 SkyBlue2
108 166 205 SkyBlue3
74 112 139 SkyBlue4
176 226 255 LightSkyBlue1
164 211 238 LightSkyBlue2
141 182 205 LightSkyBlue3
96 123 139 LightSkyBlue4
198 226 255 SlateGray1
185 211 238 SlateGray2
159 182 205 SlateGray3
108 123 139 SlateGray4
202 225 255 LightSteelBlue1
188 210 238 LightSteelBlue2
162 181 205 LightSteelBlue3
110 123 139 LightSteelBlue4
191 239 255 LightBlue1
178 223 238 LightBlue2
154 192 205 LightBlue3
104 131 139 LightBlue4
224 255 255 LightCyan1
209 238 238 LightCyan2
180 205 205 LightCyan3
122 139 139 LightCyan4
187 255 255 PaleTurquoise1
174 238 238 PaleTurquoise2
150 205 205 PaleTurquoise3
102 139 139 PaleTurquoise4
152 245 255 CadetBlue1
142 229 238 CadetBlue2
122 197 205 CadetBlue3
83 134 139 CadetBlue4
0 245 255 turquoise1
0 229 238 turquoise2
0 197 205 turquoise3
0 134 139 turquoise4
0 255 255 cyan1
0 238 238 cyan2
0 205 205 cyan3
0 139 139 cyan4
151 255 255 DarkSlateGray1
141 238 238 DarkSlateGray2
121 205 205 DarkSlateGray3
82 139 139 DarkSlateGray4
127 255 212 aquamarine1
118 238 198 aquamarine2
102 205 170 aquamarine3
69 139 116 aquamarine4
193 255 193 DarkSeaGreen1
180 238 180 DarkSeaGreen2
155 205 155 DarkSeaGreen3
105 139 105 DarkSeaGreen4
84 255 159 SeaGreen1
78 238 148 SeaGreen2
67 205 128 SeaGreen3
46 139 87 SeaGreen4
154 255 154 PaleGreen1
144 238 144 PaleGreen2
124 205 124 PaleGreen3
84 139 84 PaleGreen4
0 255 127 SpringGreen1
0 238 118 SpringGreen2
0 205 102 SpringGreen3
0 139 69 SpringGreen4
0 255 0 green1
0 238 0 green2
0 205 0 green3
0 139 0 green4
127 255 0 chartreuse1
118 238 0 chartreuse2
102 205 0 chartreuse3
69 139 0 chartreuse4
192 255 62 OliveDrab1
179 238 58 OliveDrab2
154 205 50 OliveDrab3
105 139 34 OliveDrab4
202 255 112 DarkOliveGreen1
188 238 104 DarkOliveGreen2
162 205 90 DarkOliveGreen3
110 139 61 DarkOliveGreen4
255 246 143 khaki1
238 230 133 khaki2
205 198 115 khaki3
139 134 78 khaki4
255 236 139 LightGoldenrod1
238 220 130 LightGoldenrod2
205 190 112 LightGoldenrod3
139 129 76 LightGoldenrod4
255 255 224 LightYellow1
238 238 209 LightYellow2
205 205 180 LightYellow3
139 139 122 LightYellow4
255 255 0 yellow1
238 238 0 yellow2
205 205 0 yellow3
139 139 0 yellow4
255 215 0 gold1
238 201 0 gold2
205 173 0 gold3
139 117 0 gold4
255 193 37 goldenrod1
238 180 34 goldenrod2
205 155 29 goldenrod3
139 105 20 goldenrod4
255 185 15 DarkGoldenrod1
238 173 14 DarkGoldenrod2
205 149 12 DarkGoldenrod3
139 101 8 DarkGoldenrod4
255 193 193 RosyBrown1
238 180 180 RosyBrown2
205 155 155 RosyBrown3
139 105 105 RosyBrown4
255 106 106 IndianRed1
238 99 99 IndianRed2
205 85 85 IndianRed3
139 58 58 IndianRed4
255 130 71 sienna1
238 121 66 sienna2
205 104 57 sienna3
139 71 38 sienna4
255 211 155 burlywood1
238 197 145 burlywood2
205 170 125 burlywood3
139 115 85 burlywood4
255 231 186 wheat1
238 216 174 wheat2
205 186 150 wheat3
139 126 102 wheat4
255 165 79 tan1
238 154 73 tan2
205 133 63 tan3
139 90 43 tan4
255 127 36 chocolate1
238 118 33 chocolate2
205 102 29 chocolate3
139 69 19 chocolate4
255 48 48 firebrick1
238 44 44 firebrick2
205 38 38 firebrick3
139 26 26 firebrick4
255 64 64 brown1
238 59 59 brown2
205 51 51 brown3
139 35 35 brown4
255 140 105 salmon1
238 130 98 salmon2
205 112 84 salmon3
139 76 57 salmon4
255 160 122 LightSalmon1
238 149 114 LightSalmon2
205 129 98 LightSalmon3
139 87 66 LightSalmon4
255 165 0 orange1
238 154 0 orange2
205 133 0 orange3
139 90 0 orange4
255 127 0 DarkOrange1
238 118 0 DarkOrange2
205 102 0 DarkOrange3
139 69 0 DarkOrange4
255 114 86 coral1
238 106 80 coral2
205 91 69 coral3
139 62 47 coral4
255 99 71 tomato1
238 92 66 tomato2
205 79 57 tomato3
139 54 38 tomato4
255 69 0 OrangeRed1
238 64 0 OrangeRed2
205 55 0 OrangeRed3
139 37 0 OrangeRed4
255 0 0 red1
238 0 0 red2
205 0 0 red3
139 0 0 red4
215 7 81 DebianRed
255 20 147 DeepPink1
238 18 137 DeepPink2
205 16 118 DeepPink3
139 10 80 DeepPink4
255 110 180 HotPink1
238 106 167 HotPink2
205 96 144 HotPink3
139 58 98 HotPink4
255 181 197 pink1
238 169 184 pink2
205 145 158 pink3
139 99 108 pink4
255 174 185 LightPink1
238 162 173 LightPink2
205 140 149 LightPink3
139 95 101 LightPink4
255 130 171 PaleVioletRed1
238 121 159 PaleVioletRed2
205 104 137 PaleVioletRed3
139 71 93 PaleVioletRed4
255 52 179 maroon1
238 48 167 maroon2
205 41 144 maroon3
139 28 98 maroon4
255 62 150 VioletRed1
238 58 140 VioletRed2
205 50 120 VioletRed3
139 34 82 VioletRed4
255 0 255 magenta1
238 0 238 magenta2
205 0 205 magenta3
139 0 139 magenta4
255 131 250 orchid1
238 122 233 orchid2
205 105 201 orchid3
139 71 137 orchid4
255 187 255 plum1
238 174 238 plum2
205 150 205 plum3
139 102 139 plum4
224 102 255 MediumOrchid1
209 95 238 MediumOrchid2
180 82 205 MediumOrchid3
122 55 139 MediumOrchid4
191 62 255 DarkOrchid1
178 58 238 DarkOrchid2
154 50 205 DarkOrchid3
104 34 139 DarkOrchid4
155 48 255 purple1
145 44 238 purple2
125 38 205 purple3
85 26 139 purple4
171 130 255 MediumPurple1
159 121 238 MediumPurple2
137 104 205 MediumPurple3
93 71 139 MediumPurple4
255 225 255 thistle1
238 210 238 thistle2
205 181 205 thistle3
139 123 139 thistle4
0 0 0 gray0
0 0 0 grey0
3 3 3 gray1
3 3 3 grey1
5 5 5 gray2
5 5 5 grey2
8 8 8 gray3
8 8 8 grey3
10 10 10 gray4
10 10 10 grey4
13 13 13 gray5
13 13 13 grey5
15 15 15 gray6
15 15 15 grey6
18 18 18 gray7
18 18 18 grey7
20 20 20 gray8
20 20 20 grey8
23 23 23 gray9
23 23 23 grey9
26 26 26 gray10
26 26 26 grey10
28 28 28 gray11
28 28 28 grey11
31 31 31 gray12
31 31 31 grey12
33 33 33 gray13
33 33 33 grey13
36 36 36 gray14
36 36 36 grey14
38 38 38 gray15
38 38 38 grey15
41 41 41 gray16
41 41 41 grey16
43 43 43 gray17
43 43 43 grey17
46 46 46 gray18
46 46 46 grey18
48 48 48 gray19
48 48 48 grey19
51 51 51 gray20
51 51 51 grey20
54 54 54 gray21
54 54 54 grey21
56 56 56 gray22
56 56 56 grey22
59 59 59 gray23
59 59 59 grey23
61 61 61 gray24
61 61 61 grey24
64 64 64 gray25
64 64 64 grey25
66 66 66 gray26
66 66 66 grey26
69 69 69 gray27
69 69 69 grey27
71 71 71 gray28
71 71 71 grey28
74 74 74 gray29
74 74 74 grey29
77 77 77 gray30
77 77 77 grey30
79 79 79 gray31
79 79 79 grey31
82 82 82 gray32
82 82 82 grey32
84 84 84 gray33
84 84 84 grey33
87 87 87 gray34
87 87 87 grey34
89 89 89 gray35
89 89 89 grey35
92 92 92 gray36
92 92 92 grey36
94 94 94 gray37
94 94 94 grey37
97 97 97 gray38
97 97 97 grey38
99 99 99 gray39
99 99 99 grey39
102 102 102 gray40
102 102 102 grey40
105 105 105 gray41
105 105 105 grey41
107 107 107 gray42
107 107 107 grey42
110 110 110 gray43
110 110 110 grey43
112 112 112 gray44
112 112 112 grey44
115 115 115 gray45
115 115 115 grey45
117 117 117 gray46
117 117 117 grey46
120 120 120 gray47
120 120 120 grey47
122 122 122 gray48
122 122 122 grey48
125 125 125 gray49
125 125 125 grey49
127 127 127 gray50
127 127 127 grey50
130 130 130 gray51
130 130 130 grey51
133 133 133 gray52
133 133 133 grey52
135 135 135 gray53
135 135 135 grey53
138 138 138 gray54
138 138 138 grey54
140 140 140 gray55
140 140 140 grey55
143 143 143 gray56
143 143 143 grey56
145 145 145 gray57
145 145 145 grey57
148 148 148 gray58
148 148 148 grey58
150 150 150 gray59
150 150 150 grey59
153 153 153 gray60
153 153 153 grey60
156 156 156 gray61
156 156 156 grey61
158 158 158 gray62
158 158 158 grey62
161 161 161 gray63
161 161 161 grey63
163 163 163 gray64
163 163 163 grey64
166 166 166 gray65
166 166 166 grey65
168 168 168 gray66
168 168 168 grey66
171 171 171 gray67
171 171 171 grey67
173 173 173 gray68
173 173 173 grey68
176 176 176 gray69
176 176 176 grey69
179 179 179 gray70
179 179 179 grey70
181 181 181 gray71
181 181 181 grey71
184 184 184 gray72
184 184 184 grey72
186 186 186 gray73
186 186 186 grey73
189 189 189 gray74
189 189 189 grey74
191 191 191 gray75
191 191 191 grey75
194 194 194 gray76
194 194 194 grey76
196 196 196 gray77
196 196 196 grey77
199 199 199 gray78
199 199 199 grey78
201 201 201 gray79
201 201 201 grey79
204 204 204 gray80
204 204 204 grey80
207 207 207 gray81
207 207 207 grey81
209 209 209 gray82
209 209 209 grey82
212 212 212 gray83
212 212 212 grey83
214 214 214 gray84
214 214 214 grey84
217 217 217 gray85
217 217 217 grey85
219 219 219 gray86
219 219 219 grey86
222 222 222 gray87
222 222 222 grey87
224 224 224 gray88
224 224 224 grey88
227 227 227 gray89
227 227 227 grey89
229 229 229 gray90
229 229 229 grey90
232 232 232 gray91
232 232 232 grey91
235 235 235 gray92
235 235 235 grey92
237 237 237 gray93
237 237 237 grey93
240 240 240 gray94
240 240 240 grey94
242 242 242 gray95
242 242 242 grey95
245 245 245 gray96
245 245 245 grey96
247 247 247 gray97
247 247 247 grey97
250 250 250 gray98
250 250 250 grey98
252 252 252 gray99
252 252 252 grey99
255 255 255 gray100
255 255 255 grey100
169 169 169 dark grey
169 169 169 DarkGrey
169 169 169 dark gray
169 169 169 DarkGray
0 0 139 dark blue
0 0 139 DarkBlue
0 139 139 dark cyan
0 139 139 DarkCyan
139 0 139 dark magenta
139 0 139 DarkMagenta
139 0 0 dark red
139 0 0 DarkRed
144 238 144 light green
144 238 144 LightGreen
"""
def make_color_dict(colors=COLORS):
"""Returns a dictionary that maps color names to RGB strings.
The format of RGB strings is '#RRGGBB'.
"""
# regular expressions to match numbers and color names
number = r'(\d+)'
space = r'[ \t]*'
name = r'([ \w]+)'
pattern = space + (number + space) * 3 + name
prog = re.compile(pattern)
# read the file
d = dict()
for line in colors.split('\n'):
ro = prog.match(line)
if ro:
r, g, b, name = ro.groups()
rgb = '#%02x%02x%02x' % (int(r), int(g), int(b))
d[name] = rgb
return d
if __name__ == '__main__':
color_dict = make_color_dict()
for name, rgb in color_dict.iteritems():
print rgb, name
|
[
"benjamin.d.unger@gmail.com"
] |
benjamin.d.unger@gmail.com
|
e084aacacd2f025e8fc92c9a2090102d13937f36
|
2648f57ebd565a96e4e4f6272411b587bfac3394
|
/__init__.py
|
43c7086b5ee33312d8826b7287e001336ca7a6ae
|
[] |
no_license
|
FuJianTech/TodayHot
|
e372380a4f6a8b4cf28b5dc9de051e76398deead
|
3e8909f01ffe41dde466a50a94fb2d995219b52c
|
refs/heads/master
| 2022-12-21T15:19:21.107741
| 2020-09-09T07:10:06
| 2020-09-09T07:12:01
| 292,454,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 50
|
py
|
# Do not delete this file; it is needed so the package can be imported.
|
[
"dorians5689@gmail.com"
] |
dorians5689@gmail.com
|
0671c253c33bf2f166a0cd75968a5cc47fe484cd
|
30bf8dcb27be8cea604b2578d13b836d4944650d
|
/linear.py
|
1d8bf872504b0a4cf8010780833620816bef1050
|
[] |
no_license
|
yshu/221-project
|
39ca6ab520461caae74d75bbee6643716ad0b20a
|
9b8a6cc0b88b416333a7c5281e9a3fcc1730efbf
|
refs/heads/master
| 2020-05-15T19:44:16.397723
| 2019-06-06T23:49:29
| 2019-06-06T23:49:29
| 182,463,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,044
|
py
|
import tensorflow as tf
import tensorflow.contrib.layers as layers
import gym
from progressbar import Progbar
from util import *
import os
import numpy as np
import sys
from gym import wrappers
from collections import deque
from replay_buffer import ReplayBuffer
class config():
env_name = "Pong-v0"
record = True
output_path = "results/pong_linear_3/"
model_output = output_path + "model.weights/"
log_path = output_path + "log.txt"
plot_output = output_path + "scores.png"
record_path = output_path + "monitor/"
saving_freq = 250000
log_freq = 50
eval_freq = 250000
record_freq = 250000
num_episodes_test = 100
nsteps_train = 5000000
batch_size = 32
buffer_size = 1000000
target_update_freq = 10000
gamma = 0.99
learning_freq = 4
state_history = 4
skip_frame = 4
lr_begin = 0.00025
lr_end = 0.00005
lr_nsteps = nsteps_train/2
eps_begin = 1
eps_end = 0.1
eps_nsteps = 1000000
learning_start = 50000
soft_epsilon = 0.05
class Linear(object):
def __init__(self, env, config):
self.env = env
self.config = config
self.build()
def add_placeholders_op(self):
h, w, c = list(self.env.observation_space.shape)
self.s = tf.placeholder(tf.uint8, shape=[None, h, w, c*self.config.state_history])
self.a = tf.placeholder(tf.int32, shape=[None])
self.r = tf.placeholder(tf.float32, shape=[None])
self.sp = tf.placeholder(tf.uint8, shape=[None, h, w, c*self.config.state_history])
self.done_mask = tf.placeholder(tf.bool, shape=[None])
self.lr = tf.placeholder(tf.float32, shape=())
self.avg_reward_placeholder = tf.placeholder(tf.float32, shape=(), name="avg_reward")
def q_network_op(self, state):
num_actions = self.env.action_space.n
flatten = layers.flatten(state)
out = layers.fully_connected(flatten, num_actions, activation_fn=None)
return out
def add_loss_op(self, q, target_q):
"""
Q_samp(s) = r if done
= r + gamma * max_a' Q_target(s', a')
loss = (Q_samp(s) - Q(s, a))^2
"""
num_actions = self.env.action_space.n
discounted_max_q = self.config.gamma * tf.reduce_max(target_q, axis=1)
q_samp = tf.where(self.done_mask, self.r, self.r + discounted_max_q)
q_s = tf.reduce_sum(q*tf.one_hot(self.a, num_actions), axis=1)
loss = tf.reduce_mean(tf.squared_difference(q_samp, q_s))
return loss
def build(self):
self.add_placeholders_op()
with tf.variable_scope('q', reuse=False):
s = tf.cast(self.s, tf.float32)/255. #[0,255] -> [0,1]
self.q = self.q_network_op(s)
with tf.variable_scope('target_q', reuse=False):
sp = tf.cast(self.sp, tf.float32)/255. #[0,255] -> [0,1]
self.target_q = self.q_network_op(sp)
q_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='q')
t_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target_q')
op = [tf.assign(t_vars[i], q_vars[i]) for i in range(len(q_vars))]
self.update_target_op = tf.group(*op)
self.loss = self.add_loss_op(self.q, self.target_q)
optimizer = tf.train.AdamOptimizer(self.lr)
q_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='q')
grads_and_vars = optimizer.compute_gradients(self.loss, q_vars)
self.train_op = optimizer.apply_gradients(grads_and_vars)
tf.summary.scalar("loss", self.loss)
tf.summary.scalar("Avg_Reward", self.avg_reward_placeholder)
class train_Linear(Linear):
def __init__(self, env, config):
Linear.__init__(self, env, config)
self.logger = get_logger(config.log_path)
self.avg_reward = 0
self.progress = Progbar(target=self.config.nsteps_train)
def get_log(self, exp_schedule, lr_schedule, t, loss_eval, max_q_values, rewards):
if ((t > self.config.learning_start) and (t % self.config.log_freq == 0) and (t % self.config.learning_freq == 0)):
self.avg_reward = np.mean(rewards)
max_q = np.mean(max_q_values)
exp_schedule.update(t)
lr_schedule.update(t)
if len(rewards) > 0:
self.progress.update(t + 1, values=[("Loss", loss_eval), ("Avg_R", self.avg_reward),
("Max_R", np.max(rewards)), ("eps", exp_schedule.epsilon),
("Max_Q", max_q), ("lr", lr_schedule.epsilon)])
elif (t < self.config.learning_start) and (t % self.config.log_freq == 0):
sys.stdout.write("\rLearning not start yet: {}/{}...".format(t, self.config.learning_start))
sys.stdout.flush()
def train_step(self, t, replay_buffer, lr):
loss_eval = 0
if (t > self.config.learning_start and t % self.config.learning_freq == 0):
s_batch, a_batch, r_batch, sp_batch, done_mask_batch = replay_buffer.sample(self.config.batch_size)
model_spec = {self.s: s_batch,
self.a: a_batch,
self.r: r_batch,
self.sp: sp_batch,
self.done_mask: done_mask_batch,
self.lr: lr,
self.avg_reward_placeholder: self.avg_reward, }
loss_eval, summary, _ = self.sess.run([self.loss, self.all_summary, self.train_op], feed_dict=model_spec)
self.file_writer.add_summary(summary, t)
if t % self.config.target_update_freq == 0:
self.sess.run(self.update_target_op)
if (t % self.config.saving_freq == 0):
self.saver.save(self.sess, self.config.model_output)
return loss_eval
def train(self, exp_schedule, lr_schedule):
replay_buffer = ReplayBuffer(self.config.buffer_size, self.config.state_history)
rewards = deque(maxlen=self.config.num_episodes_test)
max_q_values = deque(maxlen=1000)
q_values = deque(maxlen=1000)
t = last_eval = last_record = 0
scores_eval = [] # scores for plot
scores_eval += [self.evaluate()]
while t < self.config.nsteps_train:
sum_reward = 0
state = self.env.reset()
while True:
t += 1
last_eval += 1
last_record += 1
# replay memory stuff
idx = replay_buffer.store_frame(state)
q_input = replay_buffer.encode_recent_observation()
action_values = self.sess.run(self.q, feed_dict={self.s: [q_input]})[0]
best_action = np.argmax(action_values)
action = exp_schedule.get_action(best_action)
max_q_values.append(max(action_values))
q_values += list(action_values)
new_state, reward, done, info = self.env.step(action)
# store the transition
replay_buffer.store_effect(idx, action, reward, done)
state = new_state
loss_eval = self.train_step(t, replay_buffer, lr_schedule.epsilon)
self.get_log(exp_schedule, lr_schedule, t, loss_eval, max_q_values, rewards)
sum_reward += reward
if done or t >= self.config.nsteps_train: break
rewards.append(sum_reward)
if t > self.config.learning_start:
if last_eval > self.config.eval_freq:
last_eval = 0
scores_eval += [self.evaluate()]
elif self.config.record and (last_record > self.config.record_freq):
self.logger.info("Recording...")
last_record = 0
self.record()
self.logger.info("*** Training is done.")
self.saver.save(self.sess, self.config.model_output)
scores_eval += [self.evaluate()]
export_plot(scores_eval, "Scores", self.config.plot_output)
def evaluate(self, env=None, num_episodes=None):
if env is None: env = self.env
if num_episodes is None:
self.logger.info("Evaluating...")
num_episodes = self.config.num_episodes_test
replay_buffer = ReplayBuffer(self.config.buffer_size, self.config.state_history)
rewards = []
for i in range(num_episodes):
sum_reward = 0
state = env.reset()
while True:
idx = replay_buffer.store_frame(state)
q_input = replay_buffer.encode_recent_observation()
action = self.env.action_space.sample()
if self.config.soft_epsilon < np.random.random():
action = np.argmax(self.sess.run(self.q, feed_dict={self.s: [q_input]})[0])
new_state, reward, done, info = env.step(action)
replay_buffer.store_effect(idx, action, reward, done)
state = new_state
sum_reward += reward
if done: break
rewards.append(sum_reward)
avg_reward = np.mean(rewards)
if num_episodes > 1: self.logger.info("Average reward: {:04.2f}".format(avg_reward))
return avg_reward
def record(self):
record_env = gym.wrappers.Monitor(self.env, self.config.record_path, video_callable=lambda x: True, resume=True)
self.evaluate(record_env, 1)
def run(self, exp_schedule, lr_schedule):
self.sess = tf.Session()
self.all_summary = tf.summary.merge_all()
self.file_writer = tf.summary.FileWriter(config.output_path, self.sess.graph)
init = tf.global_variables_initializer()
self.sess.run(init)
self.sess.run(self.update_target_op)
self.saver = tf.train.Saver()
# model
self.train(exp_schedule, lr_schedule)
if self.config.record:
self.record()
if __name__ == '__main__':
if not os.path.exists(config.output_path):
os.makedirs(config.output_path)
if not os.path.exists(config.model_output):
os.makedirs(config.model_output)
env = gym.make(config.env_name)
env = MaxAndSkipWrapper(env, skip=config.skip_frame)
env = ResizeWrapper(env, preprocess=greyscale, shape=(80, 80, 1))
eps_schedule = LinearExploration(env, config.eps_begin,
config.eps_end, config.eps_nsteps)
lr_schedule = LinearSchedule(config.lr_begin, config.lr_end,
config.lr_nsteps)
model = train_Linear(env, config)
model.run(eps_schedule, lr_schedule)
|
[
"sunnyyujie@gmail.com"
] |
sunnyyujie@gmail.com
|
a1f2ab82603f9a36953e31f203b6f23bcd2bc7ea
|
1e65989631b28777ea9440d81851d8feacecd9c2
|
/ss.py
|
edb741935ca992f88c75ab2c022eb5758c41787c
|
[] |
no_license
|
pcamateur/pyt
|
3e2f51c497c3cf57aabec986e6801388c5b36052
|
7b4912587a360a241fff5a340c815c2070ba299e
|
refs/heads/master
| 2023-08-25T13:19:49.411968
| 2023-08-03T14:46:18
| 2023-08-03T14:46:18
| 172,338,389
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
from time import time as t
s = t()
li = list()
# datas = [a for a in range(5, 50000) if a % 2 != 0]
# for x in datas:
for x in range(5, 50000):
for y in range(2, x):
if x % y == 0:
break
else:
li.append(x)
e = t()
# print(li)
print(len(li))
print(e - s)
|
[
"pcamateur@outlook.com"
] |
pcamateur@outlook.com
|
60efcabdf54a0dee2f46e3e9330960626fa4990e
|
a616bb193e867342138c10c5f1dabe3fb724ff44
|
/build/baxter_common/baxter_maintenance_msgs/cmake/baxter_maintenance_msgs-genmsg-context.py
|
59d5c7721e1a43d008f667c7ab691b173995fcd5
|
[] |
no_license
|
erueda/ros_ws
|
c2a5ee8627e5996c5d7dd438a38abdba1ae23a54
|
c88fcff1c8a0db790cc0237150349ed2f64622a3
|
refs/heads/master
| 2016-08-11T07:15:33.537558
| 2016-04-14T09:28:01
| 2016-04-14T09:28:01
| 55,764,856
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/user/ros_ws/src/baxter_common/baxter_maintenance_msgs/msg/CalibrateArmData.msg;/home/user/ros_ws/src/baxter_common/baxter_maintenance_msgs/msg/CalibrateArmEnable.msg;/home/user/ros_ws/src/baxter_common/baxter_maintenance_msgs/msg/TareData.msg;/home/user/ros_ws/src/baxter_common/baxter_maintenance_msgs/msg/TareEnable.msg;/home/user/ros_ws/src/baxter_common/baxter_maintenance_msgs/msg/UpdateSource.msg;/home/user/ros_ws/src/baxter_common/baxter_maintenance_msgs/msg/UpdateSources.msg;/home/user/ros_ws/src/baxter_common/baxter_maintenance_msgs/msg/UpdateStatus.msg"
services_str = ""
pkg_name = "baxter_maintenance_msgs"
dependencies_str = "std_msgs"
langs = "gencpp;genlisp;genpy"
dep_include_paths_str = "baxter_maintenance_msgs;/home/user/ros_ws/src/baxter_common/baxter_maintenance_msgs/msg;std_msgs;/opt/ros/indigo/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/indigo/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
[
"ernestorueda@gmail.com"
] |
ernestorueda@gmail.com
|
97a6a7782e055cddefadc02fbcb1c9f43a065e00
|
7f52724110a12d7721f3bbb7a0fce0c4b1c3dd97
|
/gameserver/fb_open_graph_to_sql.py
|
fe685d6683b82038f09530b599d2202d570d79c9
|
[
"MIT"
] |
permissive
|
cssp1/assignment1
|
896cb69e8ff43e26658c65ea16b079f87eebef9a
|
0839fc589cb52e7384c446593db79e0c2ea737d5
|
refs/heads/master
| 2023-03-10T08:03:56.954064
| 2022-07-20T04:02:15
| 2022-07-20T04:02:15
| 29,496,198
| 0
| 2
| null | 2023-02-17T17:56:53
| 2015-01-19T20:52:53
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 8,597
|
py
|
#!/usr/bin/env python
# Copyright (c) 2015 Battlehouse Inc. All rights reserved.
# Use of this source code is governed by an MIT-style license that can be
# found in the LICENSE file.
# dump "log_fb_open_graph" from MongoDB to a MySQL database for analytics
import sys, time, getopt
import SpinConfig
import SpinNoSQL
import SpinSQLUtil
import SpinSingletonProcess
import SpinMySQLdb
time_now = int(time.time())
def fb_open_graph_schema(sql_util): return {
'fields': [('time', 'INT8 NOT NULL'),
('user_id', 'INT4 NOT NULL'),
('event_name', 'VARCHAR(128) NOT NULL')] + \
sql_util.summary_in_dimensions() + \
[
('action', 'VARCHAR(128)'),
('object_type', 'VARCHAR(128)'),
('object_spec', 'VARCHAR(128)'),
('object_level', 'INT4'),
('posting_user_id', 'INT4')
],
'indices': {'by_time': {'keys': [('time','ASC')]}}
}
def fb_open_graph_summary_schema(sql_util): return {
'fields': [('day', 'INT8 NOT NULL')] + \
sql_util.summary_out_dimensions() + \
[('event_name', 'VARCHAR(128) NOT NULL'),
('action', 'VARCHAR(128)'),
('object_type', 'VARCHAR(128)'),
('object_spec', 'VARCHAR(128)'),
('object_level', 'INT4'),
('count', 'INT4'),
('unique_players', 'INT4')],
'indices': {'by_day': {'keys': [('day','ASC')]}}
}
def iterate_from_mongodb(game_id, table_name, start_time, end_time):
nosql_client = SpinNoSQL.NoSQLClient(SpinConfig.get_mongodb_config(game_id))
qs = {'time': {'$gt': start_time, '$lt': end_time}}
for row in nosql_client.log_buffer_table(table_name).find(qs):
row['_id'] = nosql_client.decode_object_id(row['_id'])
yield row
if __name__ == '__main__':
game_id = SpinConfig.game()
commit_interval = 1000
verbose = True
do_prune = False
do_optimize = False
opts, args = getopt.gnu_getopt(sys.argv[1:], 'g:c:q', ['prune','optimize'])
for key, val in opts:
if key == '-g': game_id = val
elif key == '-c': commit_interval = int(val)
elif key == '-q': verbose = False
elif key == '--prune': do_prune = True
elif key == '--optimize': do_optimize = True
sql_util = SpinSQLUtil.MySQLUtil()
if not verbose: sql_util.disable_warnings()
cfg = SpinConfig.get_mysql_config(game_id+'_upcache')
con = SpinMySQLdb.connect(*cfg['connect_args'], **cfg['connect_kwargs'])
with SpinSingletonProcess.SingletonProcess('fb_open_graph_to_sql-%s' % game_id):
fb_open_graph_table = cfg['table_prefix']+game_id+'_fb_open_graph'
fb_open_graph_summary_table = cfg['table_prefix']+game_id+'_fb_open_graph_daily_summary'
cur = con.cursor(SpinMySQLdb.cursors.DictCursor)
sql_util.ensure_table(cur, fb_open_graph_table, fb_open_graph_schema(sql_util))
sql_util.ensure_table(cur, fb_open_graph_summary_table, fb_open_graph_summary_schema(sql_util))
con.commit()
# find most recent already-converted action
start_time = -1
end_time = time_now - 600 # skip entries too close to "now" to ensure all events for a given second have all arrived
cur.execute("SELECT time FROM "+sql_util.sym(fb_open_graph_table)+" ORDER BY time DESC LIMIT 1")
rows = cur.fetchall()
if rows:
start_time = max(start_time, rows[0]['time'])
con.commit()
if verbose: print 'start_time', start_time, 'end_time', end_time
batch = 0
total = 0
affected_days = set()
for source_table in ('log_fb_open_graph',):
for row in iterate_from_mongodb(game_id, source_table, start_time, end_time):
if ('sum' not in row) or ('user_id' not in row): continue # ignore bad legacy data
if row['sum'].get('developer',False): continue # skip events by developers
keyvals = [('time',row['time']),
('user_id',row['user_id']),
('event_name',row['event_name'])] + \
sql_util.parse_brief_summary(row['sum'])
for FIELD in ('action', 'object_type', 'object_spec', 'object_level', 'posting_user_id'):
if FIELD in row:
keyvals.append((FIELD, row[FIELD]))
sql_util.do_insert(cur, fb_open_graph_table, keyvals)
batch += 1
total += 1
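# bucket the event timestamp to its UTC day boundary so only the affected
# daily-summary rows are rebuilt later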
affected_days.add(86400*(row['time']//86400))
if commit_interval > 0 and batch >= commit_interval:
batch = 0
con.commit()
if verbose: print total, 'inserted'
con.commit()
if verbose: print 'total', total, 'inserted', 'affecting', len(affected_days), 'day(s)'
# update summary
cur.execute("SELECT MIN(time) AS min_time, MAX(time) AS max_time FROM "+sql_util.sym(fb_open_graph_table))
rows = cur.fetchall()
if rows and rows[0] and rows[0]['min_time'] and rows[0]['max_time']:
event_range = (rows[0]['min_time'], rows[0]['max_time'])
else:
event_range = None
dt = 86400
# check how much summary data we already have
cur.execute("SELECT MIN(day) AS begin, MAX(day) AS end FROM "+sql_util.sym(fb_open_graph_summary_table))
rows = cur.fetchall()
if rows and rows[0] and rows[0]['begin'] and rows[0]['end']:
# we already have summary data - update it incrementally
if event_range: # fill in any missing trailing summary data
source_days = sorted(affected_days.union(set(xrange(dt*(rows[0]['end']//dt + 1), dt*(event_range[1]//dt + 1), dt))))
else:
source_days = sorted(list(affected_days))
else:
# recreate entire summary
if event_range:
source_days = range(dt*(event_range[0]//dt), dt*(event_range[1]//dt + 1), dt)
else:
source_days = None
if source_days:
for day_start in source_days:
if verbose: print 'updating', fb_open_graph_summary_table, 'at', time.strftime('%Y%m%d', time.gmtime(day_start))
# delete entries for the date range we're about to update
cur.execute("DELETE FROM "+sql_util.sym(fb_open_graph_summary_table)+" WHERE day >= %s AND day < %s+86400", [day_start,]*2)
cur.execute("INSERT INTO "+sql_util.sym(fb_open_graph_summary_table) + \
"SELECT 86400*FLOOR(time/86400.0) AS day ," + \
" frame_platform AS frame_platform, " + \
" country_tier AS country_tier ," + \
" townhall_level AS townhall_level, " + \
" "+sql_util.encode_spend_bracket("prev_receipts")+" AS spend_bracket, " + \
" event_name AS event_name, " + \
" action AS action, " + \
" object_type AS object_type, " + \
" object_spec AS object_spec, " + \
" object_level AS object_level, " + \
" COUNT(1) AS count, " + \
" COUNT(DISTINCT(user_id)) AS unique_players " + \
"FROM " + sql_util.sym(fb_open_graph_table) + " req " + \
"WHERE time >= %s AND time < %s+86400 " + \
"GROUP BY day, frame_platform, country_tier, townhall_level, spend_bracket, event_name, action, object_type, object_spec, object_level ORDER BY NULL", [day_start,]*2)
con.commit() # one commit per day
else:
if verbose: print 'no change to', fb_open_graph_summary_table
if do_prune:
# drop old data
KEEP_DAYS = 90
old_limit = time_now - KEEP_DAYS * 86400
if verbose: print 'pruning', fb_open_graph_table
cur = con.cursor()
cur.execute("DELETE FROM "+sql_util.sym(fb_open_graph_table)+" WHERE time < %s", [old_limit])
if do_optimize:
if verbose: print 'optimizing', fb_open_graph_table
cur.execute("OPTIMIZE TABLE "+sql_util.sym(fb_open_graph_table))
con.commit()
|
[
"dmaas@spinpunch.com"
] |
dmaas@spinpunch.com
|
ce6f4a8467d898e56fdbda95b48575d8483b051d
|
a6b4b072f47aa6b8fa86eee636b1bf7d6666f010
|
/uplink/types.py
|
4c32f5a22c1a29270e09735cc3d5d63af6044fe7
|
[
"MIT"
] |
permissive
|
plucena24/uplink
|
ea796f2198f4c907e9d2268159e100174e63f285
|
b80c790d45da4b3bdd8ec3909ba99025c51a0b7c
|
refs/heads/master
| 2023-08-21T14:22:13.528707
| 2017-11-08T19:18:34
| 2017-11-08T19:18:34
| 110,165,549
| 0
| 0
|
MIT
| 2023-08-08T19:45:02
| 2017-11-09T20:58:10
|
Python
|
UTF-8
|
Python
| false
| false
| 12,331
|
py
|
"""
This module implements the built-in argument annotations and their
handling classes.
"""
# Standard library imports
import collections
import inspect
# Local imports
from uplink import converter, exceptions, interfaces, utils
__all__ = [
"Path",
"Query",
"QueryMap",
"Header",
"HeaderMap",
"Field",
"FieldMap",
"Part",
"PartMap",
"Body",
"Url"
]
class ExhaustedArguments(exceptions.AnnotationError):
message = (
"Failed to add `%s` to method `%s`, as all arguments have "
"been annotated."
)
def __init__(self, annotation, func):
self.message = self.message % (annotation, func.__name__)
class ArgumentNotFound(exceptions.AnnotationError):
message = "`%s` does not match any argument name of method `%s`."
def __init__(self, name, func):
self.message = self.message % (name, func.__name__)
class MissingArgumentAnnotations(exceptions.InvalidRequestDefinition):
message = "Missing annotation for argument(s): '%s'."
implicit_message = " (Implicit path variables: '%s')"
def __init__(self, missing, path_variables):
missing, path_variables = list(missing), list(path_variables)
self.message = self.message % "', '".join(missing)
if path_variables:
self.message += self.implicit_message % "', '".join(path_variables)
class ArgumentAnnotationHandlerBuilder(
interfaces.AnnotationHandlerBuilder
):
def __init__(self, func, arguments, func_is_method=True):
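# skip the implicit first argument (e.g. 'self') when the wrapped callable is a method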
self._arguments = arguments[func_is_method:]
self._argument_types = collections.OrderedDict.fromkeys(self._arguments)
self._defined = 0
self._func = func
@property
def missing_arguments(self):
return (a for a in self._arguments if self._argument_types[a] is None)
@property
def remaining_args_count(self):
return len(self._arguments) - self._defined
def set_annotations(self, annotations=None, **more_annotations):
if annotations is not None:
if not isinstance(annotations, collections.Mapping):
missing = tuple(
a for a in self.missing_arguments
if a not in more_annotations
)
annotations = dict(zip(missing, annotations))
more_annotations.update(annotations)
for name in more_annotations:
self.add_annotation(more_annotations[name], name)
def add_annotation(self, annotation, name=None, *args, **kwargs):
try:
name = next(self.missing_arguments) if name is None else name
except StopIteration:
raise ExhaustedArguments(annotation, self._func)
if name not in self._argument_types:
raise ArgumentNotFound(name, self._func)
if inspect.isclass(annotation):
annotation = annotation()
if isinstance(annotation, NamedArgument) and annotation.name is None:
annotation.name = name
super(ArgumentAnnotationHandlerBuilder, self).add_annotation(annotation)
self._defined += self._argument_types[name] is None
self._argument_types[name] = annotation
return annotation
def is_done(self):
return self.remaining_args_count == 0
def _auto_fill_remaining_arguments(self):
uri_vars = set(self.request_definition_builder.uri.remaining_variables)
missing = list(self.missing_arguments)
still_missing = set(missing) - uri_vars
# Preserve order of function parameters.
matching = [p for p in missing if p in uri_vars]
if still_missing:
raise MissingArgumentAnnotations(still_missing, matching)
self.set_annotations(dict.fromkeys(matching, Path))
def build(self):
if not self.is_done():
self._auto_fill_remaining_arguments()
return ArgumentAnnotationHandler(
self._func,
self._argument_types,
)
class ArgumentAnnotationHandler(interfaces.AnnotationHandler):
def __init__(self, func, arguments):
self._func = func
self._arguments = arguments
@property
def annotations(self):
return iter(self._arguments.values())
def get_relevant_arguments(self, call_args):
return filter(call_args.__contains__, self._arguments)
def handle_call(self, request_builder, func_args, func_kwargs):
call_args = utils.get_call_args(self._func, *func_args, **func_kwargs)
for name in self.get_relevant_arguments(call_args):
self.handle_argument(
request_builder,
self._arguments[name],
call_args[name]
)
@staticmethod
def handle_argument(request_builder, argument, value):
argument_type, converter_key = argument.type, argument.converter_type
converter_ = request_builder.get_converter(converter_key, argument_type)
value = converter_.convert(value)
# TODO: Catch Annotation errors and chain them here + provide context.
argument.modify_request(request_builder, value)
class ArgumentAnnotation(interfaces.Annotation):
can_be_static = True
def __call__(self, request_definition_builder):
request_definition_builder.argument_handler_builder.add_annotation(self)
return request_definition_builder
def modify_request_definition(self, request_definition_builder):
pass
def modify_request(self, request_builder, value):
raise NotImplementedError
@property
def type(self):
return None
@property
def converter_type(self):
raise NotImplementedError
class TypedArgument(ArgumentAnnotation):
def __init__(self, type=None):
self._type = type
@property
def type(self):
return self._type
@property
def converter_type(self):
raise NotImplementedError
def modify_request(self, request_builder, value):
raise NotImplementedError
class NamedArgument(TypedArgument):
can_be_static = True
def __init__(self, name=None, type=None):
self._arg_name = name
super(NamedArgument, self).__init__(type)
@property
def name(self):
return self._arg_name
@name.setter
def name(self, name):
if self._arg_name is None:
self._arg_name = name
else:
raise AttributeError("Name is already set.")
@property
def converter_type(self):
raise NotImplementedError
def modify_request(self, request_builder, value):
raise NotImplementedError
class Path(NamedArgument):
"""
Substitution of a path variable in a `URI template
<https://tools.ietf.org/html/rfc6570>`__.
URI template parameters are enclosed in braces (e.g.,
:code:`{name}`). To map an argument to a declared URI parameter, use
the :py:class:`Path` annotation:
.. code-block:: python
class TodoService(object):
@get("todos{/id}")
def get_todo(self, todo_id: Path("id")): pass
Then, invoking :code:`get_todo` with a consumer instance:
.. code-block:: python
todo_service.get_todo(100)
creates an HTTP request with a URL ending in :code:`todos/100`.
Note:
When building the consumer instance, :py:func:`uplink.build` will try
match unannotated function arguments with URL path parameters. See
:ref:`implicit_path_annotations` for details.
For example, we could rewrite the method from the previous
example as:
.. code-block:: python
@get("todos{/id}")
def get_todo(self, id): pass
"""
@property
def converter_type(self):
return converter.CONVERT_TO_STRING
def modify_request_definition(self, request_definition_builder):
request_definition_builder.uri.add_variable(self.name)
def modify_request(self, request_builder, value):
request_builder.uri.set_variable({self.name: value})
class Query(NamedArgument):
@staticmethod
def convert_to_string(value):
# TODO: Move this responsibility to the `converter`
# Convert to string or list of strings.
if isinstance(value, (list, tuple)):
return list(map(str, value))
else:
return str(value)
@property
def converter_type(self):
return converter.CONVERT_TO_REQUEST_BODY
def modify_request(self, request_builder, value):
value = self.convert_to_string(value)
request_builder.info["params"][self.name] = value
class QueryMap(TypedArgument):
@property
def converter_type(self):
return converter.Map(converter.CONVERT_TO_REQUEST_BODY)
@classmethod
def modify_request(cls, request_builder, value):
value = dict((k, Query.convert_to_string(value[k])) for k in value)
request_builder.info["params"].update(value)
class Header(NamedArgument):
@property
def converter_type(self):
return converter.CONVERT_TO_STRING
def modify_request(self, request_builder, value):
request_builder.info["headers"][self.name] = value
class HeaderMap(TypedArgument):
@property
def converter_type(self):
return converter.Map(converter.CONVERT_TO_STRING)
@classmethod
def modify_request(cls, request_builder, value):
request_builder.info["headers"].update(value)
class Field(NamedArgument):
class FieldAssignmentFailed(exceptions.AnnotationError):
message = (
"Failed to define field '%s' to request body. Another argument "
"annotation might have overwritten the body entirely."
)
def __init__(self, field):
self.message = self.message % field.name
@property
def converter_type(self):
return converter.CONVERT_TO_STRING
def modify_request(self, request_builder, value):
try:
request_builder.info["data"][self.name] = value
except TypeError:
# TODO: re-raise with TypeError
# `data` does not support item assignment
raise self.FieldAssignmentFailed(self)
class FieldMap(TypedArgument):
class FieldMapUpdateFailed(exceptions.AnnotationError):
message = (
"Failed to update request body with field map. Another argument "
"annotation might have overwritten the body entirely."
)
@property
def converter_type(self):
return converter.Map(converter.CONVERT_TO_STRING)
def modify_request(self, request_builder, value):
try:
request_builder.info["data"].update(value)
except AttributeError:
# TODO: re-raise with AttributeError
raise self.FieldMapUpdateFailed()
class Part(NamedArgument):
@property
def converter_type(self):
return converter.CONVERT_TO_REQUEST_BODY
def modify_request(self, request_builder, value):
request_builder.info["files"][self.name] = value
class PartMap(TypedArgument):
@property
def converter_type(self):
return converter.Map(converter.CONVERT_TO_REQUEST_BODY)
def modify_request(self, request_builder, value):
request_builder.info["files"].update(value)
class Body(TypedArgument):
@property
def converter_type(self):
return converter.CONVERT_TO_REQUEST_BODY
def modify_request(self, request_builder, value):
request_builder.info["data"] = value
class Url(ArgumentAnnotation):
class DynamicUrlAssignmentFailed(exceptions.AnnotationError):
message = "Failed to set dynamic url annotation on `%s`. "
def __init__(self, request_definition_builder):
self.message = self.message % request_definition_builder.__name__
@property
def converter_type(self):
return converter.CONVERT_TO_STRING
def modify_request_definition(self, request_definition_builder):
try:
request_definition_builder.uri.is_dynamic = True
except ValueError:
# TODO: re-raise with ValueError
raise self.DynamicUrlAssignmentFailed(request_definition_builder)
@classmethod
def modify_request(cls, request_builder, value):
request_builder.uri = value
|
[
"raj.pritvi.kumar@gmail.com"
] |
raj.pritvi.kumar@gmail.com
|
e32cfd7255003e19bdc7b19d7ab4593542306db2
|
1dad81adfb52bc22f554243142d78e6ffa42c570
|
/app/migrations/0010_auto_20210918_0900.py
|
8a0a71a3d0375ef0d8d29f3ede3e31999c663d7b
|
[] |
no_license
|
Mowzak/django-shop
|
9f54fab1bd227c997d01eb0854e36084f8c351f4
|
0910bc691edb31177f3a7aed297bbd5881145ba3
|
refs/heads/master
| 2023-08-01T23:57:43.021327
| 2021-10-03T07:14:28
| 2021-10-03T07:14:28
| 388,475,908
| 0
| 0
| null | 2021-07-25T10:49:40
| 2021-07-22T13:41:01
|
CSS
|
UTF-8
|
Python
| false
| false
| 706
|
py
|
# Generated by Django 3.0.4 on 2021-09-18 04:30
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0009_auto_20210918_0854'),
]
operations = [
migrations.AddField(
model_name='checkout',
name='price',
field=models.CharField(default='', max_length=100),
),
migrations.AlterField(
model_name='checkout',
name='phone_number',
field=models.CharField(default='', max_length=15, validators=[django.core.validators.RegexValidator('[0-9]{11}', 'شماره ی تلفن صحیح نیست')]),
),
]
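# Note on the validator above: Django's RegexValidator matches with re.search, so the
# pattern '[0-9]{11}' accepts any value containing 11 consecutive digits; anchoring it
# (r'^[0-9]{11}$') would be needed to require exactly 11 digits. The Persian message
# translates to "the phone number is not valid".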
# --------------------------------------------------------------------------------------
# Author of the file above: mahdiyadi044@gmail.com
# Next file: /APIR80/migrations/0014_auto_20190111_2214.py  (repo: cadgo/django-chkp, no license)
# --------------------------------------------------------------------------------------
# Generated by Django 2.1.5 on 2019-01-11 22:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('APIR80', '0013_mgmtserverobjects_mgmtserverfilepathnetworksobjects'),
]
operations = [
migrations.AlterField(
model_name='mgmtserverobjects',
name='MGMTServerFilePathNetObjects',
field=models.CharField(default='/home/carlos/gitdjango/django-chkp/APIR80/tmp/chkpobjects.txt', max_length=250),
),
migrations.AlterField(
model_name='mgmtserverobjects',
name='MGMTServerFilePathNetworksObjects',
field=models.CharField(default='/home/carlos/gitdjango/django-chkp/APIR80/tmp/chkpobjectsnetworks.txt', max_length=250),
),
migrations.AlterField(
model_name='mgmtserverobjects',
name='MGMTServerFilePathTCPPorts',
field=models.CharField(default='/home/carlos/gitdjango/django-chkp/APIR80/tmp/chkpports.txt', max_length=250),
),
migrations.AlterField(
model_name='mgmtserverobjects',
name='MGMTServerFilePathUDPPorts',
field=models.CharField(default='/home/carlos/gitdjango/django-chkp/APIR80/tmp/chkpudpports.txt', max_length=250),
),
]
# --------------------------------------------------------------------------------------
# Author of the file above: carlos@UbuntuAnsible.4ghqaf2pfwlupave34ssco3ame.bx.internal.cloudapp.net
# Next file: /tests/module_test.py  (repo: alexxroche/redis-dump-load, BSD-2-Clause)
# --------------------------------------------------------------------------------------
import redisdl
import unittest
import json
import os.path
from . import util
try:
from io import StringIO, BytesIO
except ImportError:
from StringIO import StringIO
class ModuleTest(unittest.TestCase):
def setUp(self):
import redis
self.r = redis.Redis()
for key in self.r.keys('*'):
self.r.delete(key)
def test_roundtrip(self):
path = os.path.join(os.path.dirname(__file__), 'fixtures', 'dump.json')
with open(path) as f:
dump = f.read()
redisdl.loads(dump)
redump = redisdl.dumps()
expected = json.loads(dump)
actual = json.loads(redump)
self.assertEqual(expected, actual)
def test_dump_string_value(self):
self.r.set('key', 'value')
dump = redisdl.dumps()
actual = json.loads(dump)
expected = {'key': {'type': 'string', 'value': 'value'}}
self.assertEqual(expected, actual)
def test_dump_unicode_value(self):
self.r.set('key', util.u("\u041c\u043e\u0441\u043a\u0432\u0430"))
dump = redisdl.dumps()
actual = json.loads(dump)
expected = {'key': {'type': 'string', 'value': util.u("\u041c\u043e\u0441\u043a\u0432\u0430")}}
self.assertEqual(expected, actual)
def test_load_string_value(self):
dump = '{"key":{"type":"string","value":"hello, world"}}'
redisdl.loads(dump)
value = self.r.get('key')
self.assertEqual('hello, world', value.decode('ascii'))
def test_load_unicode_value(self):
dump = '{"key":{"type":"string","value":"\\u041c\\u043e\\u0441\\u043a\\u0432\\u0430"}}'
redisdl.loads(dump)
value = self.r.get('key')
self.assertEqual(util.b('\xd0\x9c\xd0\xbe\xd1\x81\xd0\xba\xd0\xb2\xd0\xb0'), value)
def test_load_stringio_python_backend_global(self):
self.assertTrue(redisdl.have_streaming_load)
redisdl.streaming_backend = 'python'
dump = '{"key":{"type":"string","value":"hello, world"}}'
io = StringIO(dump)
redisdl.load(io)
value = self.r.get('key')
self.assertEqual('hello, world', value.decode('ascii'))
def test_load_stringio_python_backend_local(self):
self.assertTrue(redisdl.have_streaming_load)
dump = '{"key":{"type":"string","value":"hello, world"}}'
io = StringIO(dump)
redisdl.load(io, streaming_backend='python')
value = self.r.get('key')
self.assertEqual('hello, world', value.decode('ascii'))
def test_load_stringio_no_backend(self):
self.assertTrue(redisdl.have_streaming_load)
redisdl.streaming_backend = None
dump = '{"key":{"type":"string","value":"hello, world"}}'
io = StringIO(dump)
redisdl.load(io)
value = self.r.get('key')
self.assertEqual('hello, world', value.decode('ascii'))
def test_load_stringio_lump(self):
dump = '{"key":{"type":"string","value":"hello, world"}}'
io = StringIO(dump)
redisdl.load_lump(io)
value = self.r.get('key')
self.assertEqual('hello, world', value.decode('ascii'))
if redisdl.py3:
def test_load_bytesio(self):
self.assertTrue(redisdl.have_streaming_load)
dump = '{"key":{"type":"string","value":"hello, world"}}'
io = BytesIO(dump.encode('utf-8'))
redisdl.load(io)
value = self.r.get('key')
self.assertEqual('hello, world', value.decode('ascii'))
def test_load_bytesio_lump(self):
dump = '{"key":{"type":"string","value":"hello, world"}}'
io = BytesIO(dump.encode('utf-8'))
redisdl.load_lump(io)
value = self.r.get('key')
self.assertEqual('hello, world', value.decode('ascii'))
# yajl2 backend does not appear to be capable of loading stringios
def test_load_bytesio_yajl2_backend(self):
self.assertTrue(redisdl.have_streaming_load)
redisdl.streaming_backend = 'yajl2'
dump = '{"key":{"type":"string","value":"hello, world"}}'
io = BytesIO(dump.encode('utf-8'))
redisdl.load(io)
value = self.r.get('key')
self.assertEqual('hello, world', value.decode('ascii'))
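# For context, a minimal round-trip through the redisdl API exercised above; a
# sketch that, like the tests, assumes a Redis server listening on localhost.
def example_roundtrip():
    json_text = redisdl.dumps()   # serialize every key in the database to JSON
    redisdl.loads(json_text)      # restore those keys from the JSON string
    return json_text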
# --------------------------------------------------------------------------------------
# Author of the file above: oleg@bsdpower.com
# Next file: /assignment-2/12.py  (repo: bytesagar/iw-assignments, no license)
# --------------------------------------------------------------------------------------
string = input("Enter a string: ")
print("Lower: " + string.lower())
print("Upper: " + string.upper())
# --------------------------------------------------------------------------------------
# Author of the file above: sagarkarki076@gmail.com
# Next file: /charts.py  (repo: Sravya2007/Project-135-Interpreting-Results, no license)
# --------------------------------------------------------------------------------------
import pandas as pd
import matplotlib.pyplot as plt
star_data = pd.read_csv("habitable_star_data.csv")
star_name = star_data["Star Name"]
distance = star_data["Distance (ly)"]
mass = star_data["Mass (M☉)"]
radius = star_data["Radius (R☉)"]
gravity = star_data["Surface Gravity (m/s²)"]
plt.figure()
plt.bar(star_name, mass)
plt.xlabel("Star Names")
plt.ylabel("Mass of Stars")
plt.title("Star Name vs Mass")
plt.xticks(rotation = 90)
plt.figure()
plt.bar(star_name, radius)
plt.xlabel("Star Names")
plt.ylabel("Radius of Stars")
plt.title("Star Name vs Radius")
plt.xticks(rotation = 90)
plt.figure()
plt.bar(star_name, distance)
plt.xlabel("Star Names")
plt.ylabel("Distance of Stars")
plt.title("Star Name vs Distance")
plt.xticks(rotation = 90)
plt.figure()
plt.bar(star_name, gravity)
plt.xlabel("Star Names")
plt.ylabel("Gravity of Stars")
plt.title("Star Name vs Gravity")
plt.xticks(rotation = 90)
plt.show()
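# The four chart blocks above differ only in the plotted series and labels; an
# equivalent, more compact sketch (same figures, same styling), wrapped in a
# function so it does not re-run on import:
def plot_all(show=False):
    for values, label in [(mass, "Mass"), (radius, "Radius"),
                          (distance, "Distance"), (gravity, "Gravity")]:
        plt.figure()
        plt.bar(star_name, values)
        plt.xlabel("Star Names")
        plt.ylabel(label + " of Stars")
        plt.title("Star Name vs " + label)
        plt.xticks(rotation=90)
    if show:
        plt.show()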
# --------------------------------------------------------------------------------------
# Author of the file above: Sravya2007.noreply@github.com
# Next file: /dtshare/option/option_commodity.py  (repo: SuperPcBull/dtshare, no license)
# --------------------------------------------------------------------------------------
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: Tong Du
date: 2019/9/30 13:58
Email: dtshare@126.com
desc: fetch commodity option market data
Notes:
(1) Price: since 2019-12-02, fibreboard is quoted in yuan/cubic meter instead of yuan/sheet
(2) Price: yuan/ton; eggs in yuan/500 kg, fibreboard in yuan/cubic meter, plywood in yuan/sheet
(3) Volume and open interest: lots (double-counted)
(4) Turnover: 10,000 yuan (double-counted)
(5) Change = close price - previous settlement price
(6) Change 1 = today's settlement price - previous settlement price
(7) Contract series: all option contracts on futures with the same underlying delivery month
(8) Implied volatility: the volatility of the underlying futures price implied by market option prices via an option pricing model
"""
import datetime
import warnings
from io import StringIO
import requests
import pandas as pd
from dtshare.option.cons import (get_calendar,
convert_date,
DCE_DAILY_OPTION_URL,
SHFE_OPTION_URL,
CZCE_DAILY_OPTION_URL_3,
SHFE_HEADERS)
def get_dce_option_daily(trade_date="20191017", symbol="玉米期权"):
"""
获取大连商品交易所-期权-日频行情数据
:param trade_date: str format:"20191017"
:param symbol: str "玉米期权" or "豆粕期权"
:return: pandas.DataFrame
part-1:
商品名称 合约名称 开盘价 最高价 最低价 收盘价 前结算价 结算价 涨跌 涨跌1 \
0 玉米 c2001-C-1680 168.5 168.5 168.5 168.5 168.0 167.5 0.5 -0.5
1 玉米 c2001-C-1700 0 0.0 0.0 148.0 148.0 148.0 0.0 0.0
2 玉米 c2001-C-1720 0 0.0 0.0 129.0 128.0 129.0 1.0 1.0
3 玉米 c2001-C-1740 115 115.0 115.0 115.0 108.0 111.0 7.0 3.0
4 玉米 c2001-C-1760 89 95.5 89.0 95.5 89.0 93.5 6.5 4.5
.. ... ... ... ... ... ... ... ... ... ...
239 玉米 c2009-P-2040 0 0.0 0.0 91.0 88.5 91.0 2.5 2.5
240 玉米 c2009-P-2060 0 0.0 0.0 106.0 104.0 106.0 2.0 2.0
241 玉米 c2009-P-2080 0 0.0 0.0 121.5 120.5 121.5 1.0 1.0
242 玉米 c2009-P-2100 0 0.0 0.0 138.5 137.5 138.5 1.0 1.0
243 玉米 c2009-P-2120 0 0.0 0.0 155.5 155.5 155.5 0.0 0.0
Delta 成交量 持仓量 持仓量变化 成交额 行权量
0 0.98 2 236 0 0.34 0.0
1 0.96 0 236 0 0 0.0
2 0.94 0 210 0 0 0.0
3 0.90 20 1,040 0 2.3 0.0
4 0.85 12 680 0 1.11 0.0
.. ... .. ... ... ... ...
239 -0.70 0 30 0 0 0.0
240 -0.75 0 50 0 0 0.0
241 -0.80 0 20 0 0 0.0
242 -0.84 0 10 0 0 0.0
243 -0.88 0 0 0 0 0.0
part-2:
0 合约系列 隐含波动率(%)
1 c2001 12.95
2 c2003 8.74
3 c2005 8.75
4 c2007 7.7
5 c2009 6.85
"""
calendar = get_calendar()
day = convert_date(trade_date) if trade_date is not None else datetime.date.today()
if day.strftime('%Y%m%d') not in calendar:
        warnings.warn('%s非交易日' % day.strftime('%Y%m%d'))  # "%s is not a trading day"
return None
url = DCE_DAILY_OPTION_URL
payload = {
"dayQuotes.variety": "all",
"dayQuotes.trade_type": "1",
"year": str(day.year),
"month": str(day.month - 1),
"day": str(day.day),
"exportFlag": "txt"
}
res = requests.post(url, data=payload)
f = StringIO(res.text)
table_df = pd.read_table(f, encoding="gbk", skiprows=2, header=None, sep=r"\t\t", engine="python")
another_df = table_df.iloc[table_df[table_df.iloc[:, 0].str.contains("合约")].iloc[-1].name:, [0, 1]]
another_df.reset_index(inplace=True, drop=True)
another_df.iloc[0] = another_df.iat[0, 0].split("\t")
another_df.columns = another_df.iloc[0]
another_df = another_df.iloc[1:, :]
table_df = table_df.join(table_df.iloc[:, 1].str.split(r"\t", expand=True), lsuffix="l")
table_df.columns = ["商品名称", "_", "最高价", "最低价", "收盘价", "前结算价", "结算价", "涨跌", "涨跌1", "Delta", "成交量", "持仓量", "持仓量变化",
"成交额", "行权量", "合约名称", "开盘价"]
table_df = table_df[
["商品名称", "合约名称", "开盘价", "最高价", "最低价", "收盘价", "前结算价", "结算价", "涨跌", "涨跌1", "Delta", "成交量", "持仓量", "持仓量变化", "成交额",
"行权量"]]
table_df.dropna(axis=1, how="all", inplace=True)
product_one_df = table_df.iloc[:table_df[table_df.iloc[:, 0].str.contains("小计")].iloc[0].name, :]
product_two_df = table_df.iloc[table_df[table_df.iloc[:, 0].str.contains("小计")].iloc[0].name + 1:
table_df[table_df.iloc[:, 0].str.contains("小计")].iloc[1].name, :]
product_three_df = table_df.iloc[table_df[table_df.iloc[:, 0].str.contains("小计")].iloc[1].name + 1:
table_df[table_df.iloc[:, 0].str.contains("小计")].iloc[2].name, :]
if symbol == "玉米期权":
return product_one_df, another_df[another_df.iloc[:, 0].str.contains("c")]
elif symbol == "铁矿石期权":
return product_two_df, another_df[another_df.iloc[:, 0].str.contains("i")]
else:
return product_three_df, another_df[another_df.iloc[:, 0].str.contains("m")]
def get_czce_option_daily(trade_date="20191017", symbol="白糖期权"):
"""
    Daily option quotes from the Zhengzhou Commodity Exchange (CZCE).
    Notes:
    (1) Price: yuan/ton
    (2) Volume and open interest: lots
    (3) Turnover: 10,000 yuan
    (4) Change 1: today's close - previous settlement
    (5) Change 2: today's settlement - previous settlement
    (6) Implied volatility: backed out by plugging the day's option settlement prices into an option pricing model
    :param trade_date: str "20191017"
    :param symbol: str, one of "白糖期权" (sugar), "棉花期权" (cotton), "甲醇期权" (methanol), "PTA期权" (PTA), "菜籽粕期权" (rapeseed meal)
    :return: pandas.DataFrame
        CZCE daily option trading data
品种代码 昨结算 今开盘 最高价 最低价 今收盘 \
0 CF001C10800 1,579.00 0.00 0.00 0.00 0.00
1 CF001C11000 1,392.00 0.00 0.00 0.00 0.00
2 CF001C11200 1,211.00 0.00 0.00 0.00 0.00
3 CF001C11400 1,038.00 1,396.00 1,396.00 1,396.00 1,396.00
4 CF001C11600 874.00 0.00 0.00 0.00 0.00
.. ... ... ... ... ... ...
398 SR009P5900 576.00 0.00 0.00 0.00 0.00
399 SR009P6000 653.00 0.00 0.00 0.00 0.00
400 小计
401 SR合计
402 总计
今结算 涨跌1 涨跌2 成交量(手) 空盘量 增减量 \
0 1,866.00 287.00 287.00 0 0 0
1 1,672.00 280.00 280.00 0 0 0
2 1,481.00 270.00 270.00 0 4 0
3 1,295.00 358.00 257.00 2 68 0
4 1,114.00 240.00 240.00 0 224 0
.. ... ... ... ... ... ...
398 580.00 4.00 4.00 0 0 0
399 658.00 5.00 5.00 0 0 0
400 656 860 400
401 32,098 276,900 2252
402 110,664 474,154 14770
成交额(万元) DELTA 隐含波动率 行权量
0 0.00 0.9765 22.29 0
1 0.00 0.9621 21.84 0
2 0.00 0.9423 21.38 0
3 1.40 0.9155 20.91 0
4 0.00 0.8800 20.45 0
.. ... ... ... ...
398 0.00 -0.6639 16.24 0
399 0.00 -0.7007 16.58 0
400 97.28 0
401 2138.41 0
402 8769.52 2
"""
calendar = get_calendar()
day = convert_date(trade_date) if trade_date is not None else datetime.date.today()
if day.strftime('%Y%m%d') not in calendar:
        warnings.warn('{}非交易日'.format(day.strftime('%Y%m%d')))  # "{} is not a trading day"
return None
if day > datetime.date(2010, 8, 24):
url = CZCE_DAILY_OPTION_URL_3.format(day.strftime('%Y'), day.strftime('%Y%m%d'))
try:
r = requests.get(url)
f = StringIO(r.text)
table_df = pd.read_table(f, encoding="utf-8", skiprows=1, sep="|")
if symbol == "白糖期权":
temp_df = table_df[table_df.iloc[:, 0].str.contains("SR")]
temp_df.reset_index(inplace=True, drop=True)
return temp_df.iloc[:-1, :]
elif symbol == "PTA期权":
temp_df = table_df[table_df.iloc[:, 0].str.contains("TA")]
temp_df.reset_index(inplace=True, drop=True)
return temp_df.iloc[:-1, :]
elif symbol == "甲醇期权":
temp_df = table_df[table_df.iloc[:, 0].str.contains("MA")]
temp_df.reset_index(inplace=True, drop=True)
return temp_df.iloc[:-1, :]
elif symbol == "菜籽粕期权":
temp_df = table_df[table_df.iloc[:, 0].str.contains("RM")]
temp_df.reset_index(inplace=True, drop=True)
return temp_df.iloc[:-1, :]
else:
temp_df = table_df[table_df.iloc[:, 0].str.contains("CF")]
temp_df.reset_index(inplace=True, drop=True)
return temp_df.iloc[:-1, :]
        except Exception:  # avoid a bare except; fall through to return None on download/parse errors
return None
def get_shfe_option_daily(trade_date="20191220", symbol="黄金期权"):
"""
    Daily option quotes from the Shanghai Futures Exchange (SHFE).
    :param trade_date: str "20191017"
    :param symbol: str, "铜期权" (copper), "天胶期权" (natural rubber) or "黄金期权" (gold)
:return: pandas.DataFrame
part-1:
PRODUCTID PRODUCTSORTNO PRODUCTNAME \
288 ru_o 100 天胶期权
289 ru_o 100 天胶期权
290 ru_o 100 天胶期权
291 ru_o 100 天胶期权
292 ru_o 100 天胶期权
.. ... ... ...
789 ru_o 100 天胶期权
790 ru_o 100 天胶期权
791 ru_o 100 天胶期权
792 ru_o 100 天胶期权
793 ru_o 100 天胶期权
INSTRUMENTID PRESETTLEMENTPRICE OPENPRICE \
288 ru1911C10000 729
289 ru1911C10250 495
290 ru1911C10500 293
291 ru1911C10750 146
292 ru1911C11000 58
.. ... ... ...
789 ru2010P9500 155
790 ru2010P9600 172
791 ru2010P9700 189
792 ru2010P9800 209
793 ru2010P9900 229
HIGHESTPRICE LOWESTPRICE CLOSEPRICE SETTLEMENTPRICE ZD1_CHG ZD2_CHG \
288 778 778 49 49
289 542 542 47 47
290 334 334 41 41
291 176 176 30 30
292 76 76 18 18
.. ... ... ... ... ... ...
789 151 151 -4 -4
790 167 167 -5 -5
791 184 184 -5 -5
792 204 204 -5 -5
793 224 224 -5 -5
VOLUME OPENINTEREST OPENINTERESTCHG ORDERNO EXECVOLUME TURNOVER \
288 0 0 0 0 0 0.0
289 0 0 0 0 0 0.0
290 0 0 0 0 0 0.0
291 0 0 0 0 0 0.0
292 0 4 0 0 0 0.0
.. ... ... ... ... ... ...
789 0 0 0 0 0 0.0
790 0 0 0 0 0 0.0
791 0 0 0 0 0 0.0
792 0 0 0 0 0 0.0
793 0 0 0 0 0 0.0
DELTA
288 0.976387
289 0.908465
290 0.757436
291 0.531736
292 0.299911
.. ...
789 -0.112120
790 -0.122028
791 -0.131944
792 -0.142837
793 -0.154073
part-2:
PRODUCTID PRODUCTSORTNO PRODUCTNAME HIGHESTPRICE LOWESTPRICE \
1 ru_o 100 天胶期权 2774 2
AVGPRICE VOLUME TURNOVER YEARVOLUME YEARTURNOVER EXECVOLUME \
1 148.573 8290 0.125033 112.5122 34.062215 0
YEAREXECVOLUME
1 1.0624
part-3:
PRODUCTID PRODUCTSORTNO PRODUCTNAME INSTRUMENTID \
12 ru_o 100 天胶期权 ru1911
13 ru_o 100 天胶期权 ru2001
14 ru_o 100 天胶期权 ru2003
15 ru_o 100 天胶期权 ru2004
16 ru_o 100 天胶期权 ru2005
17 ru_o 100 天胶期权 ru2006
18 ru_o 100 天胶期权 ru2007
19 ru_o 100 天胶期权 ru2008
20 ru_o 100 天胶期权 ru2009
21 ru_o 100 天胶期权 ru2010
SIGMA
12 0.242419
13 0.234428
14 0.218916
15 0.208057
16 0.205821
17 0.205821
18 0.240689
19 0.240689
20 0.216861
21 0.216861
"""
calendar = get_calendar()
day = convert_date(trade_date) if trade_date is not None else datetime.date.today()
if day.strftime('%Y%m%d') not in calendar:
        warnings.warn('%s非交易日' % day.strftime('%Y%m%d'))  # "%s is not a trading day"
return None
if day > datetime.date(2010, 8, 24):
url = SHFE_OPTION_URL.format(day.strftime('%Y%m%d'))
try:
r = requests.get(url, headers=SHFE_HEADERS)
json_data = r.json()
table_df = pd.DataFrame([row for row in json_data['o_curinstrument'] if
row['INSTRUMENTID'] not in ['小计', '合计'] and row['INSTRUMENTID'] != ''])
contract_df = table_df[table_df["PRODUCTNAME"].str.strip() == symbol]
product_df = pd.DataFrame(json_data['o_curproduct'])
product_df = product_df[product_df["PRODUCTNAME"].str.strip() == symbol]
volatility_df = pd.DataFrame(json_data['o_cursigma'])
volatility_df = volatility_df[volatility_df["PRODUCTNAME"].str.strip() == symbol]
return contract_df, product_df, volatility_df
        except Exception:  # avoid a bare except; fall through to return None on download/parse errors
return None
if __name__ == "__main__":
df_test = get_czce_option_daily(trade_date="20200117", symbol="菜籽粕期权")
print(df_test)
one, two = get_dce_option_daily(trade_date="20191209", symbol="铁矿石期权")
print(one)
print(two)
one, two, three = get_shfe_option_daily(trade_date="20191220", symbol="黄金期权")
print(one)
print(two)
print(three)
# --------------------------------------------------------------------------------------
# Author of the file above: dtshare@126.com
# Next file: /tests/unit/UnagiiToken/test_approve.py  (repo: atsignhandle/unagii-vault-v2, no license)
# --------------------------------------------------------------------------------------
import brownie
from brownie.test import given, strategy
import pytest
@pytest.fixture(scope="function", autouse=True)
def setup(fn_isolation):
pass
@given(
owner=strategy("address"),
spender=strategy("address"),
amount=strategy("uint256"),
)
def test_approve(uToken, owner, spender, amount):
tx = uToken.approve(spender, amount, {"from": owner})
assert uToken.allowance(owner, spender) == amount
assert len(tx.events) == 1
assert tx.events["Approval"].values() == [owner, spender, amount]
def test_increase_allowance(uToken, accounts):
owner = accounts[0]
spender = accounts[1]
uToken.approve(spender, 100, {"from": owner})
tx = uToken.increaseAllowance(spender, 403, {"from": owner})
assert uToken.allowance(owner, spender) == 503
assert len(tx.events) == 1
assert tx.events["Approval"].values() == [owner, spender, 503]
def test_decrease_allowance(uToken, accounts):
owner = accounts[0]
spender = accounts[1]
uToken.approve(spender, 100, {"from": owner})
tx = uToken.decreaseAllowance(spender, 34, {"from": owner})
assert uToken.allowance(owner, spender) == 66
assert len(tx.events) == 1
assert tx.events["Approval"].values() == [owner, spender, 66]
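# These tests exercise the standard ERC-20 allowance flow: approve() sets an absolute
# allowance, while increaseAllowance()/decreaseAllowance() adjust it relative to the
# current value (100 + 403 == 503, 100 - 34 == 66), each emitting a single Approval
# event that carries the new total.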
# --------------------------------------------------------------------------------------
# Author of the file above: tsk.nakamura@gmail.com
# Next file: /poker/poker/urls.py  (repo: chitty/poker_base, MIT)
# --------------------------------------------------------------------------------------
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^holdem/', include('holdem.urls')),
url(r'^admin/', include(admin.site.urls)),
]
# --------------------------------------------------------------------------------------
# Author of the file above: chitty.carlosj@gmail.com
# Next file: /aaweb/views/investors.py  (repo: cpelite/astorian-airways, public domain / CC0-1.0)
# --------------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
from flask import render_template
from aaweb import app
from aaweb.models import Company
@app.route('/investor/relations.html')
def view_investor():
companies = Company.select()
return render_template('investor.html', companies=companies)
# --------------------------------------------------------------------------------------
# Author of the file above: christian.koepp@cs.tum.edu
# Next file: /app.py  (repo: GaryG484/Mission-to-Mars, no license)
# --------------------------------------------------------------------------------------
# This is where we'll use Flask and Mongo to begin creating our web app
# begin by importing our tools
from flask import Flask, render_template, redirect, url_for
from flask_pymongo import PyMongo
import scraping
# Let's break down what this code is doing.
# The first line says that we'll use Flask to render a template, redirecting to another url,
# and creating a URL.
# The second line says we'll use PyMongo to interact with our Mongo database.
# The third line says that to use the scraping code, we will convert from Jupyter notebook to Python.
# set up Flask:
app = Flask(__name__)
# tell Python how to connect to Mongo using PyMongo
# Use flask_pymongo to set up mongo connection
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
# app.config["MONGO_URI"] tells Python that our app will connect to Mongo using a URI, a uniform resource
# identifier similar to a URL.
# "mongodb://localhost:27017/mars_app" is the URI we'll be using to connect our app to Mongo.
# This URI is saying that the app can reach Mongo through our localhost server, using port 27017, using
# a database named "mars_app".
# Set Up App Routes
# one for the main HTML page everyone will view when visiting the web app,
# and one to actually scrape new data using the code we've written.
# First, let's define the route for the HTML page
@app.route("/")
def index():
mars = mongo.db.mars.find_one()
return render_template("index.html", mars=mars)
# This route, @app.route("/"), tells Flask what to display when we're looking at the home page,
# index.html (index.html is the default HTML file that we'll use to display the content we've scraped).
# This means that when we visit our web app's HTML page, we will see the home page.
# Within the def index(): function the following is accomplished:
# mars = mongo.db.mars.find_one() uses PyMongo to find the "mars" collection in our database, which we
# will create when we convert our Jupyter scraping code to Python Script. We will also assign that path to
# the mars variable for use later.
# return render_template("index.html" tells Flask to return an HTML template using an index.html file.
# We'll create this file after we build the Flask routes.
# , mars=mars) tells Python to use the "mars" collection in MongoDB.
# This function is what links our visual representation of our work, our web app, to the code that powers it.
# Our next function will set up our scraping route. This route will be the "button" of the web application,
# the one that will scrape updated data when we tell it to from the homepage of our web app. It'll be
# tied to a button that will run the code when it's clicked.
@app.route("/scrape")
def scrape():
mars = mongo.db.mars
mars_data = scraping.scrape_all()
# The first line, @app.route("/scrape"), defines the route that Flask will be using. This route, "/scrape",
# will run the function that we create just beneath it.
# The next lines allow us to access the database, scrape new data using our scraping.py script, update
# the database, and return a message when successful. Let's break it down.
# First, we define it with def scrape():.
# Then, we assign a new variable that points to our Mongo database: mars = mongo.db.mars.
# Next, we created a new variable to hold the newly scraped data: mars_data = scraping.scrape_all().
# In this line, we're referencing the scrape_all function in the scraping.py file exported from
# Jupyter Notebook.
# Now that we've gathered new data, we need to update the database using .update()
# .update(query_parameter, data, options)
# We're inserting data, so first we'll need to add an empty JSON object with {} in place of the
# query_parameter. Next, we'll use the data we have stored in mars_data. Finally, the option we'll
# include is upsert=True. This indicates to Mongo to create a new document if one doesn't already
# exist, and new data will always be saved (even if we haven't already created a document for it).
mars.update({}, mars_data, upsert=True)
# Finally, we will add a redirect after successfully scraping the data:
return redirect('/', code=302)
# This will navigate our page back to / where we can see the updated content.
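# For reference, the scraping module imported above only needs to expose a
# scrape_all() function returning a dict that Mongo can store and index.html can
# render. A hypothetical minimal stand-in (the field names are assumptions, not the
# project's real scraped schema):
def _example_scrape_all():
    import datetime
    return {
        "news_title": "placeholder headline",
        "news_paragraph": "placeholder summary",
        "last_modified": datetime.datetime.utcnow(),
    }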
# Tell Flask to run
if __name__ == "__main__":
app.run(debug=True)
# --------------------------------------------------------------------------------------
# Author of the file above: 85652494+GaryG484@users.noreply.github.com
# Next file: /calc_factories.py  (repo: 70akaline/nested_scripts, no license)
# --------------------------------------------------------------------------------------
from data_containers import nested_data
from data_containers import cumul_nested_data
from data_containers import cellular_data
from data_containers import dca_data
from data_containers import dca_plus_data
from data_containers import nested_edmft_data
from action_cautionaries import impose_real_valued_in_imtime_numpy
from action_cautionaries import impose_real_valued_in_imtime
from getters import *
from impurity_solvers import solvers
def set_n(n, data):
for key in data.ns.keys():
data.ns[key] = n
return n
def set_mu(mu, data):
for key in data.mus.keys():
data.mus[key] = mu
def is_zero(bg):
return sum([ numpy.count_nonzero(g.data) for name, g in bg ]) == 0
#----------------------------- nested -----------------------------------------------------------------------#
def prepare_nested( data, nested_scheme, solver_class = solvers.ctint, flexible_Gweiss=False, sign=-1, sign_up_to=2, use_G_proj = False ):
assert (data.__class__ == nested_data) or (data.__class__ == nested_edmft_data) , "wrong data type"
    assert data.fermionic_struct == {'up': [0]}, "wrong fermionic struct for this calculation"
assert data.impurity_struct == nested_scheme.get_impurity_struct(), "wrong impurity struct for this nested scheme"
data.get_Sigmaijw = lambda: full_fill_Sigmaijw_from_Sigma_imp_iw(data.Sigmaijw, data.Sigma_imp_iw, nested_scheme.get_latt_to_imp_mapping())
data.get_Sigmakw = lambda: full_fill_Sigmakw_from_Sigmaijw(data.Sigmakw, data.Sigmaijw)
data.get_Sigma_loc = lambda: full_fill_local_from_latt(data.Sigma_loc_iw, data.Sigmakw)
data.get_Gkw = lambda: full_fill_Gkw_from_iws_mus_epsiolonk_and_Sigmakw(data.Gkw, data.iws, data.mus, data.epsilonk, data.Sigmakw)
data.get_G_loc = lambda: full_fill_local_from_latt(data.G_loc_iw, data.Gkw)
data.get_n_from_G_loc = lambda: blockwise_get_n_from_G_loc_iw(data.G_loc_iw['up'], fit_tail_starting_iw = 14.0, ntau = None, site_index = 0)
if use_G_proj:
data.get_Gijw = lambda: [full_fill_Gijw_from_Gkw(data.Gijw, data.Gkw, N_cores=1), full_fill_G_proj_iw(data.G_proj_iw, data.Gijw, nested_scheme) ]
else:
data.get_Gijw = lambda: full_fill_Gijw_from_Gkw(data.Gijw, data.Gkw, N_cores=1)
data.set_mu = lambda mu: set_mu(mu, data)
data.get_mu = lambda: data.mus['up']
data.get_n = lambda: [data.get_Gkw(), data.get_G_loc(), set_n(data.get_n_from_G_loc(),data)][-1]
if flexible_Gweiss:
data.get_Gweiss = lambda: ( flexible_Gweiss_iw_from_Gweiss_iw_Gijw_and_G_imp_iw(data.Gweiss_iw, data.Gijw, data.G_imp_iw,
nested_scheme.get_imp_to_latt_mapping(), sign, sign_up_to)
if not is_zero(data.Gweiss_iw) else
full_fill_Gweiss_iw_from_Gijw_and_Sigma_imp_iw(data.Gweiss_iw,data.Gijw,data.Sigma_imp_iw, mapping = nested_scheme.get_imp_to_latt_mapping())
)
elif use_G_proj:
data.get_Gweiss = lambda: full_full_Gweiss_iw_from_G_proj_iw_and_Sigma_imp_iw(data.Gweiss_iw,data.G_proj_iw,data.Sigma_imp_iw)
else:
data.get_Gweiss = lambda: full_fill_Gweiss_iw_from_Gijw_and_Sigma_imp_iw(data.Gweiss_iw,data.Gijw,data.Sigma_imp_iw, mapping = nested_scheme.get_imp_to_latt_mapping())
data.dump_solvers = lambda suffix: [solver_class.dump( data.solvers[C], data.archive_name, suffix='-%s%s'%(C,suffix) ) for C in data.solvers.keys()]
#----------------------------- nested edmft -----------------------------------------------------------------------#
def prepare_nested_edmft( data, nested_scheme, solver_class = solvers.ctint):
assert data.__class__ == nested_edmft_data, "wrong data type"
prepare_nested( data, nested_scheme, solver_class )
data.get_P_imp = lambda: fill_P_imp_from_chi_imp_W_imp_and_Uweiss(data.P_imp_iw, data.chi_imp_iw, data.W_imp_iw, data.Uweiss_iw)
data.get_Pijnu = lambda: full_fill_Pijnu_from_P_imp_iw(data.Pijnu, data.P_imp_iw, nested_scheme.get_latt_to_imp_mapping())
data.get_Pqnu = lambda: full_fill_Sigmakw_from_Sigmaijw(data.Pqnu, data.Pijnu)
data.get_P_loc = lambda: full_fill_local_from_latt(data.P_loc_iw, data.Pqnu)
data.get_W_imp = lambda: fill_W_imp_from_chi_imp_and_Uweiss( data.W_imp_iw, data.chi_imp_iw, data.Uweiss_iw)
data.get_Wqnu = lambda: full_fill_Wqnu_from_Jq_and_Pqnu(data.Wqnu,data.Jq,data.Pqnu)
data.get_W_loc = lambda: full_fill_local_from_latt(data.W_loc_iw, data.Wqnu)
data.get_Wijnu = lambda: full_fill_Gijw_from_Gkw(data.Wijnu, data.Wqnu, N_cores=1)
data.get_Uweiss = lambda: [ full_fill_Uweiss_iw_from_Wijnu_and_P_imp_iw(data.Uweiss_iw,data.Wijnu,data.P_imp_iw, mapping = nested_scheme.get_imp_to_latt_mapping()),
fill_Uweiss_dyn_from_Uweiss(data.Uweiss_dyn_iw,data.Uweiss_iw) ]
#no lattice calc, for reversed etc.
mp = nested_scheme.get_imp_to_latt_mapping()
print "nested_scheme.maxLx: ",nested_scheme.maxLx
print "max nsites:", nested_scheme.maxLx**2
def ij_iterator():
nsites = nested_scheme.maxLx**2
for i in range(nsites):
for j in range(nsites):
yield i,j
def ijA_iterator():
for i,j in ij_iterator():
for A in data.bosonic_struct.keys():
yield i,j,A
data.copy_imp_to_latt = lambda C: [ [ numpy.copyto(data.Gijw['up'][:,mp(C,i,j)[0], mp(C,i,j)[1]],data.G_imp_iw[C].data[:,i,j]) for i,j in ij_iterator()],
[ numpy.copyto(data.Wijnu[A][:,mp(C,i,j)[0], mp(C,i,j)[1]],data.W_imp_iw[C+'|'+A].data[:,i,j]) for i,j,A in ijA_iterator() ] ]
#----------------------------- cumul_nested -----------------------------------------------------------------------#
def prepare_cumul_nested( data, nested_scheme, solver_class = solvers.ctint ):
assert data.__class__ == cumul_nested_data, "wrong data type"
    assert data.fermionic_struct == {'up': [0]}, "wrong fermionic struct for this calculation"
assert data.impurity_struct == nested_scheme.get_impurity_struct(), "wrong impurity struct for this nested scheme"
data.get_g_imp = lambda: full_fill_g_imp_iw_from_Sigma_imp_iw(data.g_imp_iw, data.mus['up'], data.Sigma_imp_iw)
data.get_gijw = lambda: full_fill_Sigmaijw_from_Sigma_imp_iw(data.gijw, data.g_imp_iw, nested_scheme.get_latt_to_imp_mapping())
data.get_gkw = lambda: full_fill_Sigmakw_from_Sigmaijw(data.gkw, data.gijw)
data.get_Sigmakw = lambda: full_fill_Sigmakw_from_gkw(data.Sigmakw, data.ws, data.mus['up'], data.gkw)
data.get_Sigma_loc = lambda: full_fill_local_from_latt(data.Sigma_loc_iw, data.Sigmakw)
data.get_Gkw = lambda: full_fill_Gkw_from_epsiolonk_and_gkw(data.Gkw, data.epsilonk, data.gkw)
data.get_G_loc = lambda: full_fill_local_from_latt(data.G_loc_iw, data.Gkw)
data.get_n_from_G_loc = lambda: blockwise_get_n_from_G_loc_iw(data.G_loc_iw['up'], fit_tail_starting_iw = 14.0, ntau = None, site_index = 0)
data.get_Gijw = lambda: full_fill_Gijw_from_Gkw(data.Gijw, data.Gkw, N_cores=1)
data.set_mu = lambda mu: set_mu(mu, data)
data.get_mu = lambda: data.mus['up']
data.get_n = lambda: [data.get_g_imp(), data.get_gijw(), data.get_gkw(), data.get_Gkw(), data.get_G_loc(), set_n(data.get_n_from_G_loc(),data)][-1]
data.get_Gweiss = lambda: full_fill_Gweiss_iw_from_Gijw_and_Sigma_imp_iw(data.Gweiss_iw,data.Gijw,data.Sigma_imp_iw, mapping = nested_scheme.get_imp_to_latt_mapping())
data.dump_solvers = lambda suffix: [solver_class.dump( data.solvers[C], data.archive_name, suffix='-%s%s'%(C,suffix) ) for C in data.impurity_struct.keys()]
#----------------------------- dca -----------------------------------------------------------------------#
def prepare_dca( data, dca_scheme, solver_class = solvers.ctint ):
assert len(data.impurity_struct.keys()) == 1, "in dca only one impurity problem!!"
key = data.impurity_struct.keys()[0]
assert len(data.impurity_struct[key]) == dca_scheme.dim, "wrong impurity struct for the dca calculation!"
assert len(data.fermionic_struct.keys()) == len(data.impurity_struct[key]), "fermionic and impurity struct not consistent"
assert data.__class__ == dca_data, "wrong data type"
r0 = dca_scheme.get_r0()
r0_key = '%02d'%r0
data.get_SigmaR = lambda: dca_scheme.get_QR_from_Q_imp(data.SigmaR_iw, data.Sigma_imp_iw)
data.get_SigmaK = lambda: dca_scheme.get_QK_from_QR(data.SigmaK_iw, data.SigmaR_iw)
data.get_GK = lambda: [ full_fill_GK_iw(data.GK_iw, data.SigmaK_iw, data.mus[r0_key], dca_scheme.dca_patches),
[impose_real_valued_in_imtime(g) for name,g in data.GK_iw] ]
data.get_GR0 = lambda: [ dca_scheme.get_QR_from_QK(data.GR_iw, data.GK_iw, l_list = [r0]),
impose_real_valued_in_imtime(data.GR_iw[r0_key]) ]
data.get_n_from_GR0 = lambda: blockwise_get_n_from_G_loc_iw(data.GR_iw[r0_key], fit_tail_starting_iw = 14.0, ntau = None, site_index = 0)
data.get_GR = lambda: [ dca_scheme.get_QR_from_QK(data.GR_iw, data.GK_iw),
[impose_real_valued_in_imtime(g) for name,g in data.GR_iw] ]
data.get_Gijw = data.get_GR
data.set_mu = lambda mu: set_mu(mu, data)
data.get_mu = lambda: data.mus['00']
data.get_n = lambda: [data.get_GK(), data.get_GR0(), set_n(data.get_n_from_GR0(),data)][-1]
data.get_GweissK = lambda: full_fill_GweissK_iw_from_Dyson(data.GweissK_iw, data.GK_iw, data.SigmaK_iw)
data.get_GweissR = lambda: dca_scheme.get_QR_from_QK(data.GweissR_iw, data.GweissK_iw)
data.get_Gweiss_iw = lambda: dca_scheme.get_Q_imp_from_QR(data.Gweiss_iw, data.GweissR_iw)
data.get_Gweiss = lambda: [data.get_GweissK(), data.get_GweissR(), data.get_Gweiss_iw(), [impose_real_valued_in_imtime(g) for name,g in data.Gweiss_iw] ]
data.dump_solvers = lambda suffix: [solver_class.dump( data.solvers[C], data.archive_name, suffix='-%s%s'%(C,suffix) ) for C in data.impurity_struct.keys()]
#----------------------------- dca_plus -----------------------------------------------------------------------#
def prepare_dca_plus( data, dca_scheme, solver_class = solvers.ctint, alpha = 1, n_RL_iterations = 10, embedded = False, real_space_sc = False, no_convolution = False, impose_ph_symmetry = False ):
assert len(data.impurity_struct.keys()) == 1, "in dca only one impurity problem!!"
key = data.impurity_struct.keys()[0]
assert len(data.impurity_struct[key]) == dca_scheme.dim, "wrong impurity struct for the dca calculation!"
assert len(data.fermionic_struct.keys()) == len(data.impurity_struct[key]), "fermionic and impurity struct not consistent"
assert data.__class__ == dca_plus_data, "wrong data type"
nK = int(round(numpy.sqrt(dca_scheme.dim)))
print 'nK: ', nK
    assert dca_scheme.n1 == dca_scheme.m2 and dca_scheme.m1 == 0 and dca_scheme.n2 == 0, "not general for now..."  # fixed: n1 was compared to itself
assert nK**2 == dca_scheme.dim, "must be n1==m2, n2==m1==0"
#data.get_SigmaR = lambda: [ full_fill_SigmaR_iw_from_Sigma_imp_iw(data.SigmaR_iw, data.Sigma_imp_iw, lambda i: dca_scheme.i_to_ij(i)), dca_scheme.symmetrize_QR(data.SigmaR_iw) ]
data.get_SigmaR = lambda: [ dca_scheme.get_QR_from_Q_imp(data.SigmaR_iw, data.Sigma_imp_iw) ]
data.get_SigmaK = lambda: dca_scheme.get_QK_from_QR(data.SigmaK_iw, data.SigmaR_iw)
r0 = dca_scheme.get_r0()
r0_key = '%02d'%r0
data.get_XiK = lambda: fill_XiK_from_SigmaK(data.XiK_iw, data.SigmaK_iw, alpha)
data.get_XiR = lambda: dca_scheme.get_QR_from_QK(data.XiR_iw, data.XiK_iw)
if not embedded:
data.get_Xik = lambda: dca_scheme.get_Qk_from_QR(data.Xikw['up'], data.XiR_iw, data.ks)
data.get_Sigmaimpk = lambda: blockwise_Sigmak_from_Xik(data.Sigmaimpkw['up'], data.Xikw['up'], alpha)
if not no_convolution:
data.get_Sigmakw = lambda: [ numpy.copyto(data.Sigmakw['up'], data.Sigmaimpkw['up']),
Richardson_Lucy(data.Sigmaimpkw['up'], data.Sigmakw['up'],
nK, n_iterations = n_RL_iterations,
desired_loc=data.SigmaR_iw[r0_key].data[:,0,0],
impose_ph_symmetry=impose_ph_symmetry) ]
else:
data.get_Sigmakw = lambda: numpy.copyto(data.Sigmakw['up'], data.Sigmaimpkw['up'])
else:
data.get_Xik = lambda: dca_scheme.get_Qk_from_QR_embedded(data.Xikw['up'], data.XiR_iw, data.ks)
data.get_Sigmaimpk = lambda: None
data.get_Sigmakw = lambda: blockwise_Sigmak_from_Xik(data.Sigmakw['up'], data.Xikw['up'], alpha)
data.get_Gkw = lambda: full_fill_Gkw_from_iws_mus_epsiolonk_and_Sigmakw(data.Gkw, data.iws, data.mus, data.epsilonk, data.Sigmakw)
data.get_G_loc = lambda: full_fill_local_from_latt(data.G_loc_iw, data.Gkw)
data.get_n_from_G_loc = lambda: blockwise_get_n_from_G_loc_iw(data.G_loc_iw['up'], fit_tail_starting_iw = 14.0, ntau = None, site_index = 0)
data.get_GR = lambda: [ dca_scheme.get_QR_from_QK(data.GR_iw, data.GK_iw), dca_scheme.symmetrize_QR(data.GR_iw) ]
data.set_mu = lambda mu: set_mu(mu, data)
data.get_mu = lambda: data.mus['up']
data.get_n = lambda: [data.get_Gkw(), data.get_G_loc(), set_n(data.get_n_from_G_loc(),data)][-1]
if not real_space_sc:
data.get_GK = lambda: dca_scheme.Qkw_to_QK_iw(data.GK_iw, IBZ_convolution(data.Gkw['up'].real, nK)+1j*IBZ_convolution(data.Gkw['up'].imag, nK))
data.get_Gijw = lambda: [data.get_GK(), [fit_fermionic_gf_tail(g) for name,g in data.GK_iw], data.get_GR()]
else:
data.get_Gijw = lambda: full_fill_Gijw_from_Gkw(data.Gijw, data.Gkw, N_cores=1)
data.get_GR = lambda: dca_scheme.Qrw_to_QR_iw(data.GR_iw, data.Gijw)
data.get_GK = lambda: dca_scheme.get_QK_from_QR(data.GK_iw, data.GR_iw)
data.get_GweissK = lambda: full_fill_GweissK_iw_from_Dyson(data.GweissK_iw, data.GK_iw, data.SigmaK_iw)
data.get_GweissR = lambda: dca_scheme.get_QR_from_QK(data.GweissR_iw, data.GweissK_iw)
data.get_Gweiss_iw = lambda: dca_scheme.get_Q_imp_from_QR(data.Gweiss_iw, data.GweissR_iw)
data.get_Gweiss = lambda: [data.get_GweissK(), data.get_GweissR(), dca_scheme.symmetrize_QR(data.GweissR_iw), data.get_Gweiss_iw()]
data.dump_solvers = lambda suffix: [solver_class.dump( data.solvers[C], data.archive_name, suffix='-%s%s'%(C,suffix) ) for C in data.impurity_struct.keys()]
#----------------------------- cellular -----------------------------------------------------------------------#
def prepare_cellular( data, Lx, Ly, solver_class = solvers.ctint, periodized = False ):
print "prepare_cellular"
assert data.__class__ == cellular_data, "wrong data type"
    assert data.fermionic_struct == {'up': [0]}, "wrong fermionic struct for this calculation"
    assert len(data.impurity_struct.keys()) == 1, "in cellular we solve only one cluster"
if periodized:
data.get_Sigmaijkw = lambda: full_fill_Sigmaijkw_periodized(data.Sigmaijkw, data.Sigma_imp_iw, data.ks)
else:
data.get_Sigmaijkw = lambda: full_fill_Sigmaijkw(data.Sigmaijkw, data.Sigma_imp_iw)
data.get_Gijkw = lambda: full_fill_Gijkw(data.Gijkw, data.iws, data.mus, data.epsilonijk, data.Sigmaijkw)
data.get_G_ij_loc = lambda: full_fill_G_ij_iw(data.G_ij_iw, data.Gijkw)
data.get_Gijw = data.get_G_ij_loc #this is needed for the nested_mains.lattice
print 'imp_key: ', data.imp_key
data.get_n_from_G_ij_loc = lambda: blockwise_get_n_from_G_loc_iw(data.G_ij_iw[data.imp_key], fit_tail_starting_iw = 14.0, ntau = None, site_index = 0)
#full_fill_ns_from_G_loc_iw(data.ns, data.G_ij_iw, fit_tail_starting_iw = 14.0, ntau = None)
data.set_mu = lambda mu: set_mu(mu, data)
data.get_mu = lambda: data.mus['up']
data.get_n = lambda: [data.get_Gijkw(), data.get_G_ij_loc(), set_n(data.get_n_from_G_ij_loc(),data)][-1]
data.get_Gweiss = lambda: full_fill_Gweiss_iw(data.Gweiss_iw, data.G_ij_iw, data.Sigma_imp_iw)
data.dump_solvers = lambda suffix: [solver_class.dump( data.solvers[C], data.archive_name, suffix='-%s%s'%(C,suffix) ) for C in data.impurity_struct.keys()]
data.periodize_cumul = lambda: periodize_cumul(data.Gkw, data.Sigmakw, data.gkw, data.gijw, data.g_imp_iw, data.iws, data.mus, data.epsilonk, data.Sigma_imp_iw, Lx, Ly)
data.periodize_selfenergy = lambda: periodize_selfenergy(data.Gkw, data.Sigmakw, data.Sigmaijw, data.iws, data.mus, data.epsilonk, data.Sigma_imp_iw, Lx, Ly)
data.dump_solvers = lambda suffix: [solver_class.dump( data.solvers[C], data.archive_name, suffix='-%s%s'%(C,suffix) ) for C in data.impurity_struct.keys()]
#----------------------------- triangular cellular -----------------------------------------------------------------------#
def prepare_cellular_triangular( data, Lx, Ly, solver_class = solvers.ctint, periodized = False ):
print "prepare_cellular_triangular"
prepare_cellular( data, Lx, Ly, solver_class, periodized )
if periodized:
data.get_Sigmaijkw = lambda: triangular_full_fill_Sigmaijkw_periodized(data.Sigmaijkw, data.Sigma_imp_iw, data.ks)
data.periodize_cumul = lambda: None
data.periodize_selfenergy = lambda: periodize_selfenergy(data.Gkw, data.Sigmakw, data.Sigmaijw,
data.iws, data.mus, data.epsilonk, data.Sigma_imp_iw,
Lx, Ly, mapping=triangular_cellular_latt_to_imp_mapping)
# --------------------------------------------------------------------------------------
# Author of the file above: jaksa.vucicevic@gmail.com
# Next file: /project_fraud/lib.py  (repo: ayo-byte/project_fraud, no license)
# --------------------------------------------------------------------------------------
# -*- coding: UTF-8 -*-
# Copyright (C) 2018 Jean Bizot <jean@styckr.io>
""" Main lib for project_fraud Project
"""
from os.path import split
import pandas as pd
import datetime
pd.set_option('display.width', 200)
def clean_data(data):
""" clean data
"""
    # Remove columns whose names start with 'vote'
cols = [x for x in data.columns if x.find('vote') >= 0]
data.drop(cols, axis=1, inplace=True)
# Remove special characteres from columns
data.loc[:, 'civility'] = data['civility'].replace('\.', '', regex=True)
# Calculate Age from day of birth
actual_year = datetime.datetime.now().year
data.loc[:, 'Year_Month'] = pd.to_datetime(data.birthdate)
data.loc[:, 'Age'] = actual_year - data['Year_Month'].dt.year
# Uppercase variable to avoid duplicates
data.loc[:, 'city'] = data['city'].str.upper()
    # Zero-pad postal codes to 5 digits (2700 -> 02700) and keep the first two, which encode the region
data.loc[:, 'postal_code'] = data.postal_code.str.zfill(5).str[0:2]
# Remove columns with more than 50% of nans
cnans = data.shape[0] / 2
data = data.dropna(thresh=cnans, axis=1)
# Remove rows with more than 50% of nans
rnans = data.shape[1] / 2
data = data.dropna(thresh=rnans, axis=0)
# Discretize based on quantiles
data.loc[:, 'duration'] = pd.qcut(data['surveyduration'], 10)
# Discretize based on values
data.loc[:, 'Age'] = pd.cut(data['Age'], 10)
# Rename columns
data.rename(columns={'q1': 'Frequency'}, inplace=True)
# Transform type of columns
data.loc[:, 'Frequency'] = data['Frequency'].astype(int)
# Rename values in rows
drows = {1: 'Manytimes', 2: 'Onetimebyday', 3: '5/6timesforweek',
4: '4timesforweek', 5: '1/3timesforweek', 6: '1timeformonth',
7: '1/trimestre', 8: 'Less', 9: 'Never'}
data.loc[:, 'Frequency'] = data['Frequency'].map(drows)
return data
if __name__ == '__main__':
# For introspections purpose to quickly get this functions on ipython
import project_fraud
folder_source, _ = split(project_fraud.__file__)
df = pd.read_csv('{}/data/data.csv.gz'.format(folder_source))
clean_data = clean_data(df)
print(' dataframe cleaned')
# --------------------------------------------------------------------------------------
# Author of the file above: mrum98@gmail.com
# Next file: /src/app/dbManager/DBTool.py  (repo: jeanhao/hackday2017, no license)
# --------------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
from mosql import query, mysql, util # @UnusedImport
# Insert `data` into `table`; if `keys` is not given, insert all of data's fields.
def insert(table, data, keys=None):
    if keys:
        if type(data) == list:
            vals = []
            for obj in data:
                vals.append([v for k, v in obj.items() if k in keys])  # fixed: iterate key/value pairs (assumes dict entries)
        else:
            vals = [v for k, v in data.items() if k in keys]  # fixed: iterate key/value pairs (assumes a dict)
return query.insert(table, columns=keys, values=vals)
else:
return query.insert(table, data)
def multi_insert(table, datas , keys=None):
if not keys:
keys = datas[0].keys()
pair = dict(zip(keys, ["%s"] * len(keys)))
params = [[data[key] for key in keys] for data in datas]
return query.insert(table, pair), params
def update(table, data, where=None, keys=None):
if keys:
data = {key:data[key] for key in keys}
return query.update(table, where=where, set=data)
def multiUpdate(table, datas, where=None, keys=None):
if not keys:
keys = datas[0].keys()
pair = dict(zip(keys, ["%s"] * len(keys)))
params = [[data[key] for key in keys] for data in datas]
return query.update(table, pair), params
def join(table, on=None, using=None, _type='left'):
return query.join(table, on=on, using=using, type=_type)
def select(table, columns=None, where=None, joins=None, order=None, group=None, limit=None, offset=None):
return query.select(table, columns=columns, where=where, joins=joins, order_by=order, group_by=group, limit=limit, offset=offset)
def delete(table, where):
return query.delete(table, where)
def sqlAnd(where, conmap={}, keys=None):
new_dict = {}
if not keys:
keys = where.keys()
for key in keys:
if key in conmap:
new_dict[(key, conmap[key])] = where[key]
else:
new_dict[key] = where[key]
return util.build_where(new_dict)
def sqlOr(where, conmap={}, keys=None, andpart=None):
new_list = []
if not keys:
keys = where.keys()
for key in keys:
if key in conmap:
new_list.append({(key, conmap[key]):where[key]})
else:
new_list.append({key:where[key]})
if andpart:
new_list.append(andpart)
return util.or_(new_list)
def raw(string):
return util.value(util.raw(string))
def value(value):
return util.value(value)
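if __name__ == '__main__':
    # Hypothetical usage sketch: the table and column names below are illustrative
    # assumptions, not taken from the project.
    print(select('user', columns=('id', 'name'), where={'status': 'active'}, limit=10))
    print(insert('user', {'name': 'alice', 'status': 'active'}))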
# --------------------------------------------------------------------------------------
# Author of the file above: jeanheo@foxmail.com
# Next file: /bus_card.py  (repo: baixiao9/self-python-practice, no license)
# --------------------------------------------------------------------------------------
#-*- coding: utf-8 -*-
# 2018-06-22
# Bus card fare calculation: 20% off once the running total reaches 100, 50% off past 150
total = 0  # renamed from `sum` to avoid shadowing the built-in
for _ in range(30):
    for i in [2.5, 5]:
        if total < 100:
            total = i + total
        elif 100 <= total < 150:
            total = 0.8 * i + total
        elif total >= 150:
            total = 0.5 * i + total
print('Totally cost:%s' % (total))
# --------------------------------------------------------------------------------------
# Author of the file above: 40483495+baixiao9@users.noreply.github.com
# Next file: /moodledata/vpl_data/135/usersdata/179/46636/submittedfiles/OBI.py  (repo: rafaelperazzo/programacao-web, no license)
# --------------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
n=int(input('digite n :'))
p=int(input('digite p :'))
cont=0
i=1
while i <= n:  # fixed: the original `while i >= n` with `i = i - 1` never looped over the n pairs
    x = int(input('digite x :'))  # "digite" is Portuguese for "enter"
    y = int(input('digite y :'))
    if (x + y) >= p:
        cont = cont + 1
    i = i + 1
print(cont)
# --------------------------------------------------------------------------------------
# Author of the file above: rafael.mota@ufca.edu.br
# Next file: /loginUi.py  (repo: leodpj/loginAPP, no license)
# --------------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'login2.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(729, 549)
Form.setWindowFlag(QtCore.Qt.FramelessWindowHint)
Form.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.widget = QtWidgets.QWidget(Form)
self.widget.setGeometry(QtCore.QRect(20, 20, 590, 420))
        self.widget.setStyleSheet("QPushButton#pushButton{\n"
"    background-color:rgba(85, 98, 112, 255);\n"
"    color:rgba(255, 255, 255, 200);\n"
"    border-radius:5px;\n"
"}\n"
"QPushButton#pushButton:pressed{\n"
"    padding-left:5px;\n"
"    padding-top:5px;\n"
"    background-color:rgba(255, 107, 107, 255);\n"
"    background-position:calc(100% - 10px)center;\n"
"}\n"
"QPushButton#pushButton:hover{\n"
"    background-color:rgba(255, 107, 107, 255);\n"
"}")
self.widget.setObjectName("widget")
self.label = QtWidgets.QLabel(self.widget)
self.label.setGeometry(QtCore.QRect(290, 40, 260, 330))
self.label.setStyleSheet("background-color:rgba(255, 255, 255, 255);\n"
"border-radius:10px;")
self.label.setText("")
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.widget)
self.label_2.setGeometry(QtCore.QRect(40, 25, 270, 360))
self.label_2.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(85, 98, 112, 255), stop:1 rgba(255, 107, 107, 255)); \n"
"border-radius:10px;")
self.label_2.setText("")
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.widget)
self.label_3.setGeometry(QtCore.QRect(330, 80, 101, 31))
font = QtGui.QFont()
font.setPointSize(15)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setStyleSheet("color:rgba(0, 0, 0, 200); ")
self.label_3.setObjectName("label_3")
self.lineEdit = QtWidgets.QLineEdit(self.widget)
self.lineEdit.setGeometry(QtCore.QRect(330, 140, 190, 40))
font = QtGui.QFont()
font.setPointSize(9)
self.lineEdit.setFont(font)
self.lineEdit.setStyleSheet("background-color:rgba(0, 0, 0, 0);\n"
"border:2px solid rgba(0, 0, 0, 0);\n"
"border-bottom-color:rgba(46, 82, 101, 200);\n"
"color:rgb(0, 0, 0);\n"
"padding-bottom:7px;")
self.lineEdit.setObjectName("lineEdit")
self.lineEdit_2 = QtWidgets.QLineEdit(self.widget)
self.lineEdit_2.setGeometry(QtCore.QRect(330, 200, 190, 40))
font = QtGui.QFont()
font.setPointSize(9)
self.lineEdit_2.setFont(font)
self.lineEdit_2.setStyleSheet("background-color:rgba(0, 0, 0, 0);\n"
"border:2px solid rgba(0, 0, 0, 0);\n"
"border-bottom-color:rgba(46, 82, 101, 200);\n"
"color:rgb(0, 0, 0);\n"
"padding-bottom:7px;")
self.lineEdit_2.setEchoMode(QtWidgets.QLineEdit.Password)
self.lineEdit_2.setObjectName("lineEdit_2")
self.pushButton = QtWidgets.QPushButton(self.widget)
self.pushButton.setGeometry(QtCore.QRect(330, 280, 190, 40))
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.pushButton.setFont(font)
self.pushButton.setStyleSheet("")
self.pushButton.setObjectName("pushButton")
self.label_4 = QtWidgets.QLabel(self.widget)
self.label_4.setGeometry(QtCore.QRect(340, 330, 191, 16))
self.label_4.setStyleSheet("color:rgba(0, 0, 0, 200);")
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(self.widget)
self.label_5.setGeometry(QtCore.QRect(60, 50, 141, 41))
font = QtGui.QFont()
font.setPointSize(22)
font.setBold(True)
font.setWeight(75)
self.label_5.setFont(font)
self.label_5.setStyleSheet("color:rgba(255, 255, 255, 200);")
self.label_5.setObjectName("label_5")
self.label_6 = QtWidgets.QLabel(self.widget)
self.label_6.setGeometry(QtCore.QRect(60, 110, 231, 51))
font = QtGui.QFont()
font.setPointSize(10)
self.label_6.setFont(font)
self.label_6.setStyleSheet("color:rgba(255, 255, 255, 220);")
self.label_6.setObjectName("label_6")
self.label_7 = QtWidgets.QLabel(self.widget)
self.label_7.setGeometry(QtCore.QRect(50, 170, 251, 201))
font = QtGui.QFont()
font.setFamily("Mountain")
font.setPointSize(150)
self.label_7.setFont(font)
self.label_7.setStyleSheet("color:rgba(255, 107, 107, 255);")
self.label_7.setObjectName("label_7")
self.label.setGraphicsEffect(QtWidgets.QGraphicsDropShadowEffect(blurRadius=25, xOffset=0, yOffset=0))
self.label_2.setGraphicsEffect(QtWidgets.QGraphicsDropShadowEffect(blurRadius=25, xOffset=0, yOffset=0))
self.pushButton.setGraphicsEffect(QtWidgets.QGraphicsDropShadowEffect(blurRadius=25, xOffset=3, yOffset=3))
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.label_3.setText(_translate("Form", "Log In"))
self.lineEdit.setPlaceholderText(_translate("Form", " User Name"))
self.lineEdit_2.setPlaceholderText(_translate("Form", " Password"))
self.pushButton.setText(_translate("Form", "L o g I n"))
self.label_4.setText(_translate("Form", "Forgot Your User Name or Password?"))
self.label_5.setText(_translate("Form", "LDPJ Dev"))
self.label_6.setText(_translate("Form", "Ola, \n"
"Bem Vindo ao mundo Dev"))
self.label_7.setText(_translate("Form", "-"))
# --------------------------------------------------------------------------------------
# Author of the file above: leodpj@gmail.com
# Next file: /templates/clasificacion-de-bosque/clasificacion-de-bosque_1.1.py  (repo: OpenDatacubeIDEAM/cdcol-workflows, no license)
# --------------------------------------------------------------------------------------
import airflow
from airflow.models import DAG
from airflow.operators import CDColQueryOperator, CDColFromFileOperator, CDColReduceOperator
from airflow.operators.python_operator import PythonOperator
from cdcol_utils import dag_utils, queue_utils, other_utils
from airflow.utils.trigger_rule import TriggerRule
from datetime import timedelta
from pprint import pprint
_params = {{params}}
_steps = {
'ndvi': {
'algorithm': "ndvi-wf",
'version': '1.0',
'queue': queue_utils.assign_queue(),
'params': {},
'del_prev_result': _params['elimina_resultados_anteriores'],
},
'bosque': {
'algorithm': "bosque-no-bosque-wf",
'version': '1.0',
'queue': queue_utils.assign_queue(),
'params': {
'ndvi_threshold': _params['ndvi_threshold'],
'vegetation_rate': _params['vegetation_rate'],
'slice_size': _params['slice_size']
},
'del_prev_result': _params['elimina_resultados_anteriores'],
},
'mosaico': {
'algorithm': "joiner",
'version': '1.0',
'queue': queue_utils.assign_queue(
input_type='multi_area',
lat=_params['lat'],
lon=_params['lon']
),
'params': {},
'del_prev_result': _params['elimina_resultados_anteriores'],
}
}
args = {
'owner': _params['owner'],
'start_date': airflow.utils.dates.days_ago(2),
'execID': _params['execID'],
'product':_params['products'][0]
}
dag = DAG(
dag_id=args["execID"],
default_args=args,
schedule_interval=None,
dagrun_timeout=timedelta(minutes=20)
)
ndvi = dag_utils.queryMapByTile(
lat=_params['lat'],
lon=_params['lon'],
product=_params['products'][0],
time_ranges=_params['time_ranges'][0],
algorithm=_steps['ndvi']['algorithm'],
version=_steps['ndvi']['version'],
params=_steps['ndvi']['params'],
queue=_steps['ndvi']['queue'],
delete_partial_results=_steps['ndvi']['del_prev_result'],
dag=dag,
task_id="ndvi",
    to_tiff=not (_params['genera_mosaico'] and queue_utils.get_tiles(_params['lat'], _params['lon']) > 1)
)
bosque = dag_utils.IdentityMap(
ndvi,
algorithm=_steps['bosque']['algorithm'],
product=_params['products'][0],
version=_steps['bosque']['version'],
params=_steps['bosque']['params'],
queue=_steps['bosque']['queue'],
    delete_partial_results=_steps['bosque']['del_prev_result'],
dag=dag,
task_id="bosque", to_tiff= not( _params['genera_mosaico'] and queue_utils.get_tiles(_params['lat'],_params['lon'])>1)
)
workflow = bosque
if _params['genera_mosaico'] and queue_utils.get_tiles(_params['lat'],_params['lon'])>1:
mosaico = dag_utils.OneReduce(
workflow, task_id="mosaic",
algorithm=_steps['mosaico']['algorithm'],
version=_steps['mosaico']['version'],
queue=_steps['mosaico']['queue'],
delete_partial_results=_steps['mosaico']['del_prev_result'],
trigger_rule=TriggerRule.NONE_FAILED,
dag=dag,
to_tiff=True
)
workflow = mosaico
workflow
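# Hedged illustration in plain Python (placeholder names, not the real
# cdcol_utils API): the mosaic reduce stage above is appended only when the
# user asked for a mosaic and the query spans more than one tile, where
# `n_tiles` stands in for queue_utils.get_tiles(lat, lon).
def pipeline_steps(genera_mosaico, n_tiles):
    steps = ['ndvi', 'bosque']
    if genera_mosaico and n_tiles > 1:
        steps.append('mosaico')
    return steps

assert pipeline_steps(True, 4) == ['ndvi', 'bosque', 'mosaico']
assert pipeline_steps(True, 1) == ['ndvi', 'bosque']
assert pipeline_steps(False, 4) == ['ndvi', 'bosque']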
# ---- rasoolims/ImageTranslate :: /src/binarize_coco_data.py (Python, no license) ----
import json
import marshal
from optparse import OptionParser
from textprocessor import TextProcessor
id2path = lambda path: "".join(["".join((12 - len(path)) * ["0"]), path, ".jpg"])
caption_format = lambda caption: " ".join(["<en>", caption, "</s>"])
caption_data = lambda annotation: (id2path(str(annotation["image_id"])), caption_format(annotation["caption"]))
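# Hedged sanity check of the helpers above: COCO image ids are zero-padded to
# 12 digits before the .jpg extension, and captions are wrapped in a language
# tag and an end-of-sentence marker.
assert id2path("397133") == "000000397133.jpg"
assert caption_format("a cat") == "<en> a cat </s>"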
def write(text_processor: TextProcessor, output_file: str, input_file: str, max_len: int, sample_size: int):
with open(input_file, "r") as r:
obj = json.load(r)
annotations = obj["annotations"]
captions = list(map(lambda annotation: caption_data(annotation), annotations))
print(len(captions))
skipped_long_sens = 0
image_path_dict, unique_images = dict(), dict()
tok_captions = {}
image_ids = {}
for ci, c in enumerate(captions):
if ci % 1000 == 0:
print(ci, "/", len(captions), "->", len(tok_captions), len(unique_images), end="\r")
tok_sen = text_processor.tokenize_one_sentence(c[1])
if len(tok_sen) > max_len:
skipped_long_sens += 1
continue
path = c[0]
            if path not in image_path_dict:
                image_id = len(unique_images)
                unique_images[image_id] = path
                image_path_dict[path] = image_id
            else:
                # the path is already registered; reuse its image id
                image_id = image_path_dict[path]
caption_id = len(tok_captions)
tok_captions[caption_id] = tok_sen
image_ids[caption_id] = image_id
if (ci + 1) >= sample_size and sample_size > 0:
break
print("Skipped long sentences:", skipped_long_sens, "from", len(captions))
tok_captions_sorted = sorted(tok_captions.items(), key=lambda item: len(item[1]))
caption_sorted = list(map(lambda e: (image_ids[e[0]], e[1]), tok_captions_sorted))
print("Longest sentence", len(tok_captions_sorted[-1][1]))
with open(output_file, "wb") as wfp:
marshal.dump((unique_images, caption_sorted), wfp)
print("Dumped", len(caption_sorted), "captions from", len(unique_images), "unique images")
def get_options():
global options
parser = OptionParser()
parser.add_option("--file", dest="file", help="Which files to use", metavar="FILE", default=None)
parser.add_option("--output", dest="output_file", help="Output pickle file.", metavar="FILE", default=None)
parser.add_option("--tok", dest="tokenizer_path", help="Path to the tokenizer folder", metavar="FILE", default=None)
parser.add_option("--max-len", dest="max_len", help="Maximum tokenized caption length", type="int", default=256)
parser.add_option("--sample", dest="sample_size", type="int", default=-1)
(options, args) = parser.parse_args()
return options
if __name__ == "__main__":
options = get_options()
tokenizer = TextProcessor(options.tokenizer_path)
print("Writing batches")
write(text_processor=tokenizer,
output_file=options.output_file,
input_file=options.file,
max_len=options.max_len,
sample_size=options.sample_size)
print("Finished")
# ---- pyliut/Rokos2021 :: /Wk8_STRANDS/error_ks_2samples.py (Python, no license) ----
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 7 16:27:34 2021
@author: pyliu
"""
import numpy as np
from integrate_pdf import *
import matplotlib.pyplot as plt
def error_ks_2samples(t_obs1, t_obs2, plot_graph = True):
"""
Calculate Kolmogorov-Smirnov Distance of between 2 samples
Parameters
----------
t_obs1 : FLOAT, vector
Set 1 of observed durations
t_obs1 : FLOAT, vector
Set 2 of observed durations
Returns
-------
D : FLOAT
K-S statistic
supremum (Greatest Lower Bound) of distances between the empirical CDF of the observations and CDF of the predicted distribution
"""
#1) create histogram of observed values
n_bins = max(len(t_obs1), len(t_obs2))
    range_min = 0
    range_max = np.max( [np.max(t_obs1), np.max(t_obs2)] )
    # extend the histogram range: roughly double the observed max, snapped to a multiple of 5
    range_max = 2*(range_max//5)*5 + 5
p_hist1, t_hist1 = np.histogram(t_obs1, density = True, bins = n_bins, range = (range_min, range_max) );
p_hist2, t_hist2 = np.histogram(t_obs2, density = True, bins = n_bins, range = (range_min, range_max) );
#2) turn both observed & predicted pdfs in to cdfs
cdf_hist1 = integrate_pdf(p_hist1)
cdf_hist1 /= cdf_hist1[-1] #normalise cdf
cdf_hist2 = integrate_pdf(p_hist2)
cdf_hist2 /= cdf_hist2[-1] #normalise cdf
#3) Calculate max distance
D = np.max( np.abs(cdf_hist1 - cdf_hist2) )
#4) Plot for visualisation
if plot_graph == True:
plt.plot(t_hist1[:-1], cdf_hist1)
plt.plot(t_hist2[:-1], cdf_hist2)
plt.legend(["edge1", "edge2"])
plt.xlabel("Duration (s)")
plt.ylabel("probability")
plt.title( "K-S test: D = " + str( np.round(D,5) ) )
return D
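# Usage sketch with synthetic data (assumes integrate_pdf is importable, as at
# the top of this file; plotting is disabled so only the statistic prints):
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    t1 = rng.exponential(scale=10.0, size=200)  # observed durations, edge 1
    t2 = rng.exponential(scale=12.0, size=200)  # observed durations, edge 2
    D = error_ks_2samples(t1, t2, plot_graph=False)
    print("K-S distance:", D)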
# ---- elphinkuo/tvm :: /tests/python/unittest/test_tvmscript_roundtrip.py (Python, Apache-2.0) ----
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
import tvm
from tvm import tir
from tvm.script import ty
@tvm.script.tir
class Module1:
def mmult(A: ty.handle, B: ty.handle, C: ty.handle) -> None:
# function attr dict
tir.func_attr({"global_symbol": "mmult", "tir.noalias": True})
# buffer definition
C_global = tir.buffer_decl([1024, 1024], elem_offset=0, align=128, offset_factor=1)
packedB = tir.buffer_decl([32, 1024, 32], elem_offset=0, align=128, offset_factor=1)
A_1 = tir.match_buffer(A, [1024, 1024], elem_offset=0, align=128, offset_factor=1)
B_1 = tir.match_buffer(B, [1024, 1024], elem_offset=0, align=128, offset_factor=1)
C_1 = tir.match_buffer(C, [1024, 1024], elem_offset=0, align=128, offset_factor=1)
# body
tir.realize(packedB[0:32, 0:1024, 0:32], "")
for x in tir.parallel(0, 32):
for y in tir.serial(0, 1024):
for z in tir.vectorized(0, 32):
packedB[x, y, z] = B_1[y, ((x * 32) + z)]
tir.realize(C_1[0:1024, 0:1024], "")
for x_outer in tir.parallel(0, 32):
for y_outer in tir.serial(0, 32):
tir.realize(
C_global[
(x_outer * 32) : ((x_outer * 32) + 32),
(y_outer * 32) : ((y_outer * 32) + 32),
],
"global",
)
for x_c_init in tir.serial(0, 32):
for y_c_init in tir.vectorized(0, 32):
C_global[
(x_c_init + (x_outer * 32)), (y_c_init + (y_outer * 32))
] = tir.float32(0)
for k_outer in tir.serial(0, 256):
for x_c in tir.serial(0, 32):
for k_inner in tir.unroll(0, 4):
for y_c in tir.vectorized(0, 32):
C_global[(x_c + (x_outer * 32)), (y_c + (y_outer * 32))] = C_global[
(x_c + (x_outer * 32)), (y_c + (y_outer * 32))
] + (
A_1[(x_c + (x_outer * 32)), (k_inner + (k_outer * 4))]
* packedB[
tir.floordiv((y_c + (y_outer * 32)), 32),
(k_inner + (k_outer * 4)),
tir.floormod((y_c + (y_outer * 32)), 32),
]
)
for x_inner in tir.serial(0, 32):
for y_inner in tir.serial(0, 32):
C_1[(x_inner + (x_outer * 32)), (y_inner + (y_outer * 32))] = C_global[
(x_inner + (x_outer * 32)), (y_inner + (y_outer * 32))
]
def test_opt_gemm_normalize():
mod = Module1()
rt_mod = tvm.script.from_source(tvm.script.asscript(mod, True))
tvm.ir.assert_structural_equal(mod, rt_mod, True)
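# The tests in this file all repeat the same three-step roundtrip; a minimal
# helper capturing that pattern could look like this (a sketch built only
# from calls already used above, not part of the TVM test suite itself):
def _roundtrip(mod):
    # Print the module back to TVM script, re-parse the source, and require
    # structural equality with the original module.
    script = tvm.script.asscript(mod, True)
    rt_mod = tvm.script.from_source(script)
    tvm.ir.assert_structural_equal(mod, rt_mod, True)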
@tvm.script.tir
class Module2:
def mmult(A: ty.handle, B: ty.handle, C: ty.handle) -> None:
# function attr dict
tir.func_attr({"global_symbol": "mmult", "tir.noalias": True})
A_1 = tir.match_buffer(A, [1024, 1024], elem_offset=0, align=128, offset_factor=1)
B_1 = tir.match_buffer(B, [1024, 1024], elem_offset=0, align=128, offset_factor=1)
C_1 = tir.match_buffer(C, [1024, 1024], elem_offset=0, align=128, offset_factor=1)
# body
packedB = tir.allocate([32768], "float32x32", "global")
for x in tir.parallel(0, 32):
for y in tir.serial(0, 1024):
tir.store(
packedB,
tir.ramp(((x * 32768) + (y * 32)), 1, 32),
tir.load(
"float32x32",
B_1.data,
tir.ramp(((y * 1024) + (x * 32)), 1, 32),
tir.broadcast(True, 32),
),
tir.broadcast(True, 32),
)
for x_outer in tir.parallel(0, 32):
C_global = tir.allocate([1024], "float32", "global")
for y_outer in tir.serial(0, 32):
for x_c_init in tir.serial(0, 32):
tir.store(
C_global,
tir.ramp((x_c_init * 32), 1, 32),
tir.broadcast(tir.float32(0), 32),
tir.broadcast(True, 32),
)
for k_outer in tir.serial(0, 256):
for x_c in tir.serial(0, 32):
tir.store(
C_global,
tir.ramp((x_c * 32), 1, 32),
(
tir.load(
"float32x32",
C_global,
tir.ramp((x_c * 32), 1, 32),
tir.broadcast(True, 32),
)
+ (
tir.broadcast(
tir.load(
"float32",
A_1.data,
(((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4)),
),
32,
)
* tir.load(
"float32x32",
packedB,
tir.ramp(((y_outer * 32768) + (k_outer * 128)), 1, 32),
tir.broadcast(True, 32),
)
)
),
tir.broadcast(True, 32),
)
tir.store(
C_global,
tir.ramp((x_c * 32), 1, 32),
(
tir.load(
"float32x32",
C_global,
tir.ramp((x_c * 32), 1, 32),
tir.broadcast(True, 32),
)
+ (
tir.broadcast(
tir.load(
"float32",
A_1.data,
(
(((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4))
+ 1
),
),
32,
)
* tir.load(
"float32x32",
packedB,
tir.ramp(
(((y_outer * 32768) + (k_outer * 128)) + 32), 1, 32
),
tir.broadcast(True, 32),
)
)
),
tir.broadcast(True, 32),
)
tir.store(
C_global,
tir.ramp((x_c * 32), 1, 32),
(
tir.load(
"float32x32",
C_global,
tir.ramp((x_c * 32), 1, 32),
tir.broadcast(True, 32),
)
+ (
tir.broadcast(
tir.load(
"float32",
A_1.data,
(
(((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4))
+ 2
),
),
32,
)
* tir.load(
"float32x32",
packedB,
tir.ramp(
(((y_outer * 32768) + (k_outer * 128)) + 64), 1, 32
),
tir.broadcast(True, 32),
)
)
),
tir.broadcast(True, 32),
)
tir.store(
C_global,
tir.ramp((x_c * 32), 1, 32),
(
tir.load(
"float32x32",
C_global,
tir.ramp((x_c * 32), 1, 32),
tir.broadcast(True, 32),
)
+ (
tir.broadcast(
tir.load(
"float32",
A_1.data,
(
(((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4))
+ 3
),
),
32,
)
* tir.load(
"float32x32",
packedB,
tir.ramp(
(((y_outer * 32768) + (k_outer * 128)) + 96), 1, 32
),
tir.broadcast(True, 32),
)
)
),
tir.broadcast(True, 32),
)
for x_inner in tir.serial(0, 32):
for y_inner in tir.serial(0, 32):
C_1.data[
((((x_outer * 32768) + (x_inner * 1024)) + (y_outer * 32)) + y_inner)
] = tir.load("float32", C_global, ((x_inner * 32) + y_inner))
def test_opt_gemm_lower():
mod = Module2()
rt_mod = tvm.script.from_source(tvm.script.asscript(mod, True))
tvm.ir.assert_structural_equal(mod, rt_mod, True)
@tvm.script.tir
class Module3:
def mmult(
args: ty.handle,
arg_type_ids: ty.handle,
num_args: ty.int32,
out_ret_value: ty.handle,
out_ret_tcode: ty.handle,
) -> ty.int32:
# function attr dict
tir.func_attr(
{
"tir.noalias": True,
"global_symbol": "mmult",
"tir.is_entry_func": True,
"calling_conv": 1,
}
)
# var definition
C_global = tir.buffer_var("float32", "global")
packedB = tir.buffer_var("float32", "global")
# body
assert num_args == 3, "mmult: num_args should be 3"
arg0: ty.handle = tir.tvm_struct_get(args, 0, 12, dtype="handle")
arg0_code: ty.int32 = tir.load("int32", arg_type_ids, 0)
arg1: ty.handle = tir.tvm_struct_get(args, 1, 12, dtype="handle")
arg1_code: ty.int32 = tir.load("int32", arg_type_ids, 1)
arg2: ty.handle = tir.tvm_struct_get(args, 2, 12, dtype="handle")
arg2_code: ty.int32 = tir.load("int32", arg_type_ids, 2)
A: ty.handle = tir.tvm_struct_get(arg0, 0, 1, dtype="handle")
tir.attr(A, "storage_alignment", 128)
arg0_shape: ty.handle = tir.tvm_struct_get(arg0, 0, 2, dtype="handle")
arg0_strides: ty.handle = tir.tvm_struct_get(arg0, 0, 3, dtype="handle")
dev_id: ty.int32 = tir.tvm_struct_get(arg0, 0, 9, dtype="int32")
B: ty.handle = tir.tvm_struct_get(arg1, 0, 1, dtype="handle")
tir.attr(B, "storage_alignment", 128)
arg1_shape: ty.handle = tir.tvm_struct_get(arg1, 0, 2, dtype="handle")
arg1_strides: ty.handle = tir.tvm_struct_get(arg1, 0, 3, dtype="handle")
C: ty.handle = tir.tvm_struct_get(arg2, 0, 1, dtype="handle")
tir.attr(C, "storage_alignment", 128)
arg2_shape: ty.handle = tir.tvm_struct_get(arg2, 0, 2, dtype="handle")
arg2_strides: ty.handle = tir.tvm_struct_get(arg2, 0, 3, dtype="handle")
assert (((arg0_code == 3) or (arg0_code == 13)) or (arg0_code == 7)) or (
arg0_code == 4
), "mmult: Expect arg[0] to be pointer"
assert (((arg1_code == 3) or (arg1_code == 13)) or (arg1_code == 7)) or (
arg1_code == 4
), "mmult: Expect arg[1] to be pointer"
assert (((arg2_code == 3) or (arg2_code == 13)) or (arg2_code == 7)) or (
arg2_code == 4
), "mmult: Expect arg[2] to be pointer"
assert 2 == tir.tvm_struct_get(
arg0, 0, 4, dtype="int32"
), "arg0.ndim is expected to equal 2"
assert 2 == tir.tvm_struct_get(
arg0, 0, 4, dtype="int32"
), "arg0.ndim is expected to equal 2"
assert (
(tir.tvm_struct_get(arg0, 0, 5, dtype="uint8") == tir.uint8(2))
and (tir.tvm_struct_get(arg0, 0, 6, dtype="uint8") == tir.uint8(32))
) and (
tir.tvm_struct_get(arg0, 0, 7, dtype="uint16") == tir.uint16(1)
), "arg0.dtype is expected to be float32"
assert 1024 == tir.cast(
tir.load("int64", arg0_shape, 0), "int32"
), "Argument arg0.shape[0] has an unsatisfied constraint"
assert 1024 == tir.cast(
tir.load("int64", arg0_shape, 1), "int32"
), "Argument arg0.shape[1] has an unsatisfied constraint"
if not (tir.isnullptr(arg0_strides, dtype="bool")):
assert (1 == tir.cast(tir.load("int64", arg0_strides, 1), "int32")) and (
1024 == tir.cast(tir.load("int64", arg0_strides, 0), "int32")
), "arg0.strides: expected to be compact array"
tir.evaluate(0)
assert tir.uint64(0) == tir.tvm_struct_get(
arg0, 0, 8, dtype="uint64"
), "Argument arg0.byte_offset has an unsatisfied constraint"
assert 1 == tir.tvm_struct_get(
arg0, 0, 10, dtype="int32"
), "Argument arg0.device_type has an unsatisfied constraint"
assert 2 == tir.tvm_struct_get(
arg1, 0, 4, dtype="int32"
), "arg1.ndim is expected to equal 2"
assert 2 == tir.tvm_struct_get(
arg1, 0, 4, dtype="int32"
), "arg1.ndim is expected to equal 2"
assert (
(tir.tvm_struct_get(arg1, 0, 5, dtype="uint8") == tir.uint8(2))
and (tir.tvm_struct_get(arg1, 0, 6, dtype="uint8") == tir.uint8(32))
) and (
tir.tvm_struct_get(arg1, 0, 7, dtype="uint16") == tir.uint16(1)
), "arg1.dtype is expected to be float32"
assert 1024 == tir.cast(
tir.load("int64", arg1_shape, 0), "int32"
), "Argument arg1.shape[0] has an unsatisfied constraint"
assert 1024 == tir.cast(
tir.load("int64", arg1_shape, 1), "int32"
), "Argument arg1.shape[1] has an unsatisfied constraint"
if not (tir.isnullptr(arg1_strides, dtype="bool")):
assert (1 == tir.cast(tir.load("int64", arg1_strides, 1), "int32")) and (
1024 == tir.cast(tir.load("int64", arg1_strides, 0), "int32")
), "arg1.strides: expected to be compact array"
tir.evaluate(0)
assert tir.uint64(0) == tir.tvm_struct_get(
arg1, 0, 8, dtype="uint64"
), "Argument arg1.byte_offset has an unsatisfied constraint"
assert 1 == tir.tvm_struct_get(
arg1, 0, 10, dtype="int32"
), "Argument arg1.device_type has an unsatisfied constraint"
assert dev_id == tir.tvm_struct_get(
arg1, 0, 9, dtype="int32"
), "Argument arg1.device_id has an unsatisfied constraint"
assert 2 == tir.tvm_struct_get(
arg2, 0, 4, dtype="int32"
), "arg2.ndim is expected to equal 2"
assert 2 == tir.tvm_struct_get(
arg2, 0, 4, dtype="int32"
), "arg2.ndim is expected to equal 2"
assert (
(tir.tvm_struct_get(arg2, 0, 5, dtype="uint8") == tir.uint8(2))
and (tir.tvm_struct_get(arg2, 0, 6, dtype="uint8") == tir.uint8(32))
) and (
tir.tvm_struct_get(arg2, 0, 7, dtype="uint16") == tir.uint16(1)
), "arg2.dtype is expected to be float32"
assert 1024 == tir.cast(
tir.load("int64", arg2_shape, 0), "int32"
), "Argument arg2.shape[0] has an unsatisfied constraint"
assert 1024 == tir.cast(
tir.load("int64", arg2_shape, 1), "int32"
), "Argument arg2.shape[1] has an unsatisfied constraint"
if not (tir.isnullptr(arg2_strides, dtype="bool")):
assert (1 == tir.cast(tir.load("int64", arg2_strides, 1), "int32")) and (
1024 == tir.cast(tir.load("int64", arg2_strides, 0), "int32")
), "arg2.strides: expected to be compact array"
tir.evaluate(0)
assert tir.uint64(0) == tir.tvm_struct_get(
arg2, 0, 8, dtype="uint64"
), "Argument arg2.byte_offset has an unsatisfied constraint"
assert 1 == tir.tvm_struct_get(
arg2, 0, 10, dtype="int32"
), "Argument arg2.device_type has an unsatisfied constraint"
assert dev_id == tir.tvm_struct_get(
arg2, 0, 9, dtype="int32"
), "Argument arg2.device_id has an unsatisfied constraint"
tir.attr(0, "compute_scope", "mmult_compute_")
tir.attr(packedB, "storage_scope", "global")
tir.attr(packedB, "storage_alignment", 128)
with tir.let(
packedB,
tir.TVMBackendAllocWorkspace(1, dev_id, tir.uint64(4194304), 2, 32, dtype="handle"),
):
if tir.isnullptr(packedB, dtype="bool"):
tir.evaluate(tir.tvm_throw_last_error(dtype="int32"))
for x in tir.parallel(0, 32):
for y in tir.serial(0, 1024):
tir.store(
packedB,
tir.ramp(((x * 32768) + (y * 32)), 1, 32),
tir.load(
"float32x32",
B,
tir.ramp(((y * 1024) + (x * 32)), 1, 32),
tir.broadcast(True, 32),
),
tir.broadcast(True, 32),
)
for x_outer in tir.parallel(0, 32):
tir.attr(C_global, "storage_scope", "global")
tir.attr(C_global, "storage_alignment", 128)
with tir.let(
C_global,
tir.TVMBackendAllocWorkspace(
1, dev_id, tir.uint64(4096), 2, 32, dtype="handle"
),
):
if tir.isnullptr(C_global, dtype="bool"):
tir.evaluate(tir.tvm_throw_last_error(dtype="int32"))
for y_outer in tir.serial(0, 32):
for x_c_init in tir.serial(0, 32):
tir.store(
C_global,
tir.ramp((x_c_init * 32), 1, 32),
tir.broadcast(tir.float32(0), 32),
tir.broadcast(True, 32),
)
for k_outer in tir.serial(0, 256):
for x_c in tir.serial(0, 32):
tir.store(
C_global,
tir.ramp((x_c * 32), 1, 32),
tir.call_llvm_pure_intrin(
tir.uint32(97),
tir.uint32(3),
tir.broadcast(
tir.load(
"float32",
A,
(
((x_outer * 32768) + (x_c * 1024))
+ (k_outer * 4)
),
),
32,
),
tir.load(
"float32x32",
packedB,
tir.ramp(((y_outer * 32768) + (k_outer * 128)), 1, 32),
tir.broadcast(True, 32),
),
tir.load(
"float32x32",
C_global,
tir.ramp((x_c * 32), 1, 32),
tir.broadcast(True, 32),
),
dtype="float32x32",
),
tir.broadcast(True, 32),
)
tir.store(
C_global,
tir.ramp((x_c * 32), 1, 32),
tir.call_llvm_pure_intrin(
tir.uint32(97),
tir.uint32(3),
tir.broadcast(
tir.load(
"float32",
A,
(
(
((x_outer * 32768) + (x_c * 1024))
+ (k_outer * 4)
)
+ 1
),
),
32,
),
tir.load(
"float32x32",
packedB,
tir.ramp(
(((y_outer * 32768) + (k_outer * 128)) + 32), 1, 32
),
tir.broadcast(True, 32),
),
tir.load(
"float32x32",
C_global,
tir.ramp((x_c * 32), 1, 32),
tir.broadcast(True, 32),
),
dtype="float32x32",
),
tir.broadcast(True, 32),
)
tir.store(
C_global,
tir.ramp((x_c * 32), 1, 32),
tir.call_llvm_pure_intrin(
tir.uint32(97),
tir.uint32(3),
tir.broadcast(
tir.load(
"float32",
A,
(
(
((x_outer * 32768) + (x_c * 1024))
+ (k_outer * 4)
)
+ 2
),
),
32,
),
tir.load(
"float32x32",
packedB,
tir.ramp(
(((y_outer * 32768) + (k_outer * 128)) + 64), 1, 32
),
tir.broadcast(True, 32),
),
tir.load(
"float32x32",
C_global,
tir.ramp((x_c * 32), 1, 32),
tir.broadcast(True, 32),
),
dtype="float32x32",
),
tir.broadcast(True, 32),
)
tir.store(
C_global,
tir.ramp((x_c * 32), 1, 32),
tir.call_llvm_pure_intrin(
tir.uint32(97),
tir.uint32(3),
tir.broadcast(
tir.load(
"float32",
A,
(
(
((x_outer * 32768) + (x_c * 1024))
+ (k_outer * 4)
)
+ 3
),
),
32,
),
tir.load(
"float32x32",
packedB,
tir.ramp(
(((y_outer * 32768) + (k_outer * 128)) + 96), 1, 32
),
tir.broadcast(True, 32),
),
tir.load(
"float32x32",
C_global,
tir.ramp((x_c * 32), 1, 32),
tir.broadcast(True, 32),
),
dtype="float32x32",
),
tir.broadcast(True, 32),
)
for x_inner in tir.serial(0, 32):
for y_inner in tir.serial(0, 32):
C[
(
(((x_outer * 32768) + (x_inner * 1024)) + (y_outer * 32))
+ y_inner
)
] = tir.load("float32", C_global, ((x_inner * 32) + y_inner))
if tir.TVMBackendFreeWorkspace(1, dev_id, C_global, dtype="int32") != 0:
tir.evaluate(tir.tvm_throw_last_error(dtype="int32"))
if tir.TVMBackendFreeWorkspace(1, dev_id, packedB, dtype="int32") != 0:
tir.evaluate(tir.tvm_throw_last_error(dtype="int32"))
def test_opt_gemm_mod_host():
mod = Module3()
rt_mod = tvm.script.from_source(tvm.script.asscript(mod, True))
tvm.ir.assert_structural_equal(mod, rt_mod, True)
@tvm.script.tir
def opt_conv_tensorcore_normalize(A: ty.handle, W: ty.handle, Conv: ty.handle) -> None:
# function attr dict
tir.func_attr({"global_symbol": "default_function", "tir.noalias": True})
# var definition
bx = tir.env_thread("blockIdx.x")
by = tir.env_thread("blockIdx.y")
bz = tir.env_thread("blockIdx.z")
tx = tir.env_thread("threadIdx.x")
ty = tir.env_thread("threadIdx.y")
tz = tir.env_thread("threadIdx.z")
# buffer definition
Apad_shared = tir.buffer_decl(
[16, 16, 16, 16, 16, 16], dtype="float16", elem_offset=0, align=128, offset_factor=1
)
Apad_shared_wmma_matrix_a = tir.buffer_decl(
[16, 16, 16, 16, 16, 16], dtype="float16", elem_offset=0, align=128, offset_factor=1
)
BA = tir.buffer_decl(
[16, 16], dtype="float16", scope="wmma.matrix_a", align=32, offset_factor=256
)
BB = tir.buffer_decl(
[16, 16], dtype="float16", scope="wmma.matrix_b", align=32, offset_factor=256
)
BC = tir.buffer_decl([16, 16], scope="wmma.accumulator", align=32, offset_factor=256)
Conv_wmma_accumulator = tir.buffer_decl(
[16, 14, 14, 32, 16, 16], elem_offset=0, align=128, offset_factor=1
)
W_shared = tir.buffer_decl(
[3, 3, 16, 32, 16, 16], dtype="float16", elem_offset=0, align=128, offset_factor=1
)
W_shared_wmma_matrix_b = tir.buffer_decl(
[3, 3, 16, 32, 16, 16], dtype="float16", elem_offset=0, align=128, offset_factor=1
)
buffer = tir.buffer_decl([16, 16], dtype="float16", scope="shared", align=32, offset_factor=256)
buffer_1 = tir.buffer_decl(
[16, 16], dtype="float16", scope="wmma.matrix_a", align=32, offset_factor=256
)
buffer_2 = tir.buffer_decl(
[16, 16], dtype="float16", scope="shared", align=32, offset_factor=256
)
buffer_3 = tir.buffer_decl(
[16, 16], dtype="float16", scope="wmma.matrix_b", align=32, offset_factor=256
)
buffer_4 = tir.buffer_decl([16, 16], scope="wmma.accumulator", align=32, offset_factor=256)
buffer_5 = tir.buffer_decl([16, 16], align=32, offset_factor=256)
A_1 = tir.match_buffer(
A, [16, 14, 14, 16, 16, 16], dtype="float16", elem_offset=0, align=128, offset_factor=1
)
W_1 = tir.match_buffer(
W, [3, 3, 16, 32, 16, 16], dtype="float16", elem_offset=0, align=128, offset_factor=1
)
Conv_1 = tir.match_buffer(
Conv, [16, 14, 14, 32, 16, 16], elem_offset=0, align=128, offset_factor=1
)
# body
tir.realize(Conv_1[0:16, 0:14, 0:14, 0:32, 0:16, 0:16], "")
tir.launch_thread(bz, 196)
tir.launch_thread(bx, 2)
tir.launch_thread(by, 4)
tir.launch_thread(ty, 4)
tir.launch_thread(tz, 2)
tir.realize(
Conv_wmma_accumulator[
((bx * 8) + (ty * 2)) : (((bx * 8) + (ty * 2)) + 2),
tir.floordiv(bz, 14) : (tir.floordiv(bz, 14) + 1),
tir.floormod(bz, 14) : (tir.floormod(bz, 14) + 1),
((by * 8) + (tz * 4)) : (((by * 8) + (tz * 4)) + 4),
0:16,
0:16,
],
"wmma.accumulator",
)
for n_c_init in tir.serial(0, 2):
for o_c_init in tir.serial(0, 4):
tir.attr(
[BC, Conv_wmma_accumulator],
"buffer_bind_scope",
tir.tvm_tuple(
(n_c_init + ((bx * 8) + (ty * 2))),
1,
tir.floordiv(bz, 14),
1,
tir.floormod(bz, 14),
1,
(o_c_init + ((by * 8) + (tz * 4))),
1,
0,
16,
0,
16,
dtype="handle",
),
)
tir.evaluate(
tir.tvm_fill_fragment(
BC.data,
16,
16,
16,
tir.floordiv(BC.elem_offset, 256),
tir.float32(0),
dtype="handle",
)
)
for ic_outer in tir.serial(0, 8):
for kh in tir.serial(0, 3):
tir.realize(
Apad_shared[
(bx * 8) : ((bx * 8) + 8),
(tir.floordiv(bz, 14) + kh) : ((tir.floordiv(bz, 14) + kh) + 1),
tir.floormod(bz, 14) : (tir.floormod(bz, 14) + 3),
(ic_outer * 2) : ((ic_outer * 2) + 2),
0:16,
0:16,
],
"shared",
)
for ax2 in tir.serial(0, 3):
for ax3 in tir.serial(0, 2):
for ax4_ax5_fused_outer in tir.serial(0, 8):
tir.launch_thread(tx, 32)
Apad_shared[
((tz + (ty * 2)) + (bx * 8)),
(tir.floordiv(bz, 14) + kh),
(ax2 + tir.floormod(bz, 14)),
(ax3 + (ic_outer * 2)),
tir.floordiv((tx + (ax4_ax5_fused_outer * 32)), 16),
tir.floormod((tx + (ax4_ax5_fused_outer * 32)), 16),
] = tir.if_then_else(
(
(
(
((tir.floordiv(bz, 14) + kh) >= 1)
and (((tir.floordiv(bz, 14) + kh) - 1) < 14)
)
and ((ax2 + tir.floormod(bz, 14)) >= 1)
)
and (((ax2 + tir.floormod(bz, 14)) - 1) < 14)
),
A_1[
((tz + (ty * 2)) + (bx * 8)),
((tir.floordiv(bz, 14) + kh) - 1),
((ax2 + tir.floormod(bz, 14)) - 1),
(ax3 + (ic_outer * 2)),
tir.floordiv((tx + (ax4_ax5_fused_outer * 32)), 16),
tir.floormod((tx + (ax4_ax5_fused_outer * 32)), 16),
],
tir.float16(0),
dtype="float16",
)
tir.realize(
W_shared[
kh : (kh + 1),
0:3,
(ic_outer * 2) : ((ic_outer * 2) + 2),
(by * 8) : ((by * 8) + 8),
0:16,
0:16,
],
"shared",
)
for ax1 in tir.serial(0, 3):
for ax2_1 in tir.serial(0, 2):
tir.launch_thread(tx, 32)
for ax4_ax5_fused_inner in tir.vectorized(0, 8):
W_shared[
kh,
ax1,
(ax2_1 + (ic_outer * 2)),
((tz + (ty * 2)) + (by * 8)),
tir.floordiv((ax4_ax5_fused_inner + (tx * 8)), 16),
tir.floormod((ax4_ax5_fused_inner + (tx * 8)), 16),
] = W_1[
kh,
ax1,
(ax2_1 + (ic_outer * 2)),
((tz + (ty * 2)) + (by * 8)),
tir.floordiv((ax4_ax5_fused_inner + (tx * 8)), 16),
tir.floormod((ax4_ax5_fused_inner + (tx * 8)), 16),
]
for ic_inner in tir.serial(0, 2):
for kw in tir.serial(0, 3):
tir.realize(
Apad_shared_wmma_matrix_a[
((bx * 8) + (ty * 2)) : (((bx * 8) + (ty * 2)) + 2),
(tir.floordiv(bz, 14) + kh) : ((tir.floordiv(bz, 14) + kh) + 1),
(kw + tir.floormod(bz, 14)) : ((kw + tir.floormod(bz, 14)) + 1),
((ic_outer * 2) + ic_inner) : (((ic_outer * 2) + ic_inner) + 1),
0:16,
0:16,
],
"wmma.matrix_a",
)
for ax0 in tir.serial(0, 2):
tir.attr(
[buffer, Apad_shared],
"buffer_bind_scope",
tir.tvm_tuple(
(ax0 + ((bx * 8) + (ty * 2))),
1,
(tir.floordiv(bz, 14) + kh),
1,
(kw + tir.floormod(bz, 14)),
1,
((ic_outer * 2) + ic_inner),
1,
0,
16,
0,
16,
dtype="handle",
),
)
tir.attr(
[buffer_1, Apad_shared_wmma_matrix_a],
"buffer_bind_scope",
tir.tvm_tuple(
(ax0 + ((bx * 8) + (ty * 2))),
1,
(tir.floordiv(bz, 14) + kh),
1,
(kw + tir.floormod(bz, 14)),
1,
((ic_outer * 2) + ic_inner),
1,
0,
16,
0,
16,
dtype="handle",
),
)
tir.evaluate(
tir.tvm_load_matrix_sync(
buffer_1.data,
16,
16,
16,
tir.floordiv(buffer_1.elem_offset, 256),
tir.tvm_access_ptr(
tir.type_annotation(dtype="float16"),
buffer.data,
buffer.elem_offset,
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
tir.realize(
W_shared_wmma_matrix_b[
kh : (kh + 1),
kw : (kw + 1),
((ic_outer * 2) + ic_inner) : (((ic_outer * 2) + ic_inner) + 1),
((by * 8) + (tz * 4)) : (((by * 8) + (tz * 4)) + 4),
0:16,
0:16,
],
"wmma.matrix_b",
)
for ax3_1 in tir.serial(0, 4):
tir.attr(
[buffer_2, W_shared],
"buffer_bind_scope",
tir.tvm_tuple(
kh,
1,
kw,
1,
((ic_outer * 2) + ic_inner),
1,
(ax3_1 + ((by * 8) + (tz * 4))),
1,
0,
16,
0,
16,
dtype="handle",
),
)
tir.attr(
[buffer_3, W_shared_wmma_matrix_b],
"buffer_bind_scope",
tir.tvm_tuple(
kh,
1,
kw,
1,
((ic_outer * 2) + ic_inner),
1,
(ax3_1 + ((by * 8) + (tz * 4))),
1,
0,
16,
0,
16,
dtype="handle",
),
)
tir.evaluate(
tir.tvm_load_matrix_sync(
buffer_3.data,
16,
16,
16,
tir.floordiv(buffer_3.elem_offset, 256),
tir.tvm_access_ptr(
tir.type_annotation(dtype="float16"),
buffer_2.data,
buffer_2.elem_offset,
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
for n_c in tir.serial(0, 2):
for o_c in tir.serial(0, 4):
tir.attr(
[BA, Apad_shared_wmma_matrix_a],
"buffer_bind_scope",
tir.tvm_tuple(
(n_c + ((bx * 8) + (ty * 2))),
1,
(tir.floordiv(bz, 14) + kh),
1,
(tir.floormod(bz, 14) + kw),
1,
((ic_outer * 2) + ic_inner),
1,
0,
16,
0,
16,
dtype="handle",
),
)
tir.attr(
[BB, W_shared_wmma_matrix_b],
"buffer_bind_scope",
tir.tvm_tuple(
kh,
1,
kw,
1,
((ic_outer * 2) + ic_inner),
1,
(o_c + ((by * 8) + (tz * 4))),
1,
0,
16,
0,
16,
dtype="handle",
),
)
tir.attr(
[BC, Conv_wmma_accumulator],
"buffer_bind_scope",
tir.tvm_tuple(
(n_c + ((bx * 8) + (ty * 2))),
1,
tir.floordiv(bz, 14),
1,
tir.floormod(bz, 14),
1,
(o_c + ((by * 8) + (tz * 4))),
1,
0,
16,
0,
16,
dtype="handle",
),
)
tir.evaluate(
tir.tvm_mma_sync(
BC.data,
tir.floordiv(BC.elem_offset, 256),
BA.data,
tir.floordiv(BA.elem_offset, 256),
BB.data,
tir.floordiv(BB.elem_offset, 256),
BC.data,
tir.floordiv(BC.elem_offset, 256),
dtype="handle",
)
)
for n_inner in tir.serial(0, 2):
for o_inner in tir.serial(0, 4):
tir.attr(
[buffer_4, Conv_wmma_accumulator],
"buffer_bind_scope",
tir.tvm_tuple(
((((bx * 4) + ty) * 2) + n_inner),
1,
tir.floordiv(bz, 14),
1,
tir.floormod(bz, 14),
1,
((((by * 2) + tz) * 4) + o_inner),
1,
0,
16,
0,
16,
dtype="handle",
),
)
tir.attr(
[buffer_5, Conv_1],
"buffer_bind_scope",
tir.tvm_tuple(
((((bx * 4) + ty) * 2) + n_inner),
1,
tir.floordiv(bz, 14),
1,
tir.floormod(bz, 14),
1,
((((by * 2) + tz) * 4) + o_inner),
1,
0,
16,
0,
16,
dtype="handle",
),
)
tir.evaluate(
tir.tvm_store_matrix_sync(
buffer_4.data,
16,
16,
16,
tir.floordiv(buffer_4.elem_offset, 256),
tir.tvm_access_ptr(
tir.type_annotation(dtype="float32"),
buffer_5.data,
buffer_5.elem_offset,
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
def test_opt_conv_tensorcore_normalize():
mod = opt_conv_tensorcore_normalize
rt_mod = tvm.script.from_source(tvm.script.asscript(mod, True))
tvm.ir.assert_structural_equal(mod, rt_mod, True)
@tvm.script.tir
def opt_conv_tensorcore_lower(A: ty.handle, W: ty.handle, Conv: ty.handle) -> None:
# function attr dict
tir.func_attr({"global_symbol": "default_function", "tir.noalias": True})
# body
A_1 = tir.match_buffer(
A, [16, 14, 14, 16, 16, 16], dtype="float16", elem_offset=0, align=128, offset_factor=1
)
W_1 = tir.match_buffer(
W, [3, 3, 16, 32, 16, 16], dtype="float16", elem_offset=0, align=128, offset_factor=1
)
Conv_1 = tir.match_buffer(
Conv, [16, 14, 14, 32, 16, 16], elem_offset=0, align=128, offset_factor=1
)
bx = tir.env_thread("blockIdx.x")
by = tir.env_thread("blockIdx.y")
bz = tir.env_thread("blockIdx.z")
tx = tir.env_thread("threadIdx.x")
ty = tir.env_thread("threadIdx.y")
tz = tir.env_thread("threadIdx.z")
tir.launch_thread(bz, 196)
Conv_wmma_accumulator = tir.allocate([2048], "float32", "wmma.accumulator")
Apad_shared = tir.allocate([12288], "float16", "shared")
W_shared = tir.allocate([12288], "float16", "shared")
Apad_shared_wmma_matrix_a = tir.allocate([512], "float16", "wmma.matrix_a")
W_shared_wmma_matrix_b = tir.allocate([1024], "float16", "wmma.matrix_b")
tir.launch_thread(bx, 2)
tir.launch_thread(by, 4)
tir.launch_thread(ty, 4)
tir.launch_thread(tz, 2)
tir.evaluate(
tir.tvm_fill_fragment(Conv_wmma_accumulator, 16, 16, 16, 0, tir.float32(0), dtype="handle")
)
tir.evaluate(
tir.tvm_fill_fragment(Conv_wmma_accumulator, 16, 16, 16, 1, tir.float32(0), dtype="handle")
)
tir.evaluate(
tir.tvm_fill_fragment(Conv_wmma_accumulator, 16, 16, 16, 2, tir.float32(0), dtype="handle")
)
tir.evaluate(
tir.tvm_fill_fragment(Conv_wmma_accumulator, 16, 16, 16, 3, tir.float32(0), dtype="handle")
)
tir.evaluate(
tir.tvm_fill_fragment(Conv_wmma_accumulator, 16, 16, 16, 4, tir.float32(0), dtype="handle")
)
tir.evaluate(
tir.tvm_fill_fragment(Conv_wmma_accumulator, 16, 16, 16, 5, tir.float32(0), dtype="handle")
)
tir.evaluate(
tir.tvm_fill_fragment(Conv_wmma_accumulator, 16, 16, 16, 6, tir.float32(0), dtype="handle")
)
tir.evaluate(
tir.tvm_fill_fragment(Conv_wmma_accumulator, 16, 16, 16, 7, tir.float32(0), dtype="handle")
)
for ic_outer in tir.serial(0, 8):
for kh in tir.serial(0, 3):
for ax2 in tir.serial(0, 3):
with tir.launch_thread(tx, 32):
Apad_shared[
((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx)
] = tir.if_then_else(
(
(
(
(1 <= (tir.floordiv(bz, 14) + kh))
and ((tir.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + tir.floormod(bz, 14)))
)
and ((ax2 + tir.floormod(bz, 14)) < 15)
),
tir.load(
"float16",
A_1.data,
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61440
),
),
tir.float16(0),
dtype="float16",
)
with tir.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 32)
] = tir.if_then_else(
(
(
(
(1 <= (tir.floordiv(bz, 14) + kh))
and ((tir.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + tir.floormod(bz, 14)))
)
and ((ax2 + tir.floormod(bz, 14)) < 15)
),
tir.load(
"float16",
A_1.data,
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61408
),
),
tir.float16(0),
dtype="float16",
)
with tir.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 64)
] = tir.if_then_else(
(
(
(
(1 <= (tir.floordiv(bz, 14) + kh))
and ((tir.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + tir.floormod(bz, 14)))
)
and ((ax2 + tir.floormod(bz, 14)) < 15)
),
tir.load(
"float16",
A_1.data,
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61376
),
),
tir.float16(0),
dtype="float16",
)
with tir.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 96)
] = tir.if_then_else(
(
(
(
(1 <= (tir.floordiv(bz, 14) + kh))
and ((tir.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + tir.floormod(bz, 14)))
)
and ((ax2 + tir.floormod(bz, 14)) < 15)
),
tir.load(
"float16",
A_1.data,
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61344
),
),
tir.float16(0),
dtype="float16",
)
with tir.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 128)
] = tir.if_then_else(
(
(
(
(1 <= (tir.floordiv(bz, 14) + kh))
and ((tir.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + tir.floormod(bz, 14)))
)
and ((ax2 + tir.floormod(bz, 14)) < 15)
),
tir.load(
"float16",
A_1.data,
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61312
),
),
tir.float16(0),
dtype="float16",
)
with tir.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 160)
] = tir.if_then_else(
(
(
(
(1 <= (tir.floordiv(bz, 14) + kh))
and ((tir.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + tir.floormod(bz, 14)))
)
and ((ax2 + tir.floormod(bz, 14)) < 15)
),
tir.load(
"float16",
A_1.data,
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61280
),
),
tir.float16(0),
dtype="float16",
)
with tir.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 192)
] = tir.if_then_else(
(
(
(
(1 <= (tir.floordiv(bz, 14) + kh))
and ((tir.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + tir.floormod(bz, 14)))
)
and ((ax2 + tir.floormod(bz, 14)) < 15)
),
tir.load(
"float16",
A_1.data,
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61248
),
),
tir.float16(0),
dtype="float16",
)
with tir.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 224)
] = tir.if_then_else(
(
(
(
(1 <= (tir.floordiv(bz, 14) + kh))
and ((tir.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + tir.floormod(bz, 14)))
)
and ((ax2 + tir.floormod(bz, 14)) < 15)
),
tir.load(
"float16",
A_1.data,
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61216
),
),
tir.float16(0),
dtype="float16",
)
with tir.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 256)
] = tir.if_then_else(
(
(
(
(1 <= (tir.floordiv(bz, 14) + kh))
and ((tir.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + tir.floormod(bz, 14)))
)
and ((ax2 + tir.floormod(bz, 14)) < 15)
),
tir.load(
"float16",
A_1.data,
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61184
),
),
tir.float16(0),
dtype="float16",
)
with tir.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 288)
] = tir.if_then_else(
(
(
(
(1 <= (tir.floordiv(bz, 14) + kh))
and ((tir.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + tir.floormod(bz, 14)))
)
and ((ax2 + tir.floormod(bz, 14)) < 15)
),
tir.load(
"float16",
A_1.data,
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61152
),
),
tir.float16(0),
dtype="float16",
)
with tir.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 320)
] = tir.if_then_else(
(
(
(
(1 <= (tir.floordiv(bz, 14) + kh))
and ((tir.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + tir.floormod(bz, 14)))
)
and ((ax2 + tir.floormod(bz, 14)) < 15)
),
tir.load(
"float16",
A_1.data,
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61120
),
),
tir.float16(0),
dtype="float16",
)
with tir.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 352)
] = tir.if_then_else(
(
(
(
(1 <= (tir.floordiv(bz, 14) + kh))
and ((tir.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + tir.floormod(bz, 14)))
)
and ((ax2 + tir.floormod(bz, 14)) < 15)
),
tir.load(
"float16",
A_1.data,
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61088
),
),
tir.float16(0),
dtype="float16",
)
with tir.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 384)
] = tir.if_then_else(
(
(
(
(1 <= (tir.floordiv(bz, 14) + kh))
and ((tir.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + tir.floormod(bz, 14)))
)
and ((ax2 + tir.floormod(bz, 14)) < 15)
),
tir.load(
"float16",
A_1.data,
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61056
),
),
tir.float16(0),
dtype="float16",
)
with tir.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 416)
] = tir.if_then_else(
(
(
(
(1 <= (tir.floordiv(bz, 14) + kh))
and ((tir.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + tir.floormod(bz, 14)))
)
and ((ax2 + tir.floormod(bz, 14)) < 15)
),
tir.load(
"float16",
A_1.data,
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61024
),
),
tir.float16(0),
dtype="float16",
)
with tir.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 448)
] = tir.if_then_else(
(
(
(
(1 <= (tir.floordiv(bz, 14) + kh))
and ((tir.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + tir.floormod(bz, 14)))
)
and ((ax2 + tir.floormod(bz, 14)) < 15)
),
tir.load(
"float16",
A_1.data,
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 60992
),
),
tir.float16(0),
dtype="float16",
)
tir.launch_thread(tx, 32)
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 480)
] = tir.if_then_else(
(
(
(
(1 <= (tir.floordiv(bz, 14) + kh))
and ((tir.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + tir.floormod(bz, 14)))
)
and ((ax2 + tir.floormod(bz, 14)) < 15)
),
tir.load(
"float16",
A_1.data,
(
(
(
(
(
(
(((bx * 6422528) + (ty * 1605632)) + (tz * 802816))
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 60960
),
),
tir.float16(0),
dtype="float16",
)
with tir.launch_thread(tx, 32):
tir.store(
W_shared,
tir.ramp((((ty * 512) + (tz * 256)) + (tx * 8)), 1, 8),
tir.load(
"float16x8",
W_1.data,
tir.ramp(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
),
1,
8,
),
tir.broadcast(True, 8),
),
tir.broadcast(True, 8),
)
with tir.launch_thread(tx, 32):
tir.store(
W_shared,
tir.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 2048), 1, 8),
tir.load(
"float16x8",
W_1.data,
tir.ramp(
(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
)
+ 8192
),
1,
8,
),
tir.broadcast(True, 8),
),
tir.broadcast(True, 8),
)
with tir.launch_thread(tx, 32):
tir.store(
W_shared,
tir.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 4096), 1, 8),
tir.load(
"float16x8",
W_1.data,
tir.ramp(
(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
)
+ 131072
),
1,
8,
),
tir.broadcast(True, 8),
),
tir.broadcast(True, 8),
)
with tir.launch_thread(tx, 32):
tir.store(
W_shared,
tir.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 6144), 1, 8),
tir.load(
"float16x8",
W_1.data,
tir.ramp(
(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
)
+ 139264
),
1,
8,
),
tir.broadcast(True, 8),
),
tir.broadcast(True, 8),
)
with tir.launch_thread(tx, 32):
tir.store(
W_shared,
tir.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 8192), 1, 8),
tir.load(
"float16x8",
W_1.data,
tir.ramp(
(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
)
+ 262144
),
1,
8,
),
tir.broadcast(True, 8),
),
tir.broadcast(True, 8),
)
with tir.launch_thread(tx, 32):
tir.store(
W_shared,
tir.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 10240), 1, 8),
tir.load(
"float16x8",
W_1.data,
tir.ramp(
(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
)
+ 270336
),
1,
8,
),
tir.broadcast(True, 8),
),
tir.broadcast(True, 8),
)
for ic_inner in tir.serial(0, 2):
for kw in tir.serial(0, 3):
tir.evaluate(
tir.tvm_load_matrix_sync(
Apad_shared_wmma_matrix_a,
16,
16,
16,
0,
tir.tvm_access_ptr(
tir.type_annotation(dtype="float16"),
Apad_shared,
(((ty * 3072) + (kw * 512)) + (ic_inner * 256)),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
tir.evaluate(
tir.tvm_load_matrix_sync(
Apad_shared_wmma_matrix_a,
16,
16,
16,
1,
tir.tvm_access_ptr(
tir.type_annotation(dtype="float16"),
Apad_shared,
((((ty * 3072) + (kw * 512)) + (ic_inner * 256)) + 1536),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
tir.evaluate(
tir.tvm_load_matrix_sync(
W_shared_wmma_matrix_b,
16,
16,
16,
0,
tir.tvm_access_ptr(
tir.type_annotation(dtype="float16"),
W_shared,
(((kw * 4096) + (ic_inner * 2048)) + (tz * 1024)),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
tir.evaluate(
tir.tvm_load_matrix_sync(
W_shared_wmma_matrix_b,
16,
16,
16,
1,
tir.tvm_access_ptr(
tir.type_annotation(dtype="float16"),
W_shared,
((((kw * 4096) + (ic_inner * 2048)) + (tz * 1024)) + 256),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
tir.evaluate(
tir.tvm_load_matrix_sync(
W_shared_wmma_matrix_b,
16,
16,
16,
2,
tir.tvm_access_ptr(
tir.type_annotation(dtype="float16"),
W_shared,
((((kw * 4096) + (ic_inner * 2048)) + (tz * 1024)) + 512),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
tir.evaluate(
tir.tvm_load_matrix_sync(
W_shared_wmma_matrix_b,
16,
16,
16,
3,
tir.tvm_access_ptr(
tir.type_annotation(dtype="float16"),
W_shared,
((((kw * 4096) + (ic_inner * 2048)) + (tz * 1024)) + 768),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
tir.evaluate(
tir.tvm_mma_sync(
Conv_wmma_accumulator,
0,
Apad_shared_wmma_matrix_a,
0,
W_shared_wmma_matrix_b,
0,
Conv_wmma_accumulator,
0,
dtype="handle",
)
)
tir.evaluate(
tir.tvm_mma_sync(
Conv_wmma_accumulator,
1,
Apad_shared_wmma_matrix_a,
0,
W_shared_wmma_matrix_b,
1,
Conv_wmma_accumulator,
1,
dtype="handle",
)
)
tir.evaluate(
tir.tvm_mma_sync(
Conv_wmma_accumulator,
2,
Apad_shared_wmma_matrix_a,
0,
W_shared_wmma_matrix_b,
2,
Conv_wmma_accumulator,
2,
dtype="handle",
)
)
tir.evaluate(
tir.tvm_mma_sync(
Conv_wmma_accumulator,
3,
Apad_shared_wmma_matrix_a,
0,
W_shared_wmma_matrix_b,
3,
Conv_wmma_accumulator,
3,
dtype="handle",
)
)
tir.evaluate(
tir.tvm_mma_sync(
Conv_wmma_accumulator,
4,
Apad_shared_wmma_matrix_a,
1,
W_shared_wmma_matrix_b,
0,
Conv_wmma_accumulator,
4,
dtype="handle",
)
)
tir.evaluate(
tir.tvm_mma_sync(
Conv_wmma_accumulator,
5,
Apad_shared_wmma_matrix_a,
1,
W_shared_wmma_matrix_b,
1,
Conv_wmma_accumulator,
5,
dtype="handle",
)
)
tir.evaluate(
tir.tvm_mma_sync(
Conv_wmma_accumulator,
6,
Apad_shared_wmma_matrix_a,
1,
W_shared_wmma_matrix_b,
2,
Conv_wmma_accumulator,
6,
dtype="handle",
)
)
tir.evaluate(
tir.tvm_mma_sync(
Conv_wmma_accumulator,
7,
Apad_shared_wmma_matrix_a,
1,
W_shared_wmma_matrix_b,
3,
Conv_wmma_accumulator,
7,
dtype="handle",
)
)
tir.evaluate(
tir.tvm_store_matrix_sync(
Conv_wmma_accumulator,
16,
16,
16,
0,
tir.tvm_access_ptr(
tir.type_annotation(dtype="float32"),
Conv_1.data,
(((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048)) + (tz * 1024)),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
tir.evaluate(
tir.tvm_store_matrix_sync(
Conv_wmma_accumulator,
16,
16,
16,
1,
tir.tvm_access_ptr(
tir.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 256
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
tir.evaluate(
tir.tvm_store_matrix_sync(
Conv_wmma_accumulator,
16,
16,
16,
2,
tir.tvm_access_ptr(
tir.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 512
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
tir.evaluate(
tir.tvm_store_matrix_sync(
Conv_wmma_accumulator,
16,
16,
16,
3,
tir.tvm_access_ptr(
tir.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 768
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
tir.evaluate(
tir.tvm_store_matrix_sync(
Conv_wmma_accumulator,
16,
16,
16,
4,
tir.tvm_access_ptr(
tir.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 1605632
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
tir.evaluate(
tir.tvm_store_matrix_sync(
Conv_wmma_accumulator,
16,
16,
16,
5,
tir.tvm_access_ptr(
tir.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 1605888
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
tir.evaluate(
tir.tvm_store_matrix_sync(
Conv_wmma_accumulator,
16,
16,
16,
6,
tir.tvm_access_ptr(
tir.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 1606144
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
tir.evaluate(
tir.tvm_store_matrix_sync(
Conv_wmma_accumulator,
16,
16,
16,
7,
tir.tvm_access_ptr(
tir.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 1606400
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
def test_opt_conv_tensorcore_lower():
mod = opt_conv_tensorcore_lower
rt_mod = tvm.script.from_source(tvm.script.asscript(mod, True))
tvm.ir.assert_structural_equal(mod, rt_mod, True)
@tvm.script.tir
def opt_conv_tensorcore_mod_host(
args: ty.handle,
arg_type_ids: ty.handle,
num_args: ty.int32,
out_ret_value: ty.handle,
out_ret_tcode: ty.handle,
resource_handle: ty.handle,
) -> ty.int32:
# function attr dict
tir.func_attr(
{
"tir.noalias": True,
"global_symbol": "default_function",
"tir.is_entry_func": True,
"calling_conv": 1,
}
)
# body
stack_tcode: ty.handle = tir.tvm_stack_alloca("arg_tcode", 10, dtype="handle")
stack_value: ty.handle = tir.tvm_stack_alloca("arg_value", 10, dtype="handle")
assert num_args == 3, "default_function: num_args should be 3"
arg0: ty.handle = tir.tvm_struct_get(args, 0, 12, dtype="handle")
arg0_code: ty.int32 = tir.load("int32", arg_type_ids, 0)
arg1: ty.handle = tir.tvm_struct_get(args, 1, 12, dtype="handle")
arg1_code: ty.int32 = tir.load("int32", arg_type_ids, 1)
arg2: ty.handle = tir.tvm_struct_get(args, 2, 12, dtype="handle")
arg2_code: ty.int32 = tir.load("int32", arg_type_ids, 2)
A: ty.handle = tir.tvm_struct_get(arg0, 0, 1, dtype="handle")
tir.attr(A, "storage_alignment", 128)
arg0_shape: ty.handle = tir.tvm_struct_get(arg0, 0, 2, dtype="handle")
arg0_strides: ty.handle = tir.tvm_struct_get(arg0, 0, 3, dtype="handle")
dev_id: ty.int32 = tir.tvm_struct_get(arg0, 0, 9, dtype="int32")
W: ty.handle = tir.tvm_struct_get(arg1, 0, 1, dtype="handle")
tir.attr(W, "storage_alignment", 128)
arg1_shape: ty.handle = tir.tvm_struct_get(arg1, 0, 2, dtype="handle")
arg1_strides: ty.handle = tir.tvm_struct_get(arg1, 0, 3, dtype="handle")
Conv: ty.handle = tir.tvm_struct_get(arg2, 0, 1, dtype="handle")
tir.attr(Conv, "storage_alignment", 128)
arg2_shape: ty.handle = tir.tvm_struct_get(arg2, 0, 2, dtype="handle")
arg2_strides: ty.handle = tir.tvm_struct_get(arg2, 0, 3, dtype="handle")
assert (((arg0_code == 3) or (arg0_code == 13)) or (arg0_code == 7)) or (
arg0_code == 4
), "default_function: Expect arg[0] to be pointer"
assert (((arg1_code == 3) or (arg1_code == 13)) or (arg1_code == 7)) or (
arg1_code == 4
), "default_function: Expect arg[1] to be pointer"
assert (((arg2_code == 3) or (arg2_code == 13)) or (arg2_code == 7)) or (
arg2_code == 4
), "default_function: Expect arg[2] to be pointer"
assert 6 == tir.tvm_struct_get(arg0, 0, 4, dtype="int32"), "arg0.ndim is expected to equal 6"
assert 6 == tir.tvm_struct_get(arg0, 0, 4, dtype="int32"), "arg0.ndim is expected to equal 6"
assert (
(tir.tvm_struct_get(arg0, 0, 5, dtype="uint8") == tir.uint8(2))
and (tir.tvm_struct_get(arg0, 0, 6, dtype="uint8") == tir.uint8(16))
) and (
tir.tvm_struct_get(arg0, 0, 7, dtype="uint16") == tir.uint16(1)
), "arg0.dtype is expected to be float16"
assert 16 == tir.cast(
tir.load("int64", arg0_shape, 0), "int32"
), "Argument arg0.shape[0] has an unsatisfied constraint"
assert 14 == tir.cast(
tir.load("int64", arg0_shape, 1), "int32"
), "Argument arg0.shape[1] has an unsatisfied constraint"
assert 14 == tir.cast(
tir.load("int64", arg0_shape, 2), "int32"
), "Argument arg0.shape[2] has an unsatisfied constraint"
assert 16 == tir.cast(
tir.load("int64", arg0_shape, 3), "int32"
), "Argument arg0.shape[3] has an unsatisfied constraint"
assert 16 == tir.cast(
tir.load("int64", arg0_shape, 4), "int32"
), "Argument arg0.shape[4] has an unsatisfied constraint"
assert 16 == tir.cast(
tir.load("int64", arg0_shape, 5), "int32"
), "Argument arg0.shape[5] has an unsatisfied constraint"
if not (tir.isnullptr(arg0_strides, dtype="bool")):
assert (
(
(
(
(1 == tir.cast(tir.load("int64", arg0_strides, 5), "int32"))
and (16 == tir.cast(tir.load("int64", arg0_strides, 4), "int32"))
)
and (256 == tir.cast(tir.load("int64", arg0_strides, 3), "int32"))
)
and (4096 == tir.cast(tir.load("int64", arg0_strides, 2), "int32"))
)
and (57344 == tir.cast(tir.load("int64", arg0_strides, 1), "int32"))
) and (
802816 == tir.cast(tir.load("int64", arg0_strides, 0), "int32")
), "arg0.strides: expected to be compact array"
tir.evaluate(0)
assert tir.uint64(0) == tir.tvm_struct_get(
arg0, 0, 8, dtype="uint64"
), "Argument arg0.byte_offset has an unsatisfied constraint"
assert 2 == tir.tvm_struct_get(
arg0, 0, 10, dtype="int32"
), "Argument arg0.device_type has an unsatisfied constraint"
assert 6 == tir.tvm_struct_get(arg1, 0, 4, dtype="int32"), "arg1.ndim is expected to equal 6"
assert 6 == tir.tvm_struct_get(arg1, 0, 4, dtype="int32"), "arg1.ndim is expected to equal 6"
assert (
(tir.tvm_struct_get(arg1, 0, 5, dtype="uint8") == tir.uint8(2))
and (tir.tvm_struct_get(arg1, 0, 6, dtype="uint8") == tir.uint8(16))
) and (
tir.tvm_struct_get(arg1, 0, 7, dtype="uint16") == tir.uint16(1)
), "arg1.dtype is expected to be float16"
assert 3 == tir.cast(
tir.load("int64", arg1_shape, 0), "int32"
), "Argument arg1.shape[0] has an unsatisfied constraint"
assert 3 == tir.cast(
tir.load("int64", arg1_shape, 1), "int32"
), "Argument arg1.shape[1] has an unsatisfied constraint"
assert 16 == tir.cast(
tir.load("int64", arg1_shape, 2), "int32"
), "Argument arg1.shape[2] has an unsatisfied constraint"
assert 32 == tir.cast(
tir.load("int64", arg1_shape, 3), "int32"
), "Argument arg1.shape[3] has an unsatisfied constraint"
assert 16 == tir.cast(
tir.load("int64", arg1_shape, 4), "int32"
), "Argument arg1.shape[4] has an unsatisfied constraint"
assert 16 == tir.cast(
tir.load("int64", arg1_shape, 5), "int32"
), "Argument arg1.shape[5] has an unsatisfied constraint"
if not (tir.isnullptr(arg1_strides, dtype="bool")):
assert (
(
(
(
(1 == tir.cast(tir.load("int64", arg1_strides, 5), "int32"))
and (16 == tir.cast(tir.load("int64", arg1_strides, 4), "int32"))
)
and (256 == tir.cast(tir.load("int64", arg1_strides, 3), "int32"))
)
and (8192 == tir.cast(tir.load("int64", arg1_strides, 2), "int32"))
)
and (131072 == tir.cast(tir.load("int64", arg1_strides, 1), "int32"))
) and (
393216 == tir.cast(tir.load("int64", arg1_strides, 0), "int32")
), "arg1.strides: expected to be compact array"
tir.evaluate(0)
assert tir.uint64(0) == tir.tvm_struct_get(
arg1, 0, 8, dtype="uint64"
), "Argument arg1.byte_offset has an unsatisfied constraint"
assert 2 == tir.tvm_struct_get(
arg1, 0, 10, dtype="int32"
), "Argument arg1.device_type has an unsatisfied constraint"
assert dev_id == tir.tvm_struct_get(
arg1, 0, 9, dtype="int32"
), "Argument arg1.device_id has an unsatisfied constraint"
assert 6 == tir.tvm_struct_get(arg2, 0, 4, dtype="int32"), "arg2.ndim is expected to equal 6"
assert 6 == tir.tvm_struct_get(arg2, 0, 4, dtype="int32"), "arg2.ndim is expected to equal 6"
assert (
(tir.tvm_struct_get(arg2, 0, 5, dtype="uint8") == tir.uint8(2))
and (tir.tvm_struct_get(arg2, 0, 6, dtype="uint8") == tir.uint8(32))
) and (
tir.tvm_struct_get(arg2, 0, 7, dtype="uint16") == tir.uint16(1)
), "arg2.dtype is expected to be float32"
assert 16 == tir.cast(
tir.load("int64", arg2_shape, 0), "int32"
), "Argument arg2.shape[0] has an unsatisfied constraint"
assert 14 == tir.cast(
tir.load("int64", arg2_shape, 1), "int32"
), "Argument arg2.shape[1] has an unsatisfied constraint"
assert 14 == tir.cast(
tir.load("int64", arg2_shape, 2), "int32"
), "Argument arg2.shape[2] has an unsatisfied constraint"
assert 32 == tir.cast(
tir.load("int64", arg2_shape, 3), "int32"
), "Argument arg2.shape[3] has an unsatisfied constraint"
assert 16 == tir.cast(
tir.load("int64", arg2_shape, 4), "int32"
), "Argument arg2.shape[4] has an unsatisfied constraint"
assert 16 == tir.cast(
tir.load("int64", arg2_shape, 5), "int32"
), "Argument arg2.shape[5] has an unsatisfied constraint"
if not (tir.isnullptr(arg2_strides, dtype="bool")):
assert (
(
(
(
(1 == tir.cast(tir.load("int64", arg2_strides, 5), "int32"))
and (16 == tir.cast(tir.load("int64", arg2_strides, 4), "int32"))
)
and (256 == tir.cast(tir.load("int64", arg2_strides, 3), "int32"))
)
and (8192 == tir.cast(tir.load("int64", arg2_strides, 2), "int32"))
)
and (114688 == tir.cast(tir.load("int64", arg2_strides, 1), "int32"))
) and (
1605632 == tir.cast(tir.load("int64", arg2_strides, 0), "int32")
), "arg2.strides: expected to be compact array"
tir.evaluate(0)
assert tir.uint64(0) == tir.tvm_struct_get(
arg2, 0, 8, dtype="uint64"
), "Argument arg2.byte_offset has an unsatisfied constraint"
assert 2 == tir.tvm_struct_get(
arg2, 0, 10, dtype="int32"
), "Argument arg2.device_type has an unsatisfied constraint"
assert dev_id == tir.tvm_struct_get(
arg2, 0, 9, dtype="int32"
), "Argument arg2.device_id has an unsatisfied constraint"
tir.evaluate(tir.tvm_struct_set(stack_value, 0, 12, tir.cast(2, "int64"), dtype="int32"))
stack_tcode[0] = 0
tir.evaluate(tir.tvm_struct_set(stack_value, 1, 12, tir.cast(dev_id, "int64"), dtype="int32"))
stack_tcode[1] = 0
tir.evaluate(
tir.tvm_call_packed_lowered(
"__tvm_set_device", stack_value, stack_tcode, 0, 2, dtype="int32"
)
)
tir.attr(0, "compute_scope", "default_function_compute_")
tir.evaluate(tir.tvm_struct_set(stack_value, 0, 12, A, dtype="int32"))
stack_tcode[0] = 3
tir.evaluate(tir.tvm_struct_set(stack_value, 1, 12, W, dtype="int32"))
stack_tcode[1] = 3
tir.evaluate(tir.tvm_struct_set(stack_value, 2, 12, Conv, dtype="int32"))
stack_tcode[2] = 3
tir.evaluate(tir.tvm_struct_set(stack_value, 3, 12, tir.cast(196, "int64"), dtype="int32"))
stack_tcode[3] = 0
tir.evaluate(tir.tvm_struct_set(stack_value, 4, 12, tir.cast(2, "int64"), dtype="int32"))
stack_tcode[4] = 0
tir.evaluate(tir.tvm_struct_set(stack_value, 5, 12, tir.cast(4, "int64"), dtype="int32"))
stack_tcode[5] = 0
tir.evaluate(tir.tvm_struct_set(stack_value, 6, 12, tir.cast(4, "int64"), dtype="int32"))
stack_tcode[6] = 0
tir.evaluate(tir.tvm_struct_set(stack_value, 7, 12, tir.cast(2, "int64"), dtype="int32"))
stack_tcode[7] = 0
tir.evaluate(tir.tvm_struct_set(stack_value, 8, 12, tir.cast(32, "int64"), dtype="int32"))
stack_tcode[8] = 0
tir.evaluate(
tir.tvm_call_packed_lowered(
"default_function_kernel0", stack_value, stack_tcode, 0, 9, dtype="int32"
)
)
def test_opt_conv_tensorcore_mod_host():
mod = opt_conv_tensorcore_mod_host
rt_mod = tvm.script.from_source(tvm.script.asscript(mod, True))
tvm.ir.assert_structural_equal(mod, rt_mod, True)
@tvm.script.tir
def vthread_func(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (16, 16), "float32")
C = tir.match_buffer(c, (16, 16), "float32")
i0 = tir.env_thread("blockIdx.x")
i1 = tir.env_thread("threadIdx.x")
i2 = tir.env_thread("vthread")
tir.launch_thread(i0, 4)
tir.launch_thread(i1, 2)
tir.launch_thread(i2, 2)
B = tir.allocate([16], "float32", "local")
for j in range(16):
B[j] = tir.load("float32", A.data, i0 * 64 + i1 * 32 + i2 * 16 + j) + tir.float32(1)
for j in range(16):
C.data[i0 * 64 + i1 * 32 + i2 * 16 + j] = tir.load("float32", B, j) * tir.float32(2)
def test_vthread():
func = vthread_func
rt_func = tvm.script.from_source(tvm.script.asscript(func, True))
tvm.ir.assert_structural_equal(func, rt_func, True)
@tvm.script.tir
def matmul(a: ty.handle, b: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, [128, 128])
B = tir.match_buffer(b, [128, 128])
C = tir.match_buffer(c, [128, 128])
with tir.block([128, 128, tir.reduce_axis(0, 128)], "update") as [vi, vj, vk]:
with tir.init():
C[vi, vj] = tir.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@tvm.script.tir
def matmul_original(a: ty.handle, b: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, [128, 128])
B = tir.match_buffer(b, [128, 128])
C = tir.match_buffer(c, [128, 128])
for i, j in tir.grid(128, 128):
with tir.block([128, 128], "init") as [vi, vj]:
C[vi, vj] = tir.float32(0)
for k in range(128):
with tir.block([128, 128, tir.reduce_axis(0, 128)], "update") as [vi, vj, vk]:
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@tvm.script.tir
def element_wise(a: ty.handle, c: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128), "float32")
C = tir.match_buffer(c, (128, 128), "float32")
B = tir.alloc_buffer((128, 128), "float32")
with tir.block([128, 128], "B") as [vi, vj]:
B[vi, vj] = A[vi, vj] * tir.float32(2)
with tir.block([128, 128], "C") as [vi, vj]:
C[vi, vj] = B[vi, vj] + tir.float32(1)
@tvm.script.tir
def predicate(b: ty.handle, c: ty.handle) -> None:
B = tir.match_buffer(b, (16, 16), "float32")
C = tir.match_buffer(c, (16, 16), "float32")
for i, jo, ji in tir.grid(16, 4, 5):
with tir.block([16, 16], "update") as [vi, vj]:
tir.bind(vi, i)
tir.bind(vj, jo * 4 + ji)
tir.where(jo * 4 + ji < 16)
C[vi, vj] = B[vi, vj] + tir.float32(1)
def test_module_define():
func1 = tvm.script.create_module({"matmul": matmul})["matmul"]
func2 = tvm.script.create_module({"element_wise": element_wise})["element_wise"]
func3 = tvm.script.create_module({"predicate": predicate})["predicate"]
mod1 = tvm.script.create_module({"func1": func1, "func2": func2, "func3": func3})
mod2 = tvm.script.create_module({"func1": matmul, "func2": element_wise, "func3": predicate})
tvm.ir.assert_structural_equal(mod1, mod2)
def test_matmul():
func = matmul
rt_func = tvm.script.from_source(tvm.script.asscript(func, True))
tvm.ir.assert_structural_equal(func, rt_func)
def test_matmul_original():
func = matmul_original
rt_func = tvm.script.from_source(tvm.script.asscript(func, True))
tvm.ir.assert_structural_equal(func, rt_func)
assert isinstance(rt_func.body.block, tir.stmt.Block)
assert isinstance(rt_func.body.block.body, tir.stmt.For)
assert isinstance(rt_func.body.block.body.body, tir.stmt.For)
assert isinstance(rt_func.body.block.body.body.body, tir.stmt.SeqStmt)
assert isinstance(rt_func.body.block.body.body.body[0].block, tir.stmt.Block)
assert isinstance(rt_func.body.block.body.body.body[1], tir.stmt.For)
assert isinstance(rt_func.body.block.body.body.body[1].body.block, tir.stmt.Block)
def test_element_wise():
func = element_wise
rt_func = tvm.script.from_source(tvm.script.asscript(func, True))
tvm.ir.assert_structural_equal(func, rt_func)
assert isinstance(rt_func.body.block, tir.stmt.Block)
assert isinstance(rt_func.body.block.body, tir.stmt.SeqStmt)
assert isinstance(rt_func.body.block.body[0], tir.stmt.For)
assert isinstance(rt_func.body.block.body[0].body, tir.stmt.For)
assert isinstance(rt_func.body.block.body[0].body.body.block, tir.stmt.Block)
assert isinstance(rt_func.body.block.body[1], tir.stmt.For)
assert isinstance(rt_func.body.block.body[1].body, tir.stmt.For)
assert isinstance(rt_func.body.block.body[1].body.body.block, tir.stmt.Block)
def test_predicate():
func = predicate
rt_func = tvm.script.from_source(tvm.script.asscript(func, True))
tvm.ir.assert_structural_equal(func, rt_func)
assert isinstance(rt_func.body.block, tir.stmt.Block)
assert isinstance(rt_func.body.block.body, tir.stmt.For)
assert isinstance(rt_func.body.block.body.body, tir.stmt.For)
assert isinstance(rt_func.body.block.body.body.body, tir.stmt.For)
assert isinstance(rt_func.body.block.body.body.body.body.block, tir.stmt.Block)
@tvm.script.tir
def for_thread_binding(a: ty.handle, b: ty.handle) -> None:
A = tir.match_buffer(a, (16, 16), "float32")
B = tir.match_buffer(b, (16, 16), "float32")
for i in tir.thread_binding(0, 16, thread="threadIdx.x"):
for j in tir.thread_binding(
0, 16, thread="threadIdx.y", annotations={"attr_key": "attr_value"}
):
A[i, j] = B[i, j] + tir.float32(1)
def test_for_thread_binding():
func = for_thread_binding
rt_func = tvm.script.from_source(tvm.script.asscript(func, True))
tvm.ir.assert_structural_equal(func, rt_func)
assert isinstance(rt_func.body, tir.stmt.For)
assert rt_func.body.kind == 4
assert rt_func.body.thread_binding.thread_tag == "threadIdx.x"
assert isinstance(rt_func.body.body, tir.stmt.For)
assert rt_func.body.body.kind == 4
assert rt_func.body.body.thread_binding.thread_tag == "threadIdx.y"
assert rt_func.body.body.annotations["attr_key"] == "attr_value"
@tvm.script.tir
def match_buffer_region(a: ty.handle, b: ty.handle) -> None:
A = tir.match_buffer(a, (16, 16, 16), "float32")
B = tir.match_buffer(b, (1), "float32")
with tir.block([16, 4]) as [vi, vj]:
C = tir.match_buffer(A[0:16, vi, vj * 4 : vj * 4 + 4], (16, 1, 4))
with tir.block([4]) as [vii]:
D = tir.match_buffer(C[vii * 4 : vii * 4 + 4, 0, 0:4], (4, 1, 4))
for i, j in tir.grid(4, 4):
B[0] += D[i, 0, j]
def test_match_buffer_region():
func = match_buffer_region
rt_func = tvm.script.from_source(tvm.script.asscript(func, True))
tvm.ir.assert_structural_equal(func, rt_func)
assert isinstance(rt_func.body, tir.stmt.BlockRealize)
root = rt_func.body.block
assert isinstance(root.body, tir.stmt.For)
assert isinstance(root.body.body, tir.stmt.For)
assert isinstance(root.body.body.body, tir.stmt.BlockRealize)
outer_block = root.body.body.body.block
assert len(outer_block.match_buffers) == 1
buffer_C = outer_block.match_buffers[0].buffer
tvm.ir.assert_structural_equal(buffer_C.shape, [16, 1, 4])
assert isinstance(outer_block.body, tir.stmt.For)
assert isinstance(outer_block.body.body, tir.stmt.BlockRealize)
inner_block = outer_block.body.body.block
assert len(inner_block.match_buffers) == 1
buffer_D = inner_block.match_buffers[0].buffer
tvm.ir.assert_structural_equal(buffer_D.shape, [4, 1, 4])
@tvm.script.tir
def block_elements(a: ty.handle, b: ty.handle) -> None:
A = tir.match_buffer(a, (16, 16), "float32")
B = tir.match_buffer(b, (1, 1), "float32")
with tir.block([1], "update") as [vi]:
tir.bind(vi, 0)
tir.where(True)
tir.reads(A[0:16, 0:16])
tir.writes(B[0, 0])
tir.block_attr({"attr_key": "attr_value"})
C = tir.alloc_buffer((4, 4), dtype="float32")
D = tir.match_buffer(A[0:4, 0], (4, 1))
with tir.init():
B[0, 0] = tir.float32(0)
B[0, 0] = A[0, 0] + B[0, 0] + C[1, 1] + D[2]
def test_block_elements():
func = block_elements
rt_func = tvm.script.from_source(tvm.script.asscript(func, True))
tvm.ir.assert_structural_equal(func, rt_func)
assert isinstance(rt_func.body.block, tir.stmt.Block)
assert isinstance(rt_func.body.block.body, tir.stmt.BlockRealize)
assert isinstance(rt_func.body.block.body.block, tir.stmt.Block)
block = rt_func.body.block.body.block
assert isinstance(block.body, tir.stmt.BufferStore)
assert isinstance(block.init, tir.stmt.BufferStore)
assert len(block.annotations) == 1
assert block.annotations["attr_key"] == "attr_value"
@tvm.script.tir
def opaque_block(a: ty.handle, b: ty.handle) -> None:
A = tir.match_buffer(a, (16, 16), "float32")
B = tir.match_buffer(b, (16, 16), "float32")
for i in range(16):
for j in range(16):
with tir.block([]):
tir.reads([])
tir.writes(A[i, j])
A[i, j] = tir.float32(0)
with tir.block([]):
tir.reads([A[i, 0:16]])
tir.writes([B[i, 0:16]])
for j in range(16):
B[i, j] = A[i, j]
def test_opaque_block():
func = opaque_block
rt_func = tvm.script.from_source(tvm.script.asscript(func, True))
tvm.ir.assert_structural_equal(func, rt_func)
root_block = rt_func.body.block
assert isinstance(root_block, tir.stmt.Block)
assert isinstance(root_block.body, tir.stmt.For)
assert isinstance(root_block.body.body[0], tir.stmt.For)
assert isinstance(root_block.body.body[0].body, tir.stmt.BlockRealize)
assert isinstance(root_block.body.body[0].body.block, tir.stmt.Block)
assert len(root_block.body.body[0].body.block.iter_vars) == 0
assert isinstance(root_block.body.body[1], tir.stmt.BlockRealize)
assert isinstance(root_block.body.body[1].block, tir.stmt.Block)
assert len(root_block.body.body[1].block.iter_vars) == 0
@tvm.script.tir
def rank0(a: ty.handle) -> None:
A = tir.match_buffer(a, (), "float32")
B = tir.alloc_buffer((), "float32")
A[()] = 2
B[()] = A[()]
def test_rank0_buffers():
func = rank0
rt_func = tvm.script.from_source(tvm.script.asscript(func, True))
tvm.ir.assert_structural_equal(func, rt_func)
@tvm.script.tir
def rank0_block(a: ty.handle) -> None:
A = tir.match_buffer(a, (), "float32")
B = tir.alloc_buffer((), "float32")
tir.store(B.data, 0, tir.load("float32", A.data, 0))
with tir.block([], "update") as []:
tir.reads([A[()]])
tir.writes([B[()]])
for i in range(1):
B[()] = A[()]
def test_rank0_blocks():
func = rank0_block
rt_func = tvm.script.from_source(tvm.script.asscript(func, True))
tvm.ir.assert_structural_equal(func, rt_func)
@tvm.script.tir
def select(a: ty.handle) -> None:
A = tir.match_buffer(a, (), "float32")
A[()] = tir.Select(True, 1, 2)
def test_select():
func = select
rt_func = tvm.script.from_source(tvm.script.asscript(func, True))
tvm.ir.assert_structural_equal(func, rt_func)
@tvm.script.tir
def minmax(a: ty.handle) -> None:
A = tir.match_buffer(a, (), "float32")
A[()] = tir.min(1, 2)
A[()] = tir.max(1, 2)
def test_minmax():
func = minmax
rt_func = tvm.script.from_source(tvm.script.asscript(func, True))
tvm.ir.assert_structural_equal(func, rt_func)
@tvm.script.tir
def abs(a: ty.handle) -> None:
A = tir.match_buffer(a, (128, 128), "float32")
with tir.block([128, 128], "A") as [vi, vj]:
A[vi, vj] = tir.abs(A[vi, vj])
def test_abs():
func = abs
rt_func = tvm.script.from_source(tvm.script.asscript(func, True))
tvm.ir.assert_structural_equal(func, rt_func)
@tvm.script.tir
def constant_folding(a: ty.handle) -> None:
A = tir.match_buffer(a, (), "float32")
A[()] = tir.min(2.2, 5.2)
A[()] = tir.max(tir.float32(2.2), tir.float32(tir.float32(5.2)))
A[()] = tir.min(2.2, 5.0)
def test_constant_folding():
func = constant_folding
rt_func = tvm.script.from_source(tvm.script.asscript(func, True))
tvm.ir.assert_structural_equal(func, rt_func)
@tvm.script.tir
def simplify_bracket() -> None:
a = tir.var("int32")
b = tir.var("int32")
c = tir.var("int32")
d = tir.var("int32")
tir.evaluate(a + b * (c + d))
def test_simplify_bracket():
func = simplify_bracket
out_str = tvm.script.asscript(func, True)
assert out_str.count("a + b*(c + d)") == 1
@tvm.script.tir
def var_with_same_name(a: ty.handle) -> None:
A = tir.match_buffer(a, (16, 16), "float32")
with tir.block([16, 16]) as [vi, vj]:
A[vi, vj] = 0
with tir.block([16, 16]) as [vi, vj]:
A[vi, vj] = 0
for i, j in tir.grid(16, 16):
with tir.block([16, 16]) as [vi, vj]:
A[vi, vj] = 0
for i, j in tir.grid(16, 16):
with tir.block([16, 16]) as [vi, vj]:
A[vi, vj] = 0
def test_same_name_var():
func = var_with_same_name
out_str = tvm.script.asscript(func, True)
rt_func = tvm.script.from_source(out_str)
tvm.ir.assert_structural_equal(func, rt_func)
assert out_str.count("with tir.block([16, 16]) as [vi, vj]") == 4
assert out_str.find("vi_") == -1
assert out_str.find("vj_") == -1
assert out_str.count("for i0, i1 in tir.grid(16, 16)") == 2
assert out_str.find("i0_") == -1
assert out_str.find("i1_") == -1
assert out_str.count("for i, j in tir.grid(16, 16)") == 2
assert out_str.find("i_") == -1
assert out_str.find("i_") == -1
@tvm.script.tir
def while_loop(a: ty.handle, b: ty.handle) -> None:
A = tir.match_buffer(a, (16,), "float32")
B = tir.match_buffer(b, (16,), "float32")
i = tir.alloc_buffer((), "int32", scope="local")
with tir.block([16]) as [vi]:
B[vi] = 0
while i[()] < 10:
for j in range(16):
B[j] += A[j]
def test_while_loop():
rt_func = tvm.script.from_source(tvm.script.asscript(while_loop, True))
tvm.ir.assert_structural_equal(while_loop, rt_func)
if __name__ == "__main__":
sys.exit(pytest.main([__file__] + sys.argv[1:]))
|
[
"noreply@github.com"
] |
elphinkuo.noreply@github.com
|
854fa51141498bed3aae4d718f526074f037de86
|
a3bb788789181c7fa7b7dec1f997425334437087
|
/tests/test_logo_into_svg.py
|
469645e5aa632fb590b7cc0f997b540c508acd8b
|
[
"MIT"
] |
permissive
|
tekiela/loev3go
|
b7153ff01bf02a2a03ef489a8bbae1b1a24d4301
|
497f6bd0f9c13d19b3b638e356a143699019513b
|
refs/heads/master
| 2020-08-06T16:53:24.320837
| 2019-04-01T07:51:35
| 2019-04-01T07:51:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
#!/usr/bin/python3
# Run two hardcoded LOGO programs and save the canvas afterwards.
import src.LogoIntoSVG
lis=src.LogoIntoSVG.LogoIntoSVG()
lis.run_logo_emit_svg(
"""to square
repeat 4 [ fd 10 rt 90 ]
end
square
""", "test-square.svg")
lis.run_logo_emit_svg(
"""to gen :lo :hi :step
make "x []
while [ :lo < (:hi+1) ] [ make "x lput :lo :x make "lo :lo + :step ]
output :x
end
for "l (gen 10 30 5) [repeat 5 [repeat 8 [fd :l rt 45] rt 72]]
""", "test-shape.svg")
|
[
"bojar@ufal.mff.cuni.cz"
] |
bojar@ufal.mff.cuni.cz
|
aef77fdb620292e37981149a8dd9a65be37cb592
|
9f56b67705bfcafef541773a4334815abbc53eb2
|
/methOverload.py
|
7af87df161663dc178ac293a2508f311c551c5f9
|
[] |
no_license
|
sukanyabag/Object-Oriented-Programming--Python
|
5527543cc89b2d8bf60173d5042bc52f9d5a98b2
|
e40cbc9ec099ca8194a79e6f5c3b784c982ba931
|
refs/heads/main
| 2023-07-18T20:10:42.981001
| 2021-09-12T14:27:01
| 2021-09-12T14:27:01
| 370,100,780
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,214
|
py
|
class OverloadDemo:
def multiply(self,a,b):
print(a*b)
def multiply(self,a,b,c):
print(a*b*c)
m=OverloadDemo()
m.multiply(5,10)
'''
op-
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-6-623185909d04> in <module>
5 print(a*b*c)
6 m=OverloadDemo()
----> 7 m.multiply(5,10)
TypeError: multiply() missing 1 required positional argument: 'c'
'''
'''
However, Python does not allow method overloading based on the type, number or sequence of method parameters.
In Python, method overloading is instead a technique of defining a method in such a way
that there is more than one way to call it. This is different from many other programming languages.
'''
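# A minimal illustrative sketch (added here; not part of the original file) of
# emulating overloading with *args. The class name OverloadWithArgs is hypothetical.
class OverloadWithArgs:
    def multiply(self, *args):
        result = 1
        for a in args:
            result *= a
        print(result)

m2 = OverloadWithArgs()
m2.multiply(5, 10)     # 50
m2.multiply(5, 10, 2)  # 100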
class methodOverloading :
def greeting(self, name=None):
if name is not None:
            print("Welcome " + name)
        else:
            print("Welcome")
# Create an object referencing by variable ob
ob = methodOverloading()
# call the method greeting without parameter
ob.greeting()
# call the method with parameter
ob.greeting('Donald Trump')
'''
Output:
Welcome
Welcome Donald Trump
'''
|
[
"noreply@github.com"
] |
sukanyabag.noreply@github.com
|
1affd7776d599b99f55b334bed89947f4fe0514d
|
320dab5b2295ed25f02f041b0901cd7451f45662
|
/cosmoslib/utils/mpi.py
|
3fd80b093b2759ca6c73c6ab76f9c94e148e1b39
|
[
"MIT"
] |
permissive
|
guanyilun/cosmoslib
|
fdf920ef580f58ada85ba2b3283c02a4924c48d5
|
fc035ac15b93ec6aaac08221be3ff32488476e1e
|
refs/heads/master
| 2023-03-03T14:17:34.735739
| 2023-02-28T20:29:41
| 2023-02-28T20:29:41
| 159,537,157
| 1
| 1
|
MIT
| 2021-12-14T13:30:49
| 2018-11-28T17:07:38
|
Python
|
UTF-8
|
Python
| false
| false
| 599
|
py
|
"""mpi wrapper imported from pixell to reduce dependency on pixell for
simple things. Notes from original script: Utilities for making mpi
use safer and easier.
"""
from __future__ import print_function
import sys, os, traceback
class FakeCommunicator:
def __init__(self):
self.size = 1
self.rank = 0
FAKE_WORLD = FakeCommunicator()
COMM_WORLD = FAKE_WORLD
COMM_SELF = FAKE_WORLD
disabled = True
try:
if not("DISABLE_MPI" in os.environ and os.environ["DISABLE_MPI"].lower() in ["true","1"]):
from mpi4py.MPI import *
disabled = False
except:
pass
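# Usage sketch appended for illustration (not part of the original module):
# code written against this wrapper runs whether or not mpi4py is installed,
# falling back to the single-process FakeCommunicator.
if __name__ == "__main__":
    comm = COMM_WORLD
    print("rank %d of %d (MPI disabled: %s)" % (comm.rank, comm.size, disabled))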
|
[
"zoom.aaron@gmail.com"
] |
zoom.aaron@gmail.com
|
84a710c4d394127df391e8fe17826da9b9740378
|
22af2e48c0720878568e356c766dfb5cabc67a10
|
/mlrecobooks/plotter.py
|
6cd6125eaf958c95546972d9d18ae74ee06bb60a
|
[] |
no_license
|
samyBadjoudj/ml-reco-books-python
|
fd3d407b91772bce1e367a8644dcebdb17e7b64f
|
e3c3cbb3e134a96cb50d0b1762bd0073554f1a84
|
refs/heads/master
| 2022-09-15T02:31:31.555063
| 2020-06-03T13:25:02
| 2020-06-03T13:25:02
| 263,127,199
| 0
| 1
| null | 2020-06-03T13:25:03
| 2020-05-11T18:40:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,655
|
py
|
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection on older matplotlib
import mlrecobooks.data_extractor as ml
def plot_distances(data):
distances_ = data["distances"] # sorted(data["distances"], key=lambda d: d["value"])
plt.yticks(np.arange(0, distances_[-1]["value"] + 2, step=0.5))
plt.bar(ml.get_book_distance_titles(distances_), ml.get_book_distance_values(distances_), align="center")
plt.show()
def plot_all_books_scatter(data):
print(data)
data_to_plot = ml.get_high_variance_categories_3d(data)
x_label = data_to_plot["x"]["name"]
y_label = data_to_plot["y"]["name"]
z_label = data_to_plot["z"]["name"]
centroid_to_plot = ml.get_centroid_feature_coordinates(data, x_label, y_label, z_label)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(z_label)
for i, (x, y, z) in enumerate(
zip(data_to_plot["x"]["values"], data_to_plot["y"]["values"], data_to_plot["z"]["values"])):
ax.text(x, y, z, data_to_plot["books"][i])
scat_books = ax.scatter(data_to_plot["x"]["values"], data_to_plot["y"]["values"], data_to_plot["z"]["values"],
c='r',
marker='o')
ax.text(centroid_to_plot["x"], centroid_to_plot["y"], centroid_to_plot["z"], "CENTROID")
centroid = ax.scatter(centroid_to_plot["x"], centroid_to_plot["y"], centroid_to_plot["z"], color="blue", marker="^")
ax.legend((scat_books, centroid), ("books", "centroid"))
plt.gcf().text(0.02, 0.5, "Centroid composition:\n-" + "\n-".join(data["favoritesBooks"]), fontsize=8)
plt.show()
|
[
"samy.badjoudj@gmail.com"
] |
samy.badjoudj@gmail.com
|
fab2f1755281a273c3b1b0d6f33ff645418908b6
|
38fa210fb605e7cfdc039aa2168e1224b6261884
|
/eve_prVoting/wsgi.py
|
4fd01c341a1458764a5d3a505c3ffb6278c47765
|
[] |
no_license
|
NCHUSG/eve_prVoting
|
c4ac639a1c36bc41465fc23069d7a1d1d0a7616e
|
d1cca39816804898021ff2cf397f67368bafa06f
|
refs/heads/master
| 2021-01-10T20:01:13.001055
| 2015-04-13T10:51:04
| 2015-04-13T10:51:04
| 33,862,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for eve_prVoting project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eve_prVoting.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
[
"qas612820704@gmail.com"
] |
qas612820704@gmail.com
|
1dcec55534fb5df9bee784f3b8d9245a58335b79
|
f1d3dbcb8a1f979ddbe02dd24967e3c32a83f997
|
/33.py
|
45e0e9d08ee0f99b147123407eeafa3e5ec8ae8b
|
[] |
no_license
|
shenbufan/runoob-Python-100
|
212d8007f45317f4f9841abfa0fd9eec2ee0c31a
|
bd83a642052c64dc9eb95ca16e9a6c6df295619a
|
refs/heads/master
| 2020-03-14T05:40:34.779879
| 2018-04-29T05:50:34
| 2018-04-29T05:50:34
| 131,469,253
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 34
|
py
|
s='efbtbteb'
x=','.join(s)
print x
|
[
"chenleiy@foxmail.com"
] |
chenleiy@foxmail.com
|
b79e17aca4a083d90c9be00c429eebd71d5bfdfd
|
2031771d8c226806a0b35c3579af990dd0747e64
|
/pyobjc-framework-Cocoa/Examples/Foundation/Scripts/rendezvous.py
|
b9650105a6e032f47a25075b050bc659b61b2e6e
|
[
"MIT"
] |
permissive
|
GreatFruitOmsk/pyobjc-mirror
|
a146b5363a5e39181f09761087fd854127c07c86
|
4f4cf0e4416ea67240633077e5665f5ed9724140
|
refs/heads/master
| 2018-12-22T12:38:52.382389
| 2018-11-12T09:54:18
| 2018-11-12T09:54:18
| 109,211,701
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,399
|
py
|
#!/usr/bin/env python
"""
This script uses NSNetServiceBrowser to look for local HTTP servers.
"""
from __future__ import print_function
import objc
from Foundation import NSObject, NSRunLoop, NSNetServiceBrowser, NSDate
objc.setVerbose(1)
class PrintingResolverDelegate(NSObject):
def netServiceDidResolveAddress_(self, service):
addresses = service.addresses()
if len(addresses) == 0:
return
print("%s.%s" % (service.name(), service.domain()))
for address in service.addresses():
print(" %s"%(address,))
print("")
service.setDelegate_(None)
def netService_didNotResolve_(self, service, didNotResolve):
print("didNotResolve",didNotResolve)
service.setDelegate_(None)
class PrintingBrowserDelegate(NSObject):
def startLookup(self):
self.delegates = []
for aNetService in self.services:
prd = PrintingResolverDelegate.new()
aNetService.setDelegate_(prd)
aNetService.resolve()
self.delegates.append(prd)
def netServiceBrowserWillSearch_(self, browser):
print("Browsing for advertised services...")
self.services = []
def netServiceBrowserDidStopSearch_(self, browser):
print("Browse complete")
self.startLookup()
def netServiceBrowser_didNotSearch_(self, browser, errorDict):
print("Could not search.")
def netServiceBrowser_didFindService_moreComing_(self, browser, aNetService, moreComing):
print("Found a service: %s %s"%(aNetService.name(), aNetService.domain()))
self.services.append(aNetService)
if not moreComing:
browser.stop()
def netServiceBrowser_didRemoveService_moreComing_(self, browser, aNetService, moreComing):
print("Service removed: %s"%(aNetService.name(),))
if not moreComing:
browser.stop()
def findDomains(serviceName, seconds=5.0):
runloop = NSRunLoop.currentRunLoop()
browser = NSNetServiceBrowser.new()
pbd = PrintingBrowserDelegate.new()
browser.setDelegate_(pbd)
browser.searchForServicesOfType_inDomain_(serviceName, "")
untilWhen = NSDate.dateWithTimeIntervalSinceNow_(seconds)
runloop.runUntilDate_(untilWhen)
if __name__ == '__main__':
# Use '_afpovertcp' instead of '_http' to look for fileservers.
findDomains("_afpovertcp._tcp")
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
ddb87afed26972677d497c7d11989d7ff15aa9a2
|
4b3a4c65e7a4d7cd4bb7f99e1391d06e290cbc67
|
/main.py
|
ddc112797b49deca1c522450aa94e9847cf5029a
|
[] |
no_license
|
muliarska/Vigenere_Cipher
|
313c2967f5f180a6e4ace181daf964aa27bcc24d
|
5650fdb09b37f0bc24ed46ddfc98316db9e17d03
|
refs/heads/master
| 2022-09-12T16:23:05.153252
| 2020-06-01T11:31:50
| 2020-06-01T11:31:50
| 268,071,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
"""Representing an APP"""
from flask import Flask, render_template, request
from vigenere_cipher import VigenereCipher
APP = Flask(__name__)
@APP.route('/', methods=['GET'])
def main():
"""Returns home page"""
return render_template('index.html')
@APP.route('/result', methods=['POST', 'GET'])
def result_page():
"""Returns result page with encoded or decoded
message"""
keyword = request.form['keyword']
choice = request.form['choice']
message = request.form['message']
cipher = VigenereCipher(keyword)
if choice == 'encode':
result = cipher.encode(message)
else:
result = cipher.decode(message)
if result is None:
return render_template('exception.html')
return render_template('result.html', result=result)
if __name__ == '__main__':
APP.run(port=8000)
|
[
"yana.muliarska@gmail.com"
] |
yana.muliarska@gmail.com
|
1e5d170703bb9e35e4cab163bb473ec204f9ff87
|
ccc1026b5156949d8a41a73ebe4ea648057a782c
|
/Answer_06.py
|
cd3804caf614d42b1ada3edadfe71b821165247a
|
[] |
no_license
|
joeljo2104/hacktoberfest_21_CP
|
c6e9da0200a969bbf1a3d2f5a74b34bb5b85a809
|
396e9fdc6fe565add161468729f9ea14a3025053
|
refs/heads/main
| 2023-09-01T12:20:19.730924
| 2021-10-21T09:19:14
| 2021-10-21T09:19:14
| 419,602,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
for t in range(int(input())):
n = int(input())
L = [int(s) for s in input().split()]
R = [int(s) for s in input().split()]
Index = []
for i in range(n):
Index.append( (L[i] * R[i] , R[i], i) )
Result = sorted(Index, key = lambda e: (e[0], e[1], n-e[2]), reverse = True)
print(Result[0][-1]+1)
|
[
"42415617+joeljo2104@users.noreply.github.com"
] |
42415617+joeljo2104@users.noreply.github.com
|
48db549a284d5650092a3311dfe71b784812afe2
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/foodDistribution_20200629175740.py
|
3fc43f7ad7607fb8e59d6e03cc628ae97433ce9f
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 975
|
py
|
def food(arr):
# removes the item at index 0
sandwiches = arr.pop(0)
while sandwiches > 0:
highest = -1
maxred = -1
for i in range(len(arr)):
if arr[i] > 0:
currDiff = 0
if i > 0:
currDiff = currDiff + abs(arr[i]-arr[i-1])
if i < len(arr)-1:
currDiff = currDiff + abs(arr[i] - arr[i+1])
newDiff = 0
if i > 0:
newDiff += abs(arr[i]-1 - arr[i-1])
if i < len(arr)-1:
newDiff = abs(arr[i]-1 - arr[i+1])
red = currDiff - newDiff
if red > maxred :
highest = i
maxred = red
if highest == -1:
return 0
else:
arr[highest] = arr[highest] - 1
            sandwiches -= 1
print(arr)
food([5, 3, 1, 2, 1])
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
7003566794da2f9b7dced58889d7079aee2c4e80
|
506a5dbf6818ba64efa3b411a7297d8cfee3695c
|
/Capstone2/parallax.py
|
d84934cd1c7902016549663314f11ef637b0850b
|
[] |
no_license
|
Its-a-me-Ashwin/2Dto3D
|
5af0c57aebb89cf7c1aacddacfba94706728422b
|
664a456b14b5e8528305c0145ff0e8bdfaf8ad46
|
refs/heads/master
| 2023-03-21T16:10:33.524352
| 2021-03-09T11:56:56
| 2021-03-09T11:56:56
| 289,682,081
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,544
|
py
|
from detect import getCameraPosition,getDistanceToMarker
import cv2
import numpy as np
from math import sqrt,sin,cos,tan
convert = 3.1415/180.0
# measured in (cm)
# angles in radians
camDict = {
"f" : 20,
"view":(57,43),
"res" : (640,480)
}
def rotatePoint(x,y,z):
rotations = np.array([
[cos(x)*cos(y),cos(x)*sin(y)*sin(z)-sin(x)*cos(z),cos(x)*sin(y)*cos(z)+sin(x)*sin(z)],
[sin(x)*cos(y),sin(x)*sin(y)*sin(z)+cos(x)*cos(z),sin(x)*sin(y)*cos(z)-cos(x)*sin(z)],
[-sin(y),cos(y)*sin(z),cos(y)*cos(z)]
])
return rotations
def rotate(x,y,z,points):
out = list()
rotationalMatrix = rotatePoint(x,y,z)
for i in range(points.shape[0]):
out.append(np.matmul(rotationalMatrix,points[i]))
out = np.array(out)
return out
def translatePoint(x,y,z):
'''
Makes the translation matrix
'''
translationMatrix = np.array([
[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[x,y,z,1],
])
return translationMatrix
def translate(x,y,z,points):
'''
translate a given set of points about x,y,z axis
'''
translationMatrix = translatePoint(x,y,z)
out = list(map(lambda point:np.matmul(np.append(point,[1.0]),translationMatrix)[:-1],points))
out = np.array(out)
return out
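# Illustrative sanity check (added; not part of the original script):
# translating the origin by (1, 2, 3) in homogeneous coordinates yields (1, 2, 3).
assert np.allclose(translate(1.0, 2.0, 3.0, np.zeros((1, 3)))[0], [1.0, 2.0, 3.0])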
def makeRTMatrices(data1,data2):
data = np.array(data2)-np.array(data1)
T = np.array([data[0],data[1],data[2]]).reshape((1,3))[0]
T = np.array(
[
[0.0,-T[2],T[1]],
[T[2],0.0,-T[0]],
[-T[1],T[0],0.0]
])
R = rotatePoint(data[3],data[4],data[5])
return T,R
def makeEsentialMatrix(data1,data2):
data = np.array(data2)-np.array(data1)
T = np.array([data[0],data[1],data[2]]).reshape((1,3))[0]
T = np.array(
[
[0.0,-T[2],T[1]],
[T[2],0.0,-T[0]],
[-T[1],T[0],0.0]
])
R = rotatePoint(data[3],data[4],data[5])
if not (np.all(abs(np.matmul(R,np.transpose(R))-np.identity(3,dtype=R.dtype))<1e-6)):
print("Error")
return
return np.matmul(R,T)
def camera2canvas(camStuff):
xAngleRange = np.arange(-camStuff["view"][0]/2,camStuff["view"][0]/2,
camStuff["view"][0]/camStuff["res"][0])
yAngleRange = np.arange(-camStuff["view"][1]/2,camStuff["view"][1]/2,
camStuff["view"][1]/camStuff["res"][1])
xAngleRange = xAngleRange * (3.1415/180)
yAngleRange = yAngleRange * (3.1415/180)
coordWRTC = np.zeros((camStuff["res"][0]*camStuff["res"][1],3))
for i in range(camStuff["res"][0]):
for j in range(camStuff["res"][1]):
coordWRTC[i*camStuff["res"][1]+j] = np.array([
camStuff["f"]*tan(xAngleRange[i]),
camStuff["f"]*tan(xAngleRange[j]),
camStuff["f"]
])
return coordWRTC
p1 = '1.jpg'
p2 = '2.jpg'
img1 = cv2.imread(p1,0)
img2 = cv2.imread(p2,0)
# get canvas coordinates
coordWRTC = camera2canvas(camDict)
# get camera position
ret1 = getCameraPosition(img1,6,13.5)
ret2 = getCameraPosition(img2,6,13.5)
# get Essential Matrix
#E = makeEsentialMatrix(ret1,ret2)
# get projected coordinates
coordWRTC1 = rotate(ret1[3],ret1[4],ret1[5],coordWRTC)
coordWRTC1 = translate(ret1[0],ret1[1],ret1[2],coordWRTC1)  # translate the rotated points, not the originals
coordWRTC2 = rotate(ret2[3],ret2[4],ret2[5],coordWRTC)
coordWRTC2 = translate(ret2[0],ret2[1],ret2[2],coordWRTC2)
#for i in range(img1.shape[0]):
# for j in range(img1.shape[1]):
|
[
"noreply@github.com"
] |
Its-a-me-Ashwin.noreply@github.com
|
cd4159d17f93ad7f0881371ae0c2741aa2ba0c65
|
d26c51a13125e038194bd296cdefbdc6189e718e
|
/pip_services_runtime/clients/__init__.py
|
e50653cced8b6b22de66112b5d09d1a28a824c02
|
[
"MIT"
] |
permissive
|
pip-services-archive/pip-services-runtime-python
|
0ec2f61ca800d67ed2221a9b18362346c4ae97c5
|
70eca1ffc44bfdc45c9c65b0ee347fa578368849
|
refs/heads/master
| 2020-05-20T18:32:44.087193
| 2017-03-10T01:22:18
| 2017-03-10T01:22:18
| 84,504,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
# -*- coding: utf-8 -*-
"""
pip_services_runtime.clients.__init__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Dependency clients module initialization
:copyright: Digital Living Software Corp. 2015-2016, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
__all__ = ['AbstractClient', 'DirectClient', 'RestClient']
from .AbstractClient import AbstractClient
from .DirectClient import DirectClient
from .RestClient import RestClient
|
[
"seroukhov@gmail.com"
] |
seroukhov@gmail.com
|
6e2bdc1aaa30cf640a7dcc0ed90c80e6d4711d72
|
851bf93c8a3970f16f335a33cb713132024904ef
|
/Day00-14/code/Day10/ex4.py
|
06f1f654bb152f32adf73322d5fab1163c9f8d14
|
[] |
no_license
|
zujl123/Python-100Days
|
dbfb3c9bca4cee08147bb92b0efe2b050c04d2e9
|
3d77b6c7fb8e730ecd82e3327978309474a3fa33
|
refs/heads/master
| 2023-05-30T13:41:24.898106
| 2020-09-26T01:15:11
| 2020-09-26T01:15:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
"""
引发异常和异常栈
Date: 2018-03-13
"""
def f1():
raise AssertionError('发生异常')
def f2():
f1()
def f3():
f2()
f3()
|
[
"skygit@126.com"
] |
skygit@126.com
|
3280d35e4105c391611078f28c6b528dd7482b2a
|
f7110aaab742fc92179302c5874691078ed05158
|
/the_wall/the_wall_app/migrations/0001_initial.py
|
d1132291b5efbbd216af46aa6b7d31072de7f3b7
|
[] |
no_license
|
Moha327/python_extra
|
0f9252a46a652ffe83d97cd0d6906a1835c2abbf
|
a3e1b31831578484c651d76bfd01173fe9d6eb10
|
refs/heads/master
| 2023-05-23T14:03:21.962212
| 2021-06-16T13:50:06
| 2021-06-16T13:50:06
| 377,511,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,102
|
py
|
# Generated by Django 2.2.4 on 2021-05-28 16:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('email', models.CharField(max_length=255)),
('password', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('messages', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message', to='the_wall_app.User')),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('message', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comment', to='the_wall_app.Message')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comment', to='the_wall_app.User')),
],
),
]
|
[
"m7amad9595@outlook.com"
] |
m7amad9595@outlook.com
|
38fbdd5bfe0da831b87a2a757f0547cacfffc892
|
954973c1ddaff15347e27ce45a72559bbef1db1f
|
/api_wrapper/libpixyusb_swig/get_blocks.py
|
bfa49d18b1becd7498288b167ef72fc82d28a4d9
|
[] |
no_license
|
Zaki-/api_wrapper
|
aa4ae37c26c67efebb9d8d64a17b9392c89a03e0
|
c9d67606839fc6b2c298d46f1a605eb40287fda7
|
refs/heads/master
| 2021-01-10T14:46:10.805674
| 2015-12-10T06:35:34
| 2015-12-10T06:35:34
| 47,715,131
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 948
|
py
|
from pixy import *
from ctypes import *
import ctypes
import api
import os
import time
import sys
import struct
# Pixy Python SWIG get blocks example #
print ("Pixy Python SWIG Example -- Get Blocks")
# Initialize Pixy Interpreter thread #
pixy_init()
class Blocks (Structure):
_fields_ = [ ("type", c_uint),
("signature", c_uint),
("x", c_uint),
("y", c_uint),
("width", c_uint),
("height", c_uint),
("angle", c_uint) ]
blocks = BlockArray(100)
frame = 0
# Wait for blocks #
while 1:
count = pixy_get_blocks(100, blocks)
if count > 0:
# Blocks found #
print 'frame %3d:' % (frame)
frame = frame + 1
for index in range (0, count):
print '[BLOCK_TYPE=%d SIG=%d X=%3d Y=%3d WIDTH=%3d HEIGHT=%3d]' % (blocks[index].type, blocks[index].signature, blocks[index].x, blocks[index].y, blocks[index].width, blocks[index].height)
|
[
"pi@MIN.(none)"
] |
pi@MIN.(none)
|
a2ae5772aab2ba8ec97cdaaf08f66b953a566ba6
|
90e986d37d83ceabddd8d6300f09a0acea3772f8
|
/prepare_ligand4.py
|
91dd62f26fa319d7260ee7fd0fce2baa8b2032cd
|
[] |
no_license
|
AmauryOvalleMaqueo/Project_week3
|
a8b2daf2a584d0b160bfa8671fda90d495aaf828
|
af9fb41af4d3ef09c87137bb618bb7a39de52d5d
|
refs/heads/master
| 2021-05-05T01:02:28.071956
| 2018-01-30T11:03:19
| 2018-01-30T11:03:19
| 119,523,084
| 0
| 0
| null | 2018-01-30T10:52:18
| 2018-01-30T10:52:18
| null |
UTF-8
|
Python
| false
| false
| 9,228
|
py
|
#!/usr/bin/env /home/test/MGLTools-1.5.6/bin/pythonsh
#
#
#
# $Header: /opt/cvs/python/packages/share1.5/AutoDockTools/Utilities24/prepare_ligand4.py,v 1.10 2010/07/31 00:14:13 rhuey Exp $
#
import os
from MolKit import Read
from AutoDockTools.MoleculePreparation import AD4LigandPreparation
if __name__ == '__main__':
import sys
import getopt
def usage():
"Print helpful, accurate usage statement to stdout."
print "Usage: prepare_ligand4.py -l filename"
print
print " Description of command..."
print " -l ligand_filename (.pdb or .mol2 or .pdbq format)"
print " Optional parameters:"
print " [-v] verbose output"
print " [-o pdbqt_filename] (default output filename is ligand_filename_stem + .pdbqt)"
print " [-d] dictionary to write types list and number of active torsions "
print " [-A] type(s) of repairs to make:\n\t\t bonds_hydrogens, bonds, hydrogens (default is to do no repairs)"
print " [-C] do not add charges (default is to add gasteiger charges)"
print " [-p] preserve input charges on atom type, eg -p Zn"
print " (default is not to preserve charges on any specific atom type)"
print " [-U] cleanup type:\n\t\t nphs_lps, nphs, lps, '' (default is 'nphs_lps') "
print " [-B] type(s) of bonds to allow to rotate "
print " (default sets 'backbone' rotatable and 'amide' + 'guanidinium' non-rotatable)"
print " [-R] index for root"
print " [-F] check for and use largest non-bonded fragment (default is not to do this)"
print " [-M] interactive (default is automatic output)"
print " [-I] string of bonds to inactivate composed of "
print " of zero-based atom indices eg 5_13_2_10 "
print " will inactivate atoms[5]-atoms[13] bond "
print " and atoms[2]-atoms[10] bond "
print " (default is not to inactivate any specific bonds)"
print " [-Z] inactivate all active torsions "
print " (default is leave all rotatable active except amide and guanidinium)"
print " [-g] attach all nonbonded fragments "
print " [-s] attach all nonbonded singletons: "
print " NB: sets attach all nonbonded fragments too"
print " (default is not to do this)"
# process command arguments
try:
opt_list, args = getopt.getopt(sys.argv[1:], 'l:vo:d:A:Cp:U:B:R:MFI:Zgsh')
except getopt.GetoptError, msg:
print 'prepare_ligand4.py: %s' %msg
usage()
sys.exit(2)
# initialize required parameters
#-l: ligand
ligand_filename = None
# optional parameters
verbose = None
add_bonds = False
#-A: repairs to make: add bonds and/or hydrogens
repairs = ""
#-C default: add gasteiger charges
charges_to_add = 'gasteiger'
#-p preserve charges on specific atom types
preserve_charge_types=''
#-U: cleanup by merging nphs_lps, nphs, lps
cleanup = "nphs_lps"
#-B named rotatable bond type(s) to allow to rotate
#allowed_bonds = ""
allowed_bonds = "backbone"
#-r root
root = 'auto'
#-o outputfilename
outputfilename = None
#-F check_for_fragments
check_for_fragments = False
#-I bonds_to_inactivate
bonds_to_inactivate = ""
#-Z inactivate_all_torsions
inactivate_all_torsions = False
#-g attach_nonbonded_fragments
attach_nonbonded_fragments = False
#-s attach_nonbonded_singletons
attach_singletons = False
#-m mode
mode = 'automatic'
#-d dictionary
dict = None
#'l:vo:d:A:CKU:B:R:MFI:Zgs'
for o, a in opt_list:
#print "o=", o, " a=", a
if o in ('-l', '--l'):
ligand_filename = a
if verbose: print 'set ligand_filename to ', a
if o in ('-v', '--v'):
verbose = True
if verbose: print 'set verbose to ', True
if o in ('-o', '--o'):
outputfilename = a
if verbose: print 'set outputfilename to ', a
if o in ('-d', '--d'):
dict = a
if verbose: print 'set dict to ', a
if o in ('-A', '--A'):
repairs = a
if verbose: print 'set repairs to ', a
if o in ('-C', '--C'):
charges_to_add = None
if verbose: print 'do not add charges'
if o in ('-p', '--p'):
preserve_charge_types+=a
preserve_charge_types+=','
if verbose: print 'preserve initial charges on ', preserve_charge_types
if o in ('-U', '--U'):
cleanup = a
if verbose: print 'set cleanup to merge ', a
if o in ('-B', '--B'):
allowed_bonds = a
if verbose: print 'allow ', a, 'bonds set to rotate'
if o in ('-R', '--R'):
root = a
if verbose: print 'set root to ', root
if o in ('-F', '--F'):
check_for_fragments = True
if verbose: print 'set check_for_fragments to True'
if o in ('-M', '--M'):
mode = a
if verbose: print 'set mode to ', a
if o in ('-I', '--I'):
bonds_to_inactivate = a
if verbose: print 'set bonds_to_inactivate to ', a
if o in ('-Z', '--Z'):
inactivate_all_torsions = True
if verbose: print 'set inactivate_all_torsions to ', inactivate_all_torsions
if o in ('-g', '--g'):
attach_nonbonded_fragments = True
if verbose: print 'set attach_nonbonded_fragments to ', attach_nonbonded_fragments
if o in ('-s', '--s'):
attach_singletons = True
if verbose: print 'set attach_singletons to ', attach_singletons
if o in ('-h', '--'):
usage()
sys.exit()
if not ligand_filename:
print 'prepare_ligand4: ligand filename must be specified.'
usage()
sys.exit()
if attach_singletons:
attach_nonbonded_fragments = True
if verbose: print "using attach_singletons so attach_nonbonded_fragments also"
mols = Read(ligand_filename)
if verbose: print 'read ', ligand_filename
mol = mols[0]
if len(mols)>1:
if verbose:
print "more than one molecule in file"
#use the one molecule with the most atoms
ctr = 1
for m in mols[1:]:
ctr += 1
if len(m.allAtoms)>len(mol.allAtoms):
mol = m
if verbose:
print "mol set to ", ctr, "th molecule with", len(mol.allAtoms), "atoms"
coord_dict = {}
for a in mol.allAtoms: coord_dict[a] = a.coords
mol.buildBondsByDistance()
if charges_to_add is not None:
preserved = {}
preserved_types = preserve_charge_types.split(',')
for t in preserved_types:
if not len(t): continue
ats = mol.allAtoms.get(lambda x: x.autodock_element==t)
for a in ats:
if a.chargeSet is not None:
preserved[a] = [a.chargeSet, a.charge]
if verbose:
print "setting up LPO with mode=", mode,
print "and outputfilename= ", outputfilename
print "and check_for_fragments=", check_for_fragments
print "and bonds_to_inactivate=", bonds_to_inactivate
LPO = AD4LigandPreparation(mol, mode, repairs, charges_to_add,
cleanup, allowed_bonds, root,
outputfilename=outputfilename,
dict=dict, check_for_fragments=check_for_fragments,
bonds_to_inactivate=bonds_to_inactivate,
inactivate_all_torsions=inactivate_all_torsions,
attach_nonbonded_fragments=attach_nonbonded_fragments,
attach_singletons=attach_singletons)
#do something about atoms with too many bonds (?)
#FIX THIS: could be peptide ligand (???)
# ??use isPeptide to decide chargeSet??
if charges_to_add is not None:
#restore any previous charges
for atom, chargeList in preserved.items():
atom._charges[chargeList[0]] = chargeList[1]
atom.chargeSet = chargeList[0]
if verbose: print "returning ", mol.returnCode
bad_list = []
for a in mol.allAtoms:
if a in coord_dict.keys() and a.coords!=coord_dict[a]:
bad_list.append(a)
if len(bad_list):
print len(bad_list), ' atom coordinates changed!'
for a in bad_list:
print a.name, ":", coord_dict[a], ' -> ', a.coords
else:
if verbose: print "No change in atomic coordinates"
if mol.returnCode!=0:
sys.stderr.write(mol.returnMsg+"\n")
sys.exit(mol.returnCode)
# To execute this command type:
# prepare_ligand4.py -l pdb_file -v
|
[
"a.ovalle.maqueo@student.rug.nl"
] |
a.ovalle.maqueo@student.rug.nl
|
7fd6b79f34e457b9696acf97e858fbd5b3c61b9a
|
6c5963f7943faa1662f89a48da16d132664bf704
|
/test1.py
|
27039db9885167ec5f14341f3c669eeea2875e0f
|
[] |
no_license
|
ArmGono/exepermental
|
292f0ff622f6f3c67a3960ac733260289967813e
|
51a4956ac7e5c8e0d01db21ff657dc05a939e248
|
refs/heads/master
| 2021-05-06T00:23:58.071778
| 2018-01-12T14:57:21
| 2018-01-12T14:57:21
| 117,253,464
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
from Tkinter import *
def donothing():
filewin = Toplevel(root)
button = Button(filewin, text="Do nothing button")
button.pack()
root = Tk()
menubar = Menu(root)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="New", command=donothing)
filemenu.add_command(label="Open", command=donothing)
filemenu.add_command(label="Save", command=donothing)
filemenu.add_command(label="Save as...", command=donothing)
filemenu.add_command(label="Close", command=donothing)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=root.quit)
menubar.add_cascade(label="File", menu=filemenu)
editmenu = Menu(menubar, tearoff=0)
editmenu.add_command(label="Undo", command=donothing)
editmenu.add_separator()
editmenu.add_command(label="Cut", command=donothing)
editmenu.add_command(label="Copy", command=donothing)
editmenu.add_command(label="Paste", command=donothing)
editmenu.add_command(label="Delete", command=donothing)
editmenu.add_command(label="Select All", command=donothing)
menubar.add_cascade(label="Edit", menu=editmenu)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="Help Index", command=donothing)
helpmenu.add_command(label="About...", command=donothing)
menubar.add_cascade(label="Help", menu=helpmenu)
root.config(menu=menubar)
root.mainloop()
|
[
"admin@armrus.net"
] |
admin@armrus.net
|
407083d7eb61e9434b8c2bba66744abbcb075fc0
|
bf0b9cf4eeff63cf69c527d6428ed09dbd27a97c
|
/signaldemo/signal_demo_app/models.py
|
5a3eb57ab847c2453cc3e7c3cf45a737591f32f7
|
[] |
no_license
|
sammaurya/django_signal_demo
|
2d01156b03dbed222c12e81aea92cdd243f2b72d
|
d91e99d36ef8c0989dbbe692af35542791da9f77
|
refs/heads/master
| 2022-10-27T10:00:14.180115
| 2020-06-01T11:27:24
| 2020-06-01T11:27:24
| 268,501,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 600
|
py
|
from django.db import models
# Create your models here.
class UserProfile(models.Model):
first_name = models.CharField(max_length=150)
last_name = models.CharField(max_length=150)
username = models.CharField(max_length=150, primary_key=True)
email = models.EmailField()
created_by = models.DateTimeField(auto_now=True)
updated_by = models.DateTimeField(auto_now_add=True)
def full_name(self):
return self.first_name + " " + self.last_name;
class Book(models.Model):
author = models.ManyToManyField(UserProfile)
title = models.CharField(max_length=250)
|
[
"sammaurya196@gmail.com"
] |
sammaurya196@gmail.com
|
1495e55cfb32899ec170afb7f2b156c62356f21c
|
09376d059b8898ff637c4e4190619bf0ca8f536a
|
/python_scripts/standard_star.py
|
84f2fdee2080818d2fc2c5619a7e311c4bccc44d
|
[] |
no_license
|
samvaughan/KMOS_reduction
|
d954917db2242641d8c2f3447e3d09399245f3c8
|
7c4358c84e326a2829b01364ce3bc3826acaf9b7
|
refs/heads/master
| 2021-01-18T15:54:13.349330
| 2017-08-15T15:22:49
| 2017-08-15T15:22:49
| 100,390,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,595
|
py
|
"""
Take a folder containing observations of a telluric star.
Make a .sof file of those observations, as well as the required calibrations.
Make the requried output directories.
Run the esorex recipe kmos_std_star with that .sof file
"""
import sys
import os
import argparse
from astropy.io import fits
import glob
import logging
from optparse import OptionParser
from KMOS_tools import kmos_functions as KF
if __name__=='__main__':
parser = argparse.ArgumentParser(description="KMOS Standard Star Reduction Script")
parser.add_argument('reduced_file_destination', type=str, help='Location for reduced standard star cubes')
parser.add_argument('input_files', type=str, help='Location of standard star files')
parser.add_argument('calibration_data_location', type=str, help='Location of (dynamic) calibration files: XCAL, YCAL, LCAL, MASTER_FLAT')
parser.add_argument('--static_calib_location', type=str, help='Optional: Location of static calibration files (WAVE_BAND). Otherwise assume /Data/KCLASH/Data/Static_Cals/cal/')
args=parser.parse_args()
reduced_file_destination=os.path.abspath(args.reduced_file_destination)
file_location=os.path.abspath(args.input_files)
calibration_data_location=os.path.abspath(args.calibration_data_location)
static_calib_location=args.static_calib_location
if static_calib_location is not None:
kmos_static_calib_directory=os.path.abspath(static_calib_location)
else:
kmos_static_calib_directory='/Data/KCLASH/Data/Static_Cals/cal'
#Optional arguments. Code left over from original script and isn't used any more
usage = "usage: %prog [options] data_dir"
parser = OptionParser(usage=usage, description="KMOS Calibration Data Generation Script")
parser.add_option("-p", "--parallel", action="store_true", dest="parallel", default=False, help="Parallel execution of esorex")
parser.add_option("-d", "--description", action="store_true", dest="description", default=False, help="Detailed Description")
parser.add_option("-b", "--band", default="All", help="Band that needs to be reduced (H, K, HK, YJ, IZ) [default: %default]")
(options, args) = parser.parse_args()
logging.info("Parallel Mode: {0}".format(options.parallel))
logging.info("Description required: {0}".format(options.description))
logging.info("Desired Band: {0}".format(options.band))
# Loop on all files in the input directory
star_list = []
for file in glob.glob(file_location+"/KMOS*star*.fits"):
# Read the Primary header
fname=os.path.abspath(file)
hdu = fits.getheader(fname, 0)
tpl_id = hdu['HIERARCH ESO TPL ID']
# Only keep the proper TPL.ID
if tpl_id in ["KMOS_spec_cal_stdstar"]:
star_list.append({ 'name': fname,
'tpl_id': tpl_id,
'tpl_start': hdu['HIERARCH ESO TPL START'],
'tpl_nexp': hdu['HIERARCH ESO TPL NEXP'],
'tpl_expno': hdu['HIERARCH ESO TPL EXPNO'],
'dpr_type': hdu['HIERARCH ESO DPR TYPE'],
'obs_start': hdu['HIERARCH ESO OBS START'],
'band': hdu['HIERARCH ESO INS GRAT1 ID']})
#if not os.path.exists("{}/*STAR_SPEC*.fits")
KF.multiple_log("Standard Star Reduction")
KF.reduce_std_star(reduced_file_destination, calibration_data_location, star_list, 'kmos_std_star', options, reduced_dark_folder=None, reduced_flat_folder=None, kmos_static_calib_directory=kmos_static_calib_directory)
|
[
"sam.vaughan@physics.ox.ac.uk"
] |
sam.vaughan@physics.ox.ac.uk
|
bb8ce9acbb13856c54c27b49ea8749c8003a528e
|
0a181da79fbd1354d5cad0e3c6abafff7011006b
|
/week9/day04.py
|
3f32dd3dad9f088eaf3133087bbe637f9e081b8a
|
[] |
no_license
|
vibhor-vibhav-au6/APJKalam
|
14b08520f3fd9b08cc700d130e4c278ffce90c50
|
bf747787d5585df4e48b7cc9bf4ca8a353a81aa3
|
refs/heads/master
| 2023-06-05T06:07:49.523004
| 2021-06-22T04:37:24
| 2021-06-22T04:37:24
| 365,747,187
| 7
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
'''Find whether an array is subset of another array:'''
def subsetHelper(arr1,arr2):
# arr1 = [1,2,3]
# arr2 = [2,2,2,3]
for i in arr1:
if arr2.count(i) < arr1.count(i):
return False
return True
def subset(arr1, arr2):
if len(arr1) >= len(arr2):
return subsetHelper(arr2,arr1)
else:
return subsetHelper(arr1,arr2)
a = [11, 1, 13, 21, 3, 7]
b = [11, 3, 7, 1, 13, 21]
# print (subset(a,b))
'''Sort an array of 0s, 1s and 2s'''
def sort012(arr):
return [0 for i in range(arr.count(0))]+[1 for i in range(arr.count(1))]+[2 for i in range(arr.count(2))]
# print(sort012([0, 1, 2, 0, 1, 2]))
'''Sort an array in wave form
'''
def waveSort(arr, n):
for i in range(0, n, 2):
if (i> 0 and arr[i] < arr[i-1]):
arr[i],arr[i-1] = arr[i-1],arr[i]
if (i < n-1 and arr[i] < arr[i+1]):
arr[i],arr[i+1] = arr[i+1],arr[i]
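
# Illustrative usage (added example; the sample values are arbitrary):
arr = [10, 5, 6, 3, 2, 20, 100, 80]
waveSort(arr, len(arr))
# print(arr)  # -> [10, 5, 6, 2, 20, 3, 100, 80]: every even index holds a local peak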
|
[
"53352793+vibhor-vibhav-au6@users.noreply.github.com"
] |
53352793+vibhor-vibhav-au6@users.noreply.github.com
|
f5baa97cee2609c7e575f7e42302d4e1c8060fe1
|
c214cf3758518d58aa420ba287888ffecf5cf981
|
/scripts/twosides/twosides.py
|
fb05ae121a0dd257c0a7eb7820d369f431f73814
|
[] |
no_license
|
Fimwu7/gnn-ddi-ibm-umass
|
df2542b14a96288e1dc237ea6bb8f4fc51cf1a3d
|
d325e10fa4cb47d86ec77ce759170163d76115f3
|
refs/heads/main
| 2023-05-23T15:13:13.621106
| 2021-06-21T00:37:11
| 2021-06-21T00:37:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,609
|
py
|
r""" Replace drug ids in twosides database with corresponding drugbank ids.
"""
import pandas as pd
import numpy as np
TWOSIDES = '../../data/twosides/TWOSIDES.csv'
TWOSIDES_TO_DB = '../../data/twosides/rxnorm-drugbank-omop-mapping-CLEANED.tsv'
NEW_TWOSIDES = '../../data/twosides/TWOSIDES_DB.csv'
def main():
with open(TWOSIDES, 'r') as twosides:
with open(TWOSIDES_TO_DB, 'r') as twosides_to_db:
twosides_lines = twosides.readlines()[1:]
two_db_map_lines = twosides_to_db.readlines()[1:]
# get only the needed columns from twosides
twosides_split = [a.strip().split(',') for a in twosides_lines]
twosides_clean = [(a[0], a[2], a[4]) for a in twosides_split]
            # preview the first few cleaned rows
            for i in range(5):
                print(twosides_clean[i])
# make a mapping from rx to db
two_db_map_split = [
a.strip().split('\t') for a in two_db_map_lines
]
for i in range(5):
print(two_db_map_split[i])
two_db_map = dict([(a[0], a[2]) for a in two_db_map_split])
new_twosides = open(NEW_TWOSIDES, 'w+')
new_twosides.write('d1,d2,rel\n')
for d1, d2, rel in twosides_clean:
new_d1, new_d2 = d1, d2
if d1 in two_db_map:
new_d1 = two_db_map[d1]
if d2 in two_db_map:
new_d2 = two_db_map[d2]
new_twosides.write(','.join([new_d1, new_d2, rel]) + '\n')
new_twosides.close()
if __name__ == '__main__':
main()
|
[
"rishabhgupta@umass.edu"
] |
rishabhgupta@umass.edu
|
eca6db22f7240cc2fe93f3c8294c9566e37070a4
|
33a6519cf8dd7e7a1c54e2ecc7335a31055dd1ca
|
/src/test_arm/arm/listener.py
|
a490e14b80f2991832012c4d338e8173b5e5c208
|
[] |
no_license
|
joehjhuang/team4_arm_ws
|
c014cee5d16a8cc6057b90474ade391f169bb8f9
|
0f67d0b0dc3891d85fc5a8fb2321fe2ea064a8ca
|
refs/heads/master
| 2021-08-23T15:45:24.026335
| 2017-12-05T13:42:12
| 2017-12-05T13:42:12
| 112,145,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
def callback(data):
rospy.loginfo(rospy.get_caller_id() + "I heard %s", data.data)
def listener():
rospy.init_node('listener',anonymous=True)
rospy.Subscriber("chatter",String,callback)
rospy.spin()
if __name__ == '__main__':
listener()
|
[
"joehuang@mit.edu"
] |
joehuang@mit.edu
|
57242d8a647160217b314b3932dcfd3d2e9e382e
|
1ef8931c3daec617cf06799246e346146d500135
|
/diypedia/api/views.py
|
8cacc18306b5a41cdc32283e1130d70b7d792299
|
[] |
no_license
|
allmy3/DIYpedia-clone-Django
|
b17ad383be5662685a0a4ed34b0ce03fdef992fb
|
238e4bdf72bfd9ab342b731d244318bf1232a21f
|
refs/heads/main
| 2023-07-13T22:06:00.322249
| 2021-08-12T14:07:15
| 2021-08-12T14:07:15
| 395,338,953
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,257
|
py
|
from rest_framework.response import Response
from rest_framework.views import APIView
from posts.models import *
from users.models import *
from .serializers import PostListSerializer, PostDetailSerializer, CategoryCreateSerializer, CategorySerializer
class PostListAPIview(APIView):
def get(self, request):
queryset = Post.objects.all()
serializer_for_qs = PostListSerializer(
instance=queryset,
many=True
)
return Response(serializer_for_qs.data)
class PostDetailAPIview(APIView):
def get(self, request, pk):
queryset = Post.objects.get(id=pk)
serializer_for_qs = PostDetailSerializer(
instance=queryset,
)
return Response(serializer_for_qs.data)
class CategoryListAPIview(APIView):
def get(self, request):
queryset = Category.objects.all()
serializer_for_qs = CategorySerializer(
instance=queryset,
many=True
)
return Response(serializer_for_qs.data)
class CategoryCreateAPIview(APIView):
def post(self, request):
category = CategoryCreateSerializer(data=request.data)
        if category.is_valid():
            category.save()
            return Response(status=201)
        return Response(category.errors, status=400)
|
[
"79646356+allmy3@users.noreply.github.com"
] |
79646356+allmy3@users.noreply.github.com
|
426da1a0244539ea8cc8154d7640df96e5aef3fc
|
b23a2f17713479e4116f4a32a7e961900e8cc5f6
|
/getfile.py
|
3351489704aaabfe2576e20de72ba4e39793a724
|
[] |
no_license
|
nkoster/gpgchat
|
065b47aaa3203d95541b60015b2e93be84b323ec
|
aaf9b68334b731da7be2488c7dfb4249ad869f60
|
refs/heads/master
| 2020-03-23T12:31:23.167382
| 2018-07-24T00:12:12
| 2018-07-24T00:12:12
| 141,564,215
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,509
|
py
|
import gi
import os
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GObject
class GetFile(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Choose a File")
origin = os.path.dirname(os.path.realpath(__file__))
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
self.add(vbox)
self.progressbar = Gtk.ProgressBar()
vbox.pack_start(self.progressbar, True, True, 0)
image = Gtk.Image.new_from_file(origin + '/upload.png')
vbox.add(image)
self.timeout_id = GObject.timeout_add(5, self.on_timeout, None)
self.activity_mode = False
self.label1 = Gtk.Label('<No file selected>')
vbox.add(self.label1)
button1 = Gtk.Button("Choose File")
button1.connect("clicked", self.on_file_clicked)
vbox.add(button1)
self.t = 0.0005
self.forward = True
def on_timeout(self, user_data):
if self.activity_mode:
self.progressbar.pulse()
else:
new_value = self.progressbar.get_fraction() + self.t
if new_value > 1:
self.t = -0.0005
if self.forward:
self.progressbar.set_inverted(True)
self.forward = False
else:
self.progressbar.set_inverted(False)
self.forward = True
if new_value < 0:
self.t = 0.0005
#self.progressbar.pulse()
self.progressbar.set_fraction(new_value)
return True
def on_file_clicked(self, data):
dialog = Gtk.FileChooserDialog("Please choose a file", self,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
self.add_filters(dialog)
response = dialog.run()
if response == Gtk.ResponseType.OK:
#print("Open clicked")
print(dialog.get_filename())
self.label1.set_text(dialog.get_filename())
Gtk.main_quit()
elif response == Gtk.ResponseType.CANCEL:
print("Cancel clicked")
dialog.destroy()
def add_filters(self, dialog):
filter_any = Gtk.FileFilter()
filter_any.set_name("Any files")
filter_any.add_pattern("*")
dialog.add_filter(filter_any)
win = GetFile()
win.connect("destroy", Gtk.main_quit)
win.show_all()
Gtk.main()
|
[
"n.koster@portavita.eu"
] |
n.koster@portavita.eu
|
4279c2961966ea407c9bb4122a1b4d94c4ef2d81
|
bb1fa823f9e99814343ac567279ac0c61b733f4c
|
/random.py
|
794d48d9668f2fb7ee4b8bf914c9eb9ec8ac2d65
|
[] |
no_license
|
knockcat/Python
|
430679ae88c0b6b2eba52c4ae40fc91c6ced7814
|
1441b2e0c662fa00b30e829a2b219b6f2d7942dc
|
refs/heads/main
| 2023-08-17T10:08:51.553700
| 2021-10-11T14:06:01
| 2021-10-11T14:06:01
| 410,287,798
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
'''
Create a program using random.random() which generates numbers in a range a to b
def generateRandoms(a,b):
    return N
'''
import random
print(random.random())
def generateRandoms(a,b):
N = random.randrange(a,b)
return N
print(generateRandoms(10,489))
|
[
"noreply@github.com"
] |
knockcat.noreply@github.com
|
3f290a195113dd6a7d88952146a0350d3c4005df
|
fb426133a4d2af01ae95bb16b57ac167ee809e4b
|
/envs/data_utils.py
|
a559e07314912457c6a0da963a54714d62c513d1
|
[
"MIT"
] |
permissive
|
hpi-sam/RL_4_Feedback_Control
|
83a7cac4c66f96365135a769ae3e9cc3ba4df63c
|
7e30e660f426f7f62a740e9fd4dafb32e3222690
|
refs/heads/main
| 2023-06-07T06:28:55.321712
| 2021-06-22T09:57:26
| 2021-06-22T09:57:26
| 378,118,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,945
|
py
|
import sys
from scipy import stats
from tqdm import tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "Times New Roman"
import warnings
warnings.filterwarnings("ignore")
ROUNDED_ACCURACY = 4
def transform_data(data: pd.DataFrame) -> pd.DataFrame:
transformed = data
for index, row in transformed.iterrows():
raw = row['raw']
transformed.loc[index, 'sqt'] = np.sqrt(raw) # is the same as np.power(raw, (1 / 2))
transformed.loc[index, 'cube'] = np.power(raw, (1 / 3))
np.seterr(divide='ignore')
transformed.loc[index, 'log10'] = np.where(raw > 0, np.log10(raw), 0)
transformed.loc[index, 'ln'] = np.where(raw > 0, np.log(raw), 0)
transformed.loc[index, 'log2'] = np.where(raw > 0, np.log2(raw), 0)
return transformed.round(ROUNDED_ACCURACY)
def get_ordering(data: pd.DataFrame) -> pd.DataFrame:
'''
The ordering determines the sequence of the shifting of the <component,failure> groups.
:param data: the dataset
:return: A data frame with the mean values for each <component,failure> group sorted by the mean value, component name.
'''
mean_values_of_the_groups = data.groupby([data.columns[0], data.columns[1]])[data.columns[2]].mean().reset_index()
return mean_values_of_the_groups.sort_values(by=[data.columns[2], data.columns[0]], ascending=True)
def shift_data(data: pd.DataFrame, spread_multiplication_factor: int = 1, min_std=0.001):
ordering = get_ordering(data)
# set all standard deviation with zero to one
stdev_values = data.groupby([data.columns[0], data.columns[1]])[data.columns[2]].std().reset_index().fillna(min_std)
stdev_values.loc[stdev_values[stdev_values.columns[2]] == 0, stdev_values.columns[2]] = min_std
data_new = data.copy()
previous = None
tie_count = 0
for _, values in ordering.iterrows():
if previous is not None:
# standard deviation
std_pre = stdev_values[(stdev_values[data.columns[0]] == previous[0]) & (stdev_values[data.columns[1]] == previous[1])][data.columns[2]].tolist()[0]
std_cur = stdev_values[(stdev_values[data.columns[0]] == values[0]) & (stdev_values[data.columns[1]] == values[1])][data.columns[2]].tolist()[0]
# mean value
mean_pre = previous[2]
mean_cur = values[2]
# check for ties
if mean_cur == mean_pre:
tie_count += std_pre
# shift the data
spread = (std_pre + std_cur) * spread_multiplication_factor + tie_count
data_new.loc[(data_new[data.columns[0]] == values[0]) & (data_new[data.columns[1]] == values[1]), data.columns[2]] += spread
previous = values
return data_new.round(ROUNDED_ACCURACY)
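# Minimal usage sketch for the shifting step (hypothetical column names): the frame's
# first two columns identify a <component,failure> group, the third holds the value.
# toy = pd.DataFrame({'component': ['a', 'a', 'b', 'b'],
#                     'failure':   ['f1', 'f1', 'f1', 'f1'],
#                     'value':     [1.0, 1.2, 1.1, 1.3]})
# shifted = shift_data(toy, spread_multiplication_factor=1)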
def ARol(start, mu, N, theta=0.1, sigma=1):
'''
An auto-regressive model combined with an Ornstein–Uhlenbeck procedure.
    :param start: the starting value of the series -> max of a <component, failure>
    :param mu: value to end with -> mean of a <component, failure>
:param theta: how fast to converge -> fixed to 0.1
:param N: number of series points to create
:return: generated series
'''
series = [start]
for t in range(N):
series.append(series[-1] + theta * (mu - series[-1]) + sigma * np.random.normal(0., 1))
return series
def GARCH(mean, N, epsilon=0.1, alpha=0.0001, beta=0.1):
'''
:param mean: the starting value of the series and central point of the time series
:param N: number of series points to create
:param epsilon: a factor
    :param alpha: how much influence the previous series point has on the new value -> high parameter = high variance
    :param beta: how much influence the noise has
:return: generated series
'''
    n1 = 50  # burn-in points to drop
    n2 = N + n1  # total points to generate, including burn-in
noise = np.random.normal(0., 1, n2) # the variance of the series, a random sample from a distribution
series = [mean]
for t in range(n2):
variance = np.sqrt(epsilon + alpha * series[t-1]**2 + beta * noise[t-1]**2)
series.append(noise[t] * variance + mean)
return series[n1-1:-1]
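# Usage sketch (illustrative parameters): both generators return a plain Python list
# of series points, so they can be inspected or plotted directly.
# ar_series = ARol(start=10.0, mu=5.0, N=100)
# garch_series = GARCH(mean=5.0, N=100)
# plt.plot(ar_series); plt.plot(garch_series); plt.show()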
def create_non_stationary_data(model: str, data: pd.DataFrame, N=100, distinguishable=False):
'''
    :param model: choose between ARol, GARCH
    :param data: data to work on
    :param N: number of series points to create
    :return: a pandas dataframe with non-stationary series for each <component,failure> combination
'''
evaluated_data = data.groupby([data.columns[0], data.columns[1]])[data.columns[2]].agg([max, 'mean']).reset_index()
# empty dataframe for saving the new series non stationary series points
non_stationary_series = pd.DataFrame(columns=[data.columns[0], data.columns[1], data.columns[2]])
# iterate through all <component_failure> groups
for index, group in tqdm(evaluated_data.iterrows(), total=evaluated_data.shape[0]):
# create the series points using a model
series = []
if model == 'ARol':
series = ARol(group['max'], group['mean'], N)
elif model == 'GARCH':
series = GARCH(group['mean'], N)
else:
print('Model is not provided.')
sys.exit(0)
# saving series points in dataframe
for s in series:
new_row = pd.DataFrame({data.columns[0]: group[0], data.columns[1]: group[1], data.columns[2]: s}, index=[0])
non_stationary_series = non_stationary_series.append(new_row, ignore_index=True)
# plot series
plt.plot(series)
dist = '_dist' if distinguishable else ''
plt.xlabel('Time')
plt.ylabel('Time Series Value')
plt.savefig('data_analysis/04_plots/nonstationary_' + data.columns[2] + '_' + model + '_' + data.columns[2] + dist + '.pdf')
plt.show()
return non_stationary_series.round(ROUNDED_ACCURACY)
def execute_ttest(shifted_data: pd.DataFrame) -> pd.DataFrame:
ttest_results = pd.DataFrame(columns=['component_1', 'failure_1', 'component_2', 'failure_2', 'statistic', 'pvalue'])
ordering = get_ordering(shifted_data)
data_grouped = shifted_data.groupby([shifted_data.columns[0], shifted_data.columns[1]])[shifted_data.columns[2]].apply(list).reset_index()
# evaluation starts here
previous = None
for index, name in ordering.iterrows():
if previous is not None:
# get the values for the previous and current <component, failure> group
values_pre = data_grouped.loc[(data_grouped[data_grouped.columns[0]] == previous[0]) & (data_grouped[data_grouped.columns[1]] == previous[1])][data_grouped.columns[2]].tolist()[0]
values_cur = data_grouped.loc[(data_grouped[data_grouped.columns[0]] == name[0]) & (data_grouped[data_grouped.columns[1]] == name[1])][data_grouped.columns[2]].tolist()[0]
# execute ttest
result = stats.ttest_ind(values_pre, values_cur)
new_row = {'component_1': previous[0], 'failure_1': previous[1],
'component_2': name[0], 'failure_2': name[1],
                       'statistic': result[0], 'pvalue': 1 if np.isnan(result[1]) else result[1]}
ttest_results = ttest_results.append(new_row, ignore_index=True)
previous = name
return ttest_results
def get_distinguishable_groups(ttest_results: pd.DataFrame, significance_level: float = 0.05) -> [()]:
'''
Returns a list of tuples of <component,failure> groups which are considered to be distinguishable.
    :param ttest_results: A dataframe with the ttest results for a group pair.
    :param significance_level: by default 0.05
:return: A list of tuples with distinguishable group pairs.
'''
    distinguishable_pairs = ttest_results.loc[(ttest_results['pvalue'] < significance_level), ['component_1', 'failure_1', 'component_2', 'failure_2']]
    first_list = list(zip(distinguishable_pairs.component_1, distinguishable_pairs.failure_1))
    second_list = list(zip(distinguishable_pairs.component_2, distinguishable_pairs.failure_2))
return list(set(first_list + second_list))
def filter_dataset(data: pd.DataFrame, component_failure_list: list) -> pd.DataFrame:
'''
Returns a dataframe with only these <component,failure> groups which are in the component_failure_list.
:param data: dataframe to be filtered
:param component_failure_list: a list of tuples, which indicates all <component,failure> groups to be maintained in the dataset.
:return: a filtered dataset
'''
filtered_data = pd.DataFrame(columns=data.columns)
for cf in component_failure_list:
selection = data[((data[data.columns[0]] == cf[0]) & (data[data.columns[1]] == cf[1]))]
filtered_data = pd.concat([filtered_data, selection], sort=False, ignore_index=True)
return filtered_data
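# End-to-end sketch of the distinguishability pipeline built from the functions above
# (hypothetical dataframe `data` with <component, failure, value> columns):
# shifted = shift_data(data)
# ttest_results = execute_ttest(shifted)
# keep = get_distinguishable_groups(ttest_results, significance_level=0.05)
# distinguishable_data = filter_dataset(shifted, keep)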
|
[
"rachel.brabender@student.hpi.de"
] |
rachel.brabender@student.hpi.de
|
27eab4cbe98168742d3f6ccf3d5325268d87fdc4
|
ebf2024bd355f66f7ccb7d96345250522c4aa298
|
/yoti_python_sdk/version.py
|
70e0898b716e1ce7c38fc51d5c14ae402d9c61f4
|
[
"MIT"
] |
permissive
|
blockchain-Bobby/yoti-python-sdk
|
1af4d5f971343133f81699827ecb8b8b48720c7c
|
14e89e159a23176f1b1c84ff49d845e7e54b421f
|
refs/heads/master
| 2020-05-27T23:49:27.004758
| 2019-04-23T10:13:03
| 2019-04-23T13:41:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46
|
py
|
# -*- coding: utf-8 -*-
__version__ = "2.7.1"
|
[
"echarrod@users.noreply.github.com"
] |
echarrod@users.noreply.github.com
|
d210b2cc0dd16f39a0b573034a3ead4fbef0ae4b
|
54e7532c89d8c88c86b50d8b4966b21df4732416
|
/maml_examples/cluster_maml_trpo_cheetah.py
|
795a9001970a5ea4321f9a07be5d5977684a56e8
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
chi6/Model-based-meta-learning-rl
|
e6facccc139c1f7d8afb17e7d9ff92a64d7ac65f
|
fda134dcbd87ef3e91f339ea2f836f28ec5f7784
|
refs/heads/master
| 2020-03-25T04:33:49.083196
| 2018-08-03T08:52:56
| 2018-08-03T08:52:56
| 143,401,489
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,136
|
py
|
from sandbox.rocky.tf.algos.maml_trpo import MAMLTRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.baselines.gaussian_mlp_baseline import GaussianMLPBaseline
from rllab.envs.mujoco.half_cheetah_env_rand import HalfCheetahEnvRand
from rllab.envs.mujoco.half_cheetah_env_rand_direc import HalfCheetahEnvRandDirec
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import stub, run_experiment_lite
#from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from sandbox.rocky.tf.policies.maml_minimal_gauss_mlp_policy import MAMLGaussianMLPPolicy
from sandbox.rocky.tf.envs.base import TfEnv
import tensorflow as tf
stub(globals())
from rllab.misc.instrument import VariantGenerator, variant
class VG(VariantGenerator):
@variant
def fast_lr(self):
return [0.1]
@variant
def meta_step_size(self):
return [0.01]
@variant
def fast_batch_size(self):
return [20] # #10, 20, 40
@variant
def meta_batch_size(self):
return [40] # at least a total batch size of 400. (meta batch size*fast batch size)
@variant
def seed(self):
return [1]
@variant
def direc(self): # directionenv vs. goal velocity
return [False]
# should also code up alternative KL thing
variants = VG().variants()
max_path_length = 200
num_grad_updates = 1
use_maml=True
for v in variants:
direc = v['direc']
learning_rate = v['meta_step_size']
if direc:
env = TfEnv(normalize(HalfCheetahEnvRandDirec()))
else:
env = TfEnv(normalize(HalfCheetahEnvRand()))
policy = MAMLGaussianMLPPolicy(
name="policy",
env_spec=env.spec,
grad_step_size=v['fast_lr'],
hidden_nonlinearity=tf.nn.relu,
hidden_sizes=(100,100),
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = MAMLTRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=v['fast_batch_size'], # number of trajs for grad update
max_path_length=max_path_length,
meta_batch_size=v['meta_batch_size'],
num_grad_updates=num_grad_updates,
n_itr=800,
use_maml=use_maml,
step_size=v['meta_step_size'],
plot=False,
)
direc = 'direc' if direc else ''
run_experiment_lite(
algo.train(),
exp_prefix='trpo_maml_cheetah' + direc + str(max_path_length),
exp_name='maml'+str(int(use_maml))+'_fbs'+str(v['fast_batch_size'])+'_mbs'+str(v['meta_batch_size'])+'_flr_' + str(v['fast_lr']) + '_mlr' + str(v['meta_step_size']),
# Number of parallel workers for sampling
n_parallel=8,
# Only keep the snapshot parameters for the last iteration
snapshot_mode="gap",
snapshot_gap=25,
sync_s3_pkl=True,
python_command='python3',
# Specifies the seed for the experiment. If this is not provided, a random seed
# will be used
seed=v["seed"],
mode="local",
#mode="ec2",
variant=v,
# plot=True,
# terminate_machine=False,
)
|
[
"chi.zhang@horizon.ai"
] |
chi.zhang@horizon.ai
|
f56b768381f0da04743a6976acf4ff2dafa9ea41
|
09c689c2d7db4fd80d0809048585919a2772ce2a
|
/proiect.py
|
2d7365754502d30713e7040f03231ed5767bdc4b
|
[] |
no_license
|
RotaruGeorge/Proiect.LP.image_color_space
|
d2ed1c0fcbd5c8f4937abff1906b5b14d75f750b
|
34a2fd2e63027144f1f88a44124aa654e8b9e1a5
|
refs/heads/main
| 2023-05-09T00:06:14.173241
| 2021-05-24T05:55:27
| 2021-05-24T05:55:27
| 365,841,324
| 1
| 1
| null | 2021-05-20T05:53:53
| 2021-05-09T20:24:22
|
Python
|
UTF-8
|
Python
| false
| false
| 3,245
|
py
|
import cv2
import numpy as np
from tkinter import *
from tkinter import filedialog
import os
import tkinter as tk
from PIL import Image, ImageTk
def stackImages(scale,imgArray):
rows = len(imgArray)
cols = len(imgArray[0])
rowsAvailable = isinstance(imgArray[0], list)
width = imgArray[0][0].shape[1]
height = imgArray[0][0].shape[0]
if rowsAvailable:
for x in range ( 0, rows):
for y in range ( 0, cols):
                if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:
imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
else:
imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)
if len(imgArray[x][y].shape) == 2: imgArray[x][y]= cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)
imageBlank = np.zeros((height, width, 3), np.uint8)
hor = [imageBlank]*rows
hor_con = [imageBlank]*rows
for x in range(0, rows):
hor[x] = np.hstack(imgArray[x])
ver = np.vstack(hor)
else:
for x in range(0, rows):
if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
else:
imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None, scale, scale)
if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
hor = np.hstack(imgArray)
ver = hor
return ver
def showimage():
fln = filedialog.askopenfilename(initialdir=os.getcwd(), title="Select image file", filetypes=(("JPG File", "*.jpg"), ("PNG file", "*.png"), ("All File", "*.*")))
img = Image.open(fln)
img.thumbnail((350,350))
img = ImageTk.PhotoImage(img)
lbl.configure(image=img)
lbl.image = img
img=cv2.imread(fln)
img = cv2.resize(img, (500, 500), None, None, None)
kernel = np.ones((5,5),np.uint8)
print(kernel)
image_Gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
image_XYZ = cv2.cvtColor(img, cv2.COLOR_RGB2XYZ)
image_LAB = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
image_BGR = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
image_HSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
image_HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    image_YUV = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
    StackedImages = stackImages(0.6,([img,image_Gray,image_XYZ,image_LAB],[image_YUV,image_BGR,image_HLS,image_HSV]))
cv2.imshow("Tipuri de spatiu de culoare", StackedImages)
img = Image.open("",image_Gray)
title_font = ImageFont.truetype('arial', 24)
cv2.waitKey(0)
cv2.destroyAllWindows()
root = Tk()
frm = Frame(root)
frm.pack(side=BOTTOM, padx=15, pady=15)
lbl= Label(root)
lbl.pack()
btn = Button(frm, text="Cauta imagine",command=showimage)
btn.pack(side=tk.LEFT)
btn = Button(frm, text="Exit",command=lambda: exit())
btn.pack(side=tk.LEFT, padx=10)
root.title("Imagine RGB")
root.geometry("300x500")
root.mainloop()
|
[
"noreply@github.com"
] |
RotaruGeorge.noreply@github.com
|
5cd800c170ee03a8adb3ac3339e617e5d8a29474
|
094f8021f4afa10edb83d0e613890624f9268e3e
|
/vision_test.py
|
b087c9105554668bd9e95b97c7d99aa89e098e91
|
[] |
no_license
|
mdose/Xibbit
|
3fe3ce1b46fcd646ef04a46a5f1199c664856852
|
23be84cbadbe22d506da03453db16db765ed5157
|
refs/heads/master
| 2022-12-17T04:17:28.382647
| 2018-01-30T05:50:27
| 2018-01-30T05:50:27
| 99,608,797
| 0
| 0
| null | 2022-12-08T00:37:19
| 2017-08-07T18:43:43
|
Python
|
UTF-8
|
Python
| false
| false
| 710
|
py
|
import io
import os
# Imports the Google Cloud client library
from google.cloud import vision
from google.cloud.vision import types
# Instantiates a client
client = vision.ImageAnnotatorClient()
# for each artwork run everything below
# The name of the image file to annotate
file_name = os.path.join(
os.path.dirname(__file__),
'static/img/Mona_Lisa.jpg')
# Loads the image into memory
with io.open(file_name, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
# Performs label detection on the image file
response = client.label_detection(image=image)
labels = response.label_annotations
print('Labels:')
for label in labels:
    print(label.description)
|
[
"megan.e.dose@gmail.com"
] |
megan.e.dose@gmail.com
|
9e54a89ace4cb633ba4d375d05e8a5ed12718b7f
|
b08d42933ac06045905d7c005ca9c114ed3aecc0
|
/src/coefSubset/evaluate/ranks/fifth/rank_2aq1_C.py
|
803fc6f7cced548af7722d0cfac6e9f911e4ec98
|
[] |
no_license
|
TanemuraKiyoto/PPI-native-detection-via-LR
|
d148d53f5eb60a4dda5318b371a3048e3f662725
|
897e7188b0da94e87126a4acc0c9a6ff44a64574
|
refs/heads/master
| 2022-12-05T11:59:01.014309
| 2020-08-10T00:41:17
| 2020-08-10T00:41:17
| 225,272,083
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,204
|
py
|
# 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '2aq1.csv'
identifier = 'C'
thresholdCoef = 0.2
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/fifth/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/fifth/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Drop features with coefficients below threshold
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs = coefs[np.abs(coefs['coefficients']) < thresholdCoef]
dropList = list(coefs.index)
del coefs
df1.drop(dropList, axis = 1, inplace = True)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
# subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
	# Standardize input DF using the standard scaler used for training data.
df2 = scaler.transform(df2)
# Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
|
[
"tanemur1@msu.edu"
] |
tanemur1@msu.edu
|
741a2eade9a96c9ce55757c4ca24892457cdad15
|
41ca5def1befabece018da6102bb79b3baba6370
|
/Options_V2.py
|
b13a28047534709972f7d216cab470c74a43eaa4
|
[] |
no_license
|
CesarKagohara/options
|
498ec122df64a1d3ad8354c78a8118f0c344a991
|
dc9ce040baa3f8abc53b7154bf0733f8ae42abfa
|
refs/heads/master
| 2023-07-02T08:53:43.919404
| 2021-08-12T17:38:28
| 2021-08-12T17:38:28
| 275,586,098
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,622
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[44]:
import pandas as pd
import requests
import json
import numpy as np
from datetime import date
from pandas.tseries.offsets import BDay
def b3sa_Options(token):
r = requests.get(token)
vJson = json.loads(r.content)
vJson['token']
optionList = 'https://arquivos.b3.com.br/api/download/?token={}'.format(vJson['token'])
r1 = requests.get(optionList)
data = r1.content.decode("ISO-8859-1").splitlines()
df = pd.DataFrame(data)
df.dropna(inplace = True)
df = pd.DataFrame(df.applymap(str))
df = df[0].str.split(";", n = 100, expand = True)
df.columns = df.iloc[0]
df = df[1:]
df = df.reset_index()
df.drop("index", axis = 1, inplace=True)
return df
token = 'https://arquivos.b3.com.br/api/download/requestname?fileName=InstrumentsConsolidated&date={}&recaptchaToken='.format(date.today().strftime("%Y-%m-%d"))
df=b3sa_Options(token)
df2 = df[["TckrSymb", "Asst", "SgmtNm", "SctyCtgyNm", "XprtnDt", "TradgEndDt", "OptnTp", "ExrcPric", "OptnStyle"]]
df2 = df2[df2['SctyCtgyNm']=='OPTION ON EQUITIES']
df2['ExrcPric'] = [x.replace(',', '.') for x in df2['ExrcPric']]
df2['ExrcPric'] = df2['ExrcPric'].astype(float)
lastBD = pd.Timestamp.today() - BDay(1)
lastBD = lastBD.strftime("%Y-%m-%d")
token = 'https://arquivos.b3.com.br/api/download/requestname?fileName=TradeInformationConsolidated&date={}'.format(lastBD)
df_trade=b3sa_Options(token)
df_trade = df_trade[(df_trade['SgmtNm']=='EQUITY PUT') | (df_trade['SgmtNm']=='EQUITY CALL')]
df_trade=df_trade[(df_trade['TradQty']!='')]
df_trade["Strike"]=0.0
df_trade["Ticker"]=""
df_trade["Validade"]=""
df_trade["Tipo"]=""
df_trade["Price"] = 0.0
for i in df_trade.TckrSymb:
try:
        df_trade.loc[df_trade.loc[df_trade['TckrSymb']==i].index.values[0],'Strike'] = df2.loc[df2["TckrSymb"]==i].ExrcPric.values[0]
        df_trade.loc[df_trade.loc[df_trade['TckrSymb']==i].index.values[0],'Ticker'] = df2.loc[df2["TckrSymb"]==i].Asst.values[0]
        df_trade.loc[df_trade.loc[df_trade['TckrSymb']==i].index.values[0],'Validade'] = df2.loc[df2["TckrSymb"]==i].XprtnDt.values[0]
        df_trade.loc[df_trade.loc[df_trade['TckrSymb']==i].index.values[0],'Tipo'] = df2.loc[df2["TckrSymb"]==i].OptnStyle.values[0]
except:
pass
vTopacoes = pd.read_excel(r"C:\Users\kiyo_\Desktop\projects\options\Cotacao.xlsx")
for i in df_trade.Ticker:
try:
        df_trade.loc[df_trade.loc[df_trade['Ticker']==i].index.values,'Price'] = vTopacoes.loc[vTopacoes[0]==i].Atual.values[0]
except:
pass
df_trade = df_trade[df_trade["Price"]!=0.0]
df_trade = df_trade[(df_trade["Price"]/df_trade["Strike"]>0.99) & (df_trade["Price"]/df_trade["Strike"]<1.01)]
new_df1=df_trade[["Ticker","TckrSymb","SgmtNm","Tipo","Validade","LastPric","TradQty","FinInstrmQty","Strike","Price"]]
new_df1=new_df1.reset_index()
new_df1=new_df1.drop(['index'], axis=1)
new_df1["LastPric"]=new_df1["LastPric"].apply(lambda x: x.replace(',','.'))
new_df1["LastPric"]=new_df1["LastPric"].astype(float)
new_df1["Validade"] = pd.to_datetime(new_df1["Validade"])
new_df1["Days"] = new_df1["Validade"].apply(lambda x: x - pd.datetime.today())
new_df1["Days"] = new_df1["Days"].apply(lambda x: x.days)
new_df1["TradQty"]=new_df1["TradQty"].astype(int)
new_df1 = new_df1[new_df1["TradQty"]>5]
new_df1 = new_df1[["Ticker","TckrSymb","SgmtNm","Validade","Tipo","Days","Strike"]]
new_df1["Ticker1"] = new_df1["Ticker"].apply(lambda x: "=@BULLDDE|MOFV!"+x)
new_df1["TckrSymb1"] = new_df1["TckrSymb"].apply(lambda x: "=@BULLDDE|MOFC!"+x)
new_df1.to_csv("Options_V2.csv")
# In[ ]:
|
[
"53622177+boxveil@users.noreply.github.com"
] |
53622177+boxveil@users.noreply.github.com
|
94589aa547fcc98cedf05d1daaeb74814cf76e32
|
2f5972af16cd8264b775d53a21d08d145f554b1e
|
/api/api.py
|
5130a2b4676942d39f73ba4dd5ef6f8e63ca0a8d
|
[] |
no_license
|
Bill4869/reclycloth
|
6d874ad8734dbcf9583ec0ac6762f91152edbbcf
|
2ab0e71a314cdfbcd3d42a388b1ca33ed8642188
|
refs/heads/main
| 2023-08-29T20:44:21.679071
| 2021-10-10T06:29:22
| 2021-10-10T06:29:22
| 414,591,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,312
|
py
|
from pydantic.main import BaseModel
from fastapi import FastAPI, Request, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from typing import List
from PIL import Image
import io
import cv2
import base64
import numpy as np
from skgtoimg import skech2img, pil_loader
from skgtoimg_shoes import skech2img_shoes
from gen_mask import gen_mask
from change_color import color2gray
from mask2img import mask2img
from txt_syn_trans import texture_synth
from segment import segment
from blend import blend
from make_mask import get_mask
use_txt_syn=False
app = FastAPI()
test_dir="test/"
origins = [
"http://127.0.0.1:8887",
"*",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class imgs(BaseModel):
skg: str
txt: str
def base642img(code):
img=code.split(',',1)[1]
    img = base64.b64decode(img) # decode the base64-encoded image data back to raw binary bytes
    img = io.BytesIO(img) # wrap the bytes in io.BytesIO so Pillow can read them
img = Image.open(img).convert("RGB")
return img
def resize(img, long_side_px=512):
org_h, org_w, c = img.shape
scale = max(org_h, org_w)
h = int(org_h / scale * long_side_px)
w = int(org_w / scale * long_side_px)
resized = cv2.resize(img, (w, h))
return resized
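# For example (illustrative shape), resize(np.zeros((1080, 1920, 3), np.uint8))
# returns a 288x512 image: the long side is scaled to long_side_px and the
# aspect ratio is preserved.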
@app.post('/api/imgtoimg') # specify the HTTP method and endpoint
async def skechtoimg(Images:imgs):
skg=base642img(Images.skg)
txt=base642img(Images.txt)
txt = np.array(txt, dtype=np.uint8)
img = np.array(skg, dtype=np.uint8)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
#txt = cv2.cvtColor(txt, cv2.COLOR_RGB2BGR)
img = resize(img, long_side_px=512)
img_txt = resize(img, long_side_px=512)
h,w,c=txt.shape
s=70
print("sss",txt.shape)
#txt=txt[h//2-s:h//2+s,w//2-s:w//2+s,:]
s=min(h,w)
txt=txt[s//4:s*3//4,s//4:s*3//4,:]
h,w,c=txt.shape
txt = cv2.resize(txt, dsize=(100, 100))
h,w,c=txt.shape
seg = segment(img)
mask=gen_mask(seg)
cv2.imwrite("mask.png",mask)
img=img.astype(np.float32)
img_txt=img_txt.astype(np.float32)
trans = texture_synth(txt, img_txt, patch_length= int(80))
trans_resize = resize(trans,long_side_px=512)
img = blend(img, mask, trans_resize)
ret, dst_data = cv2.imencode('.jpg', img)
dst_str = base64.b64encode(dst_data)
return {"response": dst_str}
@app.post('/api/skechtoimg_cloth') # specify the HTTP method and endpoint
async def skechtoimg(Images:imgs):
#skg= Image.open(io.BytesIO(file[0].file.read())).convert('RGB')
#txt= Image.open(io.BytesIO(files[1].file.read())).convert('RGB')
skg=base642img(Images.skg)
txt=base642img(Images.txt)
txt = np.array(txt, dtype=np.uint8)
img = np.array(skg, dtype=np.uint8)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
txt = cv2.cvtColor(txt, cv2.COLOR_RGB2BGR)
cv2.imwrite("input.png",img)
img,part_mask=color2gray(img)
img=mask2img(img)
    # segmentation parts: background, body, hair, face, upper clothes, lower clothes, shoes
up_cloth_mask=part_mask[4]
up_cloth_mask=cv2.resize(up_cloth_mask, dsize=(256, 256))
up_cloth_mask[up_cloth_mask!=0]=255
print("cloth",up_cloth_mask.shape)
cv2.imwrite("output.png",img)
cv2.imwrite("output2.png",up_cloth_mask)
cv2.imwrite("txt.png",txt)
h,w,c=txt.shape
s=70
print("sss",txt.shape)
#txt=txt[h//2-s:h//2+s,w//2-s:w//2+s,:]
s=min(h,w)
txt=txt[s//4:s*3//4,s//4:s*3//4,:]
h,w,c=txt.shape
txt = cv2.resize(txt, dsize=(100, 100))
h,w,c=txt.shape
cv2.imwrite("txt.png",txt)
print(type(img[12,1,1]))
trans = texture_synth(txt, img,patch_length = min(h,w)//2)
trans = cv2.cvtColor(trans, cv2.COLOR_BGR2RGB)
img = blend(img, up_cloth_mask, trans)
img = resize(img,long_side_px=512)
cv2.imwrite("output3.png",img)
ret, dst_data = cv2.imencode('.jpg', img)
dst_str = base64.b64encode(dst_data)
return {"response": dst_str}
@app.post('/api/skechtoimg_bag') # specify the HTTP method and endpoint
async def skechtoimg(Images:imgs):
skg=base642img(Images.skg)
txt=base642img(Images.txt)
img = np.array(skg, dtype=np.uint8)
img=get_mask(img)
seg=Image.fromarray(img)
img=skech2img(skg,seg,txt)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = resize(img,long_side_px=512)
if use_txt_syn:
img = cv2.resize(img, dsize=(256,256))
mask=1*((img[:,:,0]>=240)*(img[:,:,1]>=240)*(img[:,:,2]>=240)).astype(np.float32)
mask=255*(1-mask)
img=img.astype(np.float32)
txt = np.array(txt, dtype=np.uint8)
txt = cv2.cvtColor(txt, cv2.COLOR_RGB2BGR)
h,w,c=txt.shape
s=min(h,w)
txt=txt[s//4:s*3//4,s//4:s*3//4,:]
cv2.imwrite("txt.png",txt)
cv2.imwrite("msk.png",mask)
cv2.imwrite("img.png",img)
h,w,c=txt.shape
txt = cv2.resize(txt, dsize=(100, 100))
h,w,c=txt.shape
trans = texture_synth(txt, img,patch_length = min(h,w)//2)
trans = cv2.cvtColor(trans, cv2.COLOR_BGR2RGB)
img = blend(img, mask, trans)
ret, dst_data = cv2.imencode('.jpg', img)
dst_str = base64.b64encode(dst_data)
return {"response": dst_str}
@app.post('/api/skechtoimg_shoes') # specify the HTTP method and endpoint
async def skechtoimg(Images:imgs):
skg=base642img(Images.skg)
txt=base642img(Images.txt)
img = np.array(skg, dtype=np.uint8)
img=get_mask(img)
seg=Image.fromarray(img)
img=skech2img_shoes(skg,seg,txt)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = resize(img,long_side_px=512)
if use_txt_syn:
img = cv2.resize(img, dsize=(256,256))
mask=1*((img[:,:,0]>=240)*(img[:,:,1]>=240)*(img[:,:,2]>=240)).astype(np.float32)
mask=255*(1-mask)
img=img.astype(np.float32)
txt = np.array(txt, dtype=np.uint8)
txt = cv2.cvtColor(txt, cv2.COLOR_RGB2BGR)
h,w,c=txt.shape
s=min(h,w)
txt=txt[s//4:s*3//4,s//4:s*3//4,:]
cv2.imwrite("txt.png",txt)
cv2.imwrite("msk.png",mask)
cv2.imwrite("img.png",img)
h,w,c=txt.shape
txt = cv2.resize(txt, dsize=(100, 100))
h,w,c=txt.shape
trans = texture_synth(txt, img,patch_length = min(h,w)//2)
trans = cv2.cvtColor(trans, cv2.COLOR_BGR2RGB)
img = blend(img, mask, trans)
ret, dst_data = cv2.imencode('.jpg', img)
dst_str = base64.b64encode(dst_data)
return {"response": dst_str}
@app.post('/api/debug') # specify the HTTP method and endpoint
async def skechtoimg(files: List[UploadFile] = File(...)):
img1= Image.open(io.BytesIO(files[0].file.read())).convert('RGB')
img2= Image.open(io.BytesIO(files[1].file.read())).convert('RGB')
img1 = np.array(img1, dtype=np.uint8)
img2 = np.array(img2, dtype=np.uint8)
w,h,c=img1.shape
img2.resize(w,h,c)
alpha=0.5
img=img1*alpha+img2*(1-alpha)
ret, dst_data = cv2.imencode('.jpg', img)
dst_str = base64.b64encode(dst_data)
return {"response": dst_str}
|
[
"chanvongnaraz.khampasith.vl@tut.jp"
] |
chanvongnaraz.khampasith.vl@tut.jp
|
c55deeff8c7eb2de39cdc09670d1bb9f2cdd8518
|
a3c264f6d04ee21fab3240de20655efccd231b40
|
/password_manager_app/models.py
|
754f72022a77fa3a1914ff07b47e7d5039b3bc92
|
[] |
no_license
|
JacekEjsmont/PasswordManager
|
19a3c824685ca275e3ee5b07ea9eba4dd838278f
|
c84115172d4e929f22709d3fe77a8e427ad5299f
|
refs/heads/master
| 2022-12-09T13:25:36.347987
| 2018-11-14T09:58:29
| 2018-11-14T09:58:29
| 156,843,737
| 0
| 0
| null | 2022-12-08T01:17:33
| 2018-11-09T10:02:02
|
Python
|
UTF-8
|
Python
| false
| false
| 427
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Entry(models.Model):
site_name = models.CharField(max_length=20)
site_url = models.URLField()
login_name = models.CharField(max_length=20)
login_password = models.CharField(max_length=30)
user = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.site_name
|
[
"jacek.ejsmont@gmail.com"
] |
jacek.ejsmont@gmail.com
|
924e7157f296dca05fe05d28ec7c1ab78be05493
|
65d730c548eb08650d7d279b2382b25e6d89518d
|
/entity/nonce.py
|
77307045e478d9fea473466109a8f85735655ff5
|
[] |
no_license
|
zooeZuo/MI_eSE
|
c33da76226a8d06ed17793e030afdc7b7cf0dbd0
|
c532fcaa5a3b73a80f51d01b1312d76d8aed67fb
|
refs/heads/master
| 2020-03-09T16:26:57.853369
| 2018-04-10T06:33:23
| 2018-04-10T06:33:23
| 128,885,017
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,197
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# date :2018/1/
# description :
# version :
# copyright :All copyright reserved by FMSH company
__author__ = 'zuodengbo'
import random
base = [str(x) for x in range(10)] + [chr(x) for x in range(ord('A'), ord('A') + 6)]
# binary to decimal
def bin2dec(str_num):
return str(int(str_num, 2))
# hexadecimal to decimal
def hex2dec(str_num):
return str(int(str_num.upper(), 16))
# decimal to binary
def dec2bin(str_num):
num = int(str_num)
mid = []
while True:
if num == 0:
break
num, rem = divmod(num, 2)
mid.append(base[rem])
    return ''.join([str(y) for y in mid[::-1]])
# decimal to octal: oct()
# decimal to hexadecimal: hex()
def dec2hex(str_num):
num = int(str_num)
if num == 0:
return '0'
mid = []
while True:
if num == 0:
break
num, rem = divmod(num, 16)
mid.append(base[rem])
    return ''.join([str(y) for y in mid[::-1]])
# hexadecimal to binary
def hex2bin(str_num):
return dec2bin(hex2dec(str_num.upper()))
# binary to hexadecimal
def bin2hex(str_num):
return dec2hex(bin2dec(str_num))
# hexadecimal to string
def hex2str(data, l=16):
data = data[2:]
if data[len(data) - 1] == 'L':
data = data[:len(data) - 1]
while len(data) < l:
data = '0' + data
return data.upper()
def rand_hex(length):
num = ''
for i in range(0, length):
num += hex2str(hex(random.randint(0, 15)), 1)
return num.upper()
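# rand_hex(n) returns n uppercase hex digits, e.g. rand_hex(8) might yield
# '3FA2B90C'; the helpers below use it to build fixed-width random identifiers.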
# generate a 32-hex-digit random number
def Task_Id_Generator():
taskid = rand_hex(32)
return taskid
# 10-byte random number
def Terminal_Generator_10():
term = rand_hex(20)
return term
# generate an 8-byte terminal random number
def Terminal_Generator_8():
terminal = rand_hex(16)
return terminal
# generate a 6-byte terminal random number
def Terminal_Generator_6():
term = rand_hex(12)
return term
# generate a 4-byte serial number
def Serial_4():
serial = rand_hex(8)
return serial
if __name__ == '__main__':
p = Terminal_Generator_8()
q = Terminal_Generator_6()
d = Serial_4()
print(p)
print(q)
print(d)
|
[
"zooe.zuo@foxmail.com"
] |
zooe.zuo@foxmail.com
|
099dd6bad485d4212982d773b3a35f5e21d0d0ec
|
85f87d9475639047f1f85b29b608386fbe968e3d
|
/py/day_04/solve.py
|
c07e55d3f9b9fa13124d4556861594b0e985a91b
|
[
"MIT"
] |
permissive
|
Thundzz/advent-of-code-2020
|
0a7154a54e258ae078e8c9b6c00bc7164df4a945
|
17a66c20340357e44d61d8a6fd92bbf860f51195
|
refs/heads/main
| 2023-02-06T12:27:37.629049
| 2020-12-25T18:42:47
| 2020-12-25T18:42:47
| 317,969,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,260
|
py
|
from collections import namedtuple, Counter
from operator import add, mul
from functools import reduce
def parse_field(raw_field):
return raw_field.split(":")
def parse_input(filename):
with open(filename) as file:
lines = [l.strip() for l in file.readlines()]
passports = []
passport = {}
for line in lines:
if not line:
passports.append(passport)
passport = {}
else:
fields = dict([parse_field(rf) for rf in line.split()])
passport = { **passport, **fields }
passports.append(passport)
return passports
def has_mandatory_fields(passport, mandatory_fields):
return all([
field in passport
for field in mandatory_fields
])
ValidationRule = namedtuple("ValidationRule", ["parsing_fn", "validation_fn"])
Height = namedtuple("Height", ["value", "unit"])
def validate_rule(value, validation_rule):
parsed = None
try:
parsed = validation_rule.parsing_fn(value)
except Exception as e:
print("parsing error", value, validation_rule)
return False
if parsed:
return validation_rule.validation_fn(parsed)
def validates_rules(passport, rules):
return all([
fieldName in passport and validate_rule(passport[fieldName], rule)
for fieldName, rule in rules
])
def parse_year(data):
digits = set(map(str, range(10)))
assert(len(data) == 4)
assert(all([d in digits for d in data]))
return int(data)
def parse_height(data):
digits = set(map(str, range(10)))
unit = data[-2:]
value = data[:-2]
assert(unit in {"cm", "in"})
assert(all([(d in digits) for d in value]))
return Height(int(value), unit)
def validate_height(height):
if height.unit == "cm":
return 150 <= height.value <= 193
elif height.unit == "in":
return 59 <= height.value <= 76
else:
raise Exception("This should not happen")
def validate_eye_color(data):
valid_clrs = { "amb","blu","brn","gry","grn","hzl","oth" }
return data in valid_clrs
def validate_hair_color(haircolor):
	valid_chars = set("0123456789abcdef")
	chars_are_valid = all([c in valid_chars for c in haircolor[1:]])
	return haircolor[0] == "#" and len(haircolor) == 7 and chars_are_valid
def validate_cid(cid):
digits = set(map(str, range(10)))
return len(cid) == 9 and all([d in digits for d in cid])
def main():
passports = parse_input("input.txt")
mandatory_fields = [ "byr","iyr","eyr","hgt","hcl","ecl","pid" ]
identity = lambda x: x
rules = {
"byr" : ValidationRule(parse_year, lambda x: 1920 <= x <= 2002),
"iyr" : ValidationRule(parse_year, lambda x: 2010 <= x <= 2020),
"eyr" : ValidationRule(parse_year, lambda x: 2020 <= x <= 2030),
"hgt" : ValidationRule(parse_height, validate_height),
"hcl" : ValidationRule(identity, validate_hair_color),
"ecl" : ValidationRule(identity, validate_eye_color),
"pid" : ValidationRule(identity, validate_cid)
}
valid_passports_simple = [
passport for passport in passports
if has_mandatory_fields(passport, mandatory_fields)
]
valid_passports_complex = [
passport for passport in passports
if validates_rules(passport, rules.items())
]
# print(passports[0])
# for x, rule in rules.items():
# res = validate_rule(passports[0][x], rule)
# print(x, rule, res)
print(len(valid_passports_simple))
print(len(valid_passports_complex))
if __name__ == '__main__':
main()
|
[
"yacinenew@gmail.com"
] |
yacinenew@gmail.com
|
70988926cd823593d4fc66c9e0c48b210f99b7ac
|
13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab
|
/home--tommy--mypy/mypy/lib/python2.7/site-packages/statsmodels/stats/gof.py
|
1ae102ea9d91e9c46090866a204b6ab8d7d3d984
|
[
"Unlicense"
] |
permissive
|
tommybutler/mlearnpy2
|
8ec52bcd03208c9771d8d02ede8eaa91a95bda30
|
9e5d377d0242ac5eb1e82a357e6701095a8ca1ff
|
refs/heads/master
| 2022-10-24T23:30:18.705329
| 2022-10-17T15:41:37
| 2022-10-17T15:41:37
| 118,529,175
| 0
| 2
|
Unlicense
| 2022-10-15T23:32:18
| 2018-01-22T23:27:10
|
Python
|
UTF-8
|
Python
| false
| false
| 16,592
|
py
|
'''extra statistical function and helper functions
contains:
* goodness-of-fit tests
- powerdiscrepancy
- gof_chisquare_discrete
- gof_binning_discrete
Author: Josef Perktold
License : BSD-3
changes
-------
2013-02-25 : add chisquare_power, effectsize and "value"
'''
from statsmodels.compat.python import range, lrange, string_types
import numpy as np
from scipy import stats
# copied from regression/stats.utils
def powerdiscrepancy(observed, expected, lambd=0.0, axis=0, ddof=0):
"""Calculates power discrepancy, a class of goodness-of-fit tests
as a measure of discrepancy between observed and expected data.
This contains several goodness-of-fit tests as special cases, see the
description of lambd, the exponent of the power discrepancy. The pvalue
is based on the asymptotic chi-square distribution of the test statistic.
freeman_tukey:
D(x|\theta) = \sum_j (\sqrt{x_j} - \sqrt{e_j})^2
Parameters
----------
o : Iterable
Observed values
e : Iterable
Expected values
lambd : float or string
* float : exponent `a` for power discrepancy
* 'loglikeratio': a = 0
* 'freeman_tukey': a = -0.5
* 'pearson': a = 1 (standard chisquare test statistic)
* 'modified_loglikeratio': a = -1
* 'cressie_read': a = 2/3
* 'neyman' : a = -2 (Neyman-modified chisquare, reference from a book?)
axis : int
axis for observations of one series
ddof : int
degrees of freedom correction,
Returns
-------
D_obs : Discrepancy of observed values
pvalue : pvalue
References
----------
Cressie, Noel and Timothy R. C. Read, Multinomial Goodness-of-Fit Tests,
Journal of the Royal Statistical Society. Series B (Methodological),
Vol. 46, No. 3 (1984), pp. 440-464
Campbell B. Read: Freeman-Tukey chi-squared goodness-of-fit statistics,
Statistics & Probability Letters 18 (1993) 271-278
Nobuhiro Taneichi, Yuri Sekiya, Akio Suzukawa, Asymptotic Approximations
for the Distributions of the Multinomial Goodness-of-Fit Statistics
under Local Alternatives, Journal of Multivariate Analysis 81, 335-359 (2002)
Steele, M., C. Hurst and J. Chaseling, Simulated Power of Discrete
Goodness-of-Fit Tests for Likert Type Data
Examples
--------
>>> observed = np.array([ 2., 4., 2., 1., 1.])
>>> expected = np.array([ 0.2, 0.2, 0.2, 0.2, 0.2])
for checking correct dimension with multiple series
>>> powerdiscrepancy(np.column_stack((observed,observed)).T, 10*expected, lambd='freeman_tukey',axis=1)
(array([[ 2.745166, 2.745166]]), array([[ 0.6013346, 0.6013346]]))
>>> powerdiscrepancy(np.column_stack((observed,observed)).T, 10*expected,axis=1)
(array([[ 2.77258872, 2.77258872]]), array([[ 0.59657359, 0.59657359]]))
>>> powerdiscrepancy(np.column_stack((observed,observed)).T, 10*expected, lambd=0,axis=1)
(array([[ 2.77258872, 2.77258872]]), array([[ 0.59657359, 0.59657359]]))
>>> powerdiscrepancy(np.column_stack((observed,observed)).T, 10*expected, lambd=1,axis=1)
(array([[ 3., 3.]]), array([[ 0.5578254, 0.5578254]]))
>>> powerdiscrepancy(np.column_stack((observed,observed)).T, 10*expected, lambd=2/3.0,axis=1)
(array([[ 2.89714546, 2.89714546]]), array([[ 0.57518277, 0.57518277]]))
>>> powerdiscrepancy(np.column_stack((observed,observed)).T, expected, lambd=2/3.0,axis=1)
(array([[ 2.89714546, 2.89714546]]), array([[ 0.57518277, 0.57518277]]))
>>> powerdiscrepancy(np.column_stack((observed,observed)), expected, lambd=2/3.0, axis=0)
(array([[ 2.89714546, 2.89714546]]), array([[ 0.57518277, 0.57518277]]))
each random variable can have different total count/sum
>>> powerdiscrepancy(np.column_stack((observed,2*observed)), expected, lambd=2/3.0, axis=0)
(array([[ 2.89714546, 5.79429093]]), array([[ 0.57518277, 0.21504648]]))
>>> powerdiscrepancy(np.column_stack((observed,2*observed)), expected, lambd=2/3.0, axis=0)
(array([[ 2.89714546, 5.79429093]]), array([[ 0.57518277, 0.21504648]]))
>>> powerdiscrepancy(np.column_stack((2*observed,2*observed)), expected, lambd=2/3.0, axis=0)
(array([[ 5.79429093, 5.79429093]]), array([[ 0.21504648, 0.21504648]]))
>>> powerdiscrepancy(np.column_stack((2*observed,2*observed)), 20*expected, lambd=2/3.0, axis=0)
(array([[ 5.79429093, 5.79429093]]), array([[ 0.21504648, 0.21504648]]))
>>> powerdiscrepancy(np.column_stack((observed,2*observed)), np.column_stack((10*expected,20*expected)), lambd=2/3.0, axis=0)
(array([[ 2.89714546, 5.79429093]]), array([[ 0.57518277, 0.21504648]]))
>>> powerdiscrepancy(np.column_stack((observed,2*observed)), np.column_stack((10*expected,20*expected)), lambd=-1, axis=0)
(array([[ 2.77258872, 5.54517744]]), array([[ 0.59657359, 0.2357868 ]]))
"""
o = np.array(observed)
e = np.array(expected)
if not isinstance(lambd, string_types):
a = lambd
else:
if lambd == 'loglikeratio': a = 0
elif lambd == 'freeman_tukey': a = -0.5
elif lambd == 'pearson': a = 1
elif lambd == 'modified_loglikeratio': a = -1
elif lambd == 'cressie_read': a = 2/3.0
else:
raise ValueError('lambd has to be a number or one of ' + \
'loglikeratio, freeman_tukey, pearson, ' +\
'modified_loglikeratio or cressie_read')
n = np.sum(o, axis=axis)
nt = n
if n.size>1:
n = np.atleast_2d(n)
if axis == 1:
nt = n.T # need both for 2d, n and nt for broadcasting
if e.ndim == 1:
e = np.atleast_2d(e)
if axis == 0:
e = e.T
if np.all(np.sum(e, axis=axis) == n):
p = e/(1.0*nt)
elif np.all(np.sum(e, axis=axis) == 1):
p = e
e = nt * e
else:
raise ValueError('observed and expected need to have the same ' +\
'number of observations, or e needs to add to 1')
k = o.shape[axis]
if e.shape[axis] != k:
raise ValueError('observed and expected need to have the same ' +\
'number of bins')
# Note: taken from formulas, to simplify cancel n
if a == 0: # log likelihood ratio
D_obs = 2*n * np.sum(o/(1.0*nt) * np.log(o/e), axis=axis)
elif a == -1: # modified log likelihood ratio
D_obs = 2*n * np.sum(e/(1.0*nt) * np.log(e/o), axis=axis)
else:
D_obs = 2*n/a/(a+1) * np.sum(o/(1.0*nt) * ((o/e)**a - 1), axis=axis)
return D_obs, stats.chi2.sf(D_obs,k-1-ddof)
#todo: need also binning for continuous distribution
# and separated binning function to be used for powerdiscrepancy
def gof_chisquare_discrete(distfn, arg, rvs, alpha, msg):
'''perform chisquare test for random sample of a discrete distribution
Parameters
----------
distname : string
name of distribution function
arg : sequence
parameters of distribution
alpha : float
significance level, threshold for p-value
Returns
-------
result : bool
0 if test passes, 1 if test fails
Notes
-----
originally written for scipy.stats test suite,
still needs to be checked for standalone usage, insufficient input checking
may not run yet (after copy/paste)
refactor: maybe a class, check returns, or separate binning from
test results
'''
# define parameters for test
## n=2000
n = len(rvs)
nsupp = 20
wsupp = 1.0/nsupp
## distfn = getattr(stats, distname)
## np.random.seed(9765456)
## rvs = distfn.rvs(size=n,*arg)
# construct intervals with minimum mass 1/nsupp
    # intervals are left-half-open as in a cdf difference
distsupport = lrange(max(distfn.a, -1000), min(distfn.b, 1000) + 1)
last = 0
distsupp = [max(distfn.a, -1000)]
distmass = []
for ii in distsupport:
current = distfn.cdf(ii,*arg)
if current - last >= wsupp-1e-14:
distsupp.append(ii)
distmass.append(current - last)
last = current
if current > (1-wsupp):
break
if distsupp[-1] < distfn.b:
distsupp.append(distfn.b)
distmass.append(1-last)
distsupp = np.array(distsupp)
distmass = np.array(distmass)
# convert intervals to right-half-open as required by histogram
histsupp = distsupp+1e-8
histsupp[0] = distfn.a
# find sample frequencies and perform chisquare test
#TODO: move to compatibility.py
freq, hsupp = np.histogram(rvs,histsupp)
cdfs = distfn.cdf(distsupp,*arg)
(chis,pval) = stats.chisquare(np.array(freq),n*distmass)
return chis, pval, (pval > alpha), 'chisquare - test for %s' \
'at arg = %s with pval = %s' % (msg,str(arg),str(pval))
# copy/paste, remove code duplication when it works
def gof_binning_discrete(rvs, distfn, arg, nsupp=20):
'''get bins for chisquare type gof tests for a discrete distribution
Parameters
----------
rvs : array
sample data
distname : string
name of distribution function
arg : sequence
parameters of distribution
nsupp : integer
number of bins. The algorithm tries to find bins with equal weights.
depending on the distribution, the actual number of bins can be smaller.
Returns
-------
freq : array
empirical frequencies for sample; not normalized, adds up to sample size
expfreq : array
theoretical frequencies according to distribution
histsupp : array
bin boundaries for histogram, (added 1e-8 for numerical robustness)
Notes
-----
The results can be used for a chisquare test ::
(chis,pval) = stats.chisquare(freq, expfreq)
originally written for scipy.stats test suite,
still needs to be checked for standalone usage, insufficient input checking
may not run yet (after copy/paste)
refactor: maybe a class, check returns, or separate binning from
test results
todo :
optimal number of bins ? (check easyfit),
recommendation in literature at least 5 expected observations in each bin
'''
# define parameters for test
## n=2000
n = len(rvs)
wsupp = 1.0/nsupp
## distfn = getattr(stats, distname)
## np.random.seed(9765456)
## rvs = distfn.rvs(size=n,*arg)
# construct intervals with minimum mass 1/nsupp
    # intervals are left-half-open as in a cdf difference
distsupport = lrange(max(distfn.a, -1000), min(distfn.b, 1000) + 1)
last = 0
distsupp = [max(distfn.a, -1000)]
distmass = []
for ii in distsupport:
current = distfn.cdf(ii,*arg)
if current - last >= wsupp-1e-14:
distsupp.append(ii)
distmass.append(current - last)
last = current
if current > (1-wsupp):
break
if distsupp[-1] < distfn.b:
distsupp.append(distfn.b)
distmass.append(1-last)
distsupp = np.array(distsupp)
distmass = np.array(distmass)
# convert intervals to right-half-open as required by histogram
histsupp = distsupp+1e-8
histsupp[0] = distfn.a
# find sample frequencies and perform chisquare test
freq,hsupp = np.histogram(rvs,histsupp)
#freq,hsupp = np.histogram(rvs,histsupp,new=True)
cdfs = distfn.cdf(distsupp,*arg)
return np.array(freq), n*distmass, histsupp
# -*- coding: utf-8 -*-
"""Extension to chisquare goodness-of-fit test
Created on Mon Feb 25 13:46:53 2013
Author: Josef Perktold
License: BSD-3
"""
def chisquare(f_obs, f_exp=None, value=0, ddof=0, return_basic=True):
'''chisquare goodness-of-fit test
The null hypothesis is that the distance between the expected distribution
and the observed frequencies is ``value``. The alternative hypothesis is
that the distance is larger than ``value``. ``value`` is normalized in
terms of effect size.
The standard chisquare test has the null hypothesis that ``value=0``, that
is the distributions are the same.
Notes
-----
The case with value greater than zero is similar to an equivalence test,
that the exact null hypothesis is replaced by an approximate hypothesis.
However, TOST "reverses" null and alternative hypothesis, while here the
alternative hypothesis is that the distance (divergence) is larger than a
threshold.
References
----------
McLaren, ...
Drost,...
See Also
--------
powerdiscrepancy
scipy.stats.chisquare
'''
f_obs = np.asarray(f_obs)
n_bins = len(f_obs)
nobs = f_obs.sum(0)
if f_exp is None:
# uniform distribution
f_exp = np.empty(n_bins, float)
f_exp.fill(nobs / float(n_bins))
f_exp = np.asarray(f_exp, float)
chisq = ((f_obs - f_exp)**2 / f_exp).sum(0)
if value == 0:
pvalue = stats.chi2.sf(chisq, n_bins - 1 - ddof)
else:
pvalue = stats.ncx2.sf(chisq, n_bins - 1 - ddof, value**2 * nobs)
if return_basic:
return chisq, pvalue
else:
return chisq, pvalue #TODO: replace with TestResults
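# Minimal usage sketch (illustrative counts): the standard test against a uniform
# expected distribution, and the same counts tested against a nonzero distance.
# chisq, pval = chisquare([28, 22, 30, 20])
# chisq, pval_nc = chisquare([28, 22, 30, 20], value=0.1)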
def chisquare_power(effect_size, nobs, n_bins, alpha=0.05, ddof=0):
'''power of chisquare goodness of fit test
effect size is sqrt of chisquare statistic divided by nobs
Parameters
----------
effect_size : float
This is the deviation from the Null of the normalized chi_square
statistic. This follows Cohen's definition (sqrt).
nobs : int or float
number of observations
n_bins : int (or float)
number of bins, or points in the discrete distribution
alpha : float in (0,1)
significance level of the test, default alpha=0.05
Returns
-------
power : float
power of the test at given significance level at effect size
Notes
-----
This function also works vectorized if all arguments broadcast.
This can also be used to calculate the power for power divergence test.
However, for the range of more extreme values of the power divergence
parameter, this power is not a very good approximation for samples of
small to medium size (Drost et al. 1989)
References
----------
Drost, ...
See Also
--------
chisquare_effectsize
statsmodels.stats.GofChisquarePower
'''
crit = stats.chi2.isf(alpha, n_bins - 1 - ddof)
power = stats.ncx2.sf(crit, n_bins - 1 - ddof, effect_size**2 * nobs)
return power
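# For example, chisquare_power(effect_size=0.3, nobs=100, n_bins=5) returns the
# probability of rejecting a uniform null at alpha=0.05 for a medium effect
# (Cohen's w = 0.3) with 100 observations spread over 5 bins.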
def chisquare_effectsize(probs0, probs1, correction=None, cohen=True, axis=0):
'''effect size for a chisquare goodness-of-fit test
Parameters
----------
probs0 : array_like
probabilities or cell frequencies under the Null hypothesis
probs1 : array_like
probabilities or cell frequencies under the Alternative hypothesis
probs0 and probs1 need to have the same length in the ``axis`` dimension.
and broadcast in the other dimensions
Both probs0 and probs1 are normalized to add to one (in the ``axis``
dimension).
correction : None or tuple (nobs, df)
If None, then the effect size is the chisquare statistic divide by
the number of observations.
If the correction is a tuple (nobs, df), then the effectsize is
corrected to have less bias and a smaller variance. However, the
correction can make the effectsize negative. In that case, the
effectsize is set to zero.
        Pederson and Johnson (1990) as referenced in McLaren et al. (1994)
    cohen : bool
        If True, then the square root is returned as in the definition of the
        effect size by Cohen (1977). If False, then the original effect size
        is returned.
axis : int
If the probability arrays broadcast to more than 1 dimension, then
this is the axis over which the sums are taken.
Returns
-------
effectsize : float
effect size of chisquare test
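
    Examples
    --------
    A minimal sketch (illustrative probabilities, assumed):

    >>> es = chisquare_effectsize([0.25, 0.25, 0.25, 0.25],
    ...                           [0.4, 0.3, 0.2, 0.1])
    >>> print(round(es, 4))
    0.4472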
'''
probs0 = np.asarray(probs0, float)
probs1 = np.asarray(probs1, float)
probs0 = probs0 / probs0.sum(axis)
probs1 = probs1 / probs1.sum(axis)
d2 = ((probs1 - probs0)**2 / probs0).sum(axis)
if correction is not None:
nobs, df = correction
diff = ((probs1 - probs0) / probs0).sum(axis)
d2 = np.maximum((d2 * nobs - diff - df) / (nobs - 1.), 0)
if cohen:
return np.sqrt(d2)
else:
return d2
|
[
"tbutler.github@internetalias.net"
] |
tbutler.github@internetalias.net
|
035e0656aea599dd700ee14ad8f6ce0b6188591d
|
ef76dd094c83343af4e5f305320b500f13fe635f
|
/user/decorator.py
|
6b19467ec47254acb68c392342ab45edc2148343
|
[] |
no_license
|
keywookkim/11-WeWantedExplorers-backend
|
aaa8c0a4756fb84d556caca83049156acd3ccd3e
|
588e67388153e34699011eb1cfba10aaf6266ebf
|
refs/heads/master
| 2023-08-13T17:03:58.692981
| 2021-07-02T00:33:32
| 2021-07-02T00:33:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
import jwt
from django.http import JsonResponse
from wwe.settings import SECRET_KEY, ALGORITHM
from .models import UserAccount

def signin_decorator(func):
    def wrapper(self, request, *args, **kwargs):
        access_token = request.headers.get('Authorization', None)
        if not access_token:
            return JsonResponse({"message": "INVALID_SIGNIN"}, status=401)
        try:
            # PyJWT's decode() takes `algorithms` (a list); the original passed
            # the unsupported keyword `algorithm`, which PyJWT 2.x rejects.
            payload = jwt.decode(access_token, SECRET_KEY, algorithms=[ALGORITHM])
            user = UserAccount.objects.get(id=payload['user_id'])
            request.user = user
            return func(self, request, *args, **kwargs)
        except jwt.exceptions.DecodeError:
            return JsonResponse({'message': 'INVALID_TOKEN'}, status=401)
        except UserAccount.DoesNotExist:
            return JsonResponse({'message': 'INVALID_USER'}, status=401)
    return wrapper
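
# Usage sketch (hypothetical view; assumes Django class-based views and the
# decorator above):
#
# from django.views import View
#
# class ProfileView(View):
#     @signin_decorator
#     def get(self, request):
#         return JsonResponse({'email': request.user.email}, status=200)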
|
[
"42701133+soheon-lee@users.noreply.github.com"
] |
42701133+soheon-lee@users.noreply.github.com
|
e0445908419c92743dbb47292ef27376cbbdcffb
|
d49eff68460d328627d59d651fafb48810d76f8e
|
/test.py
|
25ed9a1967e97158546fe84768cfff4320a6d89c
|
[] |
no_license
|
Ronaldo009/genPlanByGA
|
2661fa4f657edf07762dd3149568d13943f38067
|
0ac0b1aeec8d3b83607f88437229ecc7d62e9de0
|
refs/heads/master
| 2021-01-21T22:10:21.658824
| 2017-06-23T03:25:40
| 2017-06-23T03:25:40
| 95,179,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,429
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/6/13 11:50 AM
# @Author : Huang HUi
# @Site :
# @File : test.py
# @Software: PyCharm
import random
GIVEN_QUERY = {'days': [4,14], 'countries': [{'country_id': 28, 'day': None}],
'regions': [{'region_id': 2, 'day': None}, {'region_id': 27, 'day': 1}, {'region_id': 69, 'day': None}], 'pois': [],
'regionNotGo': [], 'poiNotGo': [], 'regionSorted': [135, 131], 'availableMonths': [1,2,3,4,5,6,7,8,9,10],
'price': [0, 80000], 'hotelRating': None, 'arrivalRegionId': None, 'departRegionId': None}
aa=[{'region_id': 2, 'days': 2}, {'region_id': 27, 'days': 1}, {'region_id': 69, 'days': 1}, {'region_id': 3, 'days': 1}]
regionsMapInGenPlan = {x['region_id']: x['days'] for x in aa}
countryIds = list(map(lambda x: x['country_id'], GIVEN_QUERY['countries']))
days=GIVEN_QUERY['days']
regions=GIVEN_QUERY['regions']
regionDic=list(map(lambda x:{x['region_id']:x['day']},regions))
bb=[2,3,4,5]
regionsMapInQuery = {x['region_id']: x['day'] for x in regions}
aa=dict(a=3)
if (set(regionsMapInQuery.keys()) - set(regionsMapInGenPlan.keys())):
print("ssssss")
print(regionsMapInQuery.items())
print(regionsMapInGenPlan.items())
a=[1,2,0,4,5]
b=[11,22,33,44]
c=99
f=[]
g=[]
flag=True
while flag:
try:
flag=False
f.append(c/a[1])
except:
print("ass")
flag=True
print(f)
cc=7
if cc<10:
    k=1
elif cc<8:
    # unreachable: any value below 8 already satisfies cc<10 above
    k=2
|
[
"693012166@qq.com"
] |
693012166@qq.com
|
1a9fdad84082ad62c188334c8445b608a7a5a019
|
e36c5a91306f8d8cf487368d3a1dfae4c03da3c0
|
/build/kobuki/kobuki_rapps/catkin_generated/pkg.installspace.context.pc.py
|
372c7404214955533195ccc02886f75fca384f77
|
[] |
no_license
|
DocDouze/RobMob
|
84ae5b96a16028586c9da2008f7c7772bdaa1334
|
6a2e7505eb2207d61b1c354cfd255075b1efbc73
|
refs/heads/master
| 2020-04-11T07:24:28.958201
| 2018-12-17T11:56:54
| 2018-12-17T11:56:54
| 161,607,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "kobuki_rapps"
PROJECT_SPACE_DIR = "/home/aubailly/Bureau/RobMob/install"
PROJECT_VERSION = "0.7.6"
|
[
"quentin.aubailly@gmail.com"
] |
quentin.aubailly@gmail.com
|
d90fd35bf40e7a1f2b6daeb33e81655229a263b7
|
bf2055e49649d849874bd997d84b80eb9ae5cfaf
|
/app/config/settings/production.py
|
10d395de3a4fe62e5ccd0275b974071c2e19802f
|
[] |
no_license
|
jmichalicek/bash-shell.net
|
99b2fb7b99c7838065703329cf01707b3a4c0737
|
225f21c05289362c91d23561f3d32123e642ca94
|
refs/heads/main
| 2023-08-31T10:25:01.152443
| 2023-08-25T13:26:38
| 2023-08-25T13:26:38
| 12,030,543
| 3
| 1
| null | 2023-08-25T13:26:40
| 2013-08-11T03:17:14
|
Python
|
UTF-8
|
Python
| false
| false
| 455
|
py
|
import os
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from .base import *
sentry_sdk.init(
dsn=os.environ.get('SENTRY_DSN', ''),
integrations=[DjangoIntegration()],
# Since I have no traffic, this might be really low
traces_sample_rate=0.2,
# If you wish to associate users to errors (assuming you are using
# django.contrib.auth) you may enable sending PII data.
send_default_pii=True,
)
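
# Deployment sketch (assumed, not part of the original file): the DSN is read
# from the environment, e.g.
#   export SENTRY_DSN="https://<public_key>@sentry.example.com/<project_id>"
# With the default empty string, sentry_sdk sets up no transport, so event
# reporting is effectively disabled.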
|
[
"jmichalicek@gmail.com"
] |
jmichalicek@gmail.com
|
a13a7592a7f4d8f2714173051ff6dfe2495563d5
|
7134a4b998b6c353fdb113c49d80965753661b42
|
/marltoolbox/experiments/rllib_api/amtft_various_env.py
|
f6900c8bcfbed26d0c4e1219d03eea149f0418aa
|
[
"MIT"
] |
permissive
|
xingxiaoyu1109/marltoolbox
|
806b8ad720055a01c663ef15880b7808b460c5a2
|
cae1ba94ccb44700b66a32e0734a0f11c9c6c7fe
|
refs/heads/master
| 2023-05-07T14:56:37.367845
| 2021-06-02T15:54:27
| 2021-06-02T15:54:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,420
|
py
|
import copy
import logging
import os
import ray
from ray import tune
from ray.rllib.agents import dqn
from ray.rllib.agents.dqn.dqn_torch_policy import postprocess_nstep_and_prio
from ray.rllib.utils import merge_dicts
from ray.rllib.utils.schedules import PiecewiseSchedule
from ray.tune.integration.wandb import WandbLogger
from ray.tune.logger import DEFAULT_LOGGERS
from marltoolbox.algos import amTFT
from marltoolbox.envs import (
matrix_sequential_social_dilemma,
vectorized_coin_game,
vectorized_mixed_motive_coin_game,
ssd_mixed_motive_coin_game,
)
from marltoolbox.envs.utils.wrappers import (
add_RewardUncertaintyEnvClassWrapper,
)
from marltoolbox.scripts import aggregate_and_plot_tensorboard_data
from marltoolbox.utils import (
exploration,
log,
postprocessing,
miscellaneous,
plot,
self_and_cross_perf,
callbacks,
)
logger = logging.getLogger(__name__)
def main(debug, train_n_replicates=None, filter_utilitarian=None, env=None):
hparams = get_hyperparameters(
debug, train_n_replicates, filter_utilitarian, env
)
if hparams["load_plot_data"] is None:
ray.init(
num_cpus=os.cpu_count(), num_gpus=0, local_mode=hparams["debug"]
)
# Train
if hparams["load_policy_data"] is None:
tune_analysis_per_welfare = train_for_each_welfare_function(
hparams
)
else:
tune_analysis_per_welfare = load_tune_analysis(
hparams["load_policy_data"]
)
# Eval & Plot
analysis_metrics_per_mode = config_and_evaluate_cross_play(
tune_analysis_per_welfare, hparams
)
ray.shutdown()
else:
tune_analysis_per_welfare = None
# Plot
analysis_metrics_per_mode = config_and_evaluate_cross_play(
tune_analysis_per_welfare, hparams
)
return tune_analysis_per_welfare, analysis_metrics_per_mode
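
# Usage sketch (hypothetical invocations; only main(debug_mode) in the
# __main__ guard at the bottom of this file appears in the original):
# main(debug=True)
# main(debug=False, train_n_replicates=4, env="IteratedAsymBoS")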
def get_hyperparameters(
debug,
train_n_replicates=None,
filter_utilitarian=None,
env=None,
reward_uncertainty=0.0,
):
if debug:
train_n_replicates = 2
n_times_more_utilitarians_seeds = 1
elif train_n_replicates is None:
n_times_more_utilitarians_seeds = 4
train_n_replicates = 4
else:
n_times_more_utilitarians_seeds = 4
n_seeds_to_prepare = train_n_replicates * (
1 + n_times_more_utilitarians_seeds
)
pool_of_seeds = miscellaneous.get_random_seeds(n_seeds_to_prepare)
exp_name, _ = log.log_in_current_day_dir("amTFT")
hparams = {
"debug": debug,
"filter_utilitarian": filter_utilitarian
if filter_utilitarian is not None
else not debug,
"seeds": pool_of_seeds,
"train_n_replicates": train_n_replicates,
"n_times_more_utilitarians_seeds": n_times_more_utilitarians_seeds,
"exp_name": exp_name,
"log_n_points": 250,
"load_plot_data": None,
# Example: "load_plot_data": ".../SelfAndCrossPlay_save.p",
"load_policy_data": None,
# "load_policy_data": {
# "Util": [
# ".../IBP/amTFT/trials/"
# "DQN_AsymCoinGame_...",
# ".../IBP/amTFT/trials/"
# "DQN_AsymCoinGame_..."],
# 'IA':[
# ".../temp/IBP/amTFT/trials/"
# "DQN_AsymCoinGame_...",
# ".../IBP/amTFT/trials/"
# "DQN_AsymCoinGame_..."],
# },
# "load_policy_data": {
# "Util": [
# "~/dev-maxime/CLR/vm-data/instance-60-cpu-1-preemtible/amTFT"
# "/2021_03_28/19_38_55/utilitarian_welfare/coop"
# "/DQN_VectMixedMotiveCG_06231_00000_0_seed=1616960338_2021-03-29_00-52-23/checkpoint_250/checkpoint-250",
# # "~/dev-maxime/CLR/vm-data/instance-60-cpu-1-preemtible/amTFT"
# # "/2021_03_24/18_22_47/utilitarian_welfare/coop"
# # "/DQN_VectMixedMotiveCG_e1de7_00001_1_seed=1616610171_2021-03-25_00-27-29/checkpoint_250/checkpoint-250",
# # "~/dev-maxime/CLR/vm-data/instance-60-cpu-1-preemtible/amTFT"
# # "/2021_03_24/18_22_47/utilitarian_welfare/coop"
# # "/DQN_VectMixedMotiveCG_e1de7_00002_2_seed=1616610172_2021-03-25_00-27-29/checkpoint_250/checkpoint-250",
# ],
# 'IA':[
# "~/dev-maxime/CLR/vm-data/instance-60-cpu-1-preemtible"
# "/amTFT/2021_03_28/19_38_55/inequity_aversion_welfare/coop"
# "/DQN_VectMixedMotiveCG_d5a2a_00000_0_seed=1616960335_2021-03-28_21-23-26/checkpoint_250/checkpoint-250",
# # "~/dev-maxime/CLR/vm-data/instance-60-cpu-1-preemtible"
# # "/amTFT/2021_03_24/18_22_47/inequity_aversion_welfare/coop"
# # "/DQN_VectMixedMotiveCG_9cfe6_00001_1_seed=1616610168_2021-03-24_20-22-11/checkpoint_250/checkpoint-250",
# # "~/dev-maxime/CLR/vm-data/instance-60-cpu-1-preemtible"
# # "/amTFT/2021_03_24/18_22_47/inequity_aversion_welfare/coop"
# # "/DQN_VectMixedMotiveCG_9cfe6_00002_2_seed=1616610169_2021-03-24_20-22-11/checkpoint_250/checkpoint-250",
# ],
# },
# "load_policy_data": {
# "Util": [
# "~/ray_results/amTFT"
# "/2021_03_24/18_22_47/utilitarian_welfare/coop"
# "/DQN_VectMixedMotiveCG_e1de7_00000_0_seed=1616610170_2021-03-25_00-27-29/checkpoint_250/checkpoint-250",
# "~/ray_results/amTFT"
# "/2021_03_24/18_22_47/utilitarian_welfare/coop"
# "/DQN_VectMixedMotiveCG_e1de7_00001_1_seed=1616610171_2021-03-25_00-27-29/checkpoint_250/checkpoint-250",
# "~/ray_results/amTFT"
# "/2021_03_24/18_22_47/utilitarian_welfare/coop"
# "/DQN_VectMixedMotiveCG_e1de7_00002_2_seed=1616610172_2021-03-25_00-27-29/checkpoint_250/checkpoint-250",
# ],
# 'IA': [
# "~/ray_results"
# "/amTFT/2021_03_24/18_22_47/inequity_aversion_welfare/coop"
# "/DQN_VectMixedMotiveCG_9cfe6_00000_0_seed=1616610167_2021-03-24_20-22-10/checkpoint_250/checkpoint-250",
# "~/ray_results"
# "/amTFT/2021_03_24/18_22_47/inequity_aversion_welfare/coop"
# "/DQN_VectMixedMotiveCG_9cfe6_00001_1_seed=1616610168_2021-03-24_20-22-11/checkpoint_250/checkpoint-250",
# "~/ray_results"
# "/amTFT/2021_03_24/18_22_47/inequity_aversion_welfare/coop"
# "/DQN_VectMixedMotiveCG_9cfe6_00002_2_seed=1616610169_2021-03-24_20-22-11/checkpoint_250/checkpoint-250",
# ],
# },
"amTFTPolicy": amTFT.AmTFTRolloutsTorchPolicy,
"welfare_functions": [
(postprocessing.WELFARE_INEQUITY_AVERSION, "inequity_aversion"),
(postprocessing.WELFARE_UTILITARIAN, "utilitarian"),
],
"jitter": 0.05,
"hiddens": [64],
"gamma": 0.96,
# If not in self play then amTFT
# will be evaluated against a naive selfish policy or an exploiter
"self_play": True,
# "self_play": False, # Not tested
"env_name": "IteratedPrisonersDilemma" if env is None else env,
# "env_name": "IteratedAsymBoS" if env is None else env,
# "env_name": "CoinGame" if env is None else env,
# "env_name": "AsymCoinGame" if env is None else env,
# "env_name": "MixedMotiveCoinGame" if env is None else env,
# "env_name": "SSDMixedMotiveCoinGame" if env is None else env,
"overwrite_reward": True,
"explore_during_evaluation": True,
"reward_uncertainty": reward_uncertainty,
}
hparams = modify_hyperparams_for_the_selected_env(hparams)
hparams["plot_keys"] = amTFT.PLOT_KEYS + hparams["plot_keys"]
hparams["plot_assemblage_tags"] = (
amTFT.PLOT_ASSEMBLAGE_TAGS + hparams["plot_assemblage_tags"]
)
return hparams
def load_tune_analysis(grouped_checkpoints_paths: dict):
tune_analysis = {}
msg = "start load_tune_analysis"
print(msg)
logger.info(msg)
for group_name, checkpoints_paths in grouped_checkpoints_paths.items():
one_tune_analysis = miscellaneous.load_one_tune_analysis(
checkpoints_paths, n_dir_level_between_ckpt_and_exp_state=3
)
tune_analysis[group_name] = one_tune_analysis
msg = "end load_tune_analysis"
print(msg)
logger.info(msg)
return tune_analysis
def modify_hyperparams_for_the_selected_env(hp):
hp["plot_keys"] = (
amTFT.PLOT_KEYS + aggregate_and_plot_tensorboard_data.PLOT_KEYS
)
hp["plot_assemblage_tags"] = (
amTFT.PLOT_ASSEMBLAGE_TAGS
+ aggregate_and_plot_tensorboard_data.PLOT_ASSEMBLAGE_TAGS
)
mul_temp = 1.0
hp["punishment_multiplier"] = 3.0
hp["buf_frac"] = 0.125
hp["training_intensity"] = 10
# hp["rollout_length"] = 40
# hp["n_rollout_replicas"] = 20
hp["rollout_length"] = 4
hp["n_rollout_replicas"] = 5
if "CoinGame" in hp["env_name"]:
hp["plot_keys"] += vectorized_coin_game.PLOT_KEYS
hp["plot_assemblage_tags"] += vectorized_coin_game.PLOT_ASSEMBLAGE_TAGS
hp["n_steps_per_epi"] = 20 if hp["debug"] else 100
hp["n_epi"] = 10 if hp["debug"] else 4000
hp["base_lr"] = 0.1
hp["bs_epi_mul"] = 1
hp["both_players_can_pick_the_same_coin"] = False
hp["sgd_momentum"] = 0.9
hp["lambda"] = 0.96
hp["alpha"] = 0.0
hp["beta"] = 0.5
hp["debit_threshold"] = 30.0
hp["jitter"] = 0.02
hp["filter_utilitarian"] = False
hp["target_network_update_freq"] = 100 * hp["n_steps_per_epi"]
hp["last_exploration_temp_value"] = 0.03 * mul_temp
hp["temperature_schedule"] = PiecewiseSchedule(
endpoints=[
(0, 2.0 * mul_temp),
(
int(hp["n_steps_per_epi"] * hp["n_epi"] * 0.20),
0.5 * mul_temp,
),
(
int(hp["n_steps_per_epi"] * hp["n_epi"] * 0.60),
hp["last_exploration_temp_value"],
),
],
outside_value=hp["last_exploration_temp_value"],
framework="torch",
)
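        # Descriptive note on the schedule above: the SoftQ exploration
        # temperature starts at 2.0 * mul_temp, anneals linearly to
        # 0.5 * mul_temp by 20% of total env steps, reaches
        # last_exploration_temp_value at 60%, then holds that outside_value.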
if "AsymCoinGame" in hp["env_name"]:
hp["x_limits"] = (-0.5, 3.0)
hp["y_limits"] = (-1.1, 0.6)
hp["env_class"] = vectorized_coin_game.AsymVectorizedCoinGame
elif "MixedMotiveCoinGame" in hp["env_name"]:
if "SSDMixedMotiveCoinGame" in hp["env_name"]:
hp["debit_threshold"] = 3.0
hp["x_limits"] = (-0.25, 1.0)
hp["y_limits"] = (-0.25, 1.5)
hp[
"env_class"
] = ssd_mixed_motive_coin_game.SSDMixedMotiveCoinGame
else:
hp["x_limits"] = (-2.0, 2.0)
hp["y_limits"] = (-0.5, 3.0)
hp[
"env_class"
] = vectorized_mixed_motive_coin_game.VectMixedMotiveCG
hp["both_players_can_pick_the_same_coin"] = True
else:
hp["x_limits"] = (-0.5, 0.6)
hp["y_limits"] = (-0.5, 0.6)
hp["env_class"] = vectorized_coin_game.VectorizedCoinGame
else:
hp["plot_keys"] += matrix_sequential_social_dilemma.PLOT_KEYS
hp[
"plot_assemblage_tags"
] += matrix_sequential_social_dilemma.PLOT_ASSEMBLAGE_TAGS
hp["base_lr"] = 0.03
hp["bs_epi_mul"] = 1
hp["n_steps_per_epi"] = 20
hp["n_epi"] = 10 if hp["debug"] else 800
hp["lambda"] = 0.96
hp["alpha"] = 0.0
hp["beta"] = 1.0
hp["sgd_momentum"] = 0.0
hp["debit_threshold"] = 10.0
hp["target_network_update_freq"] = 30 * hp["n_steps_per_epi"]
hp["last_exploration_temp_value"] = 0.1 * mul_temp
hp["temperature_schedule"] = PiecewiseSchedule(
endpoints=[
(0, 2.0 * mul_temp),
(
int(hp["n_steps_per_epi"] * hp["n_epi"] * 0.33),
0.5 * mul_temp,
),
(
int(hp["n_steps_per_epi"] * hp["n_epi"] * 0.66),
hp["last_exploration_temp_value"],
),
],
outside_value=hp["last_exploration_temp_value"],
framework="torch",
)
if "IteratedPrisonersDilemma" in hp["env_name"]:
hp["filter_utilitarian"] = False
hp["x_limits"] = (-3.5, 0.5)
hp["y_limits"] = (-3.5, 0.5)
hp["utilitarian_filtering_threshold"] = -2.5
hp[
"env_class"
] = matrix_sequential_social_dilemma.IteratedPrisonersDilemma
elif "IteratedAsymBoS" in hp["env_name"]:
hp["x_limits"] = (-0.1, 4.1)
hp["y_limits"] = (-0.1, 4.1)
hp["utilitarian_filtering_threshold"] = 3.2
hp["env_class"] = matrix_sequential_social_dilemma.IteratedAsymBoS
else:
raise NotImplementedError(f'hp["env_name"]: {hp["env_name"]}')
hp["lr_schedule"] = [
(0, 0.0),
(int(hp["n_steps_per_epi"] * hp["n_epi"] * 0.05), hp["base_lr"]),
(int(hp["n_steps_per_epi"] * hp["n_epi"]), hp["base_lr"] / 1e9),
]
hp["plot_axis_scale_multipliers"] = (
(1 / hp["n_steps_per_epi"]), # for x axis
(1 / hp["n_steps_per_epi"]),
) # for y axis
hp["env_class"] = add_RewardUncertaintyEnvClassWrapper(
env_class=hp["env_class"],
reward_uncertainty_std=hp["reward_uncertainty"],
)
return hp
def train_for_each_welfare_function(hp):
tune_analysis_per_welfare = {}
for welfare_fn, welfare_group_name in hp["welfare_functions"]:
print("==============================================")
print(
"Going to start two_steps_training with welfare function",
welfare_fn,
)
if welfare_fn == postprocessing.WELFARE_UTILITARIAN:
hp = preprocess_utilitarian_config(hp)
stop, env_config, rllib_config = get_rllib_config(hp, welfare_fn)
exp_name = os.path.join(hp["exp_name"], welfare_fn)
results = amTFT.train_amtft(
stop_config=stop,
rllib_config=rllib_config,
name=exp_name,
TrainerClass=dqn.DQNTrainer,
plot_keys=hp["plot_keys"],
plot_assemblage_tags=hp["plot_assemblage_tags"],
debug=hp["debug"],
log_to_file=not hp["debug"],
loggers=None if hp["debug"] else DEFAULT_LOGGERS + (WandbLogger,),
)
if welfare_fn == postprocessing.WELFARE_UTILITARIAN:
results, hp = postprocess_utilitarian_results(
results, env_config, hp
)
tune_analysis_per_welfare[welfare_group_name] = results
return tune_analysis_per_welfare
def preprocess_utilitarian_config(hp):
hp_copy = copy.deepcopy(hp)
if hp_copy["filter_utilitarian"]:
hp_copy["train_n_replicates"] = (
hp_copy["train_n_replicates"]
* hp_copy["n_times_more_utilitarians_seeds"]
)
return hp_copy
def get_rllib_config(hp, welfare_fn, eval=False):
stop = {
"episodes_total": hp["n_epi"],
}
env_config = get_env_config(hp)
policies = get_policies(hp, env_config, welfare_fn, eval)
selected_seeds = hp["seeds"][: hp["train_n_replicates"]]
hp["seeds"] = hp["seeds"][hp["train_n_replicates"] :]
rllib_config = {
"env": hp["env_class"],
"env_config": env_config,
"multiagent": {
"policies": policies,
"policy_mapping_fn": lambda agent_id: agent_id,
# When replay_mode=lockstep, RLlib will replay all the agent
# transitions at a particular timestep together in a batch.
# This allows the policy to implement differentiable shared
# computations between agents it controls at that timestep.
# When replay_mode=independent,
# transitions are replayed independently per policy.
# "replay_mode": "lockstep",
"observation_fn": amTFT.observation_fn,
},
"gamma": hp["gamma"],
"seed": tune.grid_search(selected_seeds),
# === Optimization ===
# Learning rate for adam optimizer
"lr": hp["base_lr"],
# Learning rate schedule
"lr_schedule": hp["lr_schedule"],
# If not None, clip gradients during optimization at this value
"grad_clip": 1,
# Update the replay buffer with this many samples at once. Note that
# this setting applies per-worker if num_workers > 1.
"rollout_fragment_length": hp["n_steps_per_epi"],
# Size of a batch sampled from replay buffer for training. Note that
# if async_updates is set, then each worker returns gradients for a
# batch of this size.
"train_batch_size": int(hp["n_steps_per_epi"] * hp["bs_epi_mul"]),
"training_intensity": hp["training_intensity"],
# Minimum env steps to optimize for per train call. This value does
# not affect learning, only the length of iterations.
"timesteps_per_iteration": hp["n_steps_per_epi"]
if hp["debug"]
else int(hp["n_steps_per_epi"] * hp["n_epi"] / hp["log_n_points"]),
"min_iter_time_s": 0.0,
# General config
"framework": "torch",
        # LE supports only 1 worker; otherwise it would mix
        # several opponents' trajectories
"num_workers": 0,
        # LE supports only 1 env per worker; otherwise several
        # episodes would be played at the same time
"num_envs_per_worker": 1,
# Callbacks that will be run during various phases of training. See the
# `DefaultCallbacks` class and
# `examples/custom_metrics_and_callbacks.py` for more usage
# information.
"callbacks": callbacks.merge_callbacks(
amTFT.AmTFTCallbacks,
log.get_logging_callbacks_class(
log_full_epi=True, log_full_epi_interval=100
),
),
"logger_config": {
"wandb": {
"project": "amTFT",
"group": hp["exp_name"],
"api_key_file": os.path.join(
os.path.dirname(__file__), "../../../api_key_wandb"
),
"log_config": True,
},
},
# === DQN Models ===
# Update the target network every `target_network_update_freq` steps.
"target_network_update_freq": hp["target_network_update_freq"],
# === Replay buffer ===
# Size of the replay buffer. Note that if async_updates is set, then
# each worker will have a replay buffer of this size.
"buffer_size": max(
int(hp["n_steps_per_epi"] * hp["n_epi"] * hp["buf_frac"]), 5
),
# Whether to use dueling dqn
"dueling": True,
# Dense-layer setup for each the advantage branch and the value branch
# in a dueling architecture.
"hiddens": hp["hiddens"],
# Whether to use double dqn
"double_q": True,
# If True prioritized replay buffer will be used.
"prioritized_replay": False,
"model": {
# Number of hidden layers for fully connected net
"fcnet_hiddens": hp["hiddens"],
# Nonlinearity for fully connected net (tanh, relu)
"fcnet_activation": "relu",
},
# How many steps of the model to sample before learning starts.
"learning_starts": int(hp["n_steps_per_epi"] * hp["bs_epi_mul"]),
# === Exploration Settings ===
# Default exploration behavior, iff `explore`=None is passed into
# compute_action(s).
# Set to False for no exploration behavior (e.g., for evaluation).
"explore": True,
# Provide a dict specifying the Exploration object's config.
"exploration_config": {
# The Exploration class to use. In the simplest case,
# this is the name (str) of any class present in the
# `rllib.utils.exploration` package.
# You can also provide the python class directly or
# the full location of your class (e.g.
# "ray.rllib.utils.exploration.epsilon_greedy.
# EpsilonGreedy").
"type": exploration.SoftQSchedule,
# Add constructor kwargs here (if any).
"temperature_schedule": hp["temperature_schedule"],
},
}
if "CoinGame" in hp["env_name"]:
rllib_config["model"] = {
"dim": env_config["grid_size"],
"conv_filters": [[16, [3, 3], 1], [32, [3, 3], 1]],
# [Channel, [Kernel, Kernel], Stride]]
}
return stop, env_config, rllib_config
def get_env_config(hp):
if "CoinGame" in hp["env_name"]:
env_config = {
"players_ids": ["player_red", "player_blue"],
"max_steps": hp["n_steps_per_epi"],
"grid_size": 3,
"both_players_can_pick_the_same_coin": hp[
"both_players_can_pick_the_same_coin"
],
}
else:
env_config = {
"players_ids": ["player_row", "player_col"],
"max_steps": hp["n_steps_per_epi"],
}
return env_config
def get_policies(hp, env_config, welfare_fn, eval=False):
PolicyClass = hp["amTFTPolicy"]
NestedPolicyClass, CoopNestedPolicyClass = get_nested_policy_class(
hp, welfare_fn
)
if eval:
NestedPolicyClass = CoopNestedPolicyClass
amTFT_config_update = merge_dicts(
amTFT.DEFAULT_CONFIG,
{
            # "train_coop" trains the nested policies here; evaluation later
            # switches this to "eval_amtft" (see modify_config_for_evaluation)
            "working_state": "train_coop",
"welfare_key": welfare_fn,
"verbose": 1 if hp["debug"] else 0,
# "verbose": 1 if hp["debug"] else 2,
"punishment_multiplier": hp["punishment_multiplier"],
"debit_threshold": hp["debit_threshold"],
"rollout_length": min(hp["n_steps_per_epi"], hp["rollout_length"]),
"n_rollout_replicas": hp["n_rollout_replicas"],
"optimizer": {
"sgd_momentum": hp["sgd_momentum"],
},
"nested_policies": [
{"Policy_class": CoopNestedPolicyClass, "config_update": {}},
{"Policy_class": NestedPolicyClass, "config_update": {}},
{"Policy_class": CoopNestedPolicyClass, "config_update": {}},
{"Policy_class": NestedPolicyClass, "config_update": {}},
],
},
)
policy_1_config = copy.deepcopy(amTFT_config_update)
policy_1_config["own_policy_id"] = env_config["players_ids"][0]
policy_1_config["opp_policy_id"] = env_config["players_ids"][1]
policy_2_config = copy.deepcopy(amTFT_config_update)
policy_2_config["own_policy_id"] = env_config["players_ids"][1]
policy_2_config["opp_policy_id"] = env_config["players_ids"][0]
policies = {
env_config["players_ids"][0]: (
# The default policy is DQN defined in DQNTrainer but
# we overwrite it to use the LE policy
PolicyClass,
hp["env_class"](env_config).OBSERVATION_SPACE,
hp["env_class"].ACTION_SPACE,
policy_1_config,
),
env_config["players_ids"][1]: (
PolicyClass,
hp["env_class"](env_config).OBSERVATION_SPACE,
hp["env_class"].ACTION_SPACE,
policy_2_config,
),
}
return policies
def get_nested_policy_class(hp, welfare_fn):
NestedPolicyClass = amTFT.DEFAULT_NESTED_POLICY_SELFISH
CoopNestedPolicyClass = NestedPolicyClass.with_updates(
        # TODO problem: this prevents using HP searches on gamma etc.
postprocess_fn=miscellaneous.merge_policy_postprocessing_fn(
postprocessing.welfares_postprocessing_fn(
add_utilitarian_welfare=(
welfare_fn == postprocessing.WELFARE_UTILITARIAN
),
add_inequity_aversion_welfare=(
welfare_fn == postprocessing.WELFARE_INEQUITY_AVERSION
),
inequity_aversion_alpha=hp["alpha"],
inequity_aversion_beta=hp["beta"],
inequity_aversion_gamma=hp["gamma"],
inequity_aversion_lambda=hp["lambda"],
),
postprocess_nstep_and_prio,
)
)
return NestedPolicyClass, CoopNestedPolicyClass
def postprocess_utilitarian_results(results, env_config, hp):
hp_cp = copy.deepcopy(hp)
if hp["filter_utilitarian"]:
hp_cp["train_n_replicates"] = (
hp_cp["train_n_replicates"]
// hp_cp["n_times_more_utilitarians_seeds"]
)
results = miscellaneous.filter_tune_results(
results,
metric=f"policy_reward_mean/{env_config['players_ids'][0]}",
metric_threshold=hp_cp["utilitarian_filtering_threshold"]
* hp_cp["n_steps_per_epi"],
metric_mode="last-5-avg",
threshold_mode="above",
)
if len(results.trials) > hp_cp["train_n_replicates"]:
results.trials = results.trials[: hp_cp["train_n_replicates"]]
elif len(results.trials) < hp_cp["train_n_replicates"]:
print("WARNING: not enough Utilitarian trials above threshold!!!")
return results, hp_cp
def config_and_evaluate_cross_play(tune_analysis_per_welfare, hp):
config_eval, env_config, stop, hp_eval = generate_eval_config(hp)
return evaluate_self_play_cross_play(
tune_analysis_per_welfare, config_eval, env_config, stop, hp_eval
)
def evaluate_self_play_cross_play(
tune_analysis_per_welfare, config_eval, env_config, stop, hp_eval
):
exp_name = os.path.join(hp_eval["exp_name"], "eval")
evaluator = self_and_cross_perf.SelfAndCrossPlayEvaluator(
exp_name=exp_name,
local_mode=hp_eval["debug"],
)
analysis_metrics_per_mode = evaluator.perform_evaluation_or_load_data(
evaluation_config=config_eval,
stop_config=stop,
policies_to_load_from_checkpoint=copy.deepcopy(
env_config["players_ids"]
),
tune_analysis_per_exp=tune_analysis_per_welfare,
TrainerClass=dqn.DQNTrainer,
n_self_play_per_checkpoint=hp_eval["n_self_play_per_checkpoint"],
n_cross_play_per_checkpoint=hp_eval["n_cross_play_per_checkpoint"],
to_load_path=hp_eval["load_plot_data"],
)
if "CoinGame" in hp_eval["env_name"]:
background_area_coord = None
else:
background_area_coord = hp_eval["env_class"].PAYOUT_MATRIX
plot_config = plot.PlotConfig(
xlim=hp_eval["x_limits"],
ylim=hp_eval["y_limits"],
markersize=5,
alpha=1.0,
jitter=hp_eval["jitter"],
xlabel="player 1 payoffs",
ylabel="player 2 payoffs",
plot_max_n_points=hp_eval["train_n_replicates"],
x_scale_multiplier=hp_eval["plot_axis_scale_multipliers"][0],
y_scale_multiplier=hp_eval["plot_axis_scale_multipliers"][1],
background_area_coord=background_area_coord,
)
evaluator.plot_results(
analysis_metrics_per_mode,
plot_config=plot_config,
x_axis_metric=f"policy_reward_mean/{env_config['players_ids'][0]}",
y_axis_metric=f"policy_reward_mean/{env_config['players_ids'][1]}",
)
print_inequity_aversion_welfare(env_config, analysis_metrics_per_mode)
return analysis_metrics_per_mode
def generate_eval_config(hp):
hp_eval = modify_hp_for_evaluation(hp)
fake_welfare_function = postprocessing.WELFARE_INEQUITY_AVERSION
stop, env_config, rllib_config = get_rllib_config(
hp_eval, fake_welfare_function, eval=True
)
config_eval = modify_config_for_evaluation(
rllib_config, hp_eval, env_config
)
return config_eval, env_config, stop, hp_eval
def modify_hp_for_evaluation(hp: dict, eval_over_n_epi: int = 1):
hp_eval = copy.deepcopy(hp)
# TODO is the overwrite_reward hp useless?
hp_eval["overwrite_reward"] = False
hp_eval["n_epi"] = eval_over_n_epi
hp_eval["n_steps_per_epi"] = 5 if hp_eval["debug"] else 100
hp_eval["bs_epi_mul"] = 1
hp_eval["plot_axis_scale_multipliers"] = (
# for x axis
(1 / hp_eval["n_steps_per_epi"]),
# for y axis
(1 / hp_eval["n_steps_per_epi"]),
)
hp_eval["n_self_play_per_checkpoint"] = 1
hp_eval["n_cross_play_per_checkpoint"] = min(
5,
(
(hp_eval["train_n_replicates"] * len(hp_eval["welfare_functions"]))
- 1
),
)
return hp_eval
def modify_config_for_evaluation(config_eval, hp, env_config):
config_eval["explore"] = False
config_eval["seed"] = None
policies = config_eval["multiagent"]["policies"]
for policy_id in policies.keys():
policy_config = policies[policy_id][3]
policy_config["working_state"] = "eval_amtft"
if not hp["self_play"]:
naive_player_id = env_config["players_ids"][-1]
naive_player_policy_config = policies[naive_player_id][3]
naive_player_policy_config["working_state"] = "eval_naive_selfish"
if hp["explore_during_evaluation"]:
tmp_mul = 1.0
config_eval["explore"] = (miscellaneous.OVERWRITE_KEY, True)
config_eval["exploration_config"] = {
"type": config_eval["exploration_config"]["type"],
"temperature_schedule": PiecewiseSchedule(
endpoints=[
(0, tmp_mul * hp["last_exploration_temp_value"]),
(0, tmp_mul * hp["last_exploration_temp_value"]),
],
outside_value=tmp_mul * hp["last_exploration_temp_value"],
framework="torch",
),
}
if hp["debug"] and hp.get("debit_threshold_debug_override", True):
for policy_id in policies.keys():
policies[policy_id][3]["debit_threshold"] = 0.5
policies[policy_id][3]["last_k"] = hp["n_steps_per_epi"] - 1
return config_eval
def print_inequity_aversion_welfare(env_config, analysis_metrics_per_mode):
plotter = self_and_cross_perf.SelfAndCrossPlayPlotter()
plotter._reset(
x_axis_metric=f"nested_policy/{env_config['players_ids'][0]}/worker_0/"
f"policy_0/sum_over_epi_inequity_aversion_welfare",
y_axis_metric=f"nested_policy/{env_config['players_ids'][1]}/worker_0/"
f"policy_0/sum_over_epi_inequity_aversion_welfare",
metric_mode="avg",
)
for mode_metric in analysis_metrics_per_mode:
print("mode_metric", mode_metric[0], mode_metric[3])
x, y = plotter._extract_x_y_points(mode_metric[1])
print("x", x)
print("y", y)
if __name__ == "__main__":
debug_mode = True
main(debug_mode)
|
[
"maxime.riche@geoapi-airbusds.com"
] |
maxime.riche@geoapi-airbusds.com
|
84cc6b5087235720d2d70cc3a974c5b7ffb22590
|
41b8504bc0a7f2aaedd2f4986c3e66fcae7d9b2b
|
/pyoembed/data_types/rich.py
|
8ec6615f222fd87f7d50c6e76bde486707bb55ed
|
[
"BSD-3-Clause"
] |
permissive
|
conversence/pyoembed
|
b15124dc092c8863e01ab1128f888bfc21a889b7
|
f3d819418877565cd88839054bd826ad79d4f9cf
|
refs/heads/master
| 2020-05-25T14:55:30.936629
| 2020-05-01T18:25:10
| 2020-05-01T18:25:10
| 187,857,180
| 0
| 0
|
BSD-3-Clause
| 2019-05-21T14:44:37
| 2019-05-21T14:44:36
| null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
from pyoembed.data_types import BaseDataType
class RichDataType(BaseDataType):
priority = 3
name = 'rich'
required_fields = ['html', 'width', 'height']
|
[
"rafael@rafaelmartins.eng.br"
] |
rafael@rafaelmartins.eng.br
|
68f6a28d48852a7385fe6d0e1fde46a273192a95
|
741d9469b5dce73f1a91525ee0495402f3e3db53
|
/posts/views.py
|
62c2916c80dede5b914a0833c23c6b1367cd2546
|
[] |
no_license
|
adcabrod/Pr-ctica3djangopython
|
241fa168c25b88eacaa246106d8550eed9155c82
|
c09d52926cb6816fec6a04b8f64a46e8e8b0277b
|
refs/heads/master
| 2021-01-09T06:01:28.843336
| 2017-02-03T23:30:27
| 2017-02-03T23:30:27
| 80,878,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,502
|
py
|
from django.shortcuts import render
from django.http import HttpResponseNotFound, HttpResponse
from django.views import View
from django.utils import timezone
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.urls import reverse
from posts.models import Post
from practica3.forms import PostForm
class home(View):
def get(self, request):
"""
Renderiza el home con un listado de posts
:param request: objeto HttpRequest con los datos de la petición
:return: objeto HttpResponse con los datos de la respuesta
"""
# recupera todos los posts de la base de datos y los ordeno por fecha de publicación
posts = Post.objects.filter(publication_date__lte=timezone.now()).order_by('-publication_date')
context ={'posts_list': posts[:7]}
return render(request, 'posts/home.html', context)
class blogsView(View):
def get(self, request):
"""
Renderiza el /blogs con un listado de los blogs, un blog por usuario
:param request: objeto HttpRequest con los datos de la petición
:return: objeto HttpRequest con los datos de la respuesta
"""
blog = User.objects.order_by('username')
context = {'blogs_list': blog[:7]}
return render(request, 'posts/blogs.html', context)
class PostView(View):
    def get(self, request, username, pk):
"""
Renderiza un post en detalle
:param request:objeto HttpRequest con los datos de la petición
:param pk: clave primaria del post a recuperar
:return: objeto httpResponse con los datos de la respuesta
"""
post = PostQueryset.get_posts_by_user(request.user, username).filter(pk=pk)
context = {'post': post[0], 'username': username}
return render(request, 'posts/postView.html', context)
class blogDetailView(View):
def get(self, request, username):
"""
Renderiza los artículos de un usuario
:param request: objeto HttpRequest con los datos de la petición
:param username: username del autor del artículo a recuperar
:return: objeto HttpResponse con los datos de la respuesta
"""
# Muestro los post de un usuario en concreto
posts = PostQueryset.get_posts_by_user(request.user, username).order_by('-publication_date')
context = {'posts_list': posts, 'username': username}
return render(request, 'posts/userBlog.html', context)
class PostCreationView(View):
@method_decorator(login_required())
def get(self, request):
"""
Presenta el formulario para crear un post
:param request: objeto HttpRequest con los datos de la petición
:return: objeto HttpResponse con los datos de la respuesta
"""
message = None
post_form = PostForm()
context = {'form': post_form, 'message': message}
return render(request, 'posts/post_creation.html', context)
@method_decorator(login_required())
def post(self, request):
"""
Presenta el formulario para crear un post y, en caso de que la petición sea POST la valida
y la crea en caso de que sea válida
:param request: objeto HttpRequest con los datos de la petición
:return: objeto HttpResponse con los datos de la respuesta
"""
message = None
post_with_user = Post(owner=request.user)
post_form = PostForm(request.POST, instance=post_with_user)
if post_form.is_valid():
new_post = post_form.save()
post_form = PostForm()
            message = 'Post created successfully. <a href="{0}">View post</a>'.format(
reverse('post_view', args=[new_post.owner.username, new_post.pk])
)
            # reverse() resolves the post_view URL with the username and pk arguments
context = {'form': post_form, 'message': message}
return render(request, 'posts/post_creation.html', context)
class PostQueryset(object):
@staticmethod
def get_posts_by_user(user, username):
posts = Post.objects.all().select_related("owner")
if not user.is_authenticated():
posts = posts.filter(publication_date__lte=timezone.now(), owner__username=username)
elif not user.is_superuser:
if user.username == username:
posts = posts.filter(owner=user)
else:
                posts = posts.filter(publication_date__lte=timezone.now(), owner__username=username)
else:
posts = posts.filter(owner__username=username)
return posts
class PostListApiQueryset(object):
@staticmethod
def get_post_by_user(user, username):
posts = Post.object.all().select_related("owner")
if not user.is_authenticated():
posts = posts.filter(publication_date_lte=timezone.now(), owner__username=username)
elif not user.is_superuser:
if user.username == username:
posts = posts.filter(owner=user)
else:
posts = posts.filter(publication_date__lte=timezone.now(), owner__username=username)
else:
posts = posts.filter(owner__username=username)
return posts
|
[
"adriancabrod@gmail.com"
] |
adriancabrod@gmail.com
|
1336583395efc34384b1a523b1411becf3b1882a
|
a19ebd3f326c4f2a8dad58bda6de946ac8366e02
|
/catkin_ws/build/tf2_ros/catkin_generated/pkg.develspace.context.pc.py
|
168e25456249981067feddeffee6e2f74274a093
|
[] |
no_license
|
pynpyn/ballbotRepo
|
c9a1ba431c071ea8298cb42b01fcc03438acbdda
|
84e8740f7c945fd6e8b70584d46a8402fa9aa973
|
refs/heads/master
| 2020-04-01T22:14:21.101449
| 2018-11-14T17:58:11
| 2018-11-14T17:58:11
| 153,697,917
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/yinan/ballbotRepo/catkin_ws/src/geometry2/tf2_ros/include".split(';') if "/home/yinan/ballbotRepo/catkin_ws/src/geometry2/tf2_ros/include" != "" else []
PROJECT_CATKIN_DEPENDS = "actionlib;actionlib_msgs;geometry_msgs;message_filters;roscpp;rosgraph;tf2;tf2_msgs;tf2_py".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ltf2_ros".split(';') if "-ltf2_ros" != "" else []
PROJECT_NAME = "tf2_ros"
PROJECT_SPACE_DIR = "/home/yinan/ballbotRepo/catkin_ws/devel/.private/tf2_ros"
PROJECT_VERSION = "0.6.3"
|
[
"pynpyn1016@gmail.com"
] |
pynpyn1016@gmail.com
|
09a2b490113541fcea24bf983ccd3be258fb29d0
|
71536013ef36dfca22f43f822d5c8f5c42d763da
|
/testfile.py
|
871798321c4f73a51f07240d4f2e079f71a266eb
|
[] |
no_license
|
jerryneal/TradeChart
|
9b179c541778fd3417c80f9e9d89aaf1c068ca42
|
51dbc269bd4697751ad1ad68c3e700b89439e159
|
refs/heads/master
| 2021-01-12T11:27:29.305368
| 2016-12-03T15:47:05
| 2016-12-03T15:47:05
| 72,930,614
| 0
| 0
| null | 2016-11-22T16:12:38
| 2016-11-05T14:17:50
|
Python
|
UTF-8
|
Python
| false
| false
| 976
|
py
|
from time import time
from math import exp, sqrt, log
from random import gauss, seed
if __name__ == '__main__':
seed(20000)
t0 = time()
# Parameters
S0 = 100. #Initial Value
K = 105. # strike price
T = 1.0 # Maturity
r = 0.05 # riskless short rate
sigma = 0.2 # volatility
M = 50
dt = T / M
I = 250000
# Simulating I paths with M time stamps
S = []
for i in range(I):
path = []
for t in range(M + 1):
if t == 0:
path.append(S0)
else:
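                # exact GBM step: S_t = S_{t-1} * exp((r - sigma^2/2)*dt + sigma*sqrt(dt)*Z)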
z = gauss(0.0,1.0)
St = path[t - 1] * exp((r - 0.5 * sigma ** 2) * dt + sigma * sqrt(dt) * z)
path.append(St)
S.append(path)
# Calculating the Actual Simulation
C0 = exp(-r * T) * sum([max(path[-1] - K,0) for path in S]) / I
# Results output
tpy = time() - t0
print "European Option value %7.3f" % C0
print "Duration in Seconds %7.3f" % tpy
|
[
"mckenzo12@live.com"
] |
mckenzo12@live.com
|
a13b7f305965e6ed44d45a697ccf6e784f56af46
|
77e7e25a625504774a10ec3aac196370e135640e
|
/youtubeclone_api/comments/serializers.py
|
e26532d3c4593fa2814a65d6cdc482ed501af6a7
|
[] |
no_license
|
DaveyCrockett/YouTube_Clone_API
|
e9817810caede21b0b495463341dd22bace95bc7
|
45b74108b8a10ee8e63be3b2972199126adacb79
|
refs/heads/main
| 2023-05-18T23:54:21.611857
| 2021-06-02T13:53:58
| 2021-06-02T13:53:58
| 370,783,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
from rest_framework import serializers
from .models import Comment
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = ['comment', 'commentId', 'videoId']
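
# Usage sketch (hypothetical objects; standard DRF serializer pattern):
# comment = Comment.objects.first()
# CommentSerializer(comment).data
# -> {'comment': '...', 'commentId': ..., 'videoId': ...}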
|
[
"augustspies22@gmail.com"
] |
augustspies22@gmail.com
|
f42db6bb9776d4f80162c4e50b2a19955c741082
|
625ccb2daedc094862cda12eb8805884912462c2
|
/FigforResponse/R_3_3/code/runvConv.py
|
78cf227d00aa337e83c63795ad0ddf29ffb559d3
|
[] |
no_license
|
shunsunsun/vConv-Figures_and_Tables
|
4a5bf20f18aa6f44e6ec2fcc9f265aa510e71928
|
e1813d43352eb763fd0c8b5b90b4c92e83792fe4
|
refs/heads/main
| 2023-04-13T11:56:11.641674
| 2021-04-30T02:53:44
| 2021-04-30T02:53:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
import os
from datetime import datetime
def tictoc():
return datetime.now().minute + datetime.now().second + datetime.now().microsecond * (10 ** -6)
def vConv(filename):
"""
use memechip
:param InputFile: fasta file
:return:
"""
DataRoot = "../../../data/chip-seqFa/"
tmp_cmd = "python vConv-basedmotifdiscovery.py "+DataRoot+filename
print(tmp_cmd)
os.system(tmp_cmd)
def mkdir(path):
isExists = os.path.exists(path)
if not isExists:
os.makedirs(path)
return (True)
else:
return False
if __name__ == '__main__':
import glob
files = open("./fastause.txt").readlines()
for filename in files:
filename = filename.replace("\n","")
vConv(filename)
|
[
"lijy@mail.cbi.pku.edu.cn"
] |
lijy@mail.cbi.pku.edu.cn
|
6f93a99b11370bb5c25429eb3103be6ed4897061
|
df7807bb90d2bc3ad6dab75e50bba34a7f497c17
|
/Rollar-Coaster.py
|
a676d4c3c39f655d2597876658408c5ecaa988f2
|
[] |
no_license
|
DhruvUpadhyaya/Python
|
68b7f09252c9d6ad0c48f08ebb67d275557ad71e
|
d80891ac1bd8b7f7f61d62aebb2f9f7253cbc946
|
refs/heads/main
| 2023-03-30T13:58:00.133932
| 2021-04-11T12:28:38
| 2021-04-11T12:28:38
| 354,930,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
#Roller Coaster Ticket project
print('Welcome to the Roller Coaster ride')
height = float(input('Enter your height in cm '))
age = int(input('Enter your age '))
total=0
if height > 120:
print('You can ride')
if age<12:
print('You need to pay $5')
total=5
elif age<=18:
print('You need to pay $7')
total=7
else:
print('You need to pay $12')
total=12
else:
print('Grow up baby')
pic = input('Do you want photos? YES or NO ')
if(pic == 'YES'):
print("You need to pay additional $3")
total+=3
print(f"Your Total amount is:${total} ")
|
[
"2793dhruv@gmail.com"
] |
2793dhruv@gmail.com
|
4bc673e6bf00bef90ef3ddde63aedb247acdb117
|
032a59902e47f6843ac9c76f6e27eb1d4a78c27d
|
/scripts/python/blend_sat/grid_convert.py
|
851b413692424e4d6da518c70edca3055d81b5bc
|
[
"Apache-2.0"
] |
permissive
|
OSADP/Pikalert-Vehicle-Data-Translator-
|
17411c602879eb4fb080201973b4a966f9405a4b
|
295da604408f6f13af0301b55476a81311459386
|
refs/heads/master
| 2021-03-27T12:02:18.535636
| 2017-04-03T16:09:38
| 2017-04-03T16:09:38
| 25,056,408
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,964
|
py
|
#! /usr/bin/env python
import os,sys
import time
import name_schema
import index_file
import log
import tim
convert_command = "grid_convert"
def grid_convert(pres_time, infile_path, var_names_file, cdl_file, output_dir, out_index_file, logfile, input_base, output_base, testing):
success = 0
logfile_path = logfile.get_log_path()
# Construct output path names.
ifo = name_schema.Fcst_fname(input_base, "nc")
infile = os.path.basename(infile_path)
file_date_str = ifo.get_date(infile)
file_date_tup = tim.datetotuple(file_date_str)
file_date = tim.mkgmtime(file_date_tup)
output_path = os.path.join(output_dir, file_date_str)
if (not os.path.exists(output_path)):
logfile.write_time("Info: Executing mkdir -p %s\n" % output_path)
if not testing:
ret = os.system("mkdir -p %s 2> /dev/null" % output_path)
if (ret != 0):
logfile.write_time("Error: Unable to make directory.\n")
return (0)
ofo = name_schema.Fcst_fname(output_base, "nc")
outfile = ofo.make_name(file_date_str, ifo.get_it(infile), ifo.get_ft(infile))
outfile_path = os.path.join(output_path, outfile)
if (out_index_file.file_processed(outfile, file_date)):
logfile.write_time("Info: File %s already exists.\n" % outfile)
return (1)
logfile_arg = ""
if (logfile_path != ""):
logfile_arg = "-l %s" % logfile_path
command = "%s %s %s %s %s %s" % (convert_command, infile_path, var_names_file, cdl_file, outfile_path, logfile_arg)
logfile.write_time("Info: Executing %s\n" % command)
if not testing:
ret = os.system(command)
if (ret == 0):
write_str = "%s %d" % (outfile, int(pres_time))
out_index_file.write(write_str, file_date)
success = 1
else:
logfile.write_time("Error: Unable to convert to file %s. \n" % outfile)
return (success)
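
# Call sketch (hypothetical arguments; the real driver script is not part of
# this file, and out_index_file / logfile are assumed instances from the
# index_file and log modules imported above):
#
# success = grid_convert(time.time(), "/data/input/20170403.i12.f000.nc",
#                        "var_names.txt", "template.cdl", "/data/output",
#                        out_index_file, logfile, "input_base", "output_base",
#                        testing=0)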
|
[
"bpetzke@ucar.edu"
] |
bpetzke@ucar.edu
|
9a37239d6928f07f2fd778ce45b6b806d19ff203
|
617f1b605be66e00d6a8c006db96e8131ad9d4ff
|
/xtreme_vision/Segmentation/__init__.py
|
f9b4fe2d2eecfd5dd1d074699964034ab891f202
|
[
"MIT"
] |
permissive
|
AsadRasheed-AR/Xtreme-Vision
|
8d7b5966c0c2c267538c27c0858ec0f4cf2807c3
|
2e09e6972c6b2752bc37f8356fafda151acacd0d
|
refs/heads/master
| 2023-03-26T11:34:04.459514
| 2021-03-27T03:08:40
| 2021-03-27T03:08:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,031
|
py
|
"""
MIT License
Copyright (c) 2020 Adeel <kingadeel2017@outlook.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from xtreme_vision.Segmentation.deeplab.semantic import semantic_segmentation
from xtreme_vision.Segmentation.maskrcnn.instance import instance_segmentation
from xtreme_vision.Segmentation.cdcl.inference_15parts import run_image, run_video
import cv2
import os
import sys
import tensorflow as tf
class Segmentation:
"""
This is Segmentation Class in Xtreme-Vision Library, it provides the support of State-Of-The-Art Models
like Mask-RCNN and DeepLabv3+. After Instantiating this Class, you can set its properties and use pre-defined
functions for performing segmentation Tasks out of the box.
    Note: Custom Segmentation only supports Mask-RCNN
Use_MaskRCNN() or Use_DeepLabv3() # To Specify which Model to Use
Detect_From_Image() # To Segment from Images
Detect_From_Video() # To Segment from Videos
Custom_Objects() # To set the desired objects to True e.g. Custom_Objects(car=True)
Detect_Custom_Objects_From_Image() # To Segment Custom Objects from Images
Detect_Custom_Objects_From_Video() # To Segment Custom Objects from Videos
"""
def __init__(self):
self.model = None
self.weights_path = ""
self.modelLoaded = False
self.modelType = ""
def Use_MaskRCNN(self, weights_path: str = None):
"""[This Function is used to set the Model Type to Mask-RCNN, Automatically downloads the weights
if set to None and Loads the Model]
Args:
weights_path (str, optional): [path to the trained weights file]. Defaults to None.
Raises:
FileNotFoundError: [If weights file doesn't exist at specified path]
"""
if weights_path is None:
path = 'xtreme_vision/weights/maskrcnn_weights.h5'
if os.path.isfile(path):
print('Found Existing Weights File...\nLoading Existing File...')
self.weights_path = path
else:
print('Downloading Weights File...\nPlease Wait...')
self.weights_path = tf.keras.utils.get_file('maskrcnn_weights.h5',
'https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5',
cache_subdir='weights/', cache_dir='xtreme_vision')
else:
if os.path.isfile(weights_path):
self.weights_path = weights_path
else:
raise FileNotFoundError(
"Weights File Doesn't Exist at Provided Path. Please Provide Valid Path.")
self.model = instance_segmentation()
self.model.load_model(self.weights_path)
self.modelLoaded = True
self.modelType = 'maskrcnn'
def Use_DeepLabv3(self, weights_path: str = None):
"""[This function is used to set the Model Type to DeepLabv3, Automatically downloads the weights
if set to None and Loads the Model]
Args:
weights_path (str, optional): [path to the trained weights file]. Defaults to None.
Raises:
FileNotFoundError: [If weights file doesn't exist at specified path]
"""
if weights_path is None:
path = 'xtreme_vision/weights/deeplab_weights.h5'
if os.path.isfile(path):
print('Found Existing Weights File...\nLoading Existing File...')
self.weights_path = path
else:
print('Downloading Weights File...\nPlease Wait...')
self.weights_path = tf.keras.utils.get_file('deeplab_weights.h5',
'https://github.com/ayoolaolafenwa/PixelLib/releases/download/1.3/deeplabv3_xception65_ade20k.h5',
cache_subdir='weights/', cache_dir='xtreme_vision')
else:
if os.path.isfile(weights_path):
self.weights_path = weights_path
else:
raise FileNotFoundError(
"Weights File Doesn't Exist at Provided Path. Please Provide Valid Path.")
self.model = semantic_segmentation()
self.model.load_ade20k_model(self.weights_path)
self.modelLoaded = True
self.modelType = 'deeplab'
def Use_PersonPart(self, weights_path:str=None):
if weights_path is not None:
if os.path.isfile(weights_path):
self.weights_path = weights_path
else:
raise FileNotFoundError("Weights File Doesn't Exist at provided path.")
else:
pass
self.modelLoaded = True
self.modelType = 'cdcl'
def Detect_From_Image(self, input_path:str, output_path:str, show_boxes:bool = False):
"""[This function is used to segment objects from Images]
Args:
input_path (str): [path to the input image with jpg/jpeg/png extension]
output_path (str): [path to save the output image with jpg/jpeg/png extension]
            show_boxes (bool, optional): [whether to show the boxes of detected objects; only Mask-RCNN supports it]. Defaults to False.
Raises:
RuntimeError: [If Model is not Loaded before Using this Function]
RuntimeError: [If any other Model type is specified other than Mask-RCNN or DeepLabv3]
"""
if self.modelLoaded != True:
raise RuntimeError('Before calling this function, you have to specify which Model you want to Use.')
else:
if self.modelType == 'maskrcnn':
_, img = self.model.segmentImage(image_path=input_path, show_bboxes=show_boxes, output_image_name=output_path)
elif self.modelType == 'deeplab':
_, img = self.model.segmentAsAde20k(input_path, output_path, overlay=True)
elif self.modelType == 'cdcl':
_ = run_image(input_path, output_path)
else:
raise RuntimeError(
'Invalid ModelType: Valid Types are "MaskRCNN"\t"DeepLabv3".')
def Detect_From_Video(self, input_path:str, output_path:str, show_boxes:bool = False, fps:int = 25):
"""[This function is used to segment objects from Videos]
Args:
input_path (str): [path to the input video with mp4/avi extension]
output_path (str): [path to save the output video with mp4/avi extension]
            show_boxes (bool, optional): [whether to show the boxes of detected objects; only Mask-RCNN supports it]. Defaults to False.
fps (int, optional): [frames per second for video processing]
Raises:
RuntimeError: [If Model is not Loaded before Using this Function]
RuntimeError: [If any other Model type is specified other than Mask-RCNN or DeepLabv3]
"""
if self.modelLoaded != True:
raise RuntimeError(
'Before calling this function, you have to specify which Model you want to Use.')
if self.modelType == 'cdcl':
vid = run_video(input_path, output_path, fps)
sys.exit()
out = None
cap = cv2.VideoCapture(input_path)
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
print(f'\nThere are {length} Frames in this video')
print('-' * 20)
print('Detecting Objects in the Video... Please Wait...')
print('-' * 20)
while(cap.isOpened()):
            retrieved, frame = cap.read()
            if not retrieved:
break
if self.modelType == 'maskrcnn':
_, im = self.model.segmentFrame(frame, show_boxes)
elif self.modelType == 'deeplab':
_, im = self.model.segmentFrameAsAde20k(frame, overlay=True)
else:
raise RuntimeError(
'Invalid ModelType: Valid Types are "MaskRCNN"\t"DeepLabv3".')
if out is None:
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter(output_path, fourcc, fps, (frame.shape[1], frame.shape[0]))
out.write(im)
print('Done. Processing has been Finished... Please Check Output Video.')
out.release()
cap.release()
def Custom_Objects(self, person=False, bicycle=False, car=False, motorcycle=False, airplane=False,
bus=False, train=False, truck=False, boat=False, traffic_light=False, fire_hydrant=False,
stop_sign=False,
parking_meter=False, bench=False, bird=False, cat=False, dog=False, horse=False, sheep=False,
cow=False, elephant=False, bear=False, zebra=False,
giraffe=False, backpack=False, umbrella=False, handbag=False, tie=False, suitcase=False,
frisbee=False, skis=False, snowboard=False,
sports_ball=False, kite=False, baseball_bat=False, baseball_glove=False, skateboard=False,
surfboard=False, tennis_racket=False,
bottle=False, wine_glass=False, cup=False, fork=False, knife=False, spoon=False, bowl=False,
banana=False, apple=False, sandwich=False, orange=False,
broccoli=False, carrot=False, hot_dog=False, pizza=False, donut=False, cake=False, chair=False,
couch=False, potted_plant=False, bed=False,
dining_table=False, toilet=False, tv=False, laptop=False, mouse=False, remote=False,
keyboard=False, cell_phone=False, microwave=False,
oven=False, toaster=False, sink=False, refrigerator=False, book=False, clock=False, vase=False,
scissors=False, teddy_bear=False, hair_dryer=False,
toothbrush=False):
"""
The 'CustomObjects()' function allows you to handpick the type of objects you want to detect
from an image. The objects are pre-initiated in the function variables and predefined as 'False',
which you can easily set to true for any number of objects available. This function
returns a dictionary which must be parsed into the 'Detect_Custom_Objects_From_Image()' and
'Detect_Custom_Objects_From_Video()'.
Detecting custom objects only happens when you call the function 'Detect_Custom_Objects_From_Image()'
or 'Detect_Custom_Objects_From_Video()'
* true_values_of_objects (array); Acceptable values are 'True' and False for all object values present
:param boolean_values:
:return: custom_objects_dict
"""
custom_objects_dict = {}
input_values = [person, bicycle, car, motorcycle, airplane,
bus, train, truck, boat, traffic_light, fire_hydrant, stop_sign,
parking_meter, bench, bird, cat, dog, horse, sheep, cow, elephant, bear, zebra,
giraffe, backpack, umbrella, handbag, tie, suitcase, frisbee, skis, snowboard,
sports_ball, kite, baseball_bat, baseball_glove, skateboard, surfboard, tennis_racket,
bottle, wine_glass, cup, fork, knife, spoon, bowl, banana, apple, sandwich, orange,
broccoli, carrot, hot_dog, pizza, donut, cake, chair, couch, potted_plant, bed,
dining_table, toilet, tv, laptop, mouse, remote, keyboard, cell_phone, microwave,
oven, toaster, sink, refrigerator, book, clock, vase, scissors, teddy_bear, hair_dryer,
toothbrush]
actual_labels = ["person", "bicycle", "car", "motorcycle", "airplane",
"bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign",
"parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear",
"zebra",
"giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis",
"snowboard",
"sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
"tennis racket",
"bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich",
"orange",
"broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant",
"bed",
"dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
"microwave",
"oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
"hair dryer",
"toothbrush"]
        for input_value, actual_label in zip(input_values, actual_labels):
            if input_value:
                custom_objects_dict[actual_label] = "valid"
            else:
                custom_objects_dict[actual_label] = "invalid"
return custom_objects_dict
def Detect_Custom_Objects_From_Image(self, custom_objects=None, input_path:str = None, output_path:str = None,
show_boxes:bool = False):
"""[This function is used to detect custom objects from Images, it will only detect those objects which
are set to True in dictionary returned from Custom_Objects() function.]
Args:
custom_objects: (dict) [dictionary returned from Custom_Objects() function]
input_path: (str) [path to the input Image with jpg/jpeg/png extension]
output_path: (str) [path to save the output image with jpg/jpeg/png extension]
            show_boxes: (bool) [whether to show the boxes of detected objects]
Raises:
RuntimeError: [If custom_objects/input_path/output_path is not specified]
RuntimeError: [If Model is not Loaded before calling this function]
RuntimeError: [If any other Model Type is Specified other than Mask-RCNN]
"""
        if (custom_objects is None) or (input_path is None) or (output_path is None):
            raise RuntimeError(
                'custom_objects, input_path and output_path must not be None.')
else:
if self.modelLoaded:
if (self.modelType == 'maskrcnn'):
_, img = self.model.segmentImage(input_path, show_boxes, output_path, custom=custom_objects)
else:
raise RuntimeError(
'Invalid ModelType: Valid Type is "MaskRCNN".')
else:
raise RuntimeError(
'Before calling this function, you have to call Use_MaskRCNN().')
def Detect_Custom_Objects_From_Video(self, custom_objects = None, input_path:str = None, output_path:str = None,
show_boxes:bool = False, fps:int = 25):
"""[This function is used to detect custom objects from Videos, it will only detect those objects which
are set to True in dictionary returned from Custom_Objects() function.]
Args:
custom_objects: (dict) [dictionary returned from Custom_Objects() function]
input_path: (str) [path to the input Video with mp4/avi extension]
output_path: (str) [path to save the output Video with mp4/avi extension]
            show_boxes: (bool) [whether to show the boxes of detected objects]
fps: (int) [frames per second for video processing]
Raises:
RuntimeError: [If custom_objects/input_path/output_path is not specified]
RuntimeError: [If Model is not Loaded before calling this function]
RuntimeError: [If any other Model Type is Specified other than Mask-RCNN]
"""
        if (custom_objects is None) or (input_path is None) or (output_path is None):
            raise RuntimeError(
                'custom_objects, input_path and output_path must not be None.')
        if not self.modelLoaded:
            raise RuntimeError('Before calling this function, you have to call Use_MaskRCNN().')
out = None
cap = cv2.VideoCapture(input_path)
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
print(f'\nThere are {length} Frames in this video')
print('-' * 20)
print('Detecting Objects in the Video... Please Wait...')
print('-' * 20)
        while cap.isOpened():
            retrieve, frame = cap.read()
            if not retrieve:
                break
if self.modelType == 'maskrcnn':
_, im = self.model.segmentFrame(frame, show_boxes, custom=custom_objects)
else:
raise RuntimeError(
'Invalid ModelType: Valid Type is "MaskRCNN".')
if out is None:
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter(output_path, fourcc, fps, (frame.shape[1], frame.shape[0]))
out.write(im)
        print('Done. Processing finished. Please check the output video.')
        if out is not None:
            out.release()
        cap.release()
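# A minimal usage sketch, not part of the class above: it assumes the enclosing
# detector class is named 'Detector' and is loaded via 'Use_MaskRCNN()', as the
# docstrings above suggest; the class name and file paths here are hypothetical.
#     detector = Detector()
#     detector.Use_MaskRCNN()  # load the Mask-RCNN model first
#     custom = detector.Custom_Objects(person=True, car=True)  # detect only people and cars
#     detector.Detect_Custom_Objects_From_Video(custom_objects=custom,
#                                               input_path='input.mp4',
#                                               output_path='output.avi',
#                                               show_boxes=True, fps=25)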
# --- Next file: /python/extract_line.py (repo: draemonash2/codes) ---
#!/usr/bin/env python3
# usage : python3 extract_line.py <infile> <outfile>
import re
import sys
def main():
    args = sys.argv
    if len(args) != 3:
        print('usage : python3 extract_line.py <infile> <outfile>')
        return 0
    in_file_name = args[1]
    out_file_name = args[2]
    # Match markers of the form [[123]] anywhere in a line
    pattern = r'(\[\[)(\d+)(\]\])'
    try:
        # 'with' guarantees both files are closed even if an error occurs
        with open(in_file_name) as in_file, open(out_file_name, 'w') as out_file:
            for line in in_file:
                matchlist = re.findall(pattern, line)
                if matchlist:
                    # Write the first [[number]] marker found on this line
                    out_file.write(matchlist[0][0] + matchlist[0][1] + matchlist[0][2] + "\n")
    except Exception as e:
        print(e)
if __name__ == "__main__":
    main()
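# Example (hypothetical data): given an infile containing the lines
#     foo [[12]] bar
#     no marker here
#     [[7]] baz
# the outfile would contain:
#     [[12]]
#     [[7]]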
# --- Next file: /Course_1_Python_Programming/Notebook_2.py (repo: DemetriusStorm/study_stepic) ---
import turtle
'''
forward(X)    Move forward X pixels
backward(X)   Move backward X pixels
left(X)       Turn left by X degrees
right(X)      Turn right by X degrees
penup()       Do not draw while moving
pendown()     Draw while moving
shape(X)      Change the turtle icon ("arrow", "turtle", "circle", "square", "triangle", "classic")
stamp()       Stamp a copy of the turtle at the current position
color()       Set the color
begin_fill()  Call before drawing a shape that should be filled
end_fill()    Call after the shape is finished
width()       Set the line width
goto(x, y)    Move the turtle to the point (x, y)
'''
turtle.penup()  # do not draw while moving
turtle.goto(-100, 5)  # move the turtle to the point (x, y)
turtle.pendown()  # draw while moving
# Draw a circle with a positive radius
turtle.circle(150)
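# Note: turtle.circle(radius, extent=None, steps=None) draws counterclockwise for a
# positive radius and clockwise for a negative one; 'extent' is the arc angle in degrees,
# and 'steps' approximates the circle with a regular polygon of that many sides.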
turtle.penup()
turtle.goto(-100, -5)
turtle.pendown()
# Draw a circle with a negative radius
turtle.circle(-50)
turtle.penup()
turtle.goto(5, 5)
turtle.pendown()
# Draw a 180-degree arc with a positive extent
turtle.circle(50, 180)
turtle.penup()
turtle.goto(5, -105)
turtle.pendown()
turtle.seth(0)
# Draw a 270-degree arc with a negative extent
turtle.circle(50, -270)
turtle.penup()
turtle.goto(120, 5)
turtle.pendown()
turtle.seth(0)
# Draw a pentagon
turtle.circle(50, 360, 5)
turtle.penup()
turtle.goto(120, -105)
turtle.pendown()
# Draw a regular 12-sided polygon
turtle.circle(50, 360, 12)
turtle.mainloop()
# --- Next file: /1st/load_vgg.py (repo: ninopira/study_pytorch) ---
# Import packages
import numpy as np
import json
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torchvision
from torchvision import models, transforms
# Check PyTorch and torchvision versions
print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)
use_pretrained = True
net = models.vgg16(pretrained=use_pretrained)
net.eval()  # inference mode: disables dropout
print(net)
# Create a preprocessing class
class BaseTransform():
    """
    Resize the image and normalize its colors.
    Attributes
    ----------
    resize : int
        Target image size after resizing.
    mean : (R, G, B)
        Mean value of each color channel.
    std : (R, G, B)
        Standard deviation of each color channel.
    """
    def __init__(self, resize, mean, std):
        self.base_transform = transforms.Compose([
            transforms.Resize(resize),  # resize so the shorter side equals 'resize'
            transforms.CenterCrop(resize),  # crop a resize x resize square from the center
            transforms.ToTensor(),  # convert to a Torch tensor
            transforms.Normalize(mean, std)  # normalize the color channels
        ])
    def __call__(self, img):
        return self.base_transform(img)
image_file_path = './data/goldenretriever-3724972_640.jpg'
img = Image.open(image_file_path)  # [height][width][color RGB]
resize = 224
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
transform = BaseTransform(resize, mean, std)
img_transformed = transform(img)  # torch.Size([3, 224, 224])
# Convert (C, H, W) to (H, W, C) and clip to [0, 1] so the image can be displayed
img_for_display = img_transformed.numpy().transpose((1, 2, 0))
img_for_display = np.clip(img_for_display, 0, 1)
plt.imshow(img_for_display)
plt.show()
# Load the ILSVRC label information and build a dictionary
ILSVRC_class_index = json.load(open('./data/imagenet_class_index.json', 'r'))
print(ILSVRC_class_index)
# Postprocessing
class ILSVRCPredictor():
    """
    Obtain a label from the model output for ILSVRC data.
    Attributes
    ----------
    class_index : dictionary
        Dictionary mapping class indices to label names.
    """
    def __init__(self, class_index):
        self.class_index = class_index
    def predict_max(self, out):
        """
        Get the ILSVRC label name with the highest probability.
        Parameters
        ----------
        out : torch.Size([1, 1000])
            Output from the network.
        Returns
        -------
        predicted_label_name : str
            Name of the label with the highest predicted probability.
        """
        maxid = np.argmax(out.detach().numpy())
        predicted_label_name = self.class_index[str(maxid)][1]
        return predicted_label_name
# Create an ILSVRCPredictor instance
predictor = ILSVRCPredictor(ILSVRC_class_index)
# Load the input image
image_file_path = './data/goldenretriever-3724972_640.jpg'
img = Image.open(image_file_path)  # [height][width][color RGB]
# After preprocessing, add a batch-size dimension
transform = BaseTransform(resize, mean, std)  # create the preprocessing class
img_transformed = transform(img)  # torch.Size([3, 224, 224])
inputs = img_transformed.unsqueeze_(0)  # torch.Size([1, 3, 224, 224])
# Feed the input to the model and convert the output to a label
out = net(inputs)  # torch.Size([1, 1000])
print(out)
result = predictor.predict_max(out)
# Print the prediction result
print("Predicted label for the input image:", result)
# --- Next file: /data_handler/data_parser.py (repo: tpimentelms/fast-conversational-banking, MIT) ---
import re
import torch
from torch.autograd import Variable
from .language_dict import LanguageDict, EOS_token, PAD_token
class DataParser(object):
def __init__(self, max_len, cuda=True, quiet=True):
self.max_len = max_len
self.input_max_len = 0
self.output_max_len = 0
self.quiet = quiet
self._cuda = cuda
# Lowercase, trim, and remove non-letter characters
def normalize_string(self, s):
raise NotImplementedError(
'DataParser class should not be used directly, and ' +
'class which inherits it should implement normalize_string')
    def read_input(self, src_file, tgt_file):
        self._print("Reading lines...")
        # Read each file and split into lines; 'with' ensures the files are closed
        with open(src_file) as f:
            lines1 = f.read().strip().split('\n')
        with open(tgt_file) as f:
            lines2 = f.read().strip().split('\n')
        # Pair up corresponding source and target lines
        pairs = [list(l) for l in zip(lines1, lines2)]
        return pairs
def split_sentence(self, p):
return p.split(' ')
def parse_pair(self, p, max_len=None):
sentences = (self.split_sentence(x) for x in p)
sentences = tuple([self.normalize_string(x) for x in sentences])
if max_len is not None:
sentences = [x[:max_len - 1] for x in sentences]
return sentences
def parse_pairs(self, pairs, max_len=None):
return [self.parse_pair(pair, max_len=max_len) for pair in pairs]
def read_data(self, src_file, tgt_file, max_len=None):
pairs = self.read_input(src_file, tgt_file)
self._print("Read %s sentence pairs" % len(pairs))
pairs = self.parse_pairs(pairs, max_len)
return pairs
def setup_parser(self, pairs):
# Make dicts instances
self.input_dict = LanguageDict('src')
self.output_dict = LanguageDict('tgt')
self.pairs = pairs
self._print("Counting words...")
for pair in self.pairs:
self.add_src_sentence(pair[0])
self.add_tgt_sentence(pair[1])
self.max_len = min(self.max_len, self.output_max_len)
self._print("Counted words:")
self._print('\t', self.input_dict.name, self.input_dict.n_words)
self._print('\t', self.output_dict.name, self.output_dict.n_words)
return self.input_dict, self.output_dict
def add_src_sentence(self, sentence):
self.input_dict.addSentence(sentence)
self.input_max_len = max(self.input_max_len, len(sentence) + 1)
def add_tgt_sentence(self, sentence):
self.output_dict.addSentence(sentence)
self.output_max_len = max(self.output_max_len, len(sentence) + 1)
def remove_rare_words(self, min_count):
self.input_dict.removeRareWords(min_count)
self._print("\t after reduce", self.input_dict.name, len(self.input_dict.index2word))
    @staticmethod
    def indexes_from_sentence(lang_dict, sentence):
return [lang_dict.getWordIndex(word) for word in sentence]
def variable_from_sentence(self, lang_dict, sentence):
indexes = DataParser.indexes_from_sentence(lang_dict, sentence)
indexes.append(EOS_token)
if self._cuda:
return Variable(torch.cuda.LongTensor(indexes).view(-1, 1))
else:
return Variable(torch.LongTensor(indexes).view(-1, 1))
def variables_from_pairs(self, pairs):
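        # Convert a batch of (src, tgt) sentence pairs into padded index tensors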
input_variables = []
target_variables = []
for pair in pairs:
input_variables += [self.variable_from_sentence(self.input_dict, pair[0]).transpose(0, 1)]
target_variables += [self.variable_from_sentence(self.output_dict, pair[1]).transpose(0, 1)]
target_avg_len = sum([x.size(1) for x in target_variables]) / len(target_variables)
input_variable = self.pad_and_cat(input_variables)
target_variable = self.pad_and_cat(target_variables)
return input_variable, target_variable, target_avg_len
def pad_and_cat(self, tensor_list):
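        # Pad each sequence to the batch's maximum length and stack them into one tensor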
max_len = max([x.size(1) for x in tensor_list])
pad_list = Variable(tensor_list[0].data.new(len(tensor_list), max_len))
pad_list[:] = PAD_token
for i, tensor in enumerate(tensor_list):
pad_list[i, :tensor.size(1)] = tensor
return pad_list
def _print(self, *args):
if not self.quiet:
print(*args)
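# A minimal usage sketch, not part of the module: DataParser is abstract, so a
# subclass must implement normalize_string (which receives a token list here).
# The subclass name and file names are hypothetical.
#     class LowercaseParser(DataParser):
#         def normalize_string(self, tokens):
#             return [t.lower() for t in tokens]
#     parser = LowercaseParser(max_len=50, cuda=False, quiet=False)
#     pairs = parser.read_data('train.src', 'train.tgt', max_len=50)
#     src_dict, tgt_dict = parser.setup_parser(pairs)
#     src, tgt, avg_len = parser.variables_from_pairs(pairs[:32])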