blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fdd25e91bdb09e58d4f219ef3803e81fd78e0545
|
6493bc4fdf2618b401c7c2acf6e04567a27a1b00
|
/klearn/kernels/__init__.py
|
1a14748333710a3cb0f1405360498e01722b3acd
|
[] |
no_license
|
mpharrigan/klearn
|
75dc5bfea65ed7018fd42d7eb502b32c4ff7a007
|
697e62993cf3a42444cc9115f8fea0425950fec2
|
refs/heads/master
| 2021-01-16T00:31:34.616280
| 2014-05-21T22:50:10
| 2014-05-21T22:50:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
from .baseclasses import AbstractKernel
from .dotproduct import DotProduct
from .polynomial import Polynomial
from .gaussian import Gaussian
|
[
"schwancr@stanford.edu"
] |
schwancr@stanford.edu
|
06987544b5a7feb1dbbc7995c0edfea534eb7a34
|
34fb963c4c942d7d9e64d91deb97f58136f32861
|
/resources/viewmodels.py
|
5826fdc12d9d76b81598bc85be256eef22c9d94a
|
[] |
no_license
|
dalelicious/iwantremote
|
180ca95429fdf9af305c40aeaaf43c0bce5ac52c
|
70509125e9981a2624c4d05f8de60ead194db154
|
refs/heads/master
| 2023-06-01T08:40:42.238496
| 2021-06-24T10:37:03
| 2021-06-24T10:37:03
| 209,974,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
# Django
from django.utils import timezone
# Resources
from . models import Resources
class ResourcesViewModel():
    """Thin view-model wrapper around the Resources model queries."""

    def get_blog_by_name(self, blogTitle):
        """Return the single Resources entry whose slugTitle equals blogTitle."""
        return Resources.objects.get(slugTitle=blogTitle)

    def get_blog_list(self):
        """Return a queryset containing every Resources entry."""
        return Resources.objects.all()
|
[
"dale.torre@ubiquitygs.com"
] |
dale.torre@ubiquitygs.com
|
5d978145f8d58e4ca97c9537773c5ee11431fccb
|
539f531d07faf4d86ccc548e2b6dae706056a906
|
/Environnement/dags/Batch_Longueur_Chaine.py
|
baf53e3917fcb9dd10dc3761bebaa8cf8db98556
|
[] |
no_license
|
Allan06/TPT-Airflow
|
f9c7202982fcd63a402584ae6569254b1cf43cfe
|
8461684e8f6c6c1923d1681e417e483a20a67a9b
|
refs/heads/master
| 2023-04-10T23:40:36.397472
| 2021-04-18T01:19:02
| 2021-04-18T01:19:02
| 350,856,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,546
|
py
|
from airflow import DAG
from airflow.operators.python import PythonOperator
from airflow.operators.bash import BashOperator
from airflow.utils.dates import days_ago
from airflow.providers.mysql.operators.mysql import MySqlOperator
import pandas as pd
import numpy as np
import glob
import os
# =============================================================================
# Initilisation variable globales
# =============================================================================
chemin_resultats = "/opt/airflow/dags/resultats/"   # where per-chunk result CSVs are written
chemin_donnees = "/opt/airflow/dags/donnees/"       # where the input CSV lives
PATRONYMES = f"{chemin_donnees}patronymes.csv"      # source file: surname,count pairs
NB_TRAITEMENTS = 5                                  # number of parallel batch tasks
NROWS = 800000                                      # total rows read from the source CSV
CHUNKSIZE = NROWS // NB_TRAITEMENTS                 # rows handled per batch task
# =============================================================================
# Test BD Mysql
# =============================================================================
# DDL: recreate the PATRONYME table from scratch on every run.
requete_creer = """DROP TABLE IF EXISTS PATRONYME;
CREATE TABLE PATRONYME (
id_patronyme INT AUTO_INCREMENT PRIMARY KEY,
patronyme VARCHAR(30) NOT NULL,
nombre INT NOT NULL)"""
# NOTE(review): this CSV read and string-building loop run at DAG *parse* time
# (every scheduler heartbeat); consider moving into a task. Values are
# interpolated directly into SQL — acceptable for trusted CSV data only.
donnee = pd.read_csv(PATRONYMES, sep=",")
requete_inserer = ""
for i in range(1000):
    patronyme, nombre = donnee.iloc[i]
    requete_inserer += f"""INSERT INTO PATRONYME(patronyme, nombre)
VALUES(
"{patronyme}",
{nombre}
); """
# =============================================================================
# Fonctions non DAG
# =============================================================================
def inserer_taille(lot):
    """
    Insert a new 'Taille' column into a dataset, holding the length of each
    patronyme (surname).

    Improvement over the original: the element-by-element Python loop filling
    a numpy array is replaced by the vectorized pandas `.str.len()`, which is
    both clearer and faster on large chunks; the result is identical.

    :param lot: DataFrame chunk to enrich (not modified in place)
    :return: a copy of the DataFrame with the extra integer 'Taille' column
    """
    donnees = lot.copy().fillna("")  # NaN surnames count as length 0, like the original
    tailles = donnees.patronyme.str.len().to_numpy(dtype=int)
    return donnees.assign(Taille=tailles)
def recuperer_fichiers():
    """
    Return the list of per-chunk patronyme result files created so far.

    :return: list of paths matching "<chemin_resultats>*-*.csv"
    """
    motif = f"{chemin_resultats}*-*.csv"
    return glob.glob(motif)
# =============================================================================
# Fonctions liées aux taâches
# =============================================================================
def preparer_data():
    """
    Prepare the run: delete result files left over from a previous execution.

    Bug fixed: os.remove() does not expand shell wildcards, so the original
    `os.remove(chemin_resultats + "*")` always raised an OSError (silently
    swallowed) and never deleted anything. The pattern is now expanded with
    glob and each file removed individually.

    :return: None
    """
    for ancien in glob.glob(chemin_resultats + "*"):
        try:
            os.remove(ancien)
        except OSError:
            pass  # best-effort cleanup, as in the original intent
    print("PREPARER_DATA")
def traitement_unitaire_b(**kwargs):
    """
    Compute the 'Taille' column for one chunk and write the enriched chunk
    to its own CSV file.

    :param kwargs: {"lot": DataFrame chunk}
    :return: None
    """
    lot = kwargs["lot"]
    premier = lot.index[0]
    enrichi = inserer_taille(lot)
    # Only the very first chunk (index 0) writes the CSV header.
    enrichi.to_csv(f"{chemin_resultats}patronymes_tailles-{premier}.csv",
                   header=(premier == 0), index=False)
    print(f"TRAITEMENT_UNITAIRE_BATCH_LOT_{premier}")
def concatener_data_test():
    """
    Concatenate the per-chunk result files into the final output CSV.

    :return: None
    """
    fichiers = recuperer_fichiers()
    with open(f"{chemin_resultats}patronymes_tailles.csv", 'a') as sortie:
        for chemin in fichiers:
            with open(chemin, 'r') as entree:
                sortie.write(entree.read())
    print("CONCATENER_DATA_TEST")
def effacer_data():
    """
    Delete every temporary per-chunk CSV file.

    :return: None
    """
    restants = recuperer_fichiers()
    if restants:
        try:
            for chemin in restants:
                os.remove(chemin)
        except OSError:
            pass  # ignore files that vanished in the meantime
    print("EFFACER_DATA")
# =============================================================================
# DAG et TASKs
# =============================================================================
dag = DAG(
    dag_id='Batch_Longueur_Chaine',
    start_date=days_ago(2)
)
# NOTE(review): "ceer_table" is presumably a typo for "creer_table"; it is a
# local variable only, so renaming would be purely cosmetic.
ceer_table = MySqlOperator(
    task_id='TEST_creer_table',
    sql=requete_creer,
    mysql_conn_id='mysql_connexion',
    database='airflow',
    autocommit=True,
    dag=dag
)
inserer_table = MySqlOperator(
    task_id='TEST_inserer_table',
    sql=requete_inserer,
    mysql_conn_id='mysql_connexion',
    database='airflow',
    autocommit=True,
    dag=dag
)
# NOTE: this rebinds `preparer_data` (and `effacer_data` below) from the
# function to the operator; it works only because python_callable captured
# the function object before the rebinding.
preparer_data = PythonOperator(
    task_id='preparer_data',
    python_callable=preparer_data,
    dag=dag,
)
concatener_data = BashOperator(
    task_id='concatener_data',
    bash_command=f"cat {chemin_resultats}*-*.csv >> {chemin_resultats}patronymes_tailles.csv",
    dag=dag,
)
effacer_data = PythonOperator(
    task_id='effacer_data',
    python_callable=effacer_data,
    dag=dag,
)
# One batch task per CSV chunk; note the CSV is read at DAG parse time.
for lot in pd.read_csv(PATRONYMES, sep=",", chunksize=CHUNKSIZE, nrows=NROWS):
    traitement_unitaire_batch = PythonOperator(
        task_id=f"traitement_unitaire_batch_{lot.index[0]}",
        python_callable=traitement_unitaire_b,
        op_kwargs={'lot': lot},
        dag=dag
    )
    ceer_table >> inserer_table >> preparer_data >> traitement_unitaire_batch >> concatener_data >> effacer_data
|
[
"allanpajany@hotmail.fr"
] |
allanpajany@hotmail.fr
|
694c27356be2599a64a8f60afce851dbb984aa19
|
11c2e1b6fada746b71e0bd9575f5936a352f14df
|
/Compare.py
|
d815227a2fcbf91b0d4faed9af68ce50ace23ec5
|
[] |
no_license
|
Eglopez/Python-GUI
|
6e3e49f25ebb9f60b41d1981c46d6852f5a5eb51
|
05d7c71206d293aea2e8a32a21809f06d9fdcb2c
|
refs/heads/master
| 2020-07-05T08:34:26.338755
| 2019-08-20T00:55:45
| 2019-08-20T00:55:45
| 202,592,236
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,385
|
py
|
class Compare():
    """Three-way comparator ordering values over a custom alphabet.

    compare() returns 0 for equal values, -1 when the first argument sorts
    before the second, and 1 otherwise. Accepts ints, strings, and objects
    exposing a `.name` attribute (Node instances).

    Bugs fixed from the original:
      * `type(obj) == 'int'` / `'__main__.Node'` compared a type object to a
        string literal, so no branch ever matched and plain strings were never
        handled — every comparison degenerated to "" vs "".
      * compare() called self.lesserLength(), which did not exist (the method
        was named compareLesserLength); that method also referenced the
        undefined name `str2` and returned the *longer* length, which would
        have indexed past the end of the shorter string.
    """

    def __init__(self):
        # Collation order: punctuation < digits < lowercase < uppercase < accented.
        self.alphabet = "!@#$%&/()=?¡¿¡[]{*}012345678890abcdefghijklmnñopqrstuvwxyzABCDEFGHIJKLMNÑOPQRSTUVwXYZáéíóúÁÉÍÓÚüÜ"

    def _as_text(self, obj):
        """Normalize an operand (int, Node-like, or string) to a stripped string."""
        if isinstance(obj, int):
            return ("%s" % obj).strip()
        if hasattr(obj, "name"):  # duck-typed Node
            return str(obj.name).strip()
        return str(obj).strip()

    def compare(self, obj1, obj2):
        """Return -1, 0 or 1 ordering obj1 relative to obj2 by self.alphabet."""
        obj_1 = self._as_text(obj1)
        obj_2 = self._as_text(obj2)
        if obj_1 == obj_2:
            return 0
        # Walk the common prefix; the first differing character decides.
        for c1, c2 in zip(obj_1, obj_2):
            i1 = self.alphabet.index(c1)
            i2 = self.alphabet.index(c2)
            if i1 < i2:
                return -1
            if i1 > i2:
                return 1
        # Identical common prefix: the shorter string sorts first.
        return -1 if len(obj_1) < len(obj_2) else 1

    def compareLesserLength(self, str1, atr2):
        """Return the length of the shorter of the two strings.

        Fixed: the original referenced the undefined name `str2` (NameError)
        and computed the longer length despite its name.
        """
        return min(len(str1), len(atr2))
|
[
"eduardolopezlainez2001@gmail.com"
] |
eduardolopezlainez2001@gmail.com
|
a582cff63bfa1208999424ac532f639d57e4946c
|
ce6fc44470dcb5fca78cdd3349a7be70d75f2e3a
|
/AtCoder/Grand 039/A.py
|
2ec2212c13eebe7ef3a938d8513f35a3b69c6e01
|
[] |
no_license
|
cormackikkert/competitive-programming
|
f3fa287fcb74248ba218ecd763f8f6df31d57424
|
3a1200b8ff9b6941c422371961a127d7be8f2e00
|
refs/heads/master
| 2022-12-17T02:02:40.892608
| 2020-09-20T11:47:15
| 2020-09-20T11:47:15
| 266,775,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 546
|
py
|
S = input()
K = int(input())
import random
import string
def count(string):
    """Count non-overlapping adjacent equal-character pairs in `string`.

    After counting a pair, the scan skips past it so the same character
    cannot participate in two pairs (e.g. "aaa" counts as 1, "aaaa" as 2).
    """
    total = 0
    i = 1
    while i < len(string):
        if string[i] == string[i - 1]:
            total += 1
            i += 2  # skip the matched pair entirely
        else:
            i += 1
    return total
# AtCoder Grand Contest 039 A: the string S is conceptually repeated K times;
# output the minimum number of character changes so that no two adjacent
# characters are equal.
if S == len(S) * S[0]:
    # Whole string is one repeated character: change every second position
    # across the full K*len(S) concatenation.
    res = (K * len(S)) // 2
elif S[0] == S[-1]:
    # The junction between consecutive copies merges the leading and
    # trailing runs of the same character.
    new = S.strip(S[0])
    start = len(S) - len(S.lstrip(S[0]))  # length of the leading run
    end = len(S) - len(S.rstrip(S[0]))    # length of the trailing run
    res = start // 2 + end // 2 + K * count(new) + (K - 1) * ((start + end) // 2)
else:
    # No interaction at the copy boundaries: just K times the per-copy count.
    res = K * count(S)
print(res)
|
[
"u6427001@anu.edu.au"
] |
u6427001@anu.edu.au
|
4a98bbe875ea92033cf4104a4afb702dc69a4f41
|
8873755db1c83c077921f13dc2ce2af54326861c
|
/neuralNetwork.py
|
da687d60ac4250d1f9c600f7fa3c459ce1933f36
|
[] |
no_license
|
sgandhi101/TSLA_Prediction
|
72de42c18a505e3e083329099655e45460472599
|
cc5f664d302c1e1d421e96d3d89655cbc79b178b
|
refs/heads/master
| 2023-01-22T01:52:25.138625
| 2020-11-30T19:05:09
| 2020-11-30T19:05:09
| 316,622,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,039
|
py
|
# DO *NOT* RUN ON LOCAL COMPUTER, SEE NOTE BELOW BEFORE ATTEMPTING
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score

data = pd.read_csv('classificationAggregate.csv')  # Import the dataset
X = data.iloc[0:, :-1]  # X values are everything except whether or not the price went up
# NOTE(review): this assumes the label is column index 4 AND the last column
# (otherwise X and y overlap) — verify against the CSV schema.
y = data.iloc[:, 4]  # Y value is a True/False
# NOTE(review): test_size=0.1 holds out 10% for *testing* (90% trains);
# the comment below suggests the author may have intended train_size=0.1.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
# Create a training dataset that is 10 percent of the overall data
# The reason that this one is lower is that we found that a neural network over fits the data much more easily
# than any of the other classifiers we trained so we found that 10% is better for our results
scale = StandardScaler()  # Standardize the data in order to make sure training data is normally distributed
scale.fit(X_train)  # Fit it with training data only, to avoid test-set leakage
# Apply this to the rest of the data
X_train = scale.transform(X_train)
X_test = scale.transform(X_test)
# NOTE: WE STRONGLY RECOMMEND YOU DO *NOT* RUN THIS ON YOUR LOCAL COMPUTER. WE RAN THIS ON IU'S CARBONATE
# SUPERCOMPUTER AND IT STILL TOOK A CONSIDERABLE AMOUNT OF TIME TO RUN. IT WILL MOST LIKELY CRASH YOUR
# LOCAL COMPUTER AS IT REQUIRES *MUCH* MORE RESOURCES THAN ANY OF THE OTHER ALGORITHMS
# Train a neural network with six hidden layers of 5000 neurons each, up to 10 million iterations
# These numbers changed a lot; we played around with different numbers of nodes, layers, and iterations
mlp = MLPClassifier(hidden_layer_sizes=(5000, 5000, 5000, 5000, 5000, 5000), max_iter=10000000)
mlp.fit(X_train, y_train.values.ravel())  # Fit the training data
predictions = mlp.predict(X_test)  # Predict on the held-out data
# Print a list of accuracy metrics
print(confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))
print(accuracy_score(y_test, predictions))
|
[
"sugandhi@iu.edu"
] |
sugandhi@iu.edu
|
26654af914453d575f2b21bbae4b6a0ee5ba6035
|
a49acc754f99706a74270ba867d11b7851131160
|
/apps/users/models.py
|
378b91f30f559ed736d1cc026a3438ef59ff4d9f
|
[] |
no_license
|
xr1627119275/MxOnline
|
ff45dc4c2b116bad6c9609b7b636ee15827609de
|
d22296ef7d79a62b835f2349b71791d11772910e
|
refs/heads/master
| 2021-07-21T07:00:23.608556
| 2017-10-30T07:39:38
| 2017-10-30T07:39:38
| 108,816,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,854
|
py
|
# _*_ encoding:utf-8 _*_
from __future__ import unicode_literals
from datetime import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class UserProfile(AbstractUser):
    """Site user profile extending Django's built-in AbstractUser."""
    nick_name = models.CharField(max_length=50,verbose_name=u"昵称",default='')  # nickname
    # NOTE(review): "birday" looks like a typo for "birthday"; renaming would
    # require a DB migration, so it is left as-is.
    birday = models.DateField(verbose_name=u'生日',null=True,blank=True)  # birth date
    gender = models.CharField(max_length=2,choices=(('male',u'男'),('female',u'女')),default='female')  # male/female
    address = models.CharField(max_length=100,default=u'')
    mobile = models.CharField(max_length=11,null=True,blank=True)  # 11 digits: Chinese mobile format
    image = models.ImageField(upload_to='image/%y/%m',default=u'image/default.png',max_length=100)  # avatar
    class Meta:
        verbose_name = '用户信息'  # "user information"
        verbose_name_plural = verbose_name
    def __unicode__(self):
        # Python 2-style string representation (file imports unicode_literals).
        return self.username
# Email verification
class EmailVerifyRecord(models.Model):
    """Stores one-off email verification codes (registration / password reset)."""
    code = models.CharField(max_length=20, verbose_name=u'验证码')  # verification code
    email = models.EmailField(max_length=50, verbose_name=u'邮箱')  # recipient email
    # NOTE(review): the 'register' label u'zhuce' is pinyin, probably meant
    # u'注册' — it is a runtime string, so it is left unchanged here.
    send_type = models.CharField(choices=(('register',u'zhuce'),('forget',u'忘记密码')),max_length=10)
    # datetime.now without parentheses: evaluated at save time, not import time.
    send_time = models.DateTimeField(default=datetime.now)
    class Meta:
        verbose_name = u'邮箱验证码'  # "email verification code"
        verbose_name_plural = verbose_name
# Carousel banner
class Banner(models.Model):
    """Home-page carousel banner entry."""
    title = models.CharField(max_length=100,verbose_name=u'标题')  # title
    image = models.ImageField(upload_to='banner/%y/%m',verbose_name=u"轮播图")  # banner image
    url = models.URLField(max_length=200,verbose_name=u'访问地址')  # target URL
    index = models.IntegerField(default=100,verbose_name=u'顺序')  # display order (lower first)
    # NOTE(review): "add_title" is likely a typo for "add_time" (it stores the
    # creation timestamp); renaming requires a migration, so left as-is.
    add_title = models.DateTimeField(default=datetime.now,verbose_name=u'添加时间')
    class Meta:
        verbose_name = u'轮播图'  # "carousel banner"
        verbose_name_plural = verbose_name
|
[
"1627119275@qq.com"
] |
1627119275@qq.com
|
f338508b43ff286c18818c649354e6ab8c88919d
|
c87727a77d17eef2afebc72c29e3ee347d05737c
|
/task3_1.py
|
930e5fa01a350f9dbb1fc7fb34787ec805a133bf
|
[] |
no_license
|
ikosolapov1983/Homework3
|
5331d8e2afec69e2752b74317187ddb0883e81d0
|
20482824f134fae66e46a8b03a48c0df486d4de5
|
refs/heads/master
| 2020-08-05T00:45:59.740475
| 2019-10-05T12:14:08
| 2019-10-05T12:14:08
| 212,337,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 56
|
py
|
# Print the integers 0 through 10, each suffixed with "!".
x = 0
while x <= 10:
    print(f"{x}!")
    x += 1
|
[
"ikosolapov@hotmail.com"
] |
ikosolapov@hotmail.com
|
7c856f3effb8ea92ca241b2013673a63a53dd7ef
|
dfdb55ae1a05edada92d3840c67dec7e2d4da1e9
|
/realEstate/listings/views.py
|
171dc2333876d55e475fbc872e423f7d2047e42a
|
[
"MIT"
] |
permissive
|
OmarSalah95/Django-Toy
|
17ee3646665c9e1bcc8ecbb354142f8dadd74ed5
|
4899b5f9e30dae0623aa9a3a134e375cacccea10
|
refs/heads/master
| 2023-04-27T12:06:05.813683
| 2019-11-18T00:49:59
| 2019-11-18T00:49:59
| 222,306,035
| 0
| 0
|
MIT
| 2023-04-21T20:47:56
| 2019-11-17T20:15:03
|
CSS
|
UTF-8
|
Python
| false
| false
| 281
|
py
|
from django.shortcuts import render
# Create your views here.
def index(request):
    """Render the listings overview page."""
    return render(request, 'listings/listings.html')
def listing(request):
    """Render a single-listing page (static template; no listing id is taken yet)."""
    return render(request, 'listings/listing.html')
def search(request):
    """Render the search page."""
    return render(request, 'listings/search.html')
|
[
"42569856+OmarSalah95@users.noreply.github.com"
] |
42569856+OmarSalah95@users.noreply.github.com
|
1134bd350cd0de3c935b8bc8e1ae7403c7749842
|
5de046cc4849f52a5737c2591b2c0144b9981103
|
/policy_gradient.py
|
b724c24eca9a498c044fd4cd71ec650f38370df7
|
[] |
no_license
|
lionelblonde/cartpole-pg-intro-tf
|
36300d015c1e328103b45d5c76283f4249424dfa
|
1c896c991d6b758a379685708f6348ffc1115537
|
refs/heads/master
| 2020-03-06T18:48:44.028750
| 2018-03-27T16:31:15
| 2018-03-27T16:31:15
| 126,859,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,489
|
py
|
import gym
# from gym import wrappers
# import math
import random
import numpy as np
import tensorflow as tf # since we now use gradients
import matplotlib.pyplot as plt
# This function is unused here: tf provides tf.nn.softmax()
def softmax(x):  # x is a vector
    """Numerically stable softmax of a 1-D numpy vector."""
    # Subtracting the max changes nothing mathematically but prevents
    # overflow in exp() for large inputs.
    exps = np.exp(x - np.max(x))
    return exps / exps.sum()
# The use of tf scopes enables us to use loss, optimizer, etc. names for both functions
# Update our policy to prefer certain actions
def policy_gradient():
    """Build the TF1 graph for a linear softmax policy and its REINFORCE update.

    Returns a 5-tuple (computed_probs, state, action, advantage, optimizer):
      computed_probs -- [batch, 2] tensor of action probabilities
      state          -- [batch, 4] placeholder for observed states
      action         -- [batch, 2] placeholder of one-hot chosen actions
      advantage      -- [batch, 1] placeholder (empirical return minus baseline)
      optimizer      -- Adam step minimizing -sum(log pi(a|s) * advantage)
    """
    with tf.variable_scope("policy"):
        # state space dimension = 4, action space dimension = 2
        # policy = one linear combination of the state variables per action
        parameters = tf.get_variable("policy_parameters", [4, 2])
        state = tf.placeholder("float", [None, 4])
        action = tf.placeholder("float", [None, 2])
        advantage = tf.placeholder("float", [None, 1])
        linear = tf.matmul(state, parameters)  # no bias, outputs a vector ([1, 2])
        # Softmax activation: transforms the vector in probs of playing each action ([1, 2])
        # it is the usual choice as output activation for classification problems (sig too)
        computed_probs = tf.nn.softmax(linear)
        action_prob = tf.reduce_sum(tf.multiply(computed_probs, action), axis=1)
        # element-wise mul
        # action is a one-hot vector, so the element-wise mul outputs a one-hot vector
        # reduce_sum along the 1 axis transforms the one-hot vector into the scalar it contains
        # The two steps could be replaced by one dot product
        eligibility = tf.log(action_prob) * advantage  # no np.matmul since both are scalars
        loss = -tf.reduce_sum(eligibility)
        optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
        return computed_probs, state, action, advantage, optimizer
# How to measure the success of performing given actions in given states: values
# 1 hidden layer NN (10 neurons wide hidden layer) to determine the best action for a state
# input is the state ([1, 4])
# output is the value ([1, 1])
def value_gradient():
    """Build the TF1 graph for the state-value baseline network and its update.

    Returns a 5-tuple (computed_value, state, new_value, loss, optimizer):
      computed_value -- [batch, 1] predicted state value
      state          -- [batch, 4] placeholder for states
      new_value      -- [batch, 1] placeholder of target (Monte-Carlo) returns
      loss           -- L2 loss between prediction and target
      optimizer      -- Adam step minimizing that loss
    """
    with tf.variable_scope("value"):
        # Calculate the value of a state
        state = tf.placeholder("float", [None, 4])
        w1 = tf.get_variable("w1", [4, 10])  # weight matrix input (state) -> hidden
        b1 = tf.get_variable("b1", [1, 10])  # bias vector input (state) -> hidden
        h1 = tf.nn.relu(tf.matmul(state, w1) + b1)  # hidden layer, ReLU activation
        w2 = tf.get_variable("w2", [10, 1])  # weight matrix hidden -> output (value)
        b2 = tf.get_variable("b2", [1, 1])  # bias vector hidden -> output (value)
        computed_value = tf.matmul(h1, w2) + b2  # linear activation
        # it is the usual choice as output activation for regression problems
        # Update the value of a state
        new_value = tf.placeholder("float", [None, 1])
        loss = tf.nn.l2_loss(computed_value - new_value)
        optimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss)
        return computed_value, state, new_value, loss, optimizer
# Run episodes to gather data, similarly to random search and hillclimbing
# except that now we want to recoard the transitions and rewards gotten from them
def run_episode(env, policy_grad, value_grad, sess):
    """Run one CartPole episode, then update both value and policy networks.

    :param env: gym CartPole environment
    :param policy_grad: tuple returned by policy_gradient()
    :param value_grad: tuple returned by value_gradient()
    :param sess: active TF session
    :return: total (undiscounted) reward collected during the episode
    """
    pl_computed_probs, pl_state, pl_action, pl_advantage, pl_optimizer = policy_grad
    vl_computed_value, vl_state, vl_new_value, vl_loss, vl_optimizer = value_grad
    observation = env.reset()  # contains initial state information, wrong format though
    total_reward = 0
    states = []
    actions = []
    advantages = []
    transitions = []
    update_values = []
    # Run the episode (CartPole-v0 caps episodes at 200 timesteps)
    for timestep in range(200):
        # env.render() # uncomment to see the simulation as it runs
        # Step 1: compute the policy
        # Reshape observation from [4,] -> [1, 4] to coincide with state
        observed_state = np.expand_dims(observation, axis=0)
        # Compute the probabilities over actions in the observed states
        action_probs = sess.run(pl_computed_probs, feed_dict={pl_state: observed_state})
        # pl_computed_probs is a list -> sess.run returns a list
        # the returned list contains one element, which is a [1, 2] list containing the probs
        # [[action_0_prob, action_1_prob]] -> 2 square brackets
        # since we asked for one element (which happens to be a list), as opposed to several (if
        # we asked for several elements), for which we would have gotten a list of those elements
        action = 0 if random.uniform(0, 1) < action_probs[0][0] else 1
        # this ensures that the action is picked non-deterministically
        # otherwise we would just deterministically pick the action with highest prob all the time
        # instead of picking it according to its probability
        # Step 2: record the transition
        states.append(observation)  # observation before reshape
        action_one_hot = np.zeros(2)
        action_one_hot[action] = 1  # one-hot vector indicating which action to perform
        actions.append(action_one_hot)
        # Take action in the environment
        old_observation = observation  # already appened to states
        observation, reward, done, info = env.step(action)  # OpenAI Gym API
        transitions.append((old_observation, action, reward))
        # note that we ignore the s_{t+1}, the state we arrive at: observation
        total_reward += reward
        if done:
            print("--- episode finished after %s timesteps ---" % (timestep))
            break
    # Compute the return
    for index, transition in enumerate(transitions):
        observation, action, reward = transition  # reward useless: only future rewards
        # Step 1: calculate the discounted MC returned
        gamma = 0.97  # discount factor
        _return = 0  # only interested in the future reward, not the current or previous ones
        # _return is the empirical estimate of the Q-value
        episode_duration = len(transitions)  # reminder: there is one transition per timestep
        number_remaining_transitions = episode_duration - index
        for i in range(number_remaining_transitions):
            # add the immediate rewards of each remaining transitions in the current episode
            _return += transitions[index + i][2] * (gamma ** i)
        # Step 2: record the advantage
        observed_state = np.expand_dims(observation, axis=0)  # reshape to match state
        current_value = sess.run(vl_computed_value, feed_dict={vl_state: observed_state})[0][0]
        # [0][0] to go from [[value]] to value
        advantages.append(_return - current_value)
        # Step 3: record the return for value updating
        update_values.append(_return)
    # Update value function
    update_values_vector = np.expand_dims(update_values, axis=1)  # from [n,] to [n, 1] (vector)
    sess.run(vl_optimizer, feed_dict={vl_state: states, vl_new_value: update_values_vector})
    # Update the policy
    advantages_vector = np.expand_dims(advantages, axis=1)  # from [m,] to [m, 1] (vector)
    sess.run(pl_optimizer,
             feed_dict={pl_state: states, pl_action: actions, pl_advantage: advantages_vector})
    return total_reward
def train(submit):
    """Train the policy/value networks on CartPole until solved or 2000 episodes.

    :param submit: currently unused (the gym Monitor/upload code is commented out)
    :return: index of the last episode run (lower is better)
    """
    env = gym.make('CartPole-v0')
    # env = wrappers.Monitor(env, "./cartpole-experiment")
    tf.reset_default_graph()  # necessary to clean up inbetween episodes
    policy_grad = policy_gradient()
    value_grad = value_gradient()
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    for episode in range(2000):
        reward = run_episode(env, policy_grad, value_grad, sess)
        print("--- episode %d | reward %d ---" % (episode, reward))
        if reward == 200:  # upper threshold = in balance for 200 timesteps
            print("Stood up for 200 timesteps!")
            break
    return episode
# Graphs: repeat training 50 times and plot how many episodes each run needed.
results = []
for _ in range(50):
    results.append(train(submit=False))
# NOTE(review): `normed` was removed in Matplotlib 3.x; newer versions require
# `density=1` instead.
plt.hist(results, 50, normed=1, facecolor="g", alpha=0.75)
plt.xlabel("Episodes required to reach 200")
plt.ylabel("Frequency")
plt.title("Histogram of Policy Gradient")
plt.savefig("cartpole-policy-gradient.png")
plt.show()  # has to be after savefig call, otherwise blank image in file
print("Average #episode required to reach target score (200): %s" % (np.sum(results) / 50.0))
|
[
"lionel.blonde@gmail.com"
] |
lionel.blonde@gmail.com
|
60188d8b70ac88226e0b5c0c47a462c35d280a1f
|
fe281868b9321c3b8e186f5349bd022e2a082e07
|
/实验2_垃圾邮件过滤/贝叶斯多项式算法/main.py
|
9869ce07a319ff0c6b8b06560ecc49d34868459d
|
[] |
no_license
|
LastCigarete/infosecurity
|
c51d381276a05d3e7f82769cd286e9958ee2fb15
|
780c088d7c139bfe1bdf09445ff773ef53ac8156
|
refs/heads/master
| 2023-03-15T18:15:04.080890
| 2020-05-06T03:15:46
| 2020-05-06T03:15:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,277
|
py
|
#coding=utf-8
import os
from collections import Counter
import numpy as np
import random
import re
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif']=['SimHei']
def getFileDiv():
    """Collect ham/spam email paths and build the matching 0/1 label array,
    then shuffle paths and labels in unison.

    Improvement: the label array size was hard-coded to 50; it is now derived
    from the actual number of files found, so the function works for any
    dataset size (behavior is unchanged for the original 25+25 layout).

    :return: (paths, label) where label[i] == 1 marks a spam email
    """
    hamFile = './/email//ham'
    spamFile = './/email//spam'
    hamEmails = [os.path.join(hamFile, f) for f in os.listdir(hamFile)]
    spamEmails = [os.path.join(spamFile, f) for f in os.listdir(spamFile)]
    total = len(hamEmails) + len(spamEmails)  # was hard-coded to 50
    label = np.zeros(total)
    label[len(hamEmails):] = 1  # spam entries follow the ham entries
    hamEmails.extend(spamEmails)
    # Seed both shuffles identically so paths and labels stay aligned.
    randnum = os.urandom(8)
    random.seed(randnum)
    random.shuffle(label)
    random.seed(randnum)
    random.shuffle(hamEmails)
    return hamEmails, label
def getWordsProb(filepath, label):
    """Build per-class word probability tables from the training emails.

    :param filepath: list of email file paths
    :param label: array of 0 (ham) / 1 (spam) aligned with filepath
    :return: (hamDict, spamDict) mapping word -> smoothed P(word|class)
    """
    spamList = []
    hamList = []
    for index,path in enumerate(filepath):
        with open(path, 'r', encoding='gb18030', errors='ignore') as f:
            lines = f.readlines()
            for line in lines:
                # Keep only alphabetic runs as tokens.
                line = re.sub('[^a-zA-Z]',' ',line)
                words = line.split()
                if label[index] == 0:
                    hamList.extend(words)
                else:
                    spamList.extend(words)
    spamCounter = Counter(spamList)
    hamCounter = Counter(hamList)
    for item in list(spamCounter):  # drop single-character tokens
        if len(item) == 1:
            del spamCounter[item]
    for item in list(hamCounter):
        if len(item) == 1:
            del hamCounter[item]
    # Union vocabulary of both classes, each word counted once.
    spamSet = set(spamCounter)
    hamSet = set(hamCounter)
    spamSet.update(hamSet)
    allWordList = Counter(spamSet)
    spamDict = {}
    hamDict = {}
    # Add-one (Laplace) smoothing: every vocabulary word appears at least
    # once, so no per-word probability is zero in the later product.
    spamCounter = allWordList + spamCounter
    spamCnt = sum(spamCounter.values())
    for k,v in spamCounter.items():
        spamDict[k] = v/spamCnt
    hamCounter = allWordList + hamCounter
    hamCnt = sum(hamCounter.values())
    for k,v in hamCounter.items():
        hamDict[k] = v/hamCnt
    #print(sum(hamDict.values()), sum(spamDict.values()))
    return hamDict,spamDict
def mulNBTest(hamDict, spamDict, testEmail, testLabel):
    """Classify test emails with a naive-Bayes combination of per-word spam
    posteriors and report accuracy, precision and recall.

    :param hamDict: word -> P(word|ham) from getWordsProb
    :param spamDict: word -> P(word|spam) from getWordsProb
    :param testEmail: list of email file paths to classify
    :param testLabel: ground-truth labels aligned with testEmail (1 = spam)
    :return: (accuracy, precision, recall)
    """
    result = []  # predicted labels, compared against testLabel below
    spamProb = 0.5  # prior P(spam) = 0.5
    hamProb = 0.5  # prior P(ham) = 0.5
    for testFile in testEmail:
        testWords = []
        with open(testFile, 'r', encoding='gb18030', errors='ignore') as f:
            lines = f.readlines()
            for line in lines:
                line = re.sub('[^a-zA-Z]',' ',line)
                words = line.split()
                testWords.extend(words)
        testCounter = Counter(testWords)
        for item in list(testCounter):  # drop single-character tokens
            if len(item) == 1:
                del testCounter[item]
        pureWords = list(testCounter)  # unique words in this email
        probList = []  # per-word spam posterior contributions
        # Unseen words fall back to the median class frequency.
        mediumFre1 = np.median(list(hamDict.values()))
        mediumFre2 = np.median(list(spamDict.values()))
        for word in pureWords:
            pwh = hamDict.get(word, mediumFre1)  # P(word|ham)
            pws = spamDict.get(word, mediumFre2)  # P(word|spam)
            psw = (spamProb*pws)/(pwh*hamProb+pws*spamProb)  # P(spam|word) = P(spam)*P(word|spam)/P(word)
            probList.append(psw)
        numerator = 1  # numerator of the combined posterior
        denominator= 1  # product of (1 - psw) terms
        for psw in probList:
            numerator *= psw
            denominator *= (1-psw)
        # P(spam|word1word2…wordn) = P1P2…Pn/(P1P2…Pn+(1-P1)(1-P2)…(1-Pn))
        resProb = numerator/(numerator+denominator)
        if resProb > 0.9:  # high threshold biases classification toward ham
            result.append(1)
        else:
            result.append(0)
    # Compute accuracy, precision and recall.
    rightCnt = 0
    TP = 0  # positives (spam) predicted positive
    FN = 0  # positives predicted negative
    FP = 0  # negatives predicted positive
    for index in range(len(testLabel)):
        if testLabel[index] == 1:
            if result[index] == 1:
                rightCnt += 1
                TP += 1
            else:
                FN +=1
        else:
            if result[index] == 0:
                rightCnt += 1
            else:
                FP +=1
    accuracy = rightCnt / len(testLabel)
    precision = TP/(TP+FP)
    recall = TP/(TP+FN)
    return accuracy,precision,recall
def main():
    """Run one train/test split of the spam filter and report metrics.

    Bug fixed: the original print format string "%f%f%f" ran the three numbers
    together with no separator, producing unreadable output; spaces are added.

    :return: (accuracy, precision, recall)
    """
    allEmail, label = getFileDiv()
    # NOTE(review): the 40/10 split assumes the 50-email dataset layout.
    trainEmail = allEmail[:40]
    trainLable = label[:40]
    testEmail = allEmail[40:]
    testLabel = label[40:]
    hamDict, spamDict = getWordsProb(trainEmail, trainLable)
    accuracy, precision, recall = mulNBTest(hamDict, spamDict, testEmail, testLabel)
    print("%f %f %f" % (accuracy, precision, recall))
    return accuracy, precision, recall
if __name__ == '__main__':
    accuracy = []
    precision = []
    recall = []
    # Repeat the experiment 100 times, each with a fresh random split.
    for i in range(100):
        a,b,c = main()
        accuracy.append(a)
        precision.append(b)
        recall.append(c)
    x = list(range(100))
    plt.plot(x, accuracy, color='red', label='准确率')  # accuracy
    plt.plot(x, precision, color='skyblue', label='精确度')  # precision
    plt.plot(x, recall, color='blue', label='召回率')  # recall
    plt.legend(loc = 'upper right')
    plt.show()
|
[
"1029076560@qq.com"
] |
1029076560@qq.com
|
56799809ff84f9be3ec51a12f546d1c89424b975
|
d305e9667f18127e4a1d4d65e5370cf60df30102
|
/tests/ut/python/dataset/test_random_crop_and_resize_with_bbox.py
|
2727325e007ba40b6bb6c02558a5846a078903a1
|
[
"Apache-2.0",
"MIT",
"Libpng",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"MPL-1.1",
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI",
"MPL-1.0",
"GPL-2.0-only",
"MPL-2.0",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
imyzx2017/mindspore_pcl
|
d8e5bd1f80458538d07ef0a8fc447b552bd87420
|
f548c9dae106879d1a83377dd06b10d96427fd2d
|
refs/heads/master
| 2023-01-13T22:28:42.064535
| 2020-11-18T11:15:41
| 2020-11-18T11:15:41
| 313,906,414
| 6
| 1
|
Apache-2.0
| 2020-11-18T11:25:08
| 2020-11-18T10:57:26
| null |
UTF-8
|
Python
| false
| false
| 9,546
|
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing RandomCropAndResizeWithBBox op in DE
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
from mindspore import log as logger
from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \
config_get_set_seed, config_get_set_num_parallel_workers, save_and_check_md5
# Set True to regenerate the golden MD5 .npz comparison files.
GENERATE_GOLDEN = False
# Updated VOC dataset with correct annotations - DATA_DIR
DATA_DIR_VOC = "../data/dataset/testVOC2012_2"
# COCO dataset - DATA_DIR, ANNOTATION_DIR
DATA_DIR_COCO = ["../data/dataset/testCOCO/train/", "../data/dataset/testCOCO/annotations/train.json"]
def test_random_resized_crop_with_bbox_op_c(plot_vis=False):
    """
    Prints images and bboxes side by side with and without RandomResizedCropWithBBox Op applied,
    tests with MD5 check, expected to pass
    """
    logger.info("test_random_resized_crop_with_bbox_op_c")
    # Fix the RNG seed and use a single worker so the augmented output is
    # deterministic and comparable against the stored MD5 golden file.
    original_seed = config_get_set_seed(23415)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)
    # Load dataset
    # Two identical pipelines: dataVoc1 stays unaugmented for the side-by-side
    # comparison, dataVoc2 gets the op under test applied.
    dataVoc1 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
    dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
    # Fixed scale/ratio ranges (0.5, 0.5) make the crop fully determined by the seed.
    test_op = c_vision.RandomResizedCropWithBBox((256, 512), (0.5, 0.5), (0.5, 0.5))
    # map to apply ops
    dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
                            output_columns=["image", "bbox"],
                            column_order=["image", "bbox"])
    # Compare the augmented pipeline output against the golden checksum
    # (golden file is regenerated only when GENERATE_GOLDEN is True).
    filename = "random_resized_crop_with_bbox_01_c_result.npz"
    save_and_check_md5(dataVoc2, filename, generate_golden=GENERATE_GOLDEN)
    unaugSamp, augSamp = [], []
    for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True),
                          dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        unaugSamp.append(unAug)
        augSamp.append(Aug)
    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp)
    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_resized_crop_with_bbox_op_coco_c(plot_vis=False):
    """
    Apply RandomResizedCropWithBBox to the COCO test dataset and optionally
    plot original and augmented samples (with their bboxes) side by side.
    """
    logger.info("test_random_resized_crop_with_bbox_op_coco_c")
    # One pipeline is left untouched; the other receives the op under test.
    data_plain = ds.CocoDataset(DATA_DIR_COCO[0], annotation_file=DATA_DIR_COCO[1], task="Detection",
                                decode=True, shuffle=False)
    data_augmented = ds.CocoDataset(DATA_DIR_COCO[0], annotation_file=DATA_DIR_COCO[1], task="Detection",
                                    decode=True, shuffle=False)
    crop_op = c_vision.RandomResizedCropWithBBox((512, 512), (0.5, 1), (0.5, 1))
    data_augmented = data_augmented.map(operations=[crop_op], input_columns=["image", "bbox"],
                                        output_columns=["image", "bbox"],
                                        column_order=["image", "bbox"])
    plain_samples = []
    augmented_samples = []
    plain_iter = data_plain.create_dict_iterator(num_epochs=1, output_numpy=True)
    augmented_iter = data_augmented.create_dict_iterator(num_epochs=1, output_numpy=True)
    for plain_row, augmented_row in zip(plain_iter, augmented_iter):
        plain_samples.append(plain_row)
        augmented_samples.append(augmented_row)
    if plot_vis:
        visualize_with_bounding_boxes(plain_samples, augmented_samples, "bbox")
def test_random_resized_crop_with_bbox_op_edge_c(plot_vis=False):
    """
    Prints images and bboxes side by side with and without RandomResizedCropWithBBox Op applied,
    tests on dynamically generated edge case, expected to pass
    """
    logger.info("test_random_resized_crop_with_bbox_op_edge_c")
    # Load dataset
    dataVoc1 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
    dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
    test_op = c_vision.RandomResizedCropWithBBox((256, 512), (0.5, 0.5), (0.5, 0.5))
    # maps to convert data into valid edge case data
    # Edge case: every bbox is replaced with a single full-image box
    # [0, 0, width, height]; the crop op must still produce a valid bbox.
    dataVoc1 = dataVoc1.map(
        operations=[lambda img, bboxes: (img, np.array([[0, 0, img.shape[1], img.shape[0]]]).astype(bboxes.dtype))],
        input_columns=["image", "bbox"],
        output_columns=["image", "bbox"],
        column_order=["image", "bbox"])
    # Test Op added to list of Operations here
    # Same full-image bbox is generated first, then the crop op runs on it.
    dataVoc2 = dataVoc2.map(
        operations=[lambda img, bboxes: (img, np.array([[0, 0, img.shape[1], img.shape[0]]]).astype(bboxes.dtype)),
                    test_op], input_columns=["image", "bbox"],
        output_columns=["image", "bbox"],
        column_order=["image", "bbox"])
    # Collect matching unaugmented/augmented rows for optional visualization.
    unaugSamp, augSamp = [], []
    for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True),
                          dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)):
        unaugSamp.append(unAug)
        augSamp.append(Aug)
    if plot_vis:
        visualize_with_bounding_boxes(unaugSamp, augSamp)
def test_random_resized_crop_with_bbox_op_invalid_c():
    """
    Expect a ValueError when the scale range is passed reversed (max before min).
    """
    logger.info("test_random_resized_crop_with_bbox_op_invalid_c")
    # Only the augmented pipeline is needed: construction/execution should fail.
    dataset = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
    try:
        # A scale range of (1, 0.5) is not ordered (min, max) and is invalid.
        bad_op = c_vision.RandomResizedCropWithBBox((256, 512), (1, 0.5), (0.5, 0.5))
        dataset = dataset.map(operations=[bad_op], input_columns=["image", "bbox"],
                              output_columns=["image", "bbox"],
                              column_order=["image", "bbox"])
        for _ in dataset.create_dict_iterator(num_epochs=1):
            break
    except ValueError as err:
        logger.info("Got an exception in DE: {}".format(str(err)))
        assert "Input is not within the required interval of (0 to 16777216)." in str(err)
def test_random_resized_crop_with_bbox_op_invalid2_c():
    """
    Expect a ValueError when the ratio range is passed reversed (max before min).
    """
    logger.info("test_random_resized_crop_with_bbox_op_invalid2_c")
    # Only the augmented pipeline is needed: construction/execution should fail.
    dataset = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
    try:
        # A ratio range of (1, 0.5) is not ordered (min, max) and is invalid.
        bad_op = c_vision.RandomResizedCropWithBBox((256, 512), (1, 1), (1, 0.5))
        dataset = dataset.map(operations=[bad_op], input_columns=["image", "bbox"],
                              output_columns=["image", "bbox"],
                              column_order=["image", "bbox"])
        for _ in dataset.create_dict_iterator(num_epochs=1):
            break
    except ValueError as err:
        logger.info("Got an exception in DE: {}".format(str(err)))
        assert "Input is not within the required interval of (0 to 16777216)." in str(err)
def test_random_resized_crop_with_bbox_op_bad_c():
    """
    Test RandomCropWithBBox op with invalid bounding boxes, expected to catch multiple errors.
    """
    logger.info("test_random_resized_crop_with_bbox_op_bad_c")
    test_op = c_vision.RandomResizedCropWithBBox((256, 512), (0.5, 0.5), (0.5, 0.5))
    # A fresh dataset is loaded for each case — presumably because
    # check_bad_bbox alters the bbox column before running the op;
    # confirm against the util module.
    data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
    check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WidthOverflow, "bounding boxes is out of bounds of the image")
    data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
    check_bad_bbox(data_voc2, test_op, InvalidBBoxType.HeightOverflow, "bounding boxes is out of bounds of the image")
    data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
    check_bad_bbox(data_voc2, test_op, InvalidBBoxType.NegativeXY, "min_x")
    data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
    check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WrongShape, "4 features")
if __name__ == "__main__":
    # Run every case directly (visualization disabled) when this file is
    # executed as a script rather than collected by pytest.
    test_random_resized_crop_with_bbox_op_c(plot_vis=False)
    test_random_resized_crop_with_bbox_op_coco_c(plot_vis=False)
    test_random_resized_crop_with_bbox_op_edge_c(plot_vis=False)
    test_random_resized_crop_with_bbox_op_invalid_c()
    test_random_resized_crop_with_bbox_op_invalid2_c()
    test_random_resized_crop_with_bbox_op_bad_c()
|
[
"513344092@qq.com"
] |
513344092@qq.com
|
50e30b493faf1d7f25e8435890ae0d3625daa2f5
|
d0dcc77793433e31adda34b0a9989da694b61f19
|
/caffe_visualization_tools/visualization_tools.py
|
b9d1fea0d025e807465ef4f5541ed1d1c48d94af
|
[] |
no_license
|
MagicSen/python_tools
|
0bac702a31ad5b8b742fe809e63f027891783d4c
|
296b375cb3807f8436ce6b7c8a661daeecd975c1
|
refs/heads/master
| 2021-09-02T15:57:59.758341
| 2018-01-03T14:10:17
| 2018-01-03T14:10:17
| 116,134,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,681
|
py
|
##
# @file visualization_tools.py
# @brief This tools is for caffe model visualization.
# @author Yang Sen, magicys@qq.com
# @version 1.0.0
# @date 2017-01-04
# Copyright(C)
# For free
# All right reserved
#
import numpy as np
import matplotlib.pyplot as plt
import sys
import caffe
import os
import pylab
# set the plot enviroment
plt.rcParams['figure.figsize'] = (10,10)        # larger default figure size
plt.rcParams['image.interpolation'] = 'nearest' # no smoothing between pixels
plt.rcParams['image.cmap'] = 'gray'             # grayscale colormap by default
##
# @brief vis_square function which you can preview convolution kernels and the convolution results.
#
# @param data
# @param name
#
# @return
def vis_square(data, name):
    """Take an array of shape (n, height, width) or (n, height, width, 3)
    and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)"""
    # normalize data for display
    # NOTE(review): divides by (max - min); a constant-valued input would
    # divide by zero — confirm callers never pass one.
    data = (data - data.min()) / (data.max() - data.min())
    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = (((0, n ** 2 - data.shape[0]),
                (0, 1), (0, 1))  # add some space between filters
               + ((0, 0),) * (data.ndim - 3))  # don't pad the last dimension (if there is one)
    data = np.pad(data, padding, mode='constant', constant_values=1)  # pad with ones (white)
    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    # pylab.show()
    # Draw the tiled grid into a figure titled *name*; the caller is
    # responsible for showing it (e.g. via pylab.show()).
    plt.figure(name)
    plt.imshow(data); plt.axis('off')
def lookNetDetail(net, layers_name_all=None):
    """Print shape info and visualize weights/outputs for the given layers.

    Python 2 only (uses print statements). For each named layer, 2-D params
    are plotted as a curve of the first sample's activations and 4-D params
    are tiled with vis_square. NOTE(review): a layer whose params are
    neither 2-D nor 4-D makes the function return immediately, silently
    skipping any remaining layers — confirm that is intended.
    """
    # Get all layers' name
    if layers_name_all == None:
        layers_name_all = net.params.keys()
    for layer_name in layers_name_all:
        data_shape = net.blobs[layer_name].data.shape
        filter_shape = net.params[layer_name][0].data.shape
        if len(filter_shape) == 2:
            # 2-D weights: plot the first sample's output activations.
            data = net.blobs[layer_name].data[0]
            filter_data = net.params[layer_name][0].data
            #plt.figure(layer_name)
            #plt.imshow(filter_data); plt.axis('off')
            plt.figure(layer_name + "_result")
            plt.plot(data)
            print "========================================="
            print "Layer name: " + layer_name
            print "Layer Shape: " + str(filter_shape)
            print "Data Shape: " + str(data_shape)
            print "========================================="
            pylab.show()
        elif len(filter_shape) == 4:
            # 4-D weights: tile kernels and output maps as image grids.
            maps_number = data_shape[1]
            data = net.blobs[layer_name].data[0,:maps_number]
            filter_data = net.params[layer_name][0].data
            filter_maps_number = filter_shape[0]
            filter_data = filter_data.transpose(1, 0, 2, 3)[0, :filter_maps_number]
            vis_square(filter_data, layer_name)
            vis_square(data, layer_name+ "_result")
            print "========================================="
            print "Layer name: " + layer_name
            print "Layer Shape: " + str(filter_shape)
            print "Data Shape: " + str(data_shape)
            print "========================================="
            pylab.show()
        else:
            return
##
# @brief createInputForLayer Create net input from image
#
# @param input_image_path
# @param net_type: ==0[skeleton(1, 2, 96, 96)]
# ==1[detection(1, 2, 120, 160)]
# @return
def createInputForLayer(input_image_path, net_type):
    """Load an image file and shape it into a network input blob.

    Returns None when the file does not exist. For net_type 0 the grayscale
    image is mirrored horizontally, split into left/right 96x96 halves and
    stacked into a (2, 96, 96) array; any other net_type returns the image
    exactly as loaded.
    """
    if not os.path.exists(input_image_path):
        return None
    # Create input image
    image = caffe.io.load_image(input_image_path, color=False)
    print "Input image size: " + str(image.shape)
    if net_type == 0:
        # Flip
        image = np.fliplr(image);
        # Split the mirrored image into two 96x96 halves (channel 0 only).
        # assumes the loaded image is 96x192 — TODO confirm with callers.
        image_left = image[0:,0:96,0]
        image_right = image[0:,96: , 0]
        image_all = np.zeros([2,96,96])
        image_all[0, :, :] = image_left
        image_all[1, :, :] = image_right
        return image_all
    elif net_type == 1:
        return image
    else:
        return image
if __name__ == "__main__":
    # Usage: visualization_tools.py <deploy prototxt> <caffemodel> <input image>
    if len(sys.argv) < 4:
        print "<caffe_define_prototxt> <caffe_model> <input_image_data>"
        sys.exit()
    model_def = sys.argv[1]
    model_weights = sys.argv[2]
    input_image_path = sys.argv[3]
    if os.path.exists(model_def) and os.path.exists(model_weights):
        print 'Caffe model found.'
    else:
        print 'Cound not find the caffe model.'
        sys.exit()
    # Set running environment
    caffe.set_device(0)
    caffe.set_mode_gpu()
    net = caffe.Net(model_def, model_weights, caffe.TEST)
    # Create input data
    # net_type 0: skeleton-net input of shape (2, 96, 96), see createInputForLayer.
    image = createInputForLayer(input_image_path, 0)
    # Show the input data
    for i in range(0, image.shape[0]):
        plt.figure("Image Channels " + str(i))
        plt.imshow(image[i, :]); plt.axis('off')
    pylab.show()
    # Set the net and run the nets
    net.blobs['pair_data'].data[0] = image
    net.forward()
    # Set the layer which you want to watch
    layers_name_all = ['ippal', 'fcpal']
    lookNetDetail(net, layers_name_all)
    # Example for change the layer data: "ippal"
    # Change the layer data and rerun
    plt.figure("After Changed ippal")
    conv_str = 'ippal'
    feat = net.blobs[conv_str].data[0]
    # Zero out a few hand-picked activations to observe their effect on
    # the downstream layers.
    feat[3] = 0
    feat[8] = 0
    feat[14] = 0
    feat[166] = 0
    feat[168] = 0
    plt.plot(feat.flat)
    print net.blobs[conv_str].data.shape
    pylab.show()
    # Change the net data
    net.blobs['ippal'].data[0] = feat
    # Re-run only the layers from 'fcpal' through 'prob2' with the edited blob.
    net.forward(None, 'fcpal', 'prob2')
    lookNetDetail(net, layers_name_all)
|
[
"syang@usens.com"
] |
syang@usens.com
|
305be6d509218ff7f44913a393a6d3dd1e53e492
|
9d8ab91c052ec637b396d07862225d331b3084e3
|
/find_uid.py
|
d4122bff071adcfc72a9c1d04f092405b1b5f916
|
[] |
no_license
|
yazdipour/security-assignments
|
13ed50cf2b081ad63bd11920491dc00995e2c30d
|
30b49442bc0bb5b415f200e1202e4b0f40439a57
|
refs/heads/master
| 2022-11-29T02:30:53.200737
| 2020-08-03T14:03:09
| 2020-08-03T14:03:09
| 261,142,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
import struct
import subprocess
# Python 2 script: print the real user id as hex and as a packed
# native-endian 4-byte string (a typical exploit-development helper).
# `id -ur` prints the real UID of the current user.
uid = subprocess.check_output('id -ur', shell=True)
iuid = int(uid)
xuid = hex(iuid)
print xuid ##0x3e8
# "I" = native unsigned int (4 bytes, native byte order).
buid = struct.pack("I", iuid) #'\xe8\x03\x00\x00'
print buid
|
[
"shahriar.yazdipour@outlook.com"
] |
shahriar.yazdipour@outlook.com
|
30d736d8402f7e58e7668fb6e6c5636bb1ed7d5a
|
c2fe2b45ca6c1372596834af4686e84f66ce6f0b
|
/apps/files/forms.py
|
796d381fd3e7f45cbfe5ccb264168e64033df360
|
[] |
no_license
|
seun-otosho/DjModelProject
|
9cecf18fac66ad729f0bde88cccb18d224928b06
|
75aa0d1dc465556f3afa31abce6e6f51f04e5671
|
refs/heads/master
| 2023-05-13T04:00:24.787683
| 2021-05-31T02:17:43
| 2021-05-31T02:17:43
| 369,716,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
from django import forms
from . import models


class submissionForm(forms.ModelForm):
    """ModelForm for creating/editing a `models.submission` file upload."""

    class Meta:
        model = models.submission
        # NOTE(review): object_id/content_type suggest the submission model
        # uses a generic foreign key — confirm against the model definition.
        fields = [
            "file_name",
            "file",
            "object_id",
            "content_type",
        ]
|
[
"seun@kolacredit.com"
] |
seun@kolacredit.com
|
6c6dfcc9661470d7e551972b314a494d43f7f4b6
|
01082af86cad0824cbc33b87320adea3eef6ac11
|
/classstatd/admin.py
|
7ad1845474bee7eb57607c77f98e1cd6ad0263d4
|
[] |
no_license
|
hanul500/dreamy
|
224f259665aed64db05ad773d3b1403899c59183
|
26927c771ec3735d07363696153d4e4fa23ce56b
|
refs/heads/master
| 2020-12-04T22:34:25.100751
| 2020-01-05T19:07:38
| 2020-01-05T19:07:38
| 231,923,912
| 0
| 0
| null | 2020-01-10T17:31:54
| 2020-01-05T13:47:14
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 189
|
py
|
from django.contrib import admin
# Register your models here.
# NOTE(review): wildcard import — Classstatinfo, stat_mat_rel and
# stat_tool_rel below come from classstatd.models.
from .models import *
# Expose the three class-stat models in the Django admin with default options.
admin.site.register(Classstatinfo)
admin.site.register(stat_mat_rel)
admin.site.register(stat_tool_rel)
|
[
"hanul500@naver.com"
] |
hanul500@naver.com
|
1e589a1a4cc05b5d9810004cab44e34774d06d70
|
f1bdc509d64633fde353b58bd283b9c06da6a71c
|
/plot.py
|
ae52d8c562bb832b7b9b2d11a7977af1bca1d816
|
[] |
no_license
|
Fersol/tcp-congestion-ns3
|
503b1829808814e224a150bfd9f890e1d9c5b203
|
b181311fb24ba53e9acf582b3d6f9467eb131311
|
refs/heads/master
| 2022-04-16T19:54:33.586905
| 2020-04-15T19:47:02
| 2020-04-15T19:47:02
| 255,039,380
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 781
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
import argparse
def parse_args(argv=None):
    """Parse command-line options for the plotting script.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse falls back to ``sys.argv[1:]`` — the previous
            behavior, so existing callers are unaffected. Passing an
            explicit list makes the function testable and reusable.

    Returns:
        dict: Parsed options; key ``'files'`` holds the trace files to plot
        (a tuple of defaults, or a list when given on the command line).
    """
    parser = argparse.ArgumentParser(add_help=True, description="Files to plot")
    parser.add_argument("-files", "--files", nargs="+", required=False,
                        default=('cwndVegas.tr', 'cwndNewReno.tr', 'cwndBic.tr'),
                        help="List of files to plot")
    args = parser.parse_args(argv)
    return vars(args)
if __name__ == "__main__":
    args = parse_args()
    for filename in args['files']:
        # BUG FIX: the original f-string contained no placeholder, so it
        # printed a literal "(unknown)" instead of the file being plotted.
        print(f'Start plotting {filename}')
        # Output image name is derived from the trace name (foo.tr -> foo.png).
        filesave = filename.split('.')[0] + '.png'
        # Trace files are space-separated "time value" pairs with no header row.
        df = pd.read_csv(filename, sep=" ", header=None)
        plt.figure()
        plt.plot(df[0], df[1])
        plt.savefig(filesave)
        print('End plotting')
|
[
"alex-2011.s@yandex.ru"
] |
alex-2011.s@yandex.ru
|
1a90032ae1d4bb24c7395045105044791c01642b
|
81f73066919c22cb5649a9095233d2edf92f4a1b
|
/data_process/data_process/data_process.py
|
afdb638cd5e3ec846c817eb74167fb8e5288886d
|
[] |
no_license
|
jity16/Campus_Network_Management
|
20d4ea2d2ae29f1bad9f3dfe1c85e7158ec7c371
|
bff5f8897dc7fa36e96bc963d22524a9cd2bbcde
|
refs/heads/master
| 2020-06-17T12:30:39.428441
| 2019-07-09T03:30:03
| 2019-07-09T03:30:03
| 195,925,194
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,256
|
py
|
class Host():
    """A scanned host parsed from nmap XML output."""

    def __init__(self):
        # state: up/down flag string; ip: address string; portlist: Port objects.
        # All fields start empty and are filled in while parsing the report.
        self.state = self.ip = ""
        self.portlist = []
class Port():
    """A single scanned port on a host."""

    def __init__(self):
        # id: port number string; state: open/closed string; name: service name.
        self.id = self.state = self.name = ""
import os
import pickle
from xml.dom import minidom
def get_attrvalue(node, attrname):
    """Return *attrname* from a DOM element, or '' for a missing/None node."""
    if not node:
        return ''
    return node.getAttribute(attrname)
def get_nodevalue(node, index = 0):
    """Return the text value of the *index*-th child, or '' for a None node."""
    if node:
        return node.childNodes[index].nodeValue
    return ''
def get_xmlnode(node, name):
    """Return child elements named *name*; an empty list for a None node,
    so callers can iterate the result unconditionally."""
    if not node:
        return []
    return node.getElementsByTagName(name)
# Parse an nmap XML report into Host/Port objects.
# NOTE(review): hard-coded absolute Windows path — breaks on any other machine.
os.chdir("G:/大二上ver2/计算机网络管理/课程大作业/network_manage_bigwork/network_manage_bigwork/Project/data_process")
doc = minidom.parse("nmapinfo.xml")
root = doc.documentElement
host_nodes = get_xmlnode(root, 'host')
host_list = []
for node in host_nodes:
    host = Host()
    status = get_xmlnode(node, "status")
    addr = get_xmlnode(node, "address")
    #if(addr == []):
    #continue;
    host.state = get_attrvalue(status[0], "state")
    host.ip = get_attrvalue(addr[0], "addr")
    ports = get_xmlnode(node, "ports")
    # Hosts with no <ports> element are skipped entirely (not appended).
    if(ports == []):
        continue;
    ports = get_xmlnode(ports[0], "port")
    for portnode in ports:
        port = Port()
        port.id = get_attrvalue(portnode, "portid")
        state = get_xmlnode(portnode, "state")
        service = get_xmlnode(portnode, "service")
        port.state = get_attrvalue(state[0], "state")
        port.name = get_attrvalue(service[0], "name")
        host.portlist.append(port)
    host_list.append(host)
# Bare expressions below are leftovers from interactive/notebook use;
# they have no effect when the file is run as a script.
len(host_list)
len(host_nodes)
def printport(port):
    """Print one port's id, state and service name, one per line."""
    summary = ("id = " + port.id + "\n"
               + "state = " + port.state + "\n"
               + "name = " + port.name + "\n")
    print(summary)
def printhost(host):
    """Print a host's ip and state, then each of its ports via printport."""
    summary = "ip = " + host.ip + "\n" + "state = " + host.state + "\n"
    print(summary)
    for port in host.portlist:
        printport(port)
# Show the last parsed host as a quick sanity check.
printhost(host_list[len(host_list)-1])
# Persist the parsed host list with pickle, then read it back.
# BUG FIX: the original opened both files without closing the read handle;
# context managers guarantee both handles are closed.
with open('hosts.txt', 'wb') as file:
    pickle.dump(host_list, file)
with open('hosts.txt', 'rb') as f:
    hostlist = pickle.load(f)
# Per-host port counts (kept for interactive inspection; the bare len()
# expression below is a notebook leftover with no effect).
tmp = list(map(lambda x: len(x.portlist), hostlist))
len(tmp)
# Print the port ids of the first host that reports exactly 1000 ports.
for h in hostlist:
    if(len(h.portlist) == 1000):
        print(list(map(lambda x: x.id, h.portlist)))
        break
|
[
"jity16@mails.tsinghua.edu.cn"
] |
jity16@mails.tsinghua.edu.cn
|
b083604a03c1f5064d2aff52934739fa3325be94
|
fa639f7fd14c4b860c06eb0ae5b66217bb83a585
|
/lambda/main.py
|
3acd167a5537af6bb63c5b27aabbd3810e0474c3
|
[] |
no_license
|
Lcmkey/aws-cdk-serverless-deep-learning-inference
|
c007331b25237eefbadb065571e18e6bc572c042
|
2b56c8c527669d93ef76b36347f742de138b7b58
|
refs/heads/master
| 2023-02-26T09:06:53.946495
| 2021-02-01T14:01:57
| 2021-02-01T14:01:57
| 322,429,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,244
|
py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import sys
import os
# Setting library paths.
# TensorFlow is too large for a Lambda deployment package, so its
# site-packages live on an EFS mount attached at /mnt/python.
efs_path = "/mnt/python"
python_pkg_path = os.path.join(efs_path, "tensorflow/lib/python3.8/site-packages")
sys.path.append(python_pkg_path)
import json
import string
import time
import io
import requests
# Importing TensorFlow
import tensorflow as tf
# Loading model
# Done at module import time so the model is loaded once per Lambda
# container and reused across warm invocations.
model_path = os.path.join(efs_path, 'model/')
loaded_model = tf.saved_model.load(model_path)
detector = loaded_model.signatures['default']
def lambda_handler(event, context):
    """Run object detection on the image referenced by event['url'].

    Args:
        event: dict with key 'url' pointing at a JPEG image.
        context: Lambda context object (unused).

    Returns:
        dict with statusCode 200 and a JSON body holding detection boxes,
        scores and class entity names.
    """
    # Fetch the raw JPEG bytes and decode them into an RGB tensor.
    r = requests.get(event['url'])
    img = tf.image.decode_jpeg(r.content, channels=3)
    # Executing inference.
    # Convert to float32 in [0, 1] and add a leading batch dimension.
    converted_img = tf.image.convert_image_dtype(img, tf.float32)[tf.newaxis, ...]
    start_time = time.time()
    result = detector(converted_img)
    end_time = time.time()
    # NOTE(review): start_time/end_time are measured but never reported.
    obj = {
        'detection_boxes' : result['detection_boxes'].numpy().tolist(),
        'detection_scores': result['detection_scores'].numpy().tolist(),
        'detection_class_entities': [el.decode('UTF-8') for el in result['detection_class_entities'].numpy()]
    }
    return {
        'statusCode': 200,
        'body': json.dumps(obj)
    }
|
[
"lcmkey@gmail.com"
] |
lcmkey@gmail.com
|
46951ff9e914c649aac2882ba5d1e640deded76e
|
866182bc8950cf851c412777871a4157f7bf2fff
|
/smsp/schema_utils.py
|
7f3c16ace73070a087c653aa6d254ad8d6263ee0
|
[
"MIT"
] |
permissive
|
elementechemlyn/pysmsp
|
f8170e51e28feaeb24b252cd6d442e13ce8199b4
|
a6ddea5068bfc67dbeb94d73a9290f12ec726d46
|
refs/heads/master
| 2021-09-02T10:18:56.306691
| 2018-01-01T20:45:29
| 2018-01-01T20:45:29
| 115,945,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,141
|
py
|
import sys
import re as re_
import base64
import datetime as datetime_
import warnings as warnings_
try:
from lxml import etree as etree_
except ImportError:
from xml.etree import ElementTree as etree_
# When True, generated element classes validate simpleType facets.
Validate_simpletypes_ = True
# Py2/Py3-compatible base string type used in isinstance checks below.
if sys.version_info.major == 2:
    BaseStrType_ = basestring
else:
    BaseStrType_ = str
def parsexml_(infile, parser=None, **kwargs):
    """Parse *infile* into an ElementTree document.

    When no parser is supplied, prefer lxml's comment-ignoring
    ETCompatXMLParser and fall back to the plain XMLParser when running
    on xml.etree (which does not provide it).
    """
    if parser is None:
        try:
            parser = etree_.ETCompatXMLParser()
        except AttributeError:
            parser = etree_.XMLParser()
    return etree_.parse(infile, parser=parser, **kwargs)
#
# Namespace prefix definition table (and other attributes, too)
#
# The module generatedsnamespaces, if it is importable, must contain
# a dictionary named GeneratedsNamespaceDefs. This Python dictionary
# should map element type names (strings) to XML schema namespace prefix
# definitions. The export method for any class for which there is
# a namespace prefix definition, will export that definition in the
# XML representation of that element. See the export method of
# any generated element type class for a example of the use of this
# table.
# A sample table is:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceDefs = {
# "ElementtypeA": "http://www.xxx.com/namespaceA",
# "ElementtypeB": "http://www.xxx.com/namespaceB",
# }
#
try:
from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_
except ImportError:
GenerateDSNamespaceDefs_ = {}
#
# The root super-class for element type classes
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
    from generatedssuper import GeneratedsSuper
except ImportError as exp:

    class GeneratedsSuper(object):
        """Fallback base class for generateDS-generated element classes.

        Provides the gds_format_* / gds_validate_* helpers for XML simple
        types (string, integer, float, double, boolean, date/time) plus
        small path and encoding utilities. Used only when the user has not
        supplied a custom generatedssuper module.
        """
        # Matches a trailing timezone offset such as "+05:30" or "-14:00".
        tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')

        class _FixedOffsetTZ(datetime_.tzinfo):
            """Minimal fixed-offset tzinfo (offset given in minutes)."""
            def __init__(self, offset, name):
                self.__offset = datetime_.timedelta(minutes=offset)
                self.__name = name
            def utcoffset(self, dt):
                return self.__offset
            def tzname(self, dt):
                return self.__name
            def dst(self, dt):
                return None
        def gds_format_string(self, input_data, input_name=''):
            return input_data
        def gds_validate_string(self, input_data, node=None, input_name=''):
            if not input_data:
                return ''
            else:
                return input_data
        def gds_format_base64(self, input_data, input_name=''):
            return base64.b64encode(input_data)
        def gds_validate_base64(self, input_data, node=None, input_name=''):
            return input_data
        def gds_format_integer(self, input_data, input_name=''):
            return '%d' % input_data
        def gds_validate_integer(self, input_data, node=None, input_name=''):
            return input_data
        def gds_format_integer_list(self, input_data, input_name=''):
            return '%s' % ' '.join(input_data)
        def gds_validate_integer_list(
                self, input_data, node=None, input_name=''):
            # Validate that every whitespace-separated token parses as int.
            values = input_data.split()
            for value in values:
                try:
                    int(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of integers')
            return values
        def gds_format_float(self, input_data, input_name=''):
            return ('%.15f' % input_data).rstrip('0')
        def gds_validate_float(self, input_data, node=None, input_name=''):
            return input_data
        def gds_format_float_list(self, input_data, input_name=''):
            return '%s' % ' '.join(input_data)
        def gds_validate_float_list(
                self, input_data, node=None, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of floats')
            return values
        def gds_format_double(self, input_data, input_name=''):
            return '%e' % input_data
        def gds_validate_double(self, input_data, node=None, input_name=''):
            return input_data
        def gds_format_double_list(self, input_data, input_name=''):
            return '%s' % ' '.join(input_data)
        def gds_validate_double_list(
                self, input_data, node=None, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of doubles')
            return values
        def gds_format_boolean(self, input_data, input_name=''):
            return ('%s' % input_data).lower()
        def gds_validate_boolean(self, input_data, node=None, input_name=''):
            return input_data
        def gds_format_boolean_list(self, input_data, input_name=''):
            return '%s' % ' '.join(input_data)
        def gds_validate_boolean_list(
                self, input_data, node=None, input_name=''):
            values = input_data.split()
            for value in values:
                if value not in ('true', '1', 'false', '0', ):
                    raise_parse_error(
                        node,
                        'Requires sequence of booleans '
                        '("true", "1", "false", "0")')
            return values
        def gds_validate_datetime(self, input_data, node=None, input_name=''):
            return input_data
        def gds_format_datetime(self, input_data, input_name=''):
            # Render as ISO-8601; microseconds included only when non-zero,
            # timezone appended as 'Z' or '+HH:MM'/'-HH:MM' when aware.
            if input_data.microsecond == 0:
                _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
                    input_data.year,
                    input_data.month,
                    input_data.day,
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                )
            else:
                _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
                    input_data.year,
                    input_data.month,
                    input_data.day,
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                    ('%f' % (float(input_data.microsecond) / 1000000))[2:],
                )
            if input_data.tzinfo is not None:
                tzoff = input_data.tzinfo.utcoffset(input_data)
                if tzoff is not None:
                    total_seconds = tzoff.seconds + (86400 * tzoff.days)
                    if total_seconds == 0:
                        _svalue += 'Z'
                    else:
                        if total_seconds < 0:
                            _svalue += '-'
                            total_seconds *= -1
                        else:
                            _svalue += '+'
                        hours = total_seconds // 3600
                        minutes = (total_seconds - (hours * 3600)) // 60
                        _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
            return _svalue
        @classmethod
        def gds_parse_datetime(cls, input_data):
            # Strip a trailing 'Z' or '+HH:MM'/'-HH:MM' offset into a tzinfo,
            # then parse the remainder with strptime.
            tz = None
            if input_data[-1] == 'Z':
                tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
                input_data = input_data[:-1]
            else:
                results = GeneratedsSuper.tzoff_pattern.search(input_data)
                if results is not None:
                    tzoff_parts = results.group(2).split(':')
                    tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                    if results.group(1) == '-':
                        tzoff *= -1
                    tz = GeneratedsSuper._FixedOffsetTZ(
                        tzoff, results.group(0))
                    input_data = input_data[:-6]
            time_parts = input_data.split('.')
            if len(time_parts) > 1:
                micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
                input_data = '%s.%s' % (time_parts[0], micro_seconds, )
                dt = datetime_.datetime.strptime(
                    input_data, '%Y-%m-%dT%H:%M:%S.%f')
            else:
                dt = datetime_.datetime.strptime(
                    input_data, '%Y-%m-%dT%H:%M:%S')
            dt = dt.replace(tzinfo=tz)
            return dt
        def gds_validate_date(self, input_data, node=None, input_name=''):
            return input_data
        def gds_format_date(self, input_data, input_name=''):
            _svalue = '%04d-%02d-%02d' % (
                input_data.year,
                input_data.month,
                input_data.day,
            )
            # Plain dates have no tzinfo; the AttributeError guard lets both
            # date and datetime objects pass through.
            try:
                if input_data.tzinfo is not None:
                    tzoff = input_data.tzinfo.utcoffset(input_data)
                    if tzoff is not None:
                        total_seconds = tzoff.seconds + (86400 * tzoff.days)
                        if total_seconds == 0:
                            _svalue += 'Z'
                        else:
                            if total_seconds < 0:
                                _svalue += '-'
                                total_seconds *= -1
                            else:
                                _svalue += '+'
                            hours = total_seconds // 3600
                            minutes = (total_seconds - (hours * 3600)) // 60
                            _svalue += '{0:02d}:{1:02d}'.format(
                                hours, minutes)
            except AttributeError:
                pass
            return _svalue
        @classmethod
        def gds_parse_date(cls, input_data):
            tz = None
            if input_data[-1] == 'Z':
                tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
                input_data = input_data[:-1]
            else:
                results = GeneratedsSuper.tzoff_pattern.search(input_data)
                if results is not None:
                    tzoff_parts = results.group(2).split(':')
                    tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                    if results.group(1) == '-':
                        tzoff *= -1
                    tz = GeneratedsSuper._FixedOffsetTZ(
                        tzoff, results.group(0))
                    input_data = input_data[:-6]
            dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
            dt = dt.replace(tzinfo=tz)
            return dt.date()
        def gds_validate_time(self, input_data, node=None, input_name=''):
            return input_data
        def gds_format_time(self, input_data, input_name=''):
            if input_data.microsecond == 0:
                _svalue = '%02d:%02d:%02d' % (
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                )
            else:
                _svalue = '%02d:%02d:%02d.%s' % (
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                    ('%f' % (float(input_data.microsecond) / 1000000))[2:],
                )
            if input_data.tzinfo is not None:
                tzoff = input_data.tzinfo.utcoffset(input_data)
                if tzoff is not None:
                    total_seconds = tzoff.seconds + (86400 * tzoff.days)
                    if total_seconds == 0:
                        _svalue += 'Z'
                    else:
                        if total_seconds < 0:
                            _svalue += '-'
                            total_seconds *= -1
                        else:
                            _svalue += '+'
                        hours = total_seconds // 3600
                        minutes = (total_seconds - (hours * 3600)) // 60
                        _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
            return _svalue
        def gds_validate_simple_patterns(self, patterns, target):
            # pat is a list of lists of strings/patterns. We should:
            # - AND the outer elements
            # - OR the inner elements
            found1 = True
            for patterns1 in patterns:
                found2 = False
                for patterns2 in patterns1:
                    if re_.search(patterns2, target) is not None:
                        found2 = True
                        break
                if not found2:
                    found1 = False
                    break
            return found1
        @classmethod
        def gds_parse_time(cls, input_data):
            tz = None
            if input_data[-1] == 'Z':
                tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
                input_data = input_data[:-1]
            else:
                results = GeneratedsSuper.tzoff_pattern.search(input_data)
                if results is not None:
                    tzoff_parts = results.group(2).split(':')
                    tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                    if results.group(1) == '-':
                        tzoff *= -1
                    tz = GeneratedsSuper._FixedOffsetTZ(
                        tzoff, results.group(0))
                    input_data = input_data[:-6]
            if len(input_data.split('.')) > 1:
                dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
            else:
                dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
            dt = dt.replace(tzinfo=tz)
            return dt.time()
        def gds_str_lower(self, instring):
            return instring.lower()
        def get_path_(self, node):
            # Build an XPath-like "/"-joined path from the root to *node*.
            path_list = []
            self.get_path_list_(node, path_list)
            path_list.reverse()
            path = '/'.join(path_list)
            return path
        # Strips a Clark-notation "{namespace}" prefix from a tag name.
        Tag_strip_pattern_ = re_.compile(r'\{.*\}')
        def get_path_list_(self, node, path_list):
            # NOTE(review): node.getparent() is an lxml-only API; this helper
            # fails on xml.etree elements — confirm lxml is required here.
            if node is None:
                return
            tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
            if tag:
                path_list.append(tag)
            self.get_path_list_(node.getparent(), path_list)
        def get_class_obj_(self, node, default_class=None):
            # Honor an xsi:type attribute, mapping its (possibly prefixed)
            # class name to a class defined in this module's globals.
            class_obj1 = default_class
            if 'xsi' in node.nsmap:
                classname = node.get('{%s}type' % node.nsmap['xsi'])
                if classname is not None:
                    names = classname.split(':')
                    if len(names) == 2:
                        classname = names[1]
                    class_obj2 = globals().get(classname)
                    if class_obj2 is not None:
                        class_obj1 = class_obj2
            return class_obj1
        def gds_build_any(self, node, type_name=None):
            return None
        @classmethod
        def gds_reverse_node_mapping(cls, mapping):
            # NOTE(review): dict.iteritems() is Python 2 only; under Python 3
            # this raises AttributeError — should be mapping.items().
            return dict(((v, k) for k, v in mapping.iteritems()))
        @staticmethod
        def gds_encode(instring):
            # Encode to bytes on Python 2; pass through unchanged on Python 3.
            if sys.version_info.major == 2:
                return instring.encode(ExternalEncoding)
            else:
                return instring
        @staticmethod
        def convert_unicode(instring):
            if isinstance(instring, str):
                result = quote_xml(instring)
            elif sys.version_info.major == 2 and isinstance(instring, unicode):
                result = quote_xml(instring).encode('utf8')
            else:
                result = GeneratedsSuper.gds_encode(str(instring))
            return result
        def __eq__(self, other):
            # Two instances are equal when they are of the same concrete type
            # and all their instance attributes compare equal.
            if type(self) != type(other):
                return False
            return self.__dict__ == other.__dict__
        def __ne__(self, other):
            return not self.__eq__(other)
def getSubclassFromModule_(module, class_):
    '''Get the subclass of a class from a specific module.

    Looks up an attribute named ``<ClassName>Sub`` on *module* and returns
    it, or None when the module defines no such subclass.
    '''
    # getattr with a default collapses the original hasattr/getattr pair
    # into a single lookup with identical behavior.
    return getattr(module, class_.__name__ + 'Sub', None)
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
#
# Globals
#
# Character encoding used by gds_encode when running on Python 2.
ExternalEncoding = 'utf-8'
# Splits an lxml tag into its optional '{namespace}' part and local name.
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
# Collapses runs of whitespace/newlines during string cleanup.
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
# Captures the namespace URI and local name from a '{uri}name' tag.
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
# Matches whole CDATA sections so quote_xml can leave them unescaped.
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
# Change this to redirect the generated superclass module to use a
# specific subclass module.
CurrentSubclassModule_ = None
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
    """Write four spaces of indentation per nesting *level* to *outfile*.

    No-op when pretty_print is False. Emits the whole indent with a single
    write instead of one write call per level.
    """
    if pretty_print:
        outfile.write('    ' * level)
def quote_xml(inStr):
    """Escape markup characters in *inStr*, leaving CDATA sections intact."""
    if not inStr:
        return ''
    text = inStr if isinstance(inStr, BaseStrType_) else '%s' % inStr
    pieces = []
    pos = 0
    # Escape only the stretches between CDATA sections; copy the CDATA
    # sections through verbatim.
    for mo in CDATA_pattern_.finditer(text):
        pieces.append(quote_xml_aux(text[pos:mo.start()]))
        pieces.append(text[mo.start():mo.end()])
        pos = mo.end()
    pieces.append(quote_xml_aux(text[pos:]))
    return ''.join(pieces)
def quote_xml_aux(inStr):
    """Escape the XML special characters &, < and > in *inStr*.

    Bug fix: the replacement strings had been mangled by HTML-entity
    decoding into identity replacements (e.g. replace('&', '&')), making
    the escaping a no-op; restore the predefined XML entities. '&' must be
    escaped first so it does not re-escape the other entities.
    """
    s1 = inStr.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
    """Escape *inStr* for use as an XML attribute value, quoting it.

    Bug fix: the entity replacement strings had been mangled by HTML-entity
    decoding into no-ops, and the '&quot;' literal collapsed into an
    unbalanced quote (a syntax error); restore the predefined XML entities.
    Prefers double quotes; falls back to single quotes, or escapes embedded
    double quotes when both quote characters are present.
    """
    s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
def quote_python(inStr):
    """Return *inStr* wrapped as a Python string literal.

    Uses single quotes when possible, double quotes when the text contains
    a single quote, and triple quoting for multi-line text.
    """
    text = inStr
    if "'" not in text:
        # Safe to single-quote; triple-quote if it spans lines.
        return "'%s'" % text if '\n' not in text else "'''%s'''" % text
    if '"' in text:
        text = text.replace('"', '\\"')
    return '"%s"' % text if '\n' not in text else '"""%s"""' % text
def get_all_text_(node):
    """Concatenate *node*'s own text with the tail text of every child."""
    text = node.text if node.text is not None else ''
    for child in node:
        if child.tail is not None:
            text += child.tail
    return text
def find_attr_value_(attr_name, node):
    """Look up an attribute on *node*, resolving a 'prefix:name' spelling
    into lxml's '{namespace-uri}name' form via node.nsmap.

    Returns None when the attribute is missing, the prefix is unknown, or
    the name contains more than one colon.
    """
    attrs = node.attrib
    parts = attr_name.split(':')
    if len(parts) == 1:
        return attrs.get(attr_name)
    if len(parts) == 2:
        prefix, local = parts
        uri = node.nsmap.get(prefix)
        if uri is not None:
            return attrs.get('{%s}%s' % (uri, local))
    return None
class GDSParseError(Exception):
    """Raised by raise_parse_error when generated code meets invalid XML."""
    pass
def raise_parse_error(node, msg):
    """Raise GDSParseError, annotating *msg* with the element tag and the
    source line number reported by lxml."""
    detail = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
    raise GDSParseError(detail)
class MixedContainer:
    """One item of mixed XML content: plain text, a simple typed value, or
    a nested complex element, tagged with the category/type constants below.
    """
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    TypeBase64 = 8
    def __init__(self, category, content_type, name, value):
        # category/content_type: one of the Category*/Type* constants above.
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value
    def getCategory(self):
        return self.category
    def getContenttype(self, content_type):
        # NOTE(review): the content_type argument is accepted but ignored;
        # this always returns the stored content_type.
        return self.content_type
    def getValue(self):
        return self.value
    def getName(self):
        return self.name
    def export(self, outfile, level, name, namespace,
            pretty_print=True):
        """Write this item to *outfile*, dispatching on its category."""
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:  # category == MixedContainer.CategoryComplex
            self.value.export(
                outfile, level, namespace, name,
                pretty_print=pretty_print)
    def exportSimple(self, outfile, level, name):
        """Write '<name>value</name>' formatted per this item's content_type.

        NOTE(review): TypeNone/TypeText fall through all branches and write
        nothing — presumably intentional for generated code, but confirm.
        """
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
                self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
                self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeBase64:
            outfile.write('<%s>%s</%s>' % (
                self.name,
                base64.b64encode(self.value),
                self.name))
    def to_etree(self, element):
        """Attach this item to *element*: text goes into text/tail slots,
        simple values become a subelement, complex values recurse."""
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                # Mixed-content text after a child belongs in that child's
                # tail; leading text belongs in the element's text.
                if len(element) > 0:
                    if element[-1].tail is None:
                        element[-1].tail = self.value
                    else:
                        element[-1].tail += self.value
                else:
                    if element.text is None:
                        element.text = self.value
                    else:
                        element.text += self.value
        elif self.category == MixedContainer.CategorySimple:
            subelement = etree_.SubElement(
                element, '%s' % self.name)
            subelement.text = self.to_etree_simple()
        else:  # category == MixedContainer.CategoryComplex
            self.value.to_etree(element)
    def to_etree_simple(self):
        """Return the value rendered as text per content_type.

        NOTE(review): a content_type outside the handled set (e.g. TypeNone)
        leaves ``text`` unassigned and raises UnboundLocalError — confirm
        callers never pass one.
        """
        if self.content_type == MixedContainer.TypeString:
            text = self.value
        elif (self.content_type == MixedContainer.TypeInteger or
                self.content_type == MixedContainer.TypeBoolean):
            text = '%d' % self.value
        elif (self.content_type == MixedContainer.TypeFloat or
                self.content_type == MixedContainer.TypeDecimal):
            text = '%f' % self.value
        elif self.content_type == MixedContainer.TypeDouble:
            text = '%g' % self.value
        elif self.content_type == MixedContainer.TypeBase64:
            text = '%s' % base64.b64encode(self.value)
        return text
    def exportLiteral(self, outfile, level, name):
        """Write this item as a Python constructor expression (literal export)."""
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
                    self.category, self.content_type,
                    self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
                    self.category, self.content_type,
                    self.name, self.value))
        else:  # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s",\n' % (
                    self.category, self.content_type, self.name,))
            # Complex values render themselves one indent level deeper.
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')
class MemberSpec_(object):
    """Metadata for one member of a generated class: its name, XML schema
    data type(s), whether it is a container/optional/part of a choice, and
    any child attributes. Plain getter/setter boilerplate by design.
    """
    def __init__(self, name='', data_type='', container=0,
            optional=0, child_attrs=None, choice=None):
        self.name = name
        # data_type may be a single type name or a chain (list) of names.
        self.data_type = data_type
        self.container = container
        self.child_attrs = child_attrs
        self.choice = choice
        self.optional = optional
    def set_name(self, name): self.name = name
    def get_name(self): return self.name
    def set_data_type(self, data_type): self.data_type = data_type
    def get_data_type_chain(self): return self.data_type
    def get_data_type(self):
        # For a chain, the effective type is the last entry; an empty chain
        # defaults to xs:string.
        if isinstance(self.data_type, list):
            if len(self.data_type) > 0:
                return self.data_type[-1]
            else:
                return 'xs:string'
        else:
            return self.data_type
    def set_container(self, container): self.container = container
    def get_container(self): return self.container
    def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs
    def get_child_attrs(self): return self.child_attrs
    def set_choice(self, choice): self.choice = choice
    def get_choice(self): return self.choice
    def set_optional(self, optional): self.optional = optional
    def get_optional(self): return self.optional
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
|
[
"emlyn@blue"
] |
emlyn@blue
|
b5c2d29c6ec32dea08c50833b0043f0b3d64b696
|
e8c0f3c3ef2c1476b3e2af01ba63fea3cd993787
|
/tools/google_activity_parser.py
|
2902242ccfe5702ac057657d1d7c41559b3197b4
|
[] |
no_license
|
hal2001/machine_learning
|
05ff47a2fe08fa7267a673b54c84f939c3ef565c
|
39c857fa937b3301d42624ef9b264ab04b49c3dc
|
refs/heads/master
| 2020-03-11T15:28:01.434296
| 2018-02-15T18:53:22
| 2018-02-15T18:53:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,865
|
py
|
# -*- coding: utf-8 -*-
"""
Parses "My Activity" from Google, specifically for Google Search
Author: Aaron Penne

Reads the exported MyActivity.html, rewrites it into a smaller intermediate
file with short CSS class aliases, then extracts each search/visit entry
into a tab-delimited text file:

    Index<TAB>Action<TAB>Term<TAB>Timestamp
"""

import datetime

from bs4 import BeautifulSoup

# Hard coded file names for now
my_path = "C:/tmp/"
file_in = my_path + "MyActivity.html"
file_shrunk = my_path + "MyActivity_Shrunk.html"
file_out = my_path + "MyActivity_Clean.txt"

# Create smaller intermediate file to speed up processing
with open(file_in, "r", encoding="utf8") as f_in:
    with open(file_shrunk, "w+", encoding="utf8") as f_out:
        for line in f_in:
            # Replaces large class names with simple ones, cuts file size in half and makes code more readable
            line = line.replace("\"outer-cell mdl-cell mdl-cell--12-col mdl-shadow--2dp\"", "div_A")
            line = line.replace("\"mdl-grid\"", "div_B")
            line = line.replace("\"header-cell mdl-cell mdl-cell--12-col\"", "div_C")
            line = line.replace("\"mdl-typography--title\"", "p_A")
            line = line.replace("\"content-cell mdl-cell mdl-cell--6-col mdl-typography--body-1\"", "div_D")
            line = line.replace("\"content-cell mdl-cell mdl-cell--6-col mdl-typography--body-1 mdl-typography--text-right\"", "div_E")
            line = line.replace("\"content-cell mdl-cell mdl-cell--12-col mdl-typography--caption\"", "div_F")
            # Adds line breaks between main divs
            line = line.replace("</div></div></div><div", "</div></div></div>\n<div")
            f_out.write(line)

# Open file with correct encoding
with open(file_shrunk, encoding="utf8") as f:
    soup = BeautifulSoup(f, "lxml")  # Need to have lxml installed https://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser

# Pulls out all div contents which hold the search details
all_divs = soup.find_all(class_="div_D")

with open(file_out, "w+", encoding="utf8") as f:
    # Write headers. Bug fix: the data rows contain four fields (index,
    # action, term, timestamp) but the original header only named three.
    f.write("Index\tAction\tTerm\tTimestamp\n")
    for i, div in enumerate(all_divs):
        try:
            # Strip out the 'Visited' or 'Searched for' text
            action = div.contents[0].replace(u'\xa0', u'')
            # Get the URL or search term
            term = div.contents[1].text.replace('\t', ' ')
            # Put the date and time into something excel understands
            timestamp = datetime.datetime.strptime(div.contents[-1], '%b %d, %Y, %I:%M:%S %p').strftime('%m/%d/%Y %I:%M:%S %p')
            # Write to file, tab-delimited
            f.write("{0}\t{1}\t{2}\t{3}\n".format(i, action, term, timestamp))
        except (IndexError, AttributeError, TypeError, ValueError):
            # Bug fix: a bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit; catch only the parsing failures we expect.
            # FIXME A lot of errors and skipped chunks, particularly 'Searched for hotels...'
            print("{0} Disregarding '{1}'... ".format(i, div.text))
|
[
"noreply@github.com"
] |
noreply@github.com
|
d1e9592a5372de64fff11d1be0fd0d8c24210af6
|
39add6f458ca28dcc5cf4dbe3afde986cedd8648
|
/VAE.py
|
1a1275a0108f1d04db5cfcacc6c45ff5aaa7c30f
|
[] |
no_license
|
ArifulIslamPreence/Radio-link-failure-prediction-drafted-obsolete
|
1e219919280d4160678c68b6dddd8220e81b8a70
|
5c85f76a8731530f8a7b06a774c87cbd8bb7c049
|
refs/heads/master
| 2023-06-25T17:52:25.359385
| 2021-07-12T17:38:43
| 2021-07-12T17:38:43
| 385,331,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,016
|
py
|
'''Implementation of Variational Autoencoder Network for dataset reconstructing into normalized form.
The whole combined dataset is fed into model by spliting batches'''
import csv
import numpy as np
import pandas as pd
from sklearn import preprocessing
import seaborn as sns
from sklearn.model_selection import train_test_split
import torch
import torch.nn.functional as F
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score
from sklearn.preprocessing import StandardScaler
from torch import nn, optim
import math
import matplotlib.pyplot as plt
# Load the combined radio-link dataset and hold out 30% as the test split.
df1 = pd.read_csv("output_dataset/new_combined.csv", index_col=0, low_memory=False)
train, test = train_test_split(df1, test_size=0.30, random_state=0)
features = train.columns
batch_size = 100
# NOTE(review): this interpolation happens *after* the split and the
# re-assigned df1 is never used again — train/test still hold the
# pre-interpolation values and are mean-filled below instead. Confirm intent.
df1 = df1.interpolate(method='linear', limit_direction= 'forward')
train.fillna(train.mean(),inplace = True)
test.fillna(test.mean(),inplace = True)
#Train_data
# L2-normalize each sample (row), then wrap the training split in a
# shuffled DataLoader of float32 tensors.
normalizer = preprocessing.Normalizer(norm="l2")
training = normalizer.fit_transform(train)
training = pd.DataFrame(training, columns= features)
train_tensor = torch.tensor(training.values.astype(np.float32))
train_loader = torch.utils.data.DataLoader(train_tensor, batch_size=batch_size, shuffle=True)
#Test data
testing = normalizer.fit_transform(test)
# NOTE(review): 'training' is re-bound to the *test* frame here; it looks
# unintentional but is harmless since test_X is what gets used afterwards.
training = pd.DataFrame(testing, columns= features)
test_X = pd.DataFrame(testing, columns=features)
# rlf column = radio-link-failure label used for evaluation only.
test_Y = test.rlf
#
#
#
# Model/training hyperparameters.
dimension = len(features)
lr = 1e-5
num_epochs = 100
class AutoEncoder(nn.Module):
    """Symmetric fully-connected autoencoder: dim -> dim/2 -> dim/4 -> dim/8
    and back up to dim.

    Generalization: the layer sizes are now derived from the *dim* argument;
    it defaults to the module-level ``dimension`` (the dataset feature
    count), so existing ``AutoEncoder()`` calls behave exactly as before.
    """

    def __init__(self, dim=None):
        super(AutoEncoder, self).__init__()
        if dim is None:
            dim = dimension  # module-level feature count (original behavior)
        # encoder
        self.enc1 = nn.Linear(in_features=dim, out_features=int(dim / 2))
        self.enc2 = nn.Linear(in_features=int(dim / 2), out_features=int(dim / 4))
        self.enc3 = nn.Linear(in_features=int(dim / 4), out_features=int(dim / 8))
        # decoder
        self.dec1 = nn.Linear(in_features=int(dim / 8), out_features=int(dim / 4))
        self.dec2 = nn.Linear(in_features=int(dim / 4), out_features=int(dim / 2))
        self.dec3 = nn.Linear(in_features=int(dim / 2), out_features=dim)

    def forward(self, x):
        # ReLU activations throughout, including the reconstruction layer;
        # pair with a logit-based loss such as BCEWithLogitsLoss.
        x = F.relu(self.enc1(x))
        x = F.relu(self.enc2(x))
        x = F.relu(self.enc3(x))
        x = F.relu(self.dec1(x))
        x = F.relu(self.dec2(x))
        x = F.relu(self.dec3(x))
        return x
# Use the GPU when available; tensors and modules are moved via .to(device).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = AutoEncoder()
# NOTE(review): lr is defined above (1e-5) but the literal is repeated here;
# keep them in sync or pass lr=lr.
optimizer = optim.Adam(net.parameters(), lr=1e-5)
loss_function = nn.BCEWithLogitsLoss()  # nn.BCEWithLogitsLoss() #MSELoss too
get_loss = list()
def training_ae(net, trainloader, epochs):
    """Train *net* as an autoencoder for *epochs* epochs.

    Uses the module-level ``optimizer``, ``loss_function`` and ``device``.
    Returns the list of per-epoch average training losses.

    Bug fix: the inner loop previously iterated the global ``train_loader``
    and ignored the ``trainloader`` parameter (while still dividing by
    ``len(trainloader)``); it now consistently uses the parameter.
    """
    train_loss = []
    for epoch in range(epochs):
        running_loss = 0.0
        for data in trainloader:
            input_data = data.to(device=device)
            optimizer.zero_grad()
            # output is the reconstructed input; the input itself is the
            # reconstruction target.
            output = net(input_data).to(device=device)
            loss = loss_function(output, input_data).to(device=device)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        epoch_loss = running_loss / len(trainloader)
        train_loss.append(epoch_loss)
        if epoch % 20 == 0:
            # NOTE(review): prints the global num_epochs, as before.
            print('Epoch {} of {}, Train Loss: {:.3f}'.format(
                epoch + 1, num_epochs, epoch_loss))
    return train_loss
# Train and plot the per-epoch loss curve.
get_loss = training_ae(net, train_loader, num_epochs)
_, ax = plt.subplots(1, 1, figsize=(15, 10))
plt.xlabel("epochs")
plt.ylabel("loss value ")
ax.set_title('Loss graph')
ax.plot(get_loss)
# Compute the per-sample reconstruction loss on the held-out split; a high
# loss is treated as an anomaly (link-failure) signal below.
test_loss = []
net.eval()
test_tensor = torch.tensor(test_X.values.astype(np.float32))
with torch.no_grad():
    for i in range(len(test_X)):
        input = test_tensor[i].to(device=device)
        output = net(input).to(device=device)
        loss = loss_function(output, input).to(device=device)
        test_loss.append(loss.item())
# Choose the failure threshold maximizing the weighted trade-off
# 1.5*TPR - FPR along the ROC curve (TPR weighted above FPR).
fpr, tpr, thresholds = roc_curve(y_true=test_Y.astype(int), y_score=test_loss, pos_label=1)
ranked_thresholds = sorted(list(zip(np.abs(1.5*tpr - fpr), thresholds, tpr, fpr)), key=lambda i: i[0], reverse=True)
_, failure_threshold, threshold_tpr, threshold_fpr = ranked_thresholds[0]
print(f"Selected failure Threshold: {failure_threshold}")
print("Theshold yields TPR: {:.4f}, FPR: {:.4f}".format(threshold_tpr, threshold_fpr))
auc = roc_auc_score(y_true=test_Y.astype(int), y_score=test_loss)
print("AUC: {:.4f}".format(auc))
# ROC curve with the chosen operating point marked by crosshair lines.
plt.figure(figsize=(10, 10))
plt.plot([0,1], [0,1], linestyle="--")  # plot baseline curve
plt.plot(fpr, tpr, marker=".", label="Failure Threshold:{:.6f}\nTPR: {:.4f}, FPR:{:.4f}".format(failure_threshold, threshold_tpr, threshold_fpr))
plt.axhline(y=threshold_tpr, color='darkgreen', lw=0.8, ls='--')
plt.axvline(x=threshold_fpr, color='darkgreen', lw=0.8, ls='--')
plt.title("ROC Curve")
plt.ylabel("True Positive Rate")
plt.xlabel("False Positive Rate")
plt.legend(loc="lower right")
# Classify each test sample by thresholding its reconstruction loss, then
# report the confusion matrix and per-class metrics against the rlf labels.
test_results = test_Y.to_frame().astype(bool)
test_results['loss'] = pd.Series(test_loss, index=test_results.index)
test_results['is_failed'] = test_results.loss > failure_threshold
conf_matrix = confusion_matrix(test_results.rlf, test_results.is_failed)
plt.figure()
sns.heatmap(conf_matrix, annot=True, annot_kws={"size": 16}, fmt='g')
plt.title('Failure Threshold Classification - Confusion Matrix')
print(classification_report(test_results.rlf, test_results.is_failed, target_names=["regular", "rlf"]))
|
[
"noreply@github.com"
] |
noreply@github.com
|
5adb1a7c3497fb7477cdb13de87de36dbe2cfa4e
|
531bb144a2027f0db257bf2e7166e9231bb475ec
|
/node_modules/mongoose/node_modules/mongodb/node_modules/bson/build/config.gypi
|
5ea2a9787107a20e60bdc2bbc75271e0fb9dd77d
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
Rmoore424/reciprosity
|
bd4e5bb806ce1f7000baccf92695b6cd6e02fa53
|
b264ab4a0c310066338cec66aa4661e2be6a8f9f
|
refs/heads/master
| 2021-03-12T22:12:15.738135
| 2015-02-07T21:39:30
| 2015-02-07T21:39:30
| 30,359,899
| 0
| 0
| null | 2015-02-07T21:39:30
| 2015-02-05T14:39:00
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,270
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"node_install_npm": "false",
"node_prefix": "/usr/local/Cellar/node/0.10.33_1",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/usr/local/opt/python/bin/python2.7",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/Users/jenniferleagarda/.node-gyp/0.10.33",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/bash",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"npat": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"cafile": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/jenniferleagarda/.npm-init.js",
"userconfig": "/Users/jenniferleagarda/.npmrc",
"node_version": "0.10.33",
"user": "",
"save": "true",
"editor": "vi",
"tag": "latest",
"global": "",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/jenniferleagarda/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/2.1.10 node/v0.10.33 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "1.0.0",
"umask": "18",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/var/folders/cm/tq4l9lgj35g93lmldvtlxfbc0000gn/T",
"unsafe_perm": "true",
"prefix": "/usr/local",
"link": ""
}
}
|
[
"Rmoore424@gmail.com"
] |
Rmoore424@gmail.com
|
73063d99ce9277f02182be41df4226b158411a0a
|
147e37057d507c499d9474204d8b9ec01aa048ba
|
/backend/src/blog/models.py
|
61a2677cc07905135442778699d2d7e8eb4d0d6d
|
[
"MIT"
] |
permissive
|
andebor/overflow
|
de4cc231a95cfabba48aeda7a0de20c5c0d6ccb5
|
b4860bade3587ae233422a973c6ffb8fae6031b7
|
refs/heads/master
| 2020-05-19T20:30:15.110110
| 2019-05-05T21:25:12
| 2019-05-05T21:25:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,038
|
py
|
from django.contrib.auth import get_user_model
from django.db import models
from django.utils.text import slugify
class Tag(models.Model):
    """A free-form label that can be attached to any number of posts."""
    name = models.CharField(max_length=256)
    def __str__(self):
        return '{}'.format(self.name)
class Post(models.Model):
    """A blog post, ordered newest-first by publication date.

    The slug is auto-generated from the title on first save; the author
    reference survives account deletion via SET_NULL.
    """
    title = models.CharField(max_length=256)
    description = models.CharField(max_length=1024)
    # Keep the post (author becomes NULL) when the user account is deleted.
    author = models.ForeignKey(get_user_model(), blank=True, null=True, on_delete=models.SET_NULL)
    published = models.DateTimeField()
    edited = models.DateTimeField(auto_now=True)
    content = models.TextField()
    tags = models.ManyToManyField(Tag, blank=True)
    # URL/path of an illustration image, if any.
    illustration = models.CharField(max_length=2048, blank=True, null=True)
    slug = models.SlugField(max_length=256, unique=True)
    class Meta:
        ordering = ('-published',)
    def save(self, *args, **kwargs):
        # Derive the slug from the title only when none was set explicitly.
        # NOTE(review): duplicate titles slugify to the same value and will
        # violate unique=True — confirm titles are unique in practice.
        if not self.slug:
            self.slug = slugify(self.title)
        super().save(*args, **kwargs)
    def __str__(self):
        return '{}'.format(self.title)
|
[
"myth@overflow.no"
] |
myth@overflow.no
|
479ea80a853b8988ea10248c71a04b9972d837aa
|
7f99c0d6fd03ac7388efc9713a311040989fda59
|
/Python/script_runner/test/db.py
|
fe2e2691172da8431f2d67aad803d1f27425c0cc
|
[] |
no_license
|
helver/Alliances
|
35b688513e9030751d33d9d0b17adeb3ed5f1aa8
|
4e85e3453fac3484d293208db5bf7dc44518cc14
|
refs/heads/master
| 2021-01-15T16:29:30.158420
| 2015-04-30T14:09:45
| 2015-04-30T14:09:45
| 34,855,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,447
|
py
|
import string
import twisted.internet.defer as defer
import twisted.internet.reactor as reactor
import mir.models as models
import re
import mir.identity as identity
# Base directory containing the test fixture data (Python 2 era path).
database = "/usr/lib/python2.5/site-packages/mir/script_runner/test/"
# Default job-script fixture; may be replaced via the dummy queries'
# scriptfile keyword (uses the Python 2 file() builtin).
job_script = file(database + "data/job_scripts/basic_agent_1.xml").read()
job_schedule = "now"
job_input_uri_list = "<input><uri>/workspaces/1/hosts/resources/1/</uri>\n<uri>/workspaces/1/hosts/resources/2/</uri></input>"
def build_job_xml():
    """Assemble a complete JobDefinition XML document from the module-level
    job_script, job_schedule and job_input_uri_list fragments."""
    return '%s%s\n<when>%s</when>\n%s\n</JobDefinition>' % (
        '<?xml version="1.0"?>\n<JobDefinition xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" href="/workspaces/1/jobs/234/">',
        job_script,
        job_schedule,
        job_input_uri_list,
    )
job_xml = build_job_xml()
class DummyJobQuery(object):
    """Fake Job query: select_by always returns one canned Job.

    Passing scriptfile=<name> reloads the module-level job_script/job_xml
    from that fixture file (Python 2 idioms: has_key, file()).
    """
    def __init__(self, *args, **kwargs):
        global job_script
        global job_xml
        if kwargs.has_key('scriptfile'):
            job_script = file(database + "data/job_scripts/%s" % kwargs['scriptfile']).read()
            job_xml = build_job_xml()
    def select_by(self, *args, **kwargs):
        # Always a single job in workspace 1 carrying the requested id.
        return [models.Job("ScriptRunner TestJob #1", schedule=job_schedule, input=job_input_uri_list, script=job_script, workspace_id=1, id=kwargs["id"])]
class DummyHostQuery(object):
    """Fake Host query: fabricates Host rows with loopback addresses.

    Passing scriptfile=<name> reloads the module-level job_script/job_xml
    from that fixture file, mirroring DummyJobQuery.
    """
    def __init__(self, *args, **kwargs):
        global job_script
        global job_xml
        if kwargs.has_key('scriptfile'):
            job_script = file(database + "data/job_scripts/%s" % kwargs['scriptfile']).read()
            job_xml = build_job_xml()
    def select_by(self, *args, **kwargs):
        if kwargs.has_key('href'):
            # One Host per href; the resource id doubles as the last octet
            # of the fake 127.0.0.x address.
            queryset = []
            for href in kwargs['href']:
                id = identity.identity_from_string(href).id
                address = '127.0.0.%s:22201' % id
                queryset.append(models.Host('Host ##%s' % id, workspace_id=kwargs["workspace_id"], id=id, address=address))
            return queryset
        else:
            # Single-host fallback keyed by the explicit id kwarg.
            address = '127.0.0.1:22201'
            return [models.Host('Host ##1', workspace_id=kwargs["workspace_id"], id=kwargs["id"], address=address)]
class DummySession(object):
    """Minimal stand-in for an ORM session used by the tests.

    query() dispatches to the dummy query classes; every mutating session
    method (save, update, flush, ...) is a deliberate no-op.

    Bug fix: ``save_or_update`` was defined twice with identical bodies;
    the duplicate definition has been removed (the later definition simply
    shadowed the earlier one, so behavior is unchanged).
    """
    def __init__(self, *args, **kwargs):
        self.kwargs = kwargs
    def query(self, model_type):
        # Return the matching dummy query; unknown models yield None.
        if model_type == models.Job:
            return DummyJobQuery(**self.kwargs)
        if model_type == models.Host:
            return DummyHostQuery(**self.kwargs)
    def save_or_update(self, *args, **kwargs):
        pass
    def flush(self, *args, **kwargs):
        pass
    def clear(self, *args, **kwargs):
        pass
    def save(self, *args, **kwargs):
        pass
    def update(self, *args, **kwargs):
        pass
    def refresh(self, *args, **kwargs):
        pass
class ACTestObjectDB(object):
    """
    Provides the interface to the Mir Resource Database.

    Test double backed by DummySession; defer_to_session mimics an
    asynchronous database call using a Twisted Deferred.
    """
    def __init__(self, *args, **kwargs):
        # NOTE(review): args is passed as a single positional tuple, not
        # unpacked — *args was probably intended; confirm against callers.
        self.session = DummySession(args, **kwargs)
    def close(self):
        # Nothing to release for the dummy session.
        pass
    def defer_to_session(self, fn, *args, **kwargs):
        # Run fn synchronously against the dummy session, but hand back a
        # Deferred fired on the next reactor tick so callers can chain
        # callbacks as if the call were asynchronous.
        showdef = defer.Deferred()
        fn(self, self.session, *args, **kwargs)
        reactor.callLater(0, lambda x: x.callback(""), showdef)
        return showdef
|
[
"ehelvey@gmail.com"
] |
ehelvey@gmail.com
|
09e278b839107b839c504a7ee39854da665cd9f9
|
394072f7fd3e2a226aeed78bf0a4f587f4c4e383
|
/lambdaExpr/pick_lambda.py
|
eb8c2306a372907e577c942ccd5c5b4e7827dcb3
|
[] |
no_license
|
LeonCrashCode/DRSparsing
|
ec5cca079a2c73eb512444e1ac86215722e6503a
|
c7e92beb8878ff2386bc6789e6c17f0d35bf1277
|
refs/heads/master
| 2020-03-16T09:52:11.217219
| 2019-01-17T14:20:16
| 2019-01-17T14:20:16
| 124,549,958
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 932
|
py
|
import sys
import types
import json
def ascii_encode_dict(data):
    """json object_hook (Python 2 only): re-encode every unicode key and
    value of a parsed dict as an ascii byte string."""
    ascii_encode = lambda x: x.encode('ascii') if isinstance(x, unicode) else x
    # Each pair is a (key, value) tuple; map encodes both members.
    return dict(map(ascii_encode, pair) for pair in data.items())
def tostring(expre):
    """Flatten one lambda-expression dict into the textual form
    'type( [indexs] text attrib-values )' with space-separated tokens.

    Python 2 only (types.DictType). Empty indexs/text/attrib parts are
    omitted from the output.
    """
    assert type(expre) == types.DictType
    # NOTE: local 're' is just an accumulator list, unrelated to the regex
    # module (which this file does not import).
    re = []
    re.append(expre["type"]+"(")
    if len(expre["indexs"]) != 0:
        re.append("["+" ".join(expre["indexs"])+"]")
    if expre["text"] != "":
        re.append(expre["text"])
    if len(expre["attrib"]) != 0:
        for key in expre["attrib"].keys():
            re.append(expre["attrib"][key])
    re.append(")")
    return " ".join(re)
# Records in the input file (argv[1]) are blank-line-separated blocks of
# lines; print lines 0-5 of each block whose second line's second field
# matches the requested id (argv[2]). Python 2 print statements.
L = []
for line in open(sys.argv[1]):
    line = line.strip()
    if line == "":
        if L[1].split()[1] == sys.argv[2]:
            print "\n".join(L[0:4])
            #print tostring(json.loads(L[3], object_hook=ascii_encode_dict))
            print "\n".join(L[4:6])
            #print tostring(json.loads(L[5], object_hook=ascii_encode_dict))
            print
        L = []
    else:
        L.append(line)
|
[
"jmliunlp@gmail.com"
] |
jmliunlp@gmail.com
|
1099e40b6a420049aa66c9efe1dffc09c240475a
|
a960f0f01beab623e4ce1c21382284ff409ad2cd
|
/Python/venv/Lib/site-packages/pmdarima/model_selection/_validation.py
|
cd715ad44cbb9b94ae10578b535aa5bdb5b804d5
|
[] |
no_license
|
eoeroglu/grad-proj
|
e343c0266de1768417aa8724a6440cb088f9d014
|
3cef3383e5dd0f9937c18512fe8c6dc3b00e7729
|
refs/heads/master
| 2022-11-19T11:06:42.429979
| 2020-07-20T06:25:24
| 2020-07-20T06:25:24
| 257,416,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,442
|
py
|
# -*- coding: utf-8 -*-
"""
Cross-validation for ARIMA and pipeline estimators.
See: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/_validation.py
""" # noqa: E501
import numpy as np
import numbers
import warnings
import time
from traceback import format_exception_only
from sklearn import base
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.utils import indexable
from ._split import check_cv
from .. import metrics
from ..utils import check_endog
from ..arima.warnings import ModelFitWarning
from ..compat.sklearn import safe_indexing
__all__ = [
    'cross_validate',
    'cross_val_predict',
    'cross_val_score',
]
# Accepted scoring-metric names mapped to their callables
# (signature: metric(y_true, y_pred)).
_valid_scoring = {
    'mean_absolute_error': mean_absolute_error,
    'mean_squared_error': mean_squared_error,
    'smape': metrics.smape,
}
# Accepted averaging-method names mapped to NaN-safe reducers.
_valid_averaging = {
    'mean': np.nanmean,
    'median': np.nanmedian,
}
def _check_callables(x, dct, varname):
if callable(x):
return x
if isinstance(x, str):
try:
return dct[x]
except KeyError:
valid_keys = list(dct.keys())
raise ValueError('%s can be a callable or a string in %s'
% (varname, str(valid_keys)))
raise TypeError('expected a callable or a string, but got %r (type=%s)'
% (x, type(x)))
def _check_averaging(method):
    """Resolve *method* into an averaging callable (see _valid_averaging)."""
    return _check_callables(method, _valid_averaging, "averaging")
def _check_scoring(metric):
    """Resolve *metric* into a scoring callable (see _valid_scoring)."""
    return _check_callables(metric, _valid_scoring, "metric")
def _safe_split(y, exog, train, test):
"""Performs the CV indexing given the indices"""
y_train, y_test = y.take(train), y.take(test)
if exog is None:
exog_train = exog_test = None
else:
exog_train, exog_test = \
safe_indexing(exog, train), safe_indexing(exog, test)
return y_train, y_test, exog_train, exog_test
def _fit_and_score(fold, estimator, y, exog, scorer, train, test, verbose,
                   error_score):
    """Fit the estimator on one CV split and score its forecasts.

    Returns a (test_score, fit_time, score_time) triple. When fitting
    fails, either re-raises (error_score='raise') or records *error_score*
    as the score and emits a ModelFitWarning.
    """
    msg = 'fold=%i' % fold
    if verbose > 1:
        print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))

    start_time = time.time()
    y_train, y_test, exog_train, exog_test = _safe_split(y, exog, train, test)
    try:
        estimator.fit(y_train, exogenous=exog_train)
    except Exception as e:
        fit_time = time.time() - start_time
        score_time = 0.0
        if error_score == 'raise':
            raise
        else:
            # Degrade gracefully: record the sentinel score but warn so the
            # failure is visible in the aggregated results.
            test_scores = error_score
            warnings.warn("Estimator fit failed. The score on this train-test "
                          "partition will be set to %f. Details: \n%s"
                          % (error_score,
                             format_exception_only(type(e), e)[0]),
                          ModelFitWarning)
    else:
        fit_time = time.time() - start_time
        # forecast h periods into the future and compute the score
        preds = estimator.predict(n_periods=len(test), exogenous=exog_test)
        test_scores = scorer(y_test, preds)
        score_time = time.time() - start_time - fit_time

    if verbose > 2:
        total_time = score_time + fit_time
        msg += ", score=%.3f [time=%.3f sec]" % (test_scores, total_time)
        print(msg)

    # TODO: if we ever want train scores, we'll need to change this signature
    return test_scores, fit_time, score_time
def _fit_and_predict(fold, estimator, y, exog, train, test, verbose):
    """Fit the estimator on the training split and forecast the test horizon.

    Unlike _fit_and_score this computes no score, and fit failures propagate
    (scikit-learn's cross_val_predict behaves the same way). Returns
    (predictions, test_indices).
    """
    msg = 'fold=%i' % fold
    if verbose > 1:
        print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))

    start_time = time.time()
    y_train, _, exog_train, exog_test = _safe_split(y, exog, train, test)

    # scikit doesn't handle failures on cv predict, so we won't either.
    estimator.fit(y_train, exogenous=exog_train)
    fit_time = time.time() - start_time

    # forecast h periods into the future
    start_time = time.time()
    preds = estimator.predict(n_periods=len(test), exogenous=exog_test)
    pred_time = time.time() - start_time

    if verbose > 2:
        total_time = pred_time + fit_time
        msg += " [time=%.3f sec]" % (total_time)
        print(msg)

    return preds, test
def cross_validate(estimator, y, exogenous=None, scoring=None, cv=None,
                   verbose=0, error_score=np.nan):
    """Evaluate metric(s) by cross-validation and also record fit/score times.

    Parameters
    ----------
    estimator : estimator
        An estimator object that implements the ``fit`` method

    y : array-like or iterable, shape=(n_samples,)
        The time-series array.

    exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
        An optional 2-d array of exogenous variables.

    scoring : str or callable, optional (default=None)
        The scoring metric to use. If a callable, must adhere to the signature
        ``metric(true, predicted)``. Valid string scoring metrics include:

        - 'smape'
        - 'mean_absolute_error'
        - 'mean_squared_error'

    cv : BaseTSCrossValidator or None, optional (default=None)
        An instance of cross-validation. If None, will use a RollingForecastCV

    verbose : integer, optional
        The verbosity level.

    error_score : 'raise' or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised.
        If a numeric value is given, ModelFitWarning is raised. This parameter
        does not affect the refit step, which will always raise the error.

    Returns
    -------
    ret : dict
        Mapping with keys ``'test_score'``, ``'fit_time'`` and
        ``'score_time'``, each an ndarray with one entry per CV fold.
    """
    y, exog = indexable(y, exogenous)
    y = check_endog(y, copy=False)
    cv = check_cv(cv)
    scoring = _check_scoring(scoring)
    # validate the error score
    if not (error_score == "raise" or isinstance(error_score, numbers.Number)):
        raise ValueError('error_score should be the string "raise" or a '
                         'numeric value')
    # TODO: in the future we might consider joblib for parallelizing, but it
    # .     could cause cross threads in parallelism..
    # Each fold gets a fresh clone so state never leaks between folds.
    results = [
        _fit_and_score(fold, base.clone(estimator), y, exog,
                       scorer=scoring,
                       train=train,
                       test=test,
                       verbose=verbose,
                       error_score=error_score)
        for fold, (train, test) in enumerate(cv.split(y, exog))]
    # Transpose [(score, fit_t, score_t), ...] into three parallel tuples.
    scores, fit_times, score_times = list(zip(*results))
    ret = {
        'test_score': np.array(scores),
        'fit_time': np.array(fit_times),
        'score_time': np.array(score_times),
    }
    return ret
def cross_val_predict(estimator, y, exogenous=None, cv=None, verbose=0,
                      averaging="mean"):
    """Generate cross-validated estimates for each input data point

    Parameters
    ----------
    estimator : estimator
        An estimator object that implements the ``fit`` method

    y : array-like or iterable, shape=(n_samples,)
        The time-series array.

    exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
        An optional 2-d array of exogenous variables.

    cv : BaseTSCrossValidator or None, optional (default=None)
        An instance of cross-validation. If None, will use a RollingForecastCV.
        Note that for cross-validation predictions, the CV step cannot exceed
        the CV horizon, or there will be a gap between fold predictions.

    verbose : integer, optional
        The verbosity level.

    averaging : str or callable, one of ["median", "mean"] (default="mean")
        Unlike normal CV, time series CV might have different folds (windows)
        forecasting the same time step. After all forecast windows are made,
        we build a matrix of y x n_folds, populating each fold's forecasts like
        so::

            nan nan nan  # training samples
            nan nan nan
            nan nan nan
            nan nan nan
            1   nan nan  # test samples
            4   3   nan
            3   2.5 3.5
            nan 6   5
            nan nan 4

        We then average each time step's forecasts to end up with our final
        prediction results.

    Returns
    -------
    predictions : np.ndarray
        One averaged forecast per sample that appeared in at least one test
        window (training-only samples are excluded).

    Examples
    --------
    >>> import pmdarima as pm
    >>> from pmdarima.model_selection import cross_val_predict,\
    ...     RollingForecastCV
    >>> y = pm.datasets.load_wineind()
    >>> cv = RollingForecastCV(h=14, step=12)
    >>> preds = cross_val_predict(
    ...     pm.ARIMA((1, 1, 2), seasonal_order=(0, 1, 1, 12)), y, cv=cv)
    """
    y, exog = indexable(y, exogenous)
    y = check_endog(y, copy=False)
    cv = check_cv(cv)
    avgfunc = _check_averaging(averaging)
    # need to be careful here:
    # >>> cv = RollingForecastCV(step=6, h=4)
    # >>> cv_generator = cv.split(wineind)
    # >>> next(cv_generator)
    # (array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,
    #         15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
    #         30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
    #         45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57]),
    #  array([58, 59, 60, 61]))
    # >>> next(cv_generator)
    # (array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,
    #         15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
    #         30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
    #         45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
    #         60, 61, 62, 63]),
    #  array([64, 65, 66, 67]))  <~~
    # A step larger than the horizon would leave samples no fold forecasts.
    if cv.step > cv.horizon:
        raise ValueError("CV step cannot be > CV horizon, or there will be a "
                         "gap in predictions between folds")
    # clone estimator to make sure all folds are independent
    prediction_blocks = [
        _fit_and_predict(fold, base.clone(estimator), y, exog,
                         train=train,
                         test=test,
                         verbose=verbose,)  # TODO: fit params?
        for fold, (train, test) in enumerate(cv.split(y, exog))]
    # Unlike normal CV, time series CV might have different folds (windows)
    # forecasting the same time step. In this stage, we build a matrix of
    # y x n_folds, populating each fold's forecasts like so:
    pred_matrix = np.ones((y.shape[0], len(prediction_blocks))) * np.nan
    for i, (pred_block, test_indices) in enumerate(prediction_blocks):
        pred_matrix[test_indices, i] = pred_block
    # from there, we need to apply nanmean (or some other metric) along rows
    # to agree on a forecast for a sample.
    # Rows that are all-NaN were never forecast (pure training samples).
    test_mask = ~(np.isnan(pred_matrix).all(axis=1))
    predictions = pred_matrix[test_mask]
    return avgfunc(predictions, axis=1)
def cross_val_score(estimator, y, exogenous=None, scoring=None, cv=None,
                    verbose=0, error_score=np.nan):
    """Evaluate a score by cross-validation

    Convenience wrapper around :func:`cross_validate` that returns only the
    per-fold test scores, discarding the timing information.

    Parameters
    ----------
    estimator : estimator
        An estimator object that implements the ``fit`` method

    y : array-like or iterable, shape=(n_samples,)
        The time-series array.

    exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
        An optional 2-d array of exogenous variables.

    scoring : str or callable, optional (default=None)
        The scoring metric to use. If a callable, must adhere to the signature
        ``metric(true, predicted)``. Valid string scoring metrics include:

        - 'smape'
        - 'mean_absolute_error'
        - 'mean_squared_error'

    cv : BaseTSCrossValidator or None, optional (default=None)
        An instance of cross-validation. If None, will use a RollingForecastCV

    verbose : integer, optional
        The verbosity level.

    error_score : 'raise' or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised.
        If a numeric value is given, ModelFitWarning is raised. This parameter
        does not affect the refit step, which will always raise the error.

    Returns
    -------
    test_score : np.ndarray
        One score per CV fold.
    """
    full_results = cross_validate(
        estimator=estimator,
        y=y,
        exogenous=exogenous,
        scoring=scoring,
        cv=cv,
        verbose=verbose,
        error_score=error_score,
    )
    return full_results['test_score']
|
[
"eoeroglu@gmail.com"
] |
eoeroglu@gmail.com
|
81c76479930f2493db07e52dad0cb747a679b934
|
2dc23883f2cf91176907316fbeb5e95517e9c878
|
/Experiments/main.py
|
55fe2be1326b7866e37028f38f002253398951c1
|
[
"Apache-2.0"
] |
permissive
|
nour-mubarak/eye-gaze-dataset
|
835f53542f256bbe646d6a0fa200aedeaa859878
|
82f2427541a54e3d5e20cc7850ed75c84d80988f
|
refs/heads/master
| 2023-05-03T00:49:01.117203
| 2021-05-21T03:12:04
| 2021-05-21T03:12:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,449
|
py
|
import os
import argparse
import torch
import sys
import random
import logging
import numpy as np
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from visdom import Visdom
from datetime import datetime
from torchvision import transforms
from imgaug import augmenters as iaa
from torch.utils.data import DataLoader
from models.eyegaze_model import EyegazeModel
from utils.dataset import split_dataset, EyegazeDataset, collate_fn
from utils.utils import cyclical_lr, train_teacher_network, test_eyegaze_network, load_model
plt.rcParams['figure.figsize'] = [10, 10]
logging.basicConfig(stream=sys.stdout, format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG)
logger = logging.getLogger('eyegaze')
logging.getLogger('matplotlib.font_manager').disabled = True
pil_logger = logging.getLogger('PIL').setLevel(logging.INFO)
def make_parser():
    """Build the command-line argument parser for the eye-gaze experiments.

    Returns
    -------
    argparse.ArgumentParser
        Parser covering data paths, training hyper-parameters, the
        temporal-model options and misc runtime flags. All arguments have
        defaults, so ``parse_args([])`` is always valid.

    Bug fix: ``--class_names`` previously used ``type=list``, which split a
    command-line string into individual characters (e.g. ``--class_names CHF``
    became ``['C', 'H', 'F']``). It now takes one or more whitespace-separated
    names via ``nargs='+'``, matching the style of ``--hidden_hm``. The
    default value is unchanged.
    """
    parser = argparse.ArgumentParser(description='PyTorch RNN Classifier w/ attention')
    # Data
    parser.add_argument('--data_path', type=str, default='resources/master_sheet.csv', help='Data path')
    parser.add_argument('--image_path', type=str, default='/data/MIMIC/MIMIC-IV/cxr_v2/physionet.org/files/mimic-cxr/2.0.0', help='image_path')
    parser.add_argument('--heatmaps_path', type=str, help='Heatmaps directory',
                        default='/data/MIMIC/eye_gaze/fixation_heatmaps/uncalibrated/temporal_heatmaps')
    parser.add_argument('--output_dir', type=str, default='results', help='Output directory')
    # nargs='+' collects one or more class names (was type=list, which
    # incorrectly split a single argument into characters)
    parser.add_argument('--class_names', nargs='+', type=str, default=['Normal', 'CHF', 'pneumonia'], help='Label names for classification')
    parser.add_argument('--num_workers', type=int, default=16, help='number of workers')
    parser.add_argument('--resize', type=int, default=224, help='Resizing images')
    # Training
    parser.add_argument('--batch_size', type=int, default=32, help='batch size')
    parser.add_argument('--epochs', type=int, default=10, help='number of epochs')
    parser.add_argument('--lr', type=float, default=1e-3, help='initial learning rate')
    parser.add_argument('--scheduler', default=False, action='store_true', help='[USE] scheduler')
    parser.add_argument('--step_size', type=int, default=5, help='scheduler step size')
    ## Temporal Model Specific arguments.
    parser.add_argument('--model_type', default='baseline', choices=['baseline', 'temporal'], help='model choice')
    parser.add_argument('--dropout', type=float, default=0.5, help='dropout')
    parser.add_argument('--hidden_dim', type=int, default=64, help='hidden size for image model')
    parser.add_argument('--emb_dim', type=int, default=64, help='cnn embedding size for heatmap model')
    parser.add_argument('--hidden_hm', nargs='+', type=int, default=[256, 128], help='hidden size for heatmap model')
    parser.add_argument('--num_layers_hm', type=int, default=1, help='num layers for heatmap model')
    parser.add_argument('--cell', type=str, default='lstm', choices=['lstm', 'gru'], help='LSTM or GRU for heatmap model')
    parser.add_argument('--brnn_hm', default=True, action='store_true', help='[USE] bidirectional for heatmap model')
    parser.add_argument('--attention', default=True, action='store_true', help='[USE] attention for heatmap model')
    # Misc
    parser.add_argument('--gpus', type=str, default='3', help='Which gpus to use, -1 for CPU')
    parser.add_argument('--viz', default=False, action='store_true', help='[USE] Vizdom')
    parser.add_argument('--gcam_viz', default=False, action='store_true', help='[USE] Used for displaying the GradCam results')
    parser.add_argument('--test', default=False, action='store_true', help='[USE] flag for testing only')
    parser.add_argument('--testdir', type=str, default=None, help='model to test [same as train if not set]')
    parser.add_argument('--rseed', type=int, default=42, help='Seed for reproducibility')
    return parser
def load_data(model_type, data_path, image_path, heatmaps_path, input_size, class_names, batch_size, num_workers, rseed):
    """Build train/valid/test DataLoaders for the baseline or temporal model.

    The master CSV at ``data_path`` is split into train/valid/test frames
    with ``split_dataset`` (seeded by ``rseed``). Images are resized to
    ``input_size`` and normalized with ImageNet statistics. For the
    'temporal' model type, heatmap transforms and heatmap paths are wired
    into the ``EyegazeDataset`` as well.

    Returns
    -------
    (train_dl, valid_dl, test_dl) : tuple of DataLoader
    """
    # ImageNet normalization
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    train_file, valid_file, test_file = split_dataset(data_path, random_state=rseed)
    seq = iaa.Sequential([iaa.Resize((input_size, input_size))])
    image_transform = transforms.Compose([seq.augment_image, transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
    if model_type in ['temporal']:
        # Temporal heatmaps are converted to grayscale then repeated to 3
        # channels so they match the ImageNet-normalized image input.
        heatmap_temporal_transform = transforms.Compose([transforms.Resize([input_size, input_size]),
                                                         transforms.Grayscale(num_output_channels=1),
                                                         transforms.ToTensor(), transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
                                                         transforms.Normalize(mean=mean, std=std)])
        heatmap_static_transform = transforms.Compose([transforms.Resize([input_size, input_size]), transforms.ToTensor()])
        static_heatmap_path = heatmaps_path
        train_dataset = EyegazeDataset(train_file, image_path, class_names, heatmaps_path=heatmaps_path,
                                       static_heatmap_path=static_heatmap_path,
                                       heatmap_temporal_transform=heatmap_temporal_transform,
                                       heatmap_static_transform=heatmap_static_transform,
                                       image_transform=image_transform)
        valid_dataset = EyegazeDataset(valid_file, image_path, class_names, heatmaps_path=heatmaps_path,
                                       static_heatmap_path=static_heatmap_path,
                                       heatmap_temporal_transform=heatmap_temporal_transform,
                                       heatmap_static_transform=heatmap_static_transform,
                                       image_transform=image_transform)
        test_dataset = EyegazeDataset(test_file, image_path, class_names, heatmaps_path=heatmaps_path,
                                      static_heatmap_path=static_heatmap_path,
                                      heatmap_temporal_transform=heatmap_temporal_transform,
                                      heatmap_static_transform=heatmap_static_transform,
                                      image_transform=image_transform)
        # drop_last=True for batchnorm issue: https://discuss.pytorch.org/t/error-expected-more-than-1-value-per-channel-when-training/26274
        # this did not resolve the issue for all cases
        train_dl = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers,
                              collate_fn=collate_fn, drop_last=True)
        valid_dl = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers,
                              collate_fn=collate_fn, drop_last=True)
        # NOTE(review): num_workers is hard-coded to 32 here, ignoring the
        # function's num_workers argument — confirm whether intentional.
        test_dl = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_fn, num_workers=32)
    else:
        train_dataset = EyegazeDataset(train_file, image_path, class_names, image_transform=image_transform)
        valid_dataset = EyegazeDataset(valid_file, image_path, class_names, image_transform=image_transform)
        train_dl = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=num_workers)
        valid_dl = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=num_workers)
        test_dataset = EyegazeDataset(test_file, image_path, class_names, image_transform=image_transform)
        # NOTE(review): drop_last=True on the *test* loader silently discards
        # up to batch_size-1 evaluation samples, and num_workers is again
        # hard-coded to 32 — both look unintentional; verify.
        test_dl = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, drop_last=True, num_workers=32)
    return train_dl, valid_dl, test_dl
def run_experiment(args, train_dl, valid_dl, viz, env_name, output_model_path):
    """Construct, train and checkpoint an eye-gaze classifier.

    Builds an ``EyegazeModel`` from ``args``, optionally wraps it in
    ``DataParallel`` for multi-GPU runs, then trains it with BCE-with-logits
    under a cyclical learning-rate schedule, saving checkpoints to
    ``output_model_path``. Returns the (possibly wrapped) model.
    """
    os.makedirs(args.output_dir, exist_ok=True)

    # NOTE(review): hidden_dim is fed args.emb_dim rather than
    # args.hidden_dim — looks like an oversight but kept as-is; confirm.
    image_classifier = EyegazeModel(args.model_type, len(args.class_names), dropout=args.dropout, emb_dim=args.emb_dim,
                                    hidden_dim=args.emb_dim, hidden_hm=args.hidden_hm, attention=args.attention,
                                    cell=args.cell, brnn_hm=args.brnn_hm, num_layers_hm=args.num_layers_hm).to(args.device)
    logger.info(image_classifier)

    total_params = sum(np.prod(p.size()) for p in image_classifier.parameters())
    logger.info(f'Number of parameters:{total_params}')

    # Wrap in DataParallel when more than one GPU id was requested.
    if len(args.gpus.split(',')) > 1:
        print(f"Using {len(args.gpus.split(',')) } GPUs!")
        device_ids = [int(i) for i in args.gpus.split(',')]
        image_classifier = nn.DataParallel(image_classifier, device_ids=device_ids)

    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(image_classifier.parameters(), lr=args.lr)
    clr = cyclical_lr(step_sz=args.step_size, min_lr=args.lr, max_lr=1, mode='triangular2')
    exp_lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, [clr])

    train_teacher_network(image_classifier, criterion, optimizer, exp_lr_scheduler, train_dl, valid_dl, output_model_path,
                          args.epochs, viz=viz, env_name=env_name, is_schedule=args.scheduler)
    logger.info(f'Model saved at ...{output_model_path}')
    return image_classifier
if __name__ == '__main__':
    args = make_parser().parse_args()
    # Seed every RNG source for reproducibility (python, numpy, torch, CUDA)
    random.seed(args.rseed)
    np.random.seed(args.rseed)
    torch.manual_seed(args.rseed)
    cuda = torch.cuda.is_available() and args.gpus != '-1'
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    if cuda:
        torch.cuda.manual_seed(args.rseed)
        torch.cuda.manual_seed_all(args.rseed)
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
        # NOTE(review): "cuda:" + args.gpus only works for a single GPU id;
        # a comma-separated multi-GPU string would make this call fail.
        torch.cuda.set_device("cuda:"+ args.gpus)
    args.device = torch.device("cuda:"+ args.gpus) if cuda else torch.device('cpu')
    logger.info(torch.cuda.get_device_name(args.device))
    # Create saving dir, all useful variables
    # The run directory name encodes every hyper-parameter not excluded below
    # plus a timestamp, e.g. "baseline_batch_size32_..._2021-05-20...".
    comment_variable = ''
    timestamp = str(datetime.now()).replace(" ", "").split('.')[0]
    for arg in vars(args):
        if arg not in ['data_path', 'heatmaps_path', 'image_path', 'class_names', 'gpus', 'viz', 'device', 'alpha', 'omega',
                       'lambda1', 'test', 'testdir', 'output_dir', 'model_teacher', 'num_workers', 'rseed', 'pretrained']:
            comment_variable += f'{arg}{str(getattr(args, arg)).replace(" ", "")}_' \
                if arg != 'model_type' else f'{str(getattr(args, arg))}_'
    comment_variable += f'{timestamp}'
    output_model_path = os.path.join(args.output_dir, comment_variable)
    logger.info("[Arguments]: %r", args)
    train_dl, valid_dl, test_dl = load_data(args.model_type, args.data_path, args.image_path, args.heatmaps_path,
                                            args.resize, args.class_names, args.batch_size, args.num_workers, args.rseed)
    if not args.test: #training
        viz = Visdom(env='EyeGaze', port=8097) if args.viz else None
        env_name = 'EyeGaze' if args.viz else None
        run_experiment(args, train_dl, valid_dl, viz, env_name=env_name, output_model_path=output_model_path)
    # Evaluation: score every per-epoch checkpoint on the test set and keep
    # the name of the one with the best mean AUC.
    logger.info('---- NOW TESTING SET --- ')
    model_dir = args.testdir if args.testdir else output_model_path
    best_mean_auc = 0.0
    best_model_name = ''
    for i in range(0, args.epochs, 1):
        model_name = f'Epoch_{i}.pth'
        model = EyegazeModel(args.model_type, len(args.class_names), dropout=args.dropout,
                             emb_dim=args.emb_dim,
                             hidden_dim=args.emb_dim, hidden_hm=args.hidden_hm,
                             attention=args.attention,
                             cell=args.cell, brnn_hm=args.brnn_hm, num_layers_hm=args.num_layers_hm).to(args.device)
        if len(args.gpus.split(',')) > 1:
            print(f"Using {len(args.gpus.split(',')) } GPUs!")
            # NOTE(review): the comprehension variable shadows the loop's `i`
            # (harmless in py3 since comprehensions scope their own variable).
            device_ids = [int(i) for i in args.gpus.split(',')]
            model = nn.DataParallel(model, device_ids=device_ids)
        model = load_model(model_name, model_dir, model).to(args.device)
        model_auc = test_eyegaze_network(model, test_dl, args.class_names, model_dir, model_name)
        if model_auc >= best_mean_auc:
            best_model_name = model_name
            best_mean_auc = model_auc
    logger.info(f"Best AUC:{best_mean_auc} from model with name: {best_model_name}")
|
[
"Ismini.Lourentzou@ibm.com"
] |
Ismini.Lourentzou@ibm.com
|
29e38aed9818dbd98fd36c0f4dcf2c39bf1a9e2d
|
4261f5ed5e3401ae9bc8ad09149d6d3529afbefb
|
/models.py
|
5b53597c87c0f81ed624d13c39cdcf1330c0589c
|
[] |
no_license
|
lrivallain/survey
|
165c928d4f830fbda161eee40644f8385d2b0c23
|
798a3732cda854b22ae73e3ecf85c736ea2bede0
|
refs/heads/master
| 2021-01-10T07:38:04.692743
| 2019-05-19T07:14:47
| 2019-05-19T07:14:47
| 36,890,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,619
|
py
|
import random
import secrets
import string
from datetime import datetime, timedelta

from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
# Tocken settings
TOKEN_LENGTH=20
TOKEN_REF_SETS=string.ascii_letters + string.digits
# Generate a random token with TOKEN_LENGTH characters from TOKEN_REF_SETS
def _createToken():
    """Generate a unique random token of TOKEN_LENGTH chars from TOKEN_REF_SETS.

    Loops until the candidate token does not collide with an existing
    ``Question`` row. Security fix: tokens act as unguessable capability
    identifiers (primary key / URL), so they are now drawn with
    ``secrets.choice`` (CSPRNG) instead of the predictable ``random`` module.
    ``QuerySet.exists()`` replaces queryset truthiness for a cheaper
    uniqueness check.
    """
    while True:
        # build a random hash
        token = ''.join(secrets.choice(TOKEN_REF_SETS) for _ in range(TOKEN_LENGTH))
        # test if token is already taken by an existing Question
        if not Question.objects.filter(token=token).exists():
            return token
def _defaultAnswerDelta():
    """Default answer publication moment: two days from now.

    Bug fix: uses ``django.utils.timezone.now()`` so the stored value is
    timezone-aware, matching the ``timezone.now()`` comparison performed in
    ``Question.is_answer_published`` (the previous naive ``datetime.now()``
    triggers naive/aware comparison problems when ``USE_TZ`` is enabled).
    """
    return timezone.now() + timedelta(days=2)
class Question(models.Model):
    """A question posed by an author, identified by a random token.

    The token is both the primary key and the URL slug. Other users submit
    answers; the author's own answer is withheld from publication until
    ``answer_date`` passes.
    """
    # Random identifier doubling as primary key and URL component.
    token = models.CharField(max_length=TOKEN_LENGTH, default=_createToken, primary_key=True, editable=False)
    text = models.TextField()
    # on_delete=CASCADE matches the implicit pre-Django-2.0 default and is a
    # required explicit argument from Django 2.0 onward.
    author = models.ForeignKey(User, on_delete=models.CASCADE, editable=False, null=True, blank=True)
    pub_date = models.DateTimeField('date published', editable=False, auto_now_add=True)
    answer_date = models.DateTimeField('answer publication date', default=_defaultAnswerDelta)

    def __str__(self):
        return self.token

    def get_all_answers(self):
        """Return all answers to this question, excluding the author's own."""
        return Answer.objects.filter(question=self).exclude(author=self.author)

    def get_author_answer(self):
        """Return the author's own answer (Answer.DoesNotExist if missing)."""
        return Answer.objects.get(question=self, author=self.author)

    def get_absolute_url(self):
        """Return the absolute URL for this question."""
        return '/' + self.token + '/'

    def already_answered(self):
        """Return the list of users that already answered the question."""
        return [answer.author for answer in self.get_all_answers()]

    def is_answer_published(self):
        """Return True once the answer publication date has been reached."""
        return self.answer_date <= timezone.now()

    def author_answer(self):
        # NOTE(review): dead no-op kept for interface compatibility; it has
        # no body and always returns None. Candidate for removal.
        return
class Answer(models.Model):
    """A single user's answer to a :class:`Question`."""
    # on_delete=CASCADE matches the implicit pre-Django-2.0 default and is a
    # required explicit argument from Django 2.0 onward.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    text = models.TextField()
    author = models.ForeignKey(User, on_delete=models.CASCADE, editable=False, null=True, blank=True)
    pub_date = models.DateTimeField('date published', auto_now=True)

    def __str__(self):
        return self.question.token + ' @' + self.author.username
|
[
"ludovic.rivallain@gmail.com"
] |
ludovic.rivallain@gmail.com
|
a7ebc45ec08fa90f7be4d3ab92e042f19fa0326a
|
cd74536e8403400675e8b54fa64ee6466400370f
|
/bot.py
|
6b6baa487ed1d5d668d9476a1b22feac1766868a
|
[
"MIT"
] |
permissive
|
volskaya/norman
|
1feff41d91ba64de714f0a37e041d4bba8b0c515
|
dc414b0b661cce3b8c9fd06c50c52c5c7f6cf0b9
|
refs/heads/master
| 2022-02-21T00:36:01.611059
| 2019-09-19T13:40:45
| 2019-09-19T13:40:45
| 141,054,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,349
|
py
|
#!/usr/bin/env python
""" Copyright (C) 2018 github.com/volskaya
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# pylint: disable=redefined-builtin, import-error, too-many-instance-attributes, too-many-arguments, too-many-return-statements
import discord
from utils import make_print, is_id
def print(*args, **kwargs):
    """Module-level shadow of the builtin ``print``.

    Delegates to ``make_print`` so every message emitted from this module is
    prefixed with 'Bot'. Accepts the same arguments as builtin ``print``.
    """
    return make_print('Bot', *args, **kwargs)
class Bot:
"""Client object for discord.Client"""
    def __init__(self, client, store, config, embed, permissions, args):
        """Store collaborators (discord client, key store, config, embed
        factory, permission helper, CLI args) and initialise runtime state."""
        self.client = client
        self.store = store
        self.config = config
        self.embed = embed
        self.permissions = permissions
        self.args = args
        # Holds ID's to users, the bot currently awaits a key from
        # Used to prevent multiple requests, during @on_member_updated
        self.pending_users = []
        # If initial sweep has no permissions, queue up one, incase
        # the bot ever receives those permissions
        self.sweep_bans_needed = False
def currently_validating(self, user_id):
"""Returns true, if bot is awaiting a key from the user ID"""
try:
self.pending_users.index(user_id)
return True
except ValueError:
return False
async def set_defaults(self):
"""Currently only sets default username"""
if self.client.user.name != self.config.name:
print(f'Changing bots old name - {self.client.user.name} '
+ f'to {self.config.name} and uploading a new avatar')
try:
await self.client.edit_profile(username=self.config.name)
await self.client.edit_profile(avatar=self.config.avatar)
except discord.errors.HTTPException:
print('Bots user update failed, skipping…')
async def kick(self, member):
"""Shortcut for kicking, with error check
Won't kick owner, specified in the config file
If the bot has permissions to kick, assume it was
intended to do so
"""
if not self.permissions.can_kick or not self.args.kick:
return
try:
if member.id != self.config.owner_id \
and member != self.config.server.owner:
await self.client.kick(member)
except discord.Forbidden:
print(f"Bot didn't have permissions to kick {member}")
except AttributeError:
pass # Couldn't find the user
async def move(self, member, to_channel):
"""Shortcut for moving members to channels
If the bot has permissiosn to move, assume it was
intended to do so
"""
if not self.permissions.can_move:
return
try:
self.client.move_member(member, to_channel)
except discord.Forbidden:
print(f"Bot didn't have permissions to move {member}")
async def add_role(self, member, role):
"""Adds the accepted role
If the bot has permissions to change roles,
assume it was intended to do so
"""
if not self.permissions.can_manage_roles:
print("Bot didn't have enough permissions to manage roles")
return
if not role:
print(f'Adding a role to {member} failed! Reference missing')
try:
await self.client.add_roles(member, role)
except discord.Forbidden:
print(f"Bot didn't have permissions to add a role to {member}")
async def remove_role(self, member, role):
"""Removes the accepted role
If the bot has permissions to change roles,
assume it was intended to do so
"""
if not self.permissions.can_manage_roles:
print("Bot didn't have enough permissions to manage roles")
return
if not role:
print(f'Removing a role from {member} failed! Reference missing')
try:
await self.client.remove_roles(member, role)
except AttributeError:
print(f'{member} had no role to remove')
except discord.Forbidden:
print(f"No permissions, to remove a role from {member}")
async def validate_key(self, member, response):
"""Shortcut for validating a key"""
if self.store.does_key_match(member, response):
return True
return False
    async def is_user_approved(self, message, member):
        """Return True if ``member`` is already approved in the store.

        Side effect: when the member *is* approved, logs the attempt and
        sends an "already approved" embed (with the stored key) back to
        ``message.author``. Returns False both when the member exists but is
        unapproved and when the store has no entry at all (KeyError).
        """
        try:
            user = self.store.get_user(member)
            if user['approved']:
                print(f'{message.author} tried approved an '
                      + f'already approved member {member}')
                await self.embed.nm_already_approved(
                    message.author, member, user['key'])
                return True
            return False
        except KeyError:
            # No store entry for this member — treat as not approved.
            return False
async def get_user(self, name, channel=None):
"""Finds the user by its name
If no channel specified, iterates over all users, the bot can see
"""
if isinstance(name, (discord.User, discord.Member)):
return name # Don't do any lookup, if its already a member
if isinstance(channel, discord.Channel): # Find in channel
member = discord.utils.find(
lambda m: str(m) == name, channel.server.members)
return member
# Else find from everyone the bot can see
for member in self.client.get_all_members(): # FIXME: Use discord.find
if str(member) == name:
return member
return None
async def get_user_by_id(self, user_id, channel=None):
"""Finds the user by its ID
If no channel specified, iterates over all users, the bot can see
"""
if isinstance(channel, discord.Channel): # Find in channel
member = discord.utils.find(
lambda m: m.id == user_id, channel.server.members)
return member
# Else find from everyone the bot can see
for member in self.client.get_all_members(): # FIXME: Use discord.find
if member.id == user_id:
return member
return None
# FIXME: Too many returns
async def add_user(self, message, approve):
"""Shortcut for adding/approving a user"""
target = message.content.split(' ')[1] # 0 == command string
channel = message.channel
async def print_error():
"""Nested shortcut for add_user() error"""
print(f'{message.author} tried approving {target}, '
+ 'but the user was not found')
await self.embed.nm_not_found(message.author, target)
# When the user is not connected to the server, it can only be added
# with its ID, since his name is outside of bots field of view
# If using an ID, means the member is not in scope, so just create an
# invalidated DB entry, which gets finished, the next time this ID
# connects to the server
# FIXME: First check, if the user is not in the server
if is_id(target):
print(f'Adding a user by ID, {target}')
if await self.is_user_approved(message, target):
return # Function will send a warning message
entry = await self.store.add_user(target, approve)
await self.embed.nm_add(
message.author, '<unknown>', entry['key'], approve)
return None
# If the bot received !approve in a private message, channel won't
# have a server, so look trough all the servers the bot is connected to
elif isinstance(channel, discord.PrivateChannel):
# Iterates over all the channels the bot is connected to
for member in self.client.get_all_members():
if str(member) == target:
if await self.is_user_approved(message, member):
return # Function will send a warning message
if approve:
await self.add_role(
member, self.config.roles['approved']['ref'])
# Send as a private message
entry = await self.store.add_user(member, approve)
await self.embed.nm_add(
message.author, member, entry['key'], approve)
return member
await print_error()
return None
elif isinstance(channel, discord.Channel):
# Only iterates over the message channel
member = discord.utils.find(
lambda m: str(m) == target, message.channel.server.members)
if not member:
await print_error()
return
if await self.is_user_approved(message, member):
return member # Function will send a warning message
# Approval happens here
entry = await self.store.add_user(member, approve)
key = entry['key']
# Send to the channel
print(f'{message.author} approved {target}, key - {key}')
await self.embed.nm_add(message.author, member, key, approve)
return member
else:
print(f'{message.author} used !approve in an unsupported channel')
return None
    async def remove_user(self, message, target):
        """Lookup user in the database and delete him

        ``target`` may be a raw user ID or a ``Name#discriminator`` string.
        Resolution order: by ID; then by live member lookup; then a fallback
        DB name lookup for members outside the bot's view. On success the
        member is also kicked and stripped of the 'Approved' role.
        Also dispatches messages to message.channel and target.
        """
        if is_id(target):
            # ID path: operate purely on the store, no member object needed.
            try:
                print(f'Removing user by ID - {target}')
                name = self.store.get_user(target)['name']
                await self.store.remove_user_by_id(target)
                await self.embed.nm_deleted(message.channel, name)
            except KeyError:
                print(f"Couldn't find a user with id {target} within the DB")
                await self.embed.nm_does_not_exist(message.channel, target)
            return  # Job done, return early
        # Looks up members only within the bots field of view
        member = await self.get_user(target)
        if not member:
            print(f"bot.remove_user() couldn't get user {target}")
            try:
                print("Falling back to DB lookup")
                user_id = self.store.get_user_id(target)
                name = self.store.get_user(user_id)['name']
                await self.store.remove_user_by_id(user_id)
                await self.embed.nm_deleted(message.channel, name)
            except KeyError:
                print("DB Lookup failed too")
                await self.embed.nm_does_not_exist(message.channel, target)
            return
        # Assume the bot was intended to remove the role and kick
        await self.kick(member)
        # Will have to lookup the username in the database and get
        # the key from there
        print(f'{message.author} deleting {target} from the database')
        success = await self.store.remove_user_by_id(member.id)
        if success:
            print(f'{message.author} deleted {target} from the store')
            print(f'Removing "Approved" role from {target}')
            await self.remove_role(
                member, self.config.roles['approved']['ref'])
            await self.embed.nm_you_were_deleted(member)
            await self.embed.nm_deleted(message.channel, member)
        else:
            # Store had no entry — the member was kicked above regardless.
            print(f'{message.author} tried to delete {target} from the store')
            await self.embed.nm_does_not_exist(message.channel, member)
async def remove_user_silent(self, target):
"""Lookup user in the database and delete him"""
print(f'Attempting to delete {target} from the database')
member = await self.get_user(target)
success = await self.store.remove_user(member)
if success:
print(f' {target} successfully deleted')
await self.remove_role(
member, self.config.roles['approved']['ref'])
return True
return False
async def sweep_server(self):
    """Sweep the whole server and remove members who are not approved.

    Meant to clean up users that slipped in while the bot was offline.
    Members holding a pending key are asked to validate it instead of
    being removed.  Finishes by checking the ban list via sweep_bans().
    """
    count = 0
    print('Performing a sweep, to kick unapproved users')
    # list(), so the generator wouldn't change when someone is kicked
    for member in list(self.config.server.members):
        if not self.store.is_approved(member) and not member.bot:
            # If the member has a pending key, ask to validate it instead
            if self.store.get_user_key(member):
                print(f'{member} has a pending key, validating it instead')
                # NOTE(review): request_key awaits the member's reply and
                # may block the sweep when no timeout is configured.
                await self.request_key(member)
                # BUGFIX: was `return`, which aborted the entire sweep
                # (and skipped sweep_bans) at the first pending-key member.
                continue
            print(f'Attempting to kick {member.name}')
            await self.embed.nm_not_approved(member)
            # NOTE: Removing a role + kicking
            # == on_member_update infinite loop
            if self.args.kick:
                await self.kick(member)
            else:
                await self.remove_role(
                    member, self.config.roles['approved']['ref'])
            count += 1
    print(f'Sweep complete, {count} members removed')
    print('Also checking banned members')
    await self.sweep_bans()
async def sweep_bans(self):
    """Deletes members, with valid keys, from DB, if they're banned.

    Requires the bot to have permission to read the server ban list;
    when that is missing, a retry flag is set and the sweep is skipped.
    """
    count = 0
    bans = []
    try:
        bans = await self.client.get_bans(self.config.server)
    except discord.Forbidden:
        # Remember that a ban sweep is still owed so it can be retried
        # once the bot gains the permission.
        self.sweep_bans_needed = True
        print("Bot didn't have permissions to access "
              + f"{self.config.server.name} ban list")
        return  # Return early
    for member in bans:
        # Only clean up entries that are still marked approved in the DB.
        if self.store.is_approved(member) and not member.bot:
            print(f'Attempting to kick {member.name}')
            # NOTE(review): the member is already banned — confirm whether
            # this kick call is actually needed or a leftover.
            await self.kick(member)
            count += 1
            success = await self.remove_user_silent(member)
            if success:
                print(f'Deleted {member} from the database')
    print(f'Ban sweep complete, {count} members removed')
async def request_key(self, member):
    """Sends a message to a user, awaiting a key, else kick.

    Flow: look the member up in the store; refresh an invalidated entry;
    if not yet approved, DM them and wait (optionally with a timeout) for
    a message whose content equals their stored key.  A correct key
    grants the 'Approved' role; a wrong/absent reply kicks, unless a
    moderator approved them in the meantime.  Unknown members are kicked.
    """
    timeout = None  # None means no timeout
    if self.args.timeout_time > 0:
        timeout = self.args.timeout_time
    try:
        user = self.store.get_user(member)
        if not user['valid']:
            print(f'Updating an invalidated DB entry, ID - {member}')
            self.store.update_user_entry(member)
        if not user['approved']:
            # Guard against re-entrancy: a validation may already be
            # in flight for this member.
            if self.currently_validating(member.id):
                return  # @on_member_update can cause redundant calls
            key = user['key']
            # self.currently_validating[member.id] = True
            self.pending_users.append(member.id)
            # First arg refers to channel
            await self.embed.nm_request_key(member, member)
            print(f'Waiting for a key from {member}, ({key})')
            # Resolves only when the member sends exactly `key`
            # (or None on timeout).
            response = await self.client.wait_for_message(
                timeout=timeout, author=member, content=key)
            self.pending_users.remove(member.id)
            if not response:
                print(f'Response from {member} was wrong')
                await self.embed.nm_decline_key(member)
                # Check if someone approved member, inbetween the request
                if not self.store.is_approved(member):
                    await self.kick(member)
                return  # Return early
            print(f'Received response: {response.content}')
            status = await self.validate_key(member, response.content)
            if status:
                print(f'{member} responded with a correct key')
                await self.embed.nm_accept_key(member, response.content)
                await self.store.add_user(member, True)  # Save approval
                # Assume the bot was intended to give the role and move
                await self.add_role(
                    member, self.config.roles['approved']['ref'])
                await self.move(member, self.config.server.default_channel)
            else:
                print(f'{member} response key was invalid')
                await self.embed.nm_decline_key(member)
                # Check if someone approved member, inbetween the request
                if not self.store.is_approved(member):
                    await self.kick(member)
        else:
            # Already approved: just repair a possibly missing role.
            print(f'{member} member connecting to the server, '
                  + 'ensuring the user still has the "Approved" role')
            if not self.config.roles['approved']['ref'] in member.roles:
                print(f'"Approved" role missing for {member}, fixing…')
                await self.add_role(
                    member, self.config.roles['approved']['ref'])
    except KeyError:
        # get_user raised: member has no DB entry at all.
        print(f'{member} tried joining, without a key. Kicking…')
        await self.embed.nm_unregistered_user(member)
        await self.kick(member)
def has_role(self, member, key):
    """Return the role object if *member* carries the role configured
    under *key*, else None.

    Dynamic role configs are matched by role name, static ones by ID.
    """
    spec = self.config.roles[key]
    for role in member.roles:
        # Dynamic role ID is a name
        if spec['dynamic'] and role.name == spec['id']:
            return role
        elif role.id == spec['id']:
            return role
    return None
def has_bot_role(self, member):
    """Return the configured 'bot' role if *member* has it, else None."""
    return self.has_role(member, 'bot')
def has_approved_role(self, member):
    """Return the configured 'approved' role if *member* has it, else None."""
    return self.has_role(member, 'approved')
|
[
"roolaav@gmail.com"
] |
roolaav@gmail.com
|
67496ec447aca5bf4189dab19f0983fcd1386567
|
ed410a1af46f1c52a7803eb23c64b396b9eb952b
|
/google-gcp/automl_sample_code/automl-detect.py
|
f945c0cc3ad01bcf6b2af444309bdb06602b9926
|
[] |
no_license
|
vaibhavpatil123/reference-architecture
|
0afbe2a0635b77d9d0ca60d8afaf2d83e3079016
|
2d9dfb57f2bb87bef297b6bbcac6f3184f26843b
|
refs/heads/master
| 2022-03-04T23:07:19.741351
| 2019-10-28T14:06:00
| 2019-10-28T14:06:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,359
|
py
|
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Because AutoML API is not public yet, the default ADC (application default
# credential) provided by cloud SDK won't give you the right permission.
# That is why a service account credential is needed in order to make
# ADC work.
# Follow the https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/docs/core/auth.rst#setting-up-a-service-account
# to download a JSON key file for your service account. Then make sure that
# you enable the "Cloud AutoML API" in API Manager of pantheon.
# Make sure your service account has the `AutoML Viewer` and `AutoML Predictor`
# IAM permissions.
# On your dev machine, run `export GOOGLE_APPLICATION_DEFAULT_CREDENTIAL=
# {PATH TO THE DOWNLOADED JSON KEY FILE}.
#
# Note: one need to join `cloud-automl-trusted-testers@googlegroups.com` group
# in order to enable "Cloud AutoML API" in pantheon.
"""This application demonstrates how to perform basic operations with the
Google AutoML Vision API.
Example Usage:
python automl_detect.py create_dataset "my dataset name"
python automl_detect.py create_model 7174207385752752219
python automl_detect.py delete_dataset 7174207385752752219
python automl_detect.py delete_model 7174207385752752219
python automl_detect.py list_datasets
python automl_detect.py list_models 7174207385752752219
python automl_detect.py list_model_evaluations 7174207385752752219
python automl_detect.py get_model_evaluation 70585533885 9062584215341814548
python automl_detect.py predict 7174207385752752219
python automl_detect.py get_model 70585533885 "gs://cloud-test-vcm/img/image_test.jpg"
python automl_detect.py import "gs://cloud-test-vcm/csv/all_data.csv"
For more information, the documentation at
https://cloud.google.com/vision/automl/docs.
"""
import os
import time
import sys
import argparse
from google.cloud import automl_v1alpha1
from google.cloud.automl_v1alpha1.proto import service_pb2
from google.cloud.automl_v1alpha1.gapic import enums
def callback(operation_future):
    """Completion callback: block until the long-running operation finishes."""
    operation_future.result()
def automl_create_dataset(dataset_name):
    """Create a multi-label image-classification dataset.

    :param dataset_name: display name for the new dataset.
    :return: full resource name of the created dataset.
    """
    dataset_spec = {
        "classification_type": enums.ClassificationType.MULTILABEL
    }
    my_dataset = {
        "display_name": dataset_name,
        "image_classification_dataset_spec": dataset_spec
    }
    response = client.create_dataset(parent, my_dataset)
    # BUGFIX: print() does not interpolate "{}"; use an f-string so the
    # response is actually rendered into the message.
    print(f"\nDataset creation: {response}")
    # Previously assigned to an unused local; return it so callers can
    # use the new dataset id (backward compatible — was returning None).
    return response.name
def automl_list_datasets():
    """List every dataset under the configured project/location."""
    listing = client.list_datasets(parent, '')
    print("\nList of datasets:")
    for dataset in listing:
        print(dataset)
def automl_import(path, dataset_full_id):
    """Import labeled images from a GCS CSV into an existing dataset.

    :param path: GCS URI of the input CSV.
    :param dataset_full_id: full resource name of the target dataset.
    """
    input_uris = [path]
    operation = client.import_dataset(dataset_full_id, input_uris)
    print('\nProcessing import')
    result = operation.result()  # blocks until the long-running op completes
    # BUGFIX: print() does not interpolate "{}"; use an f-string.
    print(f"\nImages imported: {result}")
def automl_create_model(dataset_id, model_name):
    """Start training a BASIC_MODEL on the given dataset and wait for it.

    :param dataset_id: short id of the dataset to train on.
    :param model_name: display name for the new model.
    """
    my_model = {
        "display_name": model_name,
        "dataset_id": dataset_id,
        "image_classification_model_spec": {"model_type": "BASIC_MODEL"}
    }
    print(my_model)
    operation = client.create_model(parent, my_model)
    print('\nTraining started')
    result = operation.result()  # blocks until training finishes
    model_full_id = result.name
    # BUGFIX: `model_id` was undefined here (NameError) and the print used a
    # non-interpolating "{}".  Derive the short id from the full resource name.
    model_id = model_full_id.split('/')[-1]
    print(f"Model id: {model_id}")
    print("\nTraining done")
def automl_list_model_evaluations(model_id):
    """List every evaluation attached to the given model."""
    parent_model = client.model_path(project_id, 'us-central1', model_id)
    print("\nList of model evaluations:")
    for evaluation in client.list_model_evaluations(parent_model, ''):
        print(evaluation)
def automl_get_model_evaluation(model_id, model_evaluation_id):
    """Fetch and print a single model evaluation."""
    evaluation_name = client.model_evaluation_path(
        project_id, 'us-central1', model_id, model_evaluation_id)
    print("\nModel evaluation:")
    print(client.get_model_evaluation(evaluation_name))
def automl_get_model(model_id):
    """Fetch and print a single model by id."""
    model_name = client.model_path(project_id, 'us-central1', model_id)
    print("\nModel:")
    print(client.get_model(model_name))
def automl_list_models():
    """List every model under the configured project/location."""
    listing = client.list_models(parent, '')
    print("\nList of models:")
    for model in listing:
        print(model)
def automl_predict(model_full_id, path):
    """Run an online prediction on a local image file.

    :param model_full_id: full resource name of the deployed model.
    :param path: local path to the image to classify.
    """
    prediction_client = automl_v1alpha1.PredictionServiceClient()
    # `with` ensures the file handle is closed even on errors.
    with open(path, 'rb') as image_file:
        content = image_file.read()
    payload = {'image': {'image_bytes': content}}
    params = {}
    # BUGFIX: the result used to be bound to `request` while an undefined
    # `response` was printed (NameError); also "{}" was never interpolated.
    response = prediction_client.predict(model_full_id, payload, params)
    print(f'\nPrediction results: {response}')
def automl_delete_model(model_id):
    """Asynchronously delete a model; `callback` waits on completion."""
    model_name = client.model_path(project_id, 'us-central1', model_id)
    client.delete_model(model_name).add_done_callback(callback)
    print('\nModel deletion')
def automl_delete_dataset(dataset_id):
    """Asynchronously delete a dataset; `callback` waits on completion."""
    dataset_name = client.dataset_path(project_id, 'us-central1', dataset_id)
    client.delete_dataset(dataset_name).add_done_callback(callback)
    print('\nDataset deletion')
if __name__ == '__main__':
    # Build one sub-command per AutoML helper defined above; each
    # sub-parser reuses its handler's docstring as help text.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    subparsers = parser.add_subparsers(dest='command')

    automl_create_dataset_parser = subparsers.add_parser(
        'create_dataset', help=automl_create_dataset.__doc__)
    automl_create_dataset_parser.add_argument('dataset_name')

    automl_create_model_parser = subparsers.add_parser(
        'create_model', help=automl_create_model.__doc__)
    automl_create_model_parser.add_argument('dataset_id')
    automl_create_model_parser.add_argument('model_name')

    automl_import_parser = subparsers.add_parser(
        'import', help=automl_import.__doc__)
    automl_import_parser.add_argument('path')
    automl_import_parser.add_argument('dataset_full_id')

    automl_list_datasets_parser = subparsers.add_parser(
        'list_datasets', help=automl_list_datasets.__doc__)

    automl_list_models_parser = subparsers.add_parser(
        'list_models', help=automl_list_models.__doc__)

    automl_delete_dataset_parser = subparsers.add_parser(
        'delete_dataset', help=automl_delete_dataset.__doc__)
    automl_delete_dataset_parser.add_argument('dataset_id')

    automl_delete_model_parser = subparsers.add_parser(
        'delete_model', help=automl_delete_model.__doc__)
    automl_delete_model_parser.add_argument('model_id')

    automl_predict_parser = subparsers.add_parser(
        'predict', help=automl_predict.__doc__)
    automl_predict_parser.add_argument('model_id')
    automl_predict_parser.add_argument('path')

    automl_list_model_evaluations_parser = subparsers.add_parser(
        'list_model_evaluations', help=automl_list_model_evaluations.__doc__)
    automl_list_model_evaluations_parser.add_argument('model_id')

    automl_get_model_evaluation_parser = subparsers.add_parser(
        'get_model_evaluation', help=automl_get_model_evaluation.__doc__)
    automl_get_model_evaluation_parser.add_argument('model_id')
    automl_get_model_evaluation_parser.add_argument('model_evaluation_id')

    # BUGFIX: this parser previously borrowed automl_get_model_evaluation's
    # docstring for its help text; use automl_get_model's own.
    automl_get_model_parser = subparsers.add_parser(
        'get_model', help=automl_get_model.__doc__)
    automl_get_model_parser.add_argument('model_id')

    # set up the shared client state used by all handlers above
    project_id = 'your_project_id'  # You can replace with your consumer project id.
    client = automl_v1alpha1.AutoMlClient()
    parent = client.location_path(project_id, 'us-central1')

    args = parser.parse_args()
    # Commands are mutually exclusive, so dispatch with elif.
    if args.command == 'create_dataset':
        automl_create_dataset(args.dataset_name)
    elif args.command == 'create_model':
        automl_create_model(args.dataset_id, args.model_name)
    elif args.command == 'delete_dataset':
        automl_delete_dataset(args.dataset_id)
    elif args.command == 'delete_model':
        automl_delete_model(args.model_id)
    elif args.command == 'list_datasets':
        automl_list_datasets()
    elif args.command == 'list_models':
        automl_list_models()
    elif args.command == 'list_model_evaluations':
        automl_list_model_evaluations(args.model_id)
    elif args.command == 'get_model':
        automl_get_model(args.model_id)
    elif args.command == 'get_model_evaluation':
        automl_get_model_evaluation(args.model_id, args.model_evaluation_id)
    elif args.command == 'import':
        automl_import(args.path, args.dataset_full_id)
    elif args.command == 'predict':
        automl_predict(args.model_id, args.path)
|
[
"jorwalk@gmail.com"
] |
jorwalk@gmail.com
|
67b4fdb18932e728d619611ee41d81e11eb82f6e
|
02c6b39399c1cfb434ad718c90bed3d8e6310ed0
|
/symbolic/symbolic_interval/__init__.py
|
a05b1f606a6c61911dc1a5c02ffb66d08e5ade09
|
[] |
no_license
|
phate09/SafeDRL
|
09b8924fa91aa43cf543ea5727ebe4cc8e13c0a5
|
3d4278eaaabb046a90fc1cebd1b5862d63dc5894
|
refs/heads/master
| 2022-09-17T05:12:28.529329
| 2022-08-29T08:21:32
| 2022-08-29T08:21:32
| 204,663,981
| 8
| 3
| null | 2021-12-02T14:13:46
| 2019-08-27T09:07:04
|
Python
|
UTF-8
|
Python
| false
| false
| 97
|
py
|
from .interval import Interval, Symbolic_interval
from .symbolic_network import Interval_network
|
[
"phate09@hotmail.it"
] |
phate09@hotmail.it
|
4234da45fec98149194742c6fe4d1dea150fa8dc
|
ea7ae0d383b3d97cde7995eaeda9651afc2496c4
|
/main.py
|
b4923bbcd52ab065f981cfbd83b98da0734a3416
|
[] |
no_license
|
Brucejy/Human-Protein-Atlas-Image-Classification
|
7abef1a42c5f9fd683cfbf2f781d24ae2957a925
|
0eb4e502db291e2a863ea1a2818d6f72242c6b93
|
refs/heads/master
| 2020-05-23T02:11:25.619370
| 2019-05-18T07:29:41
| 2019-05-18T07:29:41
| 186,598,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,133
|
py
|
# coding: utf-8
import keras
from keras import backend as K
from keras.models import Model
from keras.layers import Activation, Dense, Input
from keras.optimizers import Adam
import os, cv2
import numpy as np
import glob
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
import tensorflow as tf
from keras.applications import Xception
# fix random seed for reproducibility (NumPy ops and the TF graph)
np.random.seed(seed=2018)
tf.set_random_seed(32)
# load dataset info: train.csv maps sample Id -> space-separated class ids
labels = pd.read_csv("train.csv").set_index('Id')
# Parse the label string into a list of ints per sample.
labels['Target'] = [[int(i) for i in s.split()] for s in labels['Target']]
colors = ['red', 'green', 'blue']
# BUGFIX: after set_index('Id') there is no `Id` column any more
# (labels.Id raised AttributeError), and the two hard-coded loop bounds
# disagreed (31072 vs 31702 -> IndexError).  Take the ids from the index
# and iterate them directly instead of by position.
Id = [str(i) for i in labels.index]
flags = cv2.IMREAD_GRAYSCALE
for sample_id in Id:
    # Stack the three stain channels into one HxWx3 array and cache as .npy
    img = np.stack([cv2.imread(os.path.join('trainfile', sample_id + '_' + color + '.png'), flags) for color in colors], -1)
    np.save('AJImage/' + sample_id, img)
# Collect every cached .npy training image, resize to the network input
# size and scale to [0, 1] float32.
folderss=glob.glob('AJImage')
imglists=[]
for folder in folderss:
    for f in glob.glob(folder+'/*.npy'):
        imglists.append(f)
imglists.sort()  # deterministic order so indices line up with labels
# NOTE(review): the sorted file order is assumed to match the label row
# order — TODO confirm (glob order is filesystem-dependent before sorting).
IMAGE_DIMS=(299,299,3)  # Xception's native input size
data=[]
for files in imglists:
    img=np.load(files)
    img=cv2.resize(img,(IMAGE_DIMS[1],IMAGE_DIMS[0]),interpolation=cv2.INTER_AREA).astype(np.float32)/255
    data.append(img)
data=np.array(data)
# split data into train, test
(trainX, testX, trainY, testY)=train_test_split(data, labels.Target, test_size=0.15, random_state=42)
# BUGFIX: the original fit a *second*, independent MultiLabelBinarizer on
# the test labels, so train and test columns could map to different
# classes.  Fit once on the training labels and reuse the same binarizer
# for the test split (assumes all 28 classes occur in the training split —
# TODO confirm).
mlb=MultiLabelBinarizer()
trainYm=mlb.fit_transform(trainY)
nlb=mlb  # keep the old name alive for downstream code
testYn=nlb.transform(testY)
# load predicted dataset info: sample_submission.csv lists the test ids
ss=pd.read_csv('sample_submission.csv')
# NOTE(review): the test-set size 11702 is hard-coded throughout — TODO
# confirm it matches len(ss).
pId=[]
for i in range(0,11702):
    a=str(ss.Id[i])
    pId.append(a)
# Stack the three stain channels per test image and cache as .npy
for i in range(0,11702):
    flags=cv2.IMREAD_GRAYSCALE
    img=np.stack([cv2.imread(os.path.join('testfile', pId[i]+'_'+color+'.png'), flags) for color in colors],-1)
    np.save('AJTEImage/'+pId[i],img)
# Load the cached test images back, resized and scaled like the train set.
predict_f=glob.glob('AJTEImage')
pimglist=[]
for folder in predict_f:
    for f in glob.glob(folder+'/*.npy'):
        pimglist.append(f)
pimglist.sort()  # deterministic order so predictions line up with ss rows
pdata=[]
for files in pimglist:
    img=np.load(files)
    img=cv2.resize(img,(IMAGE_DIMS[1],IMAGE_DIMS[0]),interpolation=cv2.INTER_AREA).astype(np.float32)/255
    pdata.append(img)
pdata=np.array(pdata)
# create model
def createmodel(inputshape, n_classes):
    """Xception backbone (ImageNet weights, global max pooling) with a
    sigmoid head for multi-label classification."""
    image_input = Input(shape=inputshape)
    backbone = Xception(include_top=False, weights='imagenet', pooling='max')
    backbone.name = 'xception_image'
    features = backbone(image_input)
    predictions = Dense(n_classes, activation='sigmoid')(features)
    return Model(inputs=[image_input], outputs=[predictions])
# 28 protein-localization classes, 299x299x3 inputs (Xception's native size).
model=createmodel(inputshape=(299,299,3),n_classes=28)
def f1(y_true, y_pred):
    """Macro-averaged F1 metric on 0.5-rounded predictions (Keras tensors).

    Computes per-class precision/recall over the batch axis, the per-class
    F1, then the mean across classes; 0/0 cases are zeroed via tf.where.
    """
    y_pred = K.round(y_pred)  # hard 0/1 predictions at threshold 0.5
    tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)
    tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)  # not used below
    fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)
    fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)

    # epsilon keeps the divisions finite for empty classes
    p = tp / (tp + fp + K.epsilon())
    r = tp / (tp + fn + K.epsilon())

    f1 = 2*p*r / (p+r+K.epsilon())
    f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)  # NaN -> 0
    return K.mean(f1)
# Binary cross-entropy per class (multi-label setting); track accuracy and macro F1.
model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.001), metrics=['acc',f1])
# Heavy train-time augmentation; microscopy images are orientation-invariant.
aug = ImageDataGenerator(rotation_range=180, width_shift_range=0.1, height_shift_range=0.1, shear_range=20, zoom_range=[0.8, 1.2], horizontal_flip=True, vertical_flip=True, fill_mode='reflect')
model.fit_generator(aug.flow(trainX, trainYm, batch_size=16), steps_per_epoch=len(trainX)/16, epochs=25,validation_data=(testX, testYn), workers=20, verbose=1)
# a TTA wrapper for keras model with a predicted method
class TTA_ModelWrapper():
    """Test-time-augmentation (TTA) wrapper around a Keras model.

    predict_tta averages the wrapped model's predictions over several
    randomly augmented copies of each input image.
    """
    def __init__(self, model):
        self.model = model
        # Cleanup: dropped the stray `datagen =` chained alias from the
        # original — the name was never used again.
        self.gene = ImageDataGenerator(
            rotation_range=180,
            width_shift_range=0.1,
            height_shift_range=0.1,
            shear_range=20,
            zoom_range=[0.8, 1.2],
            fill_mode='reflect',
            horizontal_flip=True,
            vertical_flip=True)

    def predict_tta(self, X, aug_times=16):
        """Return per-sample predictions averaged over `aug_times` random
        augmentations of each image in X."""
        pred = []
        for x_i in X:
            sum_p = 0
            # flow() yields endlessly; stop after aug_times augmentations.
            for i, d in enumerate(self.gene.flow(x_i[np.newaxis], batch_size=1)):
                if i >= aug_times:
                    break
                sum_p += self.model.predict(d)[0]
            pred.append(sum_p / aug_times)
        return np.array(pred)
model=TTA_ModelWrapper(model)
# Test-time-augmented predictions for the submission set.
py=model.predict_tta(pdata,aug_times=16)
# find the threshold for each class using the tail of the training data
# (rows 26411+ double as a held-out calibration set).
datath=[]
for files in imglists[26411:]:
    img=np.load(files)
    img=cv2.resize(img,(IMAGE_DIMS[1],IMAGE_DIMS[0]),interpolation=cv2.INTER_AREA).astype(np.float32)/255
    datath.append(img)
# NOTE(review): chained indexed assignment on a DataFrame column — pandas
# may warn (SettingWithCopy); confirm the write actually lands.
labels.Target[26411:]=np.array(labels.Target[26411:])
testYnth=nlb.fit_transform(labels.Target[26411:])
pred_metrix=model.predict_tta(datath,aug_times=16)
def f1_np(y_pred, y_true, threshold=0.5):
    """Mean per-sample F1 of thresholded predictions (pure numpy)."""
    binarized = (y_pred > threshold).astype(int)
    true_pos = (binarized * y_true).sum(1)
    # 1e-7 keeps the divisions finite for empty rows
    precision = true_pos / (binarized.sum(1) + 1e-7)
    recall = true_pos / (y_true.sum(1) + 1e-7)
    per_sample_f1 = 2 * precision * recall / (precision + recall + 1e-7)
    return per_sample_f1.mean()
def f1_n(y_pred, y_true, thresh, n, default=0.5):
    """F1 with a per-class threshold vector: class *n* uses *thresh*,
    every other class uses *default*."""
    thresholds = default * np.ones(y_pred.shape[1])
    thresholds[n] = thresh
    return f1_np(y_pred, y_true, thresholds)
def find_thresh(y_pred, y_true):
    """Brute-force per-class threshold search maximising f1_n.

    For each class, scans 100 candidate thresholds in [0, 1] and keeps
    the best-scoring one.
    """
    grid = np.linspace(0, 1, 100)
    ths = []
    for i in range(y_pred.shape[1]):
        scores = [f1_n(y_pred, y_true, th, i) for th in grid]
        # BUGFIX: the original recorded argmax/100, but linspace(0, 1, 100)
        # steps by 1/99 — so the stored threshold was not the one that was
        # actually evaluated.  Record the evaluated grid value instead.
        ths.append(grid[int(np.argmax(scores))])
    return np.array(ths)
# Calibrate a per-class decision threshold on the held-out slice.
ths = find_thresh(pred_metrix, testYnth)
print(ths)
# create submission: for each test image keep the classes whose score
# clears that class's threshold, then render as a space-separated string.
# NOTE(review): 11702 is hard-coded — TODO confirm it equals len(ss).
y=[]
for x in py:
    l=np.arange(28)[x>=ths]
    y.append(l)
ss['Predicted']=y
x=[]
for i in range(0,11702):
    x.append('')
for i in range(0,11702):
    for y in ss.Predicted[i]:
        x[i]+=' '+str(y)
Y=[]
for i in range(0,11702):
    Y.append(x[i].strip())
ss.Predicted=Y
ss.to_csv('submission.csv',index=False)
|
[
"b25940965@gmail.com"
] |
b25940965@gmail.com
|
e7a41ab6ff5efcaeebafaabd9037e6b8f9d2cadc
|
55cc60ae22e27b121383098aae7c7ab36f2bb64f
|
/exercices/045/solution.py
|
e87f5aab9db144e93f73f4b0492504c664388865
|
[] |
no_license
|
lnarbona/hackinscience
|
6ad804faa2a6ce0a9d14d070f1808b6851fcafa3
|
1497c33e29a4cfe8ac009ae1894661aacc95e5c3
|
refs/heads/master
| 2021-07-07T00:13:33.986964
| 2015-12-15T10:38:08
| 2015-12-15T10:38:08
| 43,556,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 43
|
py
|
def sqrt(numero):
    """Return the square root of *numero*, computed as numero ** 0.5."""
    raiz = numero ** 0.5
    return raiz
|
[
"lara.narbona@cri-paris.org"
] |
lara.narbona@cri-paris.org
|
df824d5307b1a45d4bdaaf6834c8633efa3b5d87
|
0a14400b5df85592b9732ef00c893f62a458554b
|
/src/main.py
|
aee09a0a7404ad5eb0498de67c44241deb38229f
|
[] |
no_license
|
a1rwulf/movlibgen
|
c193e19110164806b017074462079c43eb5ad8fc
|
b66c5c7e76084383846ef51d77b3ff60162bcb7c
|
refs/heads/master
| 2020-06-14T05:53:50.781062
| 2019-10-07T09:28:33
| 2019-10-07T09:28:33
| 75,224,771
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,148
|
py
|
import tmdbsimple as tmdb
import configparser
import time
import os
from datetime import datetime
# Read target directory, sample file and TMDB API key from the config file.
config = configparser.ConfigParser()
config.read('movlibgen.cfg')
x = 'abcdefghijklmnopqrstuvwxyz'
path = config.get('CONFIG', 'PATH')
samplefile = config.get('CONFIG', 'SAMPLE')
tmdb.API_KEY = config.get('CONFIG', 'API_KEY')
search = tmdb.Search()

# Hoisted out of the loops: the symlink source never changes.
fullsample = os.path.join(path, samplefile)
# One TMDB search per letter of the alphabet, paging through results and
# creating one fake "<Title>_<Year>.ts" symlink per movie found.
for i in x:
    # NOTE(review): pages are capped at 399 regardless of
    # search.total_pages — confirm the API tolerates out-of-range pages.
    for p in range(1, 400):
        # search.movie() populates the `search` object in place; the
        # original bound an unused `response` local.
        search.movie(query=i, page=str(p), include_adult='no')
        print(search.total_results)
        print(search.page)
        for s in search.results:
            try:
                movie_year = ''
                if s['release_date'] != '':
                    dt = datetime.strptime(s['release_date'], '%Y-%m-%d')
                    movie_year = dt.year
                # Build "Title_Year.ts" with spaces replaced by underscores.
                fakemovie = f"{str(s['title']).replace(' ', '_')}_{movie_year}.ts"
                fullfakemovie = os.path.join(path, fakemovie)
                print(str(s['title']), movie_year)
                os.symlink(fullsample, fullfakemovie)
            except Exception as e:
                # BUGFIX: the original message had no separator between the
                # text and the exception ("Skip to next<err>").
                print("Skip to next: " + str(e))
        time.sleep(1)  # be polite to the API
|
[
"haupt.wolfgang@gmail.com"
] |
haupt.wolfgang@gmail.com
|
5022b25cb069afe810d84ff795b402a89157ac94
|
20eb79521bfd5103dd088ef9ddd7b0e654b3e3f9
|
/Binary_search.py
|
4f3e93a7ce7e39189c0e13633c67fa90a8e0103d
|
[
"Apache-2.0"
] |
permissive
|
Sourav4yogi/Python
|
dfec0403b22f3d3df95397926b560889fad63d6c
|
53284da8982a76638bff230f3230c3adee5195d8
|
refs/heads/master
| 2023-05-26T11:54:49.872038
| 2023-05-20T14:47:12
| 2023-05-20T14:47:12
| 263,278,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
def binary_search(array, item):
    """Return the index of *item* in the sorted *array*, or None if absent."""
    low, high = 0, len(array) - 1
    while low <= high:
        mid = (low + high) // 2
        candidate = array[mid]
        if candidate == item:
            return mid
        if candidate > item:
            high = mid - 1
        else:
            low = mid + 1
    return None
# Demo: searching for a value that is absent prints None.
array=[1,3,5,7,9]
print(binary_search(array, -1))
|
[
"noreply@github.com"
] |
noreply@github.com
|
7b68ba9a1d655a781e41cb1aeb3c4390cd69e5fa
|
3cf379c79764a0aa8bdd556b90ad3b1c287f8bba
|
/models/base/model_base.py
|
6afb121d0034490558d0d5ff5d81325b16c4d7fc
|
[] |
no_license
|
alexnwang/SketchEmbedNet-public
|
b2c5924ba02e0d01343e1adbbae63529d3a1c78a
|
a98b212f91c459b575546edebf127c7ade85fb4e
|
refs/heads/master
| 2023-01-10T00:14:03.945769
| 2020-11-06T19:59:27
| 2020-11-06T19:59:27
| 285,636,393
| 11
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,015
|
py
|
import os
import tensorflow as tf
class BaseModel(object):
    """
    Basic model: resolves per-model output directories and declares the
    train/test interface for subclasses to implement.
    """
    def __init__(self, base_dir, model_id):
        self._base_dir = base_dir
        self._dir = os.path.join(base_dir, model_id)
        subclass_name = self.__class__.__name__
        self._summary_dir = os.path.join(self._dir, "tfsummary", subclass_name)
        self._sampling_dir = os.path.join(self._dir, "sampling", subclass_name)

    def train(self, *args, **kwargs):
        """Subclasses must implement training."""
        raise NotImplementedError

    def test(self, *args, **kwargs):
        """Subclasses must implement evaluation."""
        raise NotImplementedError
class TrainableModel(BaseModel):
    """
    Iteratively trained model: adds checkpointing and a TF summary writer
    on top of BaseModel's directory handling.
    """
    def __init__(self, base_dir, model_id, training, ckpt=None):
        super(TrainableModel, self).__init__(base_dir, model_id)
        self.training = training
        self._ckpt = ckpt
        # ----- Directory Flags ----- #
        self._checkpoint_dir = os.path.join(self._dir, "checkpoints", self.__class__.__name__)
        # ----- Summary Writer ----- #
        if self.training:
            self._writer = tf.summary.create_file_writer(self._summary_dir)
        else:
            # BUGFIX: the original `else: None` was a no-op expression,
            # leaving self._writer unset (AttributeError on later access)
            # when constructed in evaluation mode.
            self._writer = None
        # ----- Build Model ----- #
        self._build_model()
        # ----- Checkpoint Model ----- #
        self._checkpoint_model()

    def _build_model(self):
        """Construct the network; must be implemented by subclasses."""
        raise NotImplementedError

    def _checkpoint_model(self):
        """Set up checkpoint saving/restoring; must be implemented by subclasses."""
        raise NotImplementedError

    def train(self, train_dataset, train_steps, print_freq, save_freq, eval_dataset=None, eval_freq=None):
        raise NotImplementedError

    def evaluate(self, step, eval_dataset):
        raise NotImplementedError

    def test(self, test_dataset, result_name, steps=None):
        raise NotImplementedError

    def forward(self, *args, **kwargs):
        raise NotImplementedError

    def _write_summaries(self, step, summaries_dict):
        """Write every entry of summaries_dict as a scalar summary at `step`."""
        for key in summaries_dict:
            tf.summary.scalar(key, summaries_dict[key], step=step)
|
[
"alexn.wang@mail.utoronto.ca"
] |
alexn.wang@mail.utoronto.ca
|
7933f46b513502a6a56250aef30b1525cc20a4e3
|
a15b9fe01e2edb0c126d926e2f5ad4d0e2e5c33f
|
/listings/urls.py
|
9bdb96448ff8143ffb269c01e05598c474c24417
|
[] |
no_license
|
zohaibk22/realestate-with-django
|
6abea13e49156ef4fba7a4918bd34dd54c07c3c4
|
ff0ab708a36b058adf02632f3a5ec3db0b94703c
|
refs/heads/master
| 2023-05-05T07:28:45.241477
| 2021-05-28T02:38:36
| 2021-05-28T02:38:36
| 347,839,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
from django.urls import path, include
from . import views

# URL routes for the listings app.
urlpatterns = [
    path('', views.index, name='listings'),  # all listings
    path('<int:listings_id>', views.listing, name='listing'),  # single listing
    path('search', views.search, name='search' ),  # search results
]
|
[
"zohaibk1111@gmail.com"
] |
zohaibk1111@gmail.com
|
c721a92ac5444111678c7fa54101abe580bfdaa5
|
d4cd0a2c56c697bf7637ac8941d694eb515dce96
|
/CTDLGT_Python/bt12_cài_đặt_đồ_thị_vô_hướng.py
|
0d9839adbc3724a3e61301d0eeea656835c57726
|
[] |
no_license
|
ldthinh220802/THI_CTDLGT
|
d456796321d9377201916e84211f8f9ae255406e
|
2c1c6865a9d5d59b83d983de46289323ecbb75b7
|
refs/heads/main
| 2023-06-25T15:53:38.180421
| 2021-07-19T06:21:27
| 2021-07-19T06:21:27
| 384,970,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 931
|
py
|
# -*- coding: utf-8 -*-
"""BT12. Cài đặt đồ thị vô hướng
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/ldthinh220802/cau_truc_du_lieu_va_giai_thuat/blob/main/BT12_C%C3%A0i_%C4%91%E1%BA%B7t_%C4%91%E1%BB%93_th%E1%BB%8B_v%C3%B4_h%C6%B0%E1%BB%9Bng.ipynb
"""
import networkx as nx
import matplotlib.pyplot as plt
# Build a small directed graph and draw it with matplotlib.
G = nx.DiGraph()
G.add_edges_from([('A','B'),('B','C'),('B','D'),('D','C')])

# Specify the edges you want here
red_edges = [('A', 'C')]
# Cleanup: the original also built an `edge_colours` list that was never
# used anywhere — removed as dead code.  Only the non-red edges are drawn.
black_edges = [edge for edge in G.edges() if edge not in red_edges]

pos = nx.spring_layout(G)
nx.draw_networkx_nodes(G, pos, cmap=plt.get_cmap('jet'),
                       node_size = 500)
nx.draw_networkx_labels(G, pos)
nx.draw_networkx_edges(G, pos, edgelist=black_edges, arrows=False)
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
cad9054648026ec7176604a02079681b1fb1d29b
|
0e1bfd89a58f679490a7bc03c113bd37b4cb3ec0
|
/SVAGP/kernels.py
|
32c351e0503f025993845fc7a605ea9896a72c2b
|
[
"Apache-2.0"
] |
permissive
|
UCL/SVAGP
|
8014f47954ea7d2c210c8fd9a73cc459d0433c74
|
d8a9d9d4b87f317ef9b3c74aa39a2b30a488ab35
|
refs/heads/master
| 2021-01-23T10:35:53.170429
| 2017-06-01T17:14:24
| 2017-06-01T17:14:24
| 93,076,432
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,921
|
py
|
# Copyright 2016 James Hensman, alexggmatthews
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------
# Modification notice:
# This file was modified by Vincent ADAM
# ------------------------------------------
import tensorflow as tf
import numpy as np
from settings import int_type
class Kern(object):
    """
    The basic kernel class. Handles input_dim and active dims, and provides a
    generic '_slice' function to implement them.
    """
    def __init__(self, input_dim, active_dims=None):
        """
        input dim is an integer
        active dims is either an iterable of integers or None.
        Input dim is the number of input dimensions to the kernel. If the
        kernel is computed on a matrix X which has more columns than input_dim,
        then by default, only the first input_dim columns are used. If
        different columns are required, then they may be specified by
        active_dims.
        If active dims is None, it effectively defaults to range(input_dim),
        but we store it as a slice for efficiency.
        """
        self.input_dim = int(input_dim)
        if active_dims is None:
            # Default: the first input_dim columns, stored as a slice.
            self.active_dims = slice(input_dim)
        elif type(active_dims) is slice:
            self.active_dims = active_dims
            # Only check the length when the slice is fully specified.
            if active_dims.start is not None and active_dims.stop is not None and active_dims.step is not None:
                assert len(range(*active_dims)) == input_dim  # pragma: no cover
        else:
            # Explicit column indices.
            self.active_dims = np.array(active_dims, dtype=np.int32)
            assert len(active_dims) == input_dim
        # Quadrature resolution used by downstream expectation code.
        self.num_gauss_hermite_points = 20

    def _slice(self, X, X2):
        """
        Slice the correct dimensions for use in the kernel, as indicated by
        `self.active_dims`.
        :param X: Input 1 (NxD).
        :param X2: Input 2 (MxD), may be None.
        :return: Sliced X, X2, (Nxself.input_dim).
        """
        if isinstance(self.active_dims, slice):
            X = X[:, self.active_dims]
            if X2 is not None:
                X2 = X2[:, self.active_dims]
        else:
            # Integer-array active_dims: gather columns via a double
            # transpose, since tf.gather works on the leading axis.
            X = tf.transpose(tf.gather(tf.transpose(X), self.active_dims))
            if X2 is not None:
                X2 = tf.transpose(tf.gather(tf.transpose(X2), self.active_dims))
        # Runtime guard: the sliced input must have exactly input_dim columns.
        with tf.control_dependencies([
            tf.assert_equal(tf.shape(X)[1], tf.constant(self.input_dim, dtype=int_type))
        ]):
            X = tf.identity(X)
        return X, X2

    def _slice_cov(self, cov):
        """
        Slice the correct dimensions for use in the kernel, as indicated by
        `self.active_dims` for covariance matrices. This requires slicing the
        rows *and* columns. This will also turn flattened diagonal
        matrices into a tensor of full diagonal matrices.
        :param cov: Tensor of covariance matrices (NxDxD or NxD).
        :return: N x self.input_dim x self.input_dim.
        """
        # A rank-2 input is a flattened diagonal; expand to full matrices.
        cov = tf.cond(tf.equal(tf.rank(cov), 2), lambda: tf.matrix_diag(cov), lambda: cov)
        if isinstance(self.active_dims, slice):
            cov = cov[..., self.active_dims, self.active_dims]
        else:
            # Gather the active rows and columns via successive transposes.
            cov_shape = tf.shape(cov)
            covr = tf.reshape(cov, [-1, cov_shape[-1], cov_shape[-1]])
            gather1 = tf.gather(tf.transpose(covr, [2, 1, 0]), self.active_dims)
            gather2 = tf.gather(tf.transpose(gather1, [1, 0, 2]), self.active_dims)
            cov = tf.reshape(tf.transpose(gather2, [2, 0, 1]),
                             tf.concat_v2([cov_shape[:-2], [len(self.active_dims), len(self.active_dims)]], 0))
        return cov
class Stationary(Kern):
    """
    Base class for kernels that are stationary, that is, they only depend on
        r = || x - x' ||
    This class handles 'ARD' behaviour, which stands for 'Automatic Relevance
    Determination'. This means that the kernel has one lengthscale per
    dimension, otherwise the kernel is isotropic (has a single lengthscale).
    """
    def __init__(self, input_dim, variance=1.0, lengthscales=1.,
                 active_dims=None):
        """
        - input_dim is the dimension of the input to the kernel
        - variance is the (initial) value for the variance parameter
        - lengthscales is the initial value for the lengthscales parameter
          defaults to 1.0
        - active_dims is a list of length input_dim which controls which
          columns of X are used.
        """
        Kern.__init__(self, input_dim, active_dims)
        # One lengthscale per input dimension (ARD); a scalar initial
        # value is broadcast across all dimensions.
        self.lengthscales = tf.get_variable("lengthscales", [input_dim], initializer=tf.constant_initializer(lengthscales))
        self.variance = tf.get_variable("variance", [1], initializer=tf.constant_initializer(variance))

    def square_dist(self, X, X2):
        """Squared scaled distance ||x/l - x'/l||^2 via the expansion
        ||a||^2 + ||b||^2 - 2 a.b (can be slightly negative from
        floating-point cancellation)."""
        X = X / self.lengthscales
        Xs = tf.reduce_sum(tf.square(X), 1)
        if X2 is None:
            return -2 * tf.matmul(X, tf.transpose(X)) + \
                tf.reshape(Xs, (-1, 1)) + tf.reshape(Xs, (1, -1))
        else:
            X2 = X2 / self.lengthscales
            X2s = tf.reduce_sum(tf.square(X2), 1)
            return -2 * tf.matmul(X, tf.transpose(X2)) + \
                tf.reshape(Xs, (-1, 1)) + tf.reshape(X2s, (1, -1))

    def euclid_dist(self, X, X2):
        """Scaled Euclidean distance; the +1e-12 jitter guards the sqrt
        gradient at zero distance."""
        r2 = self.square_dist(X, X2)
        return tf.sqrt(r2 + 1e-12)

    def Kdiag(self, X, presliced=False):
        """Diagonal of K(X, X): constant `variance` for stationary kernels."""
        return tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))
class RBF(Stationary):
    """
    The radial basis function (RBF), a.k.a. squared exponential kernel:

        k(x, x') = variance * exp(-0.5 * || (x - x') / lengthscales ||^2)
    """
    def K(self, X, X2=None, presliced=False):
        """Evaluate the RBF covariance matrix between X and X2 (or X with itself)."""
        if not presliced:
            X, X2 = self._slice(X, X2)
        r2 = self.square_dist(X, X2)
        return self.variance * tf.exp(-0.5 * r2)
class PeriodicKernel(Kern):
    """
    The periodic kernel. Defined in Equation (47) of
    D.J.C.MacKay. Introduction to Gaussian processes. In C.M.Bishop, editor,
    Neural Networks and Machine Learning, pages 133--165. Springer, 1998.
    Derived using the mapping u=(cos(x), sin(x)) on the inputs.
    """
    def __init__(self, input_dim, period=1.0, variance=1.0,
                 lengthscales=1.0, active_dims=None):
        """
        :param input_dim: dimension of the kernel inputs.
        :param period: initial value of the period parameter.
        :param variance: initial value of the signal variance.
        :param lengthscales: initial value of the per-dimension lengthscales.
        :param active_dims: optional list selecting which columns of X are used.
        """
        Kern.__init__(self, input_dim, active_dims)
        self.lengthscales = tf.get_variable("lengthscales", [input_dim], initializer=tf.constant_initializer(lengthscales))
        self.variance = tf.get_variable("variance", [1], initializer=tf.constant_initializer(variance))
        self.period = tf.get_variable("period", [1], initializer=tf.constant_initializer(period))

    def Kdiag(self, X, presliced=False):
        """Diagonal of K(X, X): the constant signal variance, one entry per row of X."""
        return tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))

    def K(self, X, X2=None, presliced=False):
        """Periodic covariance:
        variance * exp(-0.5 * sum_d sin^2(pi (x_d - x'_d) / period) / len_d^2).
        """
        if not presliced:
            X, X2 = self._slice(X, X2)
        if X2 is None:
            X2 = X

        # Introduce dummy dimension so we can use broadcasting
        f = tf.expand_dims(X, 1)  # now N x 1 x D
        f2 = tf.expand_dims(X2, 0)  # now 1 x M x D

        r = np.pi * (f - f2) / self.period
        r = tf.reduce_sum(tf.square(tf.sin(r) / self.lengthscales), 2)

        return self.variance * tf.exp(-0.5 * r)
class LocallyPeriodicKernel(Kern):
    """
    k(t) = var * exp ( - t^2 / len^2 ) * cos ( 2 * pi * t / per )
    """
    def __init__(self, input_dim, period=1.0, variance=1.0,
                 lengthscales=1.0, active_dims=None):
        """
        :param input_dim: dimension of the kernel inputs.
        :param period: initial value of the cosine period.
        :param variance: initial value of the signal variance.
        :param lengthscales: initial value of the Gaussian-envelope lengthscale.
        :param active_dims: optional list selecting which columns of X are used.
        """
        Kern.__init__(self, input_dim, active_dims)
        self.lengthscales = tf.get_variable("lengthscales", [input_dim], initializer=tf.constant_initializer(lengthscales))
        self.variance = tf.get_variable("variance", [1], initializer=tf.constant_initializer(variance))
        self.period = tf.get_variable("period", [1], initializer=tf.constant_initializer(period))

    def Kdiag(self, X, presliced=False):
        """Diagonal of K(X, X): the constant signal variance, one entry per row of X."""
        return tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.variance))

    def K(self, X, X2=None, presliced=False):
        """Gaussian-envelope cosine covariance (see class docstring)."""
        if not presliced:
            X, X2 = self._slice(X, X2)
        if X2 is None:
            X2 = X

        # Introduce dummy dimension so we can use broadcasting
        f = tf.expand_dims(X, 1)  # now N x 1 x D
        f2 = tf.expand_dims(X2, 0)  # now 1 x M x D

        # NOTE(review): summing over the feature axis yields the *signed*
        # difference, which is only the distance for 1-d inputs.
        r = tf.reduce_sum(f-f2,2) #hack for 1d
        return self.variance * tf.exp( - tf.square(r/self.lengthscales) ) * tf.cos(2.*np.pi *r/ self.period)
|
[
"vincent.adam87@gmail.com"
] |
vincent.adam87@gmail.com
|
3d5e6311fe9aa6c00f6660d25bea7056cf7cb256
|
e48e655f69466f8a4a1bff0cf2f51d7f4fe48065
|
/venv/Scripts/pip3-script.py
|
bb18517866e77e527bae5f4fec198eac2aec8783
|
[] |
no_license
|
Panoramikes/Lista2
|
38f6d364a0ece07d432a04078dc9d079552fca83
|
752b5bf32282ef2aa9af3765c2770273c0535b2e
|
refs/heads/master
| 2020-04-30T12:09:27.352950
| 2019-03-20T21:29:57
| 2019-03-20T21:29:57
| 176,819,599
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
#!C:\Users\panor\PycharmProjects\Lista2\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
# Auto-generated setuptools entry-point wrapper for the `pip3` console script;
# not meant to be edited by hand.
__requires__ = 'pip==10.0.1'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the "-script.py" / ".exe" suffix so pip sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
|
[
"panoramikes@gmail.com"
] |
panoramikes@gmail.com
|
7888c84676fc0e2cc852fcbafaf9c206231c3e44
|
04f2b23dc29b024711b297104eb57e8b7943ac28
|
/dmdashboard/migrations/0006_auto_20170615_2130.py
|
99c42c4fd3c2b250b02a890b2c808fef51245575
|
[] |
no_license
|
acaggiano/dmtools
|
9e6e2609ae7e63369c4339b43bf17225c49f51db
|
e0969955ccdc0a45ff18bfe7ddca79283b408c65
|
refs/heads/master
| 2022-12-16T03:02:43.734781
| 2019-07-11T18:36:00
| 2019-07-11T18:36:00
| 94,246,208
| 1
| 0
| null | 2022-12-08T05:18:47
| 2017-06-13T18:44:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 697
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-16 01:30
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    # Auto-generated schema migration: alters `character.alignment` into a
    # nullable text field with the nine alignment choices.

    dependencies = [
        ('dmdashboard', '0005_auto_20170615_2122'),
    ]

    operations = [
        migrations.AlterField(
            model_name='character',
            name='alignment',
            field=models.TextField(blank=True, choices=[('LG', 'Lawful Good'), ('NG', 'Neutral Good'), ('CG', 'Chaotic Good'), ('LN', 'Lawful Neutral'), ('N', 'True Neutral'), ('CN', 'Chaotic Neutral'), ('LE', 'Lawful Evil'), ('NE', 'Neutral Evil'), ('CE', 'Chaotic Evil')], null=True),
        ),
    ]
|
[
"onaiggaca@gmail.com"
] |
onaiggaca@gmail.com
|
cd5fc1a0531d8ded0b2c72c3befd66f483a78b86
|
f0151c7e52ac2c88e9bdf74767f1f1727f6789d0
|
/pieces.py
|
6b2894e5936438d720a7e3afc081530fb70187d7
|
[] |
no_license
|
lyulka/NFTSOI-Chess-Engine
|
fcb7e3e37e7d8d79456c9c1b0a84f6d1c49dddb2
|
c566f1e64a210ae67635107b3cd273fa9aba04cc
|
refs/heads/main
| 2023-01-23T10:21:08.646569
| 2020-11-28T19:38:16
| 2020-11-28T19:38:16
| 316,014,695
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,422
|
py
|
from move import Move
from coord import Coord
class Empty:
    """Stand-in object for a board square that holds no piece."""

    def __str__(self):
        # WHITE DIAMOND glyph marks a vacant square when the board is printed.
        glyph = '\u25c7'
        return glyph
class Piece:
    """Base chess piece; stores the owning colour."""

    def __init__(self, color):
        self.color = color

    def is_enemy(self, other: 'Piece'):
        """True when *other* is a piece of the opposite colour (never for Empty)."""
        return type(other) != Empty and self.color != other.color

    def is_friend(self, other: 'Piece'):
        """True when *other* is a piece of the same colour (never for Empty)."""
        return type(other) != Empty and self.color == other.color
class Pawn(Piece):
    """Pawn: advances toward row 0 (white) or row 7 (black); captures diagonally."""

    def __str__(self):
        if self.color == 'b': return u'\u2659'
        return u'\u265f'

    # Valid move generation is used strictly to generate moves
    # for the AI player. The validity of moves inputted by the
    # human player is checked in Move.is_valid.
    def valid_moves(self, board: 'Chessboard', from_c: 'Coord'):
        """Return the list of pawn moves from *from_c*: double step from the
        starting rank, single forward step, and the two diagonal captures.
        (En passant and promotion are not generated here.)"""
        moves = []
        if self.color == 'w':
            # Pawn is in starting position
            if (from_c.y == 6 and board.empty_in(Coord(5, from_c.x))
                and board.empty_in(Coord(4, from_c.x))):
                moves.append(Move(from_c, Coord(4, from_c.x))) # Move forward 2 steps
            if board.empty_in(Coord(from_c.y - 1, from_c.x)):
                moves.append(Move(from_c, Coord(from_c.y - 1, from_c.x))) # Move forward 1 step
            # Attacking north-west
            t_y, t_x = from_c.y - 1, from_c.x - 1
            if (Coord.is_in_bounds(t_y, t_x)
                and self.is_enemy(board.piece_in(Coord(t_y, t_x)))):
                moves.append(Move(from_c, Coord(t_y, t_x)))
            # Attacking north-east
            t_y, t_x = from_c.y - 1, from_c.x + 1
            if (Coord.is_in_bounds(t_y, t_x)
                and self.is_enemy(board.piece_in(Coord(t_y, t_x)))):
                moves.append(Move(from_c, Coord(t_y, t_x)))
        else: # self.color == 'b'
            # Mirror image of the white logic, moving toward larger y.
            if (from_c.y == 1 and board.empty_in(Coord(2, from_c.x))
                and board.empty_in(Coord(3, from_c.x))):
                moves.append(Move(from_c, Coord(3, from_c.x)))
            if board.empty_in(Coord(from_c.y + 1, from_c.x)):
                moves.append(Move(from_c, Coord(from_c.y + 1, from_c.x)))
            # Attacking south-west
            t_y, t_x = from_c.y + 1, from_c.x - 1
            if (Coord.is_in_bounds(t_y, t_x)
                and self.is_enemy(board.piece_in(Coord(t_y, t_x)))):
                moves.append(Move(from_c, Coord(t_y, t_x)))
            # Attacking south-east
            t_y, t_x = from_c.y + 1, from_c.x + 1
            if (Coord.is_in_bounds(t_y, t_x)
                and self.is_enemy(board.piece_in(Coord(t_y, t_x)))):
                moves.append(Move(from_c, Coord(t_y, t_x)))
        return moves
class Knight(Piece):
    """Knight: jumps in an L-shape; blockers in between are irrelevant."""

    def __str__(self):
        if self.color == 'b': return u'\u2658'
        return u'\u265e'

    def valid_moves(self, board: 'Chessboard', from_c: 'Coord'):
        """List every knight move from *from_c* landing in-bounds on an empty
        or enemy-occupied square."""
        # (dy, dx) jump offsets, clockwise starting from north.
        jumps = (
            (-2, 1), (-1, 2), (1, 2), (2, 1),
            (2, -1), (1, -2), (-1, -2), (-2, -1),
        )
        moves = []
        for dy, dx in jumps:
            t_y, t_x = from_c.y + dy, from_c.x + dx
            if not Coord.is_in_bounds(t_y, t_x):
                continue
            landing = Coord(t_y, t_x)
            if board.empty_in(landing) or board.piece_in(landing).is_enemy(self):
                moves.append(Move(from_c, landing))
        return moves
class Bishop(Piece):
    """Bishop: slides along diagonals until blocked."""

    def __str__(self):
        if self.color == 'b': return u'\u2657'
        return u'\u265d'

    def valid_moves(self, board: 'Chessboard', from_c: 'Coord'):
        """Return bishop moves: rays in the four diagonal directions, stopping
        at the first occupied square (included when it holds an enemy)."""
        moves = []
        offsets = (
            (-1, 1), # Move north-east
            (1, 1), # Move south-east
            (1, -1), # Move south-west
            (-1, -1), # Move north-west
        )
        for offset in offsets:
            t_y, t_x = from_c.y, from_c.x
            while True:
                t_y, t_x = t_y + offset[0], t_x + offset[1]
                # Exceeded bounds
                if not Coord.is_in_bounds(t_y, t_x):
                    break
                t_c = Coord(t_y, t_x)
                move = Move(from_c, t_c)
                # Nobody here
                if board.empty_in(t_c):
                    moves.append(move)
                    continue
                # Bump into enemy
                if board.piece_in(t_c).is_enemy(self):
                    moves.append(move)
                    break
                # Bump into friend
                if board.piece_in(t_c).is_friend(self):
                    break
        return moves
class Rook(Piece):
    """Rook: slides along ranks and files until blocked."""

    def __str__(self):
        if self.color == 'b': return u'\u2656'
        return u'\u265c'

    def valid_moves(self, board: 'Chessboard', from_c: 'Coord'):
        """Return rook moves: rays in the four orthogonal directions, stopping
        at the first occupied square (included when it holds an enemy)."""
        moves = []
        offsets = (
            (-1, 0), # Move north
            (0, 1), # Move east
            (1, 0), # Move south
            (0, -1), # Move west
        )
        for offset in offsets:
            t_y, t_x = from_c.y, from_c.x
            while True:
                t_y, t_x = t_y + offset[0], t_x + offset[1]
                # Exceeded bounds
                if not Coord.is_in_bounds(t_y, t_x):
                    break
                t_c = Coord(t_y, t_x)
                move = Move(from_c, t_c)
                # Nobody here
                if board.empty_in(t_c):
                    moves.append(move)
                    continue
                # Bump into enemy
                if board.piece_in(t_c).is_enemy(self):
                    moves.append(move)
                    break
                # Bump into friend
                if board.piece_in(t_c).is_friend(self):
                    break
        return moves
class Queen(Piece):
    """Queen: combines the movement of a bishop and a rook."""

    def __str__(self):
        if self.color == 'b': return u'\u2655'
        return u'\u265b'

    def valid_moves(self, board: 'Chessboard', from_c: 'Coord'):
        """Union of bishop-style and rook-style moves from *from_c*."""
        # Delegate to throwaway Bishop/Rook instances of the same colour.
        diagonal_moves = Bishop(self.color).valid_moves(board, from_c)
        straight_moves = Rook(self.color).valid_moves(board, from_c)
        return diagonal_moves + straight_moves
class King(Piece):
    """King: moves one square in any of the eight directions."""

    def __str__(self):
        if self.color == 'b': return u'\u2654'
        return u'\u265a'

    def valid_moves(self, board: 'Chessboard', from_c: 'Coord'):
        """Return one-step king moves from *from_c* that stay on the board and
        land on an empty or enemy-occupied square.

        Note: squares attacked by the enemy (moving into check) are not
        filtered here.
        """
        moves = []
        # Enumerating possible moves clockwise, starting from north.
        # BUG FIX: the original tuple listed the south-west square
        # (y+1, x-1) twice and omitted north-west (y-1, x-1) entirely,
        # so the king could never move north-west.
        possible_targets = (
            (from_c.y - 1, from_c.x),      # north
            (from_c.y - 1, from_c.x + 1),  # north-east
            (from_c.y, from_c.x + 1),      # east
            (from_c.y + 1, from_c.x + 1),  # south-east
            (from_c.y + 1, from_c.x),      # south
            (from_c.y + 1, from_c.x - 1),  # south-west
            (from_c.y, from_c.x - 1),      # west
            (from_c.y - 1, from_c.x - 1),  # north-west
        )
        for t_y, t_x in possible_targets:
            if (Coord.is_in_bounds(t_y, t_x)
                and (board.empty_in(Coord(t_y, t_x)) or board.piece_in(Coord(t_y, t_x)).is_enemy(self))):
                moves.append(Move(from_c, Coord(t_y, t_x)))
        return moves
|
[
"strelka@connect.hku.hk"
] |
strelka@connect.hku.hk
|
0b2eda8e021bc7530e43f336462b37a03d23ece8
|
604d1065eb2f098a4873fd9470cc7850d5bde86a
|
/datet.py
|
f8b4297fa2705d8337f2755419924f8bd3c5cf1b
|
[] |
no_license
|
rjayajadhav/Python-code
|
a05117a339b3b8591141374aa061700109de4577
|
c734fa83696779c9eb2a893b593813369eb84e10
|
refs/heads/main
| 2023-07-15T11:27:16.997616
| 2021-08-28T16:12:42
| 2021-08-28T16:12:42
| 392,303,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
import json

# Two JSON documents encoded as strings.
x = '{ "name":"John", "age":30, "city":"New York"}'
z = '{ "name":"Jaya", "age":35, "city":"Pune"}'

# Parse each document; json.loads returns an ordinary Python dictionary.
y = json.loads(x)
n = json.loads(z)

# Show a few fields of the decoded dictionaries.
for field_value in (y["age"], y["city"], n["age"]):
    print(field_value)
|
[
"noreply@github.com"
] |
noreply@github.com
|
8ac0480670678ce2f641aae18ee7719838e5f722
|
d30c6d691a34fc9181fb71e9712b9505384422ec
|
/数字,日期和时间/分数的计算_P96.py
|
be37c7b9b724074146f45662cb34e480751597bf
|
[] |
no_license
|
shishengjia/PythonDemos
|
cef474eb01ee9541ba0c70fc0750ee48a025f42f
|
c0a857b1cacdbb2b6b727a84f95f93b6e86d60c2
|
refs/heads/master
| 2021-01-01T16:15:19.593635
| 2017-10-26T07:18:46
| 2017-10-26T07:18:46
| 97,797,104
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
"""
fractions模块
"""
from fractions import Fraction
a = Fraction(1, 2)
b = Fraction(3, 4)
print(a + b) # 5/4
c = a + b
print(c.numerator) # 5 分子
print(c.denominator) # 4 分母
print(float(c)) # 1.25 转化为小数
print(c.limit_denominator(8))
x = 0.625
print(Fraction(*x.as_integer_ratio())) # 5/8 小数化分数
|
[
"shishengjia1@live.com"
] |
shishengjia1@live.com
|
2185d6664cc16b654ee558a6645234eab7ea8521
|
a3fdebd406a37da34561969f37ea7a5feb14f236
|
/src/testMorphology.py
|
df8d5f39a001e32fe92548e49e0c52c0abdc1df0
|
[] |
no_license
|
nikifaets/pointsProcessing
|
e076976914f2dbd51fbb64fa57194b47a3c56f87
|
74f2422c9a2117fc40bad6650cf97110f3b29c40
|
refs/heads/master
| 2021-09-12T10:39:13.233534
| 2018-04-15T19:32:02
| 2018-04-15T19:32:02
| 107,880,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,599
|
py
|
import cv2
import numpy as np
import extractor as ext
import findLines as fl
import laserFindPoints as lfp
import time

# Capture from the default webcam; properties 3/4 are frame width/height.
cap = cv2.VideoCapture(0)
cap.set(3,240)
cap.set(4,320)

projecting = False

# Live loop: threshold each frame, clean it with morphology, find connected
# components and mark their centroids. Press 'q' (keycode 113) to quit.
while(True):
    millis = int(round(time.time() * 1000))  # frame start time in ms
    ret, img = cap.read()
    thresh,grayscale = lfp.threshImage(img)
    kernel = np.ones((4,4), np.uint8)
    kernel_open = np.ones((2,2), np.uint8)
    erosion = cv2.erode(thresh, kernel, 1)
    # Opening removes small speckles; dilation then re-grows the blobs.
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel_open)
    dilation = cv2.dilate(opening, kernel, 1)
    connectivity = 4
    output = cv2.connectedComponentsWithStats(dilation, connectivity, cv2.CV_16U)
    num_labels = output[0]   # number of components (background included)
    centroids = output[3]    # per-component centroid; i[0] used as column, i[1] as row below
    #print(num_labels)
    # Single-channel image with one white pixel per component centroid.
    points = np.zeros((thresh.shape[0], thresh.shape[1], 1), np.uint8)
    for i in centroids:
        #print(i[1], i[0])
        points.itemset((np.int(i[1]), np.int(i[0]), 0), 255)

    #pointsList, draft = fl.getPoints(dilation, thresh.shape[1], thresh.shape[0])
    #centroids = sorted(centroids, key = lambda point: point[1], reverse = False)
    #pointsList = sorted(pointsList, key = lambda point: point.y, reverse = False)
    #print(num_labels, len(pointsList))
    # Display every intermediate stage for visual debugging.
    cv2.imshow("img", img)
    cv2.imshow("grayscale", grayscale)
    cv2.imshow("thresh", thresh)
    cv2.imshow("erosion", erosion)
    cv2.imshow("opening", opening)
    cv2.imshow("points", points)
    cv2.imshow("dilation", dilation)
    #cv2.imshow("draft", draft)
    millisnew = int(round(time.time() * 1000))
    #print(millisnew-millis)
    k = cv2.waitKey(1)
    if(k == 113):  # 'q'
        #cv2.imwrite("bfsslow.jpg", thresh)
        #print(centroids, pointsList)
        break
[
"nikifaets11@gmail.com"
] |
nikifaets11@gmail.com
|
0b036f6d5d139798df7f73985ec061a219bf7a22
|
fcfbe523bb3d7cb6d36822a7f01d0a59410972a0
|
/7kyu/Help Bob count letters and digits.py
|
41ea21297356a09afde2a2c545289164dced0a3f
|
[] |
no_license
|
HighHopes/codewars
|
6239803d5255a9ab81c50222b1b1bc9293bc2fd9
|
65bcda121ec09d6d141cdf682ed17d223a0eee4e
|
refs/heads/master
| 2020-04-28T19:21:47.217399
| 2020-03-11T17:25:33
| 2020-03-11T17:25:33
| 175,502,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
"""Bob is a lazy man.
He needs you to create a method that can determine how many letters and digits are in a given string.
example:
"hel2!lo" --> 6
"wicked .. !" --> 6"""
import re
def count_letters_and_digits(s):
if s == None: return 0
return len(re.findall("[a-zA-Z0-9]", str(s)))
print(count_letters_and_digits("asdf!@A#12sd")) # 9
|
[
"oprisaalin@gmail.com"
] |
oprisaalin@gmail.com
|
f0532a41a963e81aeff001bdd8f8af1e6ba68351
|
d7be95f0099bc32bf6fd45589fda8dd7b1b0337f
|
/mysite/urls.py
|
34d5b8ae8dd9e842f55e6be355e7d438682581ce
|
[] |
no_license
|
Rafael-Wassoaski/projetoProntodePPi
|
5c3df7b53ea51ed63f3b5d558a3e414ab8f23e0c
|
e73618e2d3ec761a7964523d6a552e1a7cbd5947
|
refs/heads/master
| 2022-11-27T17:41:13.300843
| 2019-06-25T20:05:46
| 2019-06-25T20:05:46
| 193,741,206
| 0
| 0
| null | 2022-11-22T02:23:52
| 2019-06-25T16:05:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from mysite import settings
from django.contrib.auth import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.urls')),
path('quiz/', include('quiz.urls')),
path('accounts/', include('accounts.urls')),
path('accounts/login/', views.LoginView.as_view(), name='login'),
path('accounts/logout/', views.LogoutView.as_view(), name='logout'),
path('accounts/', include('django.contrib.auth.urls')),
]
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"meireles200@hotmail.com"
] |
meireles200@hotmail.com
|
07b373d67b8a0825866ba2663627a162bf88e47e
|
abb94cfc69c7d83842fd4213d0ce16b33c8e9a3d
|
/venv/Scripts/easy_install-3.7-script.py
|
cc213bc4edb9eaa8697344a69f26ff68584ab052
|
[] |
no_license
|
TigranMelkonian/PersonalWebPage
|
5bc93ee7e17aa496ac1c84fd5f12a92fd05f9148
|
d975fba7dc1f665836bd3b19572862719e60099d
|
refs/heads/master
| 2020-09-17T07:52:17.410856
| 2019-11-25T21:26:37
| 2019-11-25T21:26:37
| 224,039,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
#!C:\Users\pete\PycharmProjects\MyFirstPythonWebApp\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
# Auto-generated setuptools entry-point wrapper for `easy_install-3.7`;
# not meant to be edited by hand.
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the "-script.py" / ".exe" suffix so argv[0] is the clean tool name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
    )
|
[
"tigran@spotted.us"
] |
tigran@spotted.us
|
222904f660559d82269ebc7563fc3be1f7d00b69
|
9e0f5fac5711e69477770deb30dbaa53e8a5b961
|
/example.py
|
ffc8c0c13b85f4c356e40945cbce0754de887eb8
|
[
"MIT"
] |
permissive
|
qerty123/Vector
|
b462813f106e4e2c0a888e2c980dded442c3e2cb
|
9f065d34b0aa8f4a0ffb6c85c32ed941009c3960
|
refs/heads/master
| 2020-12-11T12:45:59.747431
| 2020-02-08T09:51:19
| 2020-02-08T09:51:19
| 233,853,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
# Copyright (c) 2020 Kapitonov Stanislav <delphi.troll@mail.ru>
from vector import Vector2d, Vector3d

v1 = Vector3d(3, -1, 2)
v2 = Vector3d(-1, 2, 1)
v3 = Vector3d(0, -2, 1)
v4 = Vector3d(-1, 1, 3)

dif = v3 - v4

# Find projection of vector v3 - v4 for vector v2
pr = dif.project(v2)
# BUG FIX: 'str' + non-str raises TypeError; coerce results with str() so the
# prints work whatever type the vector operations return.
print('Projection of vector v3 - v4 for vector v2: ' + str(pr))

# Find space of triangle with sides v1 and v2
s = v1 * v2 / 2
print('Space of triangle with sides v1 and v2: ' + str(s))

# Mixed production of v1, v2 and dif
mp = v1 * v2 * dif
print('Mixed production of v1, v2 and dif: ' + str(mp))
|
[
"delphi.troll@mail.ru"
] |
delphi.troll@mail.ru
|
0ce4d0fdfbcbdcb08fd020f84fdb01abca1796f9
|
42685605f569e9d0afadc358ace6ce212e86bf1c
|
/1_Zadania/Dzien_3/5_Virtualenv_biblioteki/ddd.py
|
63411dd73ff18565b3727122a9f9e1681374153c
|
[] |
no_license
|
Danutelka/Coderslab-Podstawy-Python
|
e6bdbbd9dc2031cf9ec5d9d3eeba717d22e9ecd7
|
eed6a957f081e488ae3c94298718f7b93e17a93c
|
refs/heads/master
| 2020-08-04T15:21:28.058433
| 2019-04-07T19:24:54
| 2019-04-07T19:24:54
| 212,181,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 62
|
py
|
# Minimal `requests` demo: GET the Onet homepage and print the Response
# object (its repr, e.g. "<Response [200]>", not the page body).
import requests

r = requests.get("http://onet.pl")
print(r)
|
[
"kawecka.d@gmail.com"
] |
kawecka.d@gmail.com
|
ba5b5c54db62dfe9833b5715954dced8bc4760f2
|
ad53e0cb71d59c1a6708c84cbb5e712c2c695fa9
|
/weblog/app/models.py
|
a1da6f3223631c7b533278a246c784ec1a87915b
|
[] |
no_license
|
lyn233/DOBlog
|
304e1d030c2f05d94753584954321775ee4be764
|
2c827c0363d86dcb04c82f7bd9ae43da9a60fea9
|
refs/heads/master
| 2021-01-10T07:41:48.801034
| 2017-12-07T05:59:58
| 2017-12-07T05:59:58
| 45,041,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,356
|
py
|
# -*- coding:utf-8 -*-
from weblog.app import db
from flask_login import UserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from werkzeug.security import generate_password_hash, check_password_hash
from flask import current_app,url_for
import bleach
from markdown import markdown
from .exceptions import ValidationError
class Permission():
    # Bit-flag constants — presumably OR-ed into Role.permissions; confirm usage.
    WRITE_ARTICLES = 0x01
    ADMINISTER = 0x02
class Role(db.Model):
    """User role; `permissions` holds an integer permission bitmask."""
    __tablename__ = 'Role'
    id = db.Column(db.Integer, primary_key=True)
    role_name = db.Column(db.String(50), unique=True)
    # presumably marks the role assigned to new users by default — confirm.
    default = db.Column(db.Boolean, default=False, index=True)
    permissions = db.Column(db.Integer)
    # user = db.relationship('Role', backref='role', lazy='dynamic')
    # role_id = db.Column(db.Integer, db.ForeignKey('User.id'))
class User(UserMixin, db.Model):
    """Account model: credentials, e-mail confirmation state, authored posts."""
    __tablename__ = 'User'

    def __init__(self, name, email, password_hash):
        self.name = name
        self.email = email
        self.password_hash = password_hash

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    password_hash = db.Column(db.String(128))
    address = db.Column(db.String(128), index=True)
    confirmed = db.Column(db.Boolean, default=False)
    posts = db.relationship('Post', backref='author', lazy='dynamic')
    # role_id = db.Column(db.Integer, db.ForeignKey('Role.id'))

    # Salted password hashing and verification.
    @property
    def password(self):
        # The plaintext password is write-only.
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Check *password* against the stored salted hash."""
        return check_password_hash(self.password_hash, password)

    # Generate a signed, time-limited confirmation token.
    def generate_confirmation_token(self, expiration=3600):
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})

    def confirm(self, token):
        """Validate *token* and, on success, mark the account confirmed."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            # Expired or malformed token.
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        db.session.commit()
        return True

    # Commit this instance to the database and return it.
    def save(self):
        db.session.add(self)
        db.session.commit()
        return self

    def to_json(self):
        """Public JSON representation: only the username is exposed."""
        json_user = {
            'username': self.name
        }
        return json_user
def create_user(name, email, password_hash):
    """Create, persist and return a new User with the given credentials."""
    new_user = User(name, email, password_hash)
    # Assigning via the `password` property hashes the value before storage.
    new_user.password = password_hash
    return new_user.save()
class Tag(db.Model):
    """Post tag with a usage counter."""
    __tablename__ = 'Tag'
    id = db.Column(db.Integer, primary_key=True)
    tag_name = db.Column(db.String(50))
    tag_count = db.Column(db.Integer)
    post = db.relationship('Post', backref='tag', lazy='dynamic')

    @staticmethod
    def from_json(json_post):
        """Extract the tag name from a JSON payload.

        Note: returns the name string, not a Tag instance (see the
        commented-out alternative below).
        """
        tag_name = json_post.get('tag_name')
        return tag_name
        #return Tag(tag_name=tag_name)
class Template(db.Model):
    """Stored template body text."""
    __tablename__ = 'Template'
    id = db.Column(db.Integer, primary_key=True)
    tem_body = db.Column(db.Text)  # raw template text
class Post(db.Model):
    """Blog post; markdown body/summary are mirrored into sanitized HTML."""
    __tablename__ = 'Post'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100))
    body = db.Column(db.Text)
    summary = db.Column(db.Text)
    post_time = db.Column(db.DateTime)
    author_id = db.Column(db.Integer, db.ForeignKey('User.id'))
    tag_id = db.Column(db.Integer, db.ForeignKey('Tag.id'))
    body_html = db.Column(db.Text)     # rendered by on_changed_body
    summary_html = db.Column(db.Text)  # rendered by on_changed_summary

    def to_json(self):
        """Serialize the post for the JSON API (body/summary as raw markdown)."""
        json_post = {
            'url': url_for('api.get_post', id=self.id, _external=True),
            'title': self.title,
            'body': self.body,
            'summary': self.summary,
            'post_time': self.post_time
        }
        return json_post

    @staticmethod
    def from_json(json_post):
        """Build a Post from a JSON payload; raise ValidationError when the
        body is missing or empty."""
        body = json_post.get('body')
        title = json_post.get('title')
        summary = json_post.get('summary')
        if body is None or body == '':
            raise ValidationError('post does not have a body')
        return Post(body=body, title=title, summary=summary)

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """SQLAlchemy 'set' listener: render markdown to whitelisted, linkified HTML."""
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code', 'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul','h1', 'h2', 'h3', 'p']
        target.body_html = bleach.linkify(bleach.clean(markdown(value, output_format='html'), tags=allowed_tags, strip=True))

    @staticmethod
    def on_changed_summary(target,value,oldvalue,initiator):
        """SQLAlchemy 'set' listener: render the summary the same way as the body."""
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code', 'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul','h1', 'h2', 'h3', 'p']
        target.summary_html= bleach.linkify(bleach.clean(markdown(value, output_format='html'), tags=allowed_tags, strip=True))
# Re-render the sanitized HTML mirrors whenever body/summary are assigned.
db.event.listen(Post.body, 'set', Post.on_changed_body)
db.event.listen(Post.summary, 'set', Post.on_changed_summary)
|
[
"daiguanlin@126.com"
] |
daiguanlin@126.com
|
1060e48d0bb83bb3216ccdf445a1b5b2cea8077e
|
303b3e0447e66c22471d82395ec1302ba6622a92
|
/python/query-stub.py
|
1dc5ac8bc7597200fd71af77e2354c39e42a126c
|
[] |
no_license
|
shuque/getdns-examples
|
b83b2feaab9fa7cb58033184b40be9f7b2091281
|
735c4a356f13ece075b27112d54caf1af8359452
|
refs/heads/master
| 2021-01-10T03:16:30.982938
| 2016-02-01T18:09:28
| 2016-02-01T18:09:28
| 47,571,878
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
#!/usr/bin/env python
# Minimal getdns stub-resolver example: resolve a hostname's addresses.

import getdns, sys

hostname = sys.argv[1]  # hostname to look up (required CLI argument)

ctx = getdns.Context()
# Stub mode: forward queries to the system's upstream resolvers instead of
# performing full recursion.
ctx.resolution_type = getdns.RESOLUTION_STUB
extensions = {}
results = ctx.address(name=hostname, extensions=extensions)
if results.status == getdns.RESPSTATUS_GOOD:
    for addr in results.just_address_answers:
        print(addr["address_data"])
elif results.status == getdns.RESPSTATUS_NO_NAME:
    print("%s: No such domain name" % hostname)
else:
    print("getdns.address() returned an error: %d" % results.status)
|
[
"shuque@gmail.com"
] |
shuque@gmail.com
|
d8b44b3e7c654ab09fe42ab83ec0937b19133d4d
|
cabc3bcc0f6fedc7e8f2cd011892e43f1fb80a92
|
/CustomErrors.py
|
ecfb6c9725bb7bb3f8dbe91be0e6e30e94c76627
|
[] |
no_license
|
nikist97/CodingChallenge
|
a3c87d33bab1b8d2fd4f46cdfeda78bffedbcfd7
|
db26c2473d8478386a6625b88b17cd2648a94131
|
refs/heads/master
| 2021-08-11T13:09:30.049882
| 2017-11-13T19:07:01
| 2017-11-13T19:07:01
| 110,493,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
class InvalidPriceError(ValueError):
"""
a custom type of error raised when there is an invalid price for a ticket, e.g. negative number for price
"""
def __init__(self, msg):
"""
the constructor for the error calls the parent's (ValueError) constructor
:param msg: the msg of the error
"""
super(InvalidPriceError, self).__init__(msg)
class InvalidPositionError(ValueError):
"""
a custom type of error raised when there is an invalid position for an event, e.g. out of bounds coordinates
"""
def __init__(self, msg):
"""
the constructor for the error calls the parent's (ValueError) constructor
:param msg: the msg of the error
"""
super(InvalidPositionError, self).__init__(msg)
class DuplicateIdentifierError(KeyError):
"""
a custom type of error raised when there is a duplicate identifier for en event, e.g when registering a new event
"""
def __init__(self, msg):
"""
the constructor for the error calls the parent's (KeyError) constructor
:param msg: the msg of the error
"""
super(DuplicateIdentifierError, self).__init__(msg)
|
[
"nikist97@abv.bg"
] |
nikist97@abv.bg
|
6506ce4fe77bbd8e8505bbda7a0e1ca97a15f9a8
|
4fe825814efae31cc777295f1d3e059dfbdedbfe
|
/train.py
|
d1a22ea06b036799cae0457e12673be7701ef5e4
|
[] |
no_license
|
gokulpch/gender-estimation-model
|
045b292956a36e35c1a30f42d7d5571cad77c688
|
aadbe6ff8765cada9bc3b68d3acef181c1935ffc
|
refs/heads/master
| 2020-06-10T12:28:50.789389
| 2019-06-25T06:11:37
| 2019-06-25T06:11:37
| 193,645,374
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,268
|
py
|
# import necessary packages
import matplotlib
matplotlib.use("Agg")  # headless backend so plots can be saved without a display

from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.preprocessing.image import img_to_array
from keras.utils import to_categorical
from keras.utils import plot_model
from sklearn.model_selection import train_test_split
from model.smallervggnet import SmallerVGGNet
import matplotlib.pyplot as plt
import numpy as np
import argparse
import random
import cv2
import os
import glob

# handle command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
                help="path to input dataset (i.e., directory of images)")
ap.add_argument("-m", "--model", type=str, default="gender_detection.model",
                help="path to output model")
ap.add_argument("-p", "--plot", type=str, default="plot.png",
                help="path to output accuracy/loss plot")
args = ap.parse_args()

# initial parameters
epochs = 100
lr = 1e-3
batch_size = 64
img_dims = (96,96,3)  # width, height, channels fed to the network

data = []
labels = []

# load image files from the dataset
image_files = [f for f in glob.glob(args.dataset + "/**/*", recursive=True) if not os.path.isdir(f)]
random.seed(42)  # fixed seed so the shuffle (and therefore the split) is reproducible
random.shuffle(image_files)

# create groud-truth label from the image path
for img in image_files:

    image = cv2.imread(img)
    image = cv2.resize(image, (img_dims[0],img_dims[1]))
    image = img_to_array(image)
    data.append(image)

    # The parent directory name encodes the class: "woman" -> 1, else 0.
    label = img.split(os.path.sep)[-2]
    if label == "woman":
        label = 1
    else:
        label = 0

    labels.append([label])

# pre-processing
data = np.array(data, dtype="float") / 255.0  # scale pixel values into [0, 1]
labels = np.array(labels)

# split dataset for training and validation
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.2,
                                                  random_state=42)

trainY = to_categorical(trainY, num_classes=2)  # one-hot encode the two classes
testY = to_categorical(testY, num_classes=2)

# augmenting datset
aug = ImageDataGenerator(rotation_range=25, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
                         horizontal_flip=True, fill_mode="nearest")

# build model
model = SmallerVGGNet.build(width=img_dims[0], height=img_dims[1], depth=img_dims[2],
                            classes=2)

# compile the model
opt = Adam(lr=lr, decay=lr/epochs)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

# train the model
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=batch_size),
                        validation_data=(testX,testY),
                        steps_per_epoch=len(trainX) // batch_size,
                        epochs=epochs, verbose=1)

# save the model to disk
model.save(args.model)

# plot training/validation loss/accuracy
plt.style.use("ggplot")
plt.figure()
N = epochs
plt.plot(np.arange(0,N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0,N), H.history["val_loss"], label="val_loss")
# NOTE(review): "acc"/"val_acc" are older Keras history keys; recent Keras
# uses "accuracy"/"val_accuracy" — confirm against the pinned Keras version.
plt.plot(np.arange(0,N), H.history["acc"], label="train_acc")
plt.plot(np.arange(0,N), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="upper right")

# save plot to disk
plt.savefig(args.plot)
|
[
"gpurnach@cisco.com"
] |
gpurnach@cisco.com
|
d34d3f80e71ecef1c4524423b84ab098accdcfb4
|
e6b5790c886f651e142260571fe0d20eb5629f48
|
/datacamp/statistical-thinking-python/01_02_variance_and_std.py
|
c142b338bdf8399336cf420c9598677de42670c9
|
[] |
no_license
|
anderalex803/nuwara-online-courses
|
23dda7997a7659ca32dd0d82a571ec609c0dff0c
|
12ff31f5f88b0632319025eabb13aad375534590
|
refs/heads/master
| 2022-12-18T10:24:17.510602
| 2020-09-09T15:07:41
| 2020-09-09T15:07:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 723
|
py
|
" Compute variance step-by-step and using build-in function np.var"
# Array of differences to mean: differences
differences = versicolor_petal_length - np.mean(versicolor_petal_length)
# Square the differences: diff_sq
diff_sq = differences**2
# Compute the mean square difference: variance_explicit
variance_explicit = np.mean(diff_sq)
# Compute the variance using NumPy: variance_np
variance_np = np.var(versicolor_petal_length)
# Print the results
print(variance_explicit, variance_np)
"Compute standard deviation using square root of variance and using np.std"
# Print the square root of the variance = std
print(np.sqrt(variance_explicit))
# Print the standard deviation
print(np.std(versicolor_petal_length))
|
[
"noreply@github.com"
] |
noreply@github.com
|
4941b887741964e84f7b9663a1aee8aac7b087db
|
ed3c56e4d78142c4bc73a90fbc32d7ee48747fe0
|
/chainerV2による実践深層学習/test-mt.py
|
cb5d2d94e6e6900ee29fc92ee0ef45e6ae859db5
|
[] |
no_license
|
johne-numata/chainertest
|
6ca2332778f35f826a160125e08c922dd26293b2
|
c05cdcf0f13dcc75890258290404af35ce18ac0c
|
refs/heads/master
| 2021-04-15T05:51:01.745898
| 2018-08-17T04:29:11
| 2018-08-17T04:29:11
| 126,454,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,115
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import chainer
from chainer import cuda, Function, Variable, optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
# Build the Japanese word -> id vocabulary from jp.txt (one sentence per line).
jvocab = {}
jlines = open('jp.txt').read().split('\n')
for i in range(len(jlines)):
    lt = jlines[i].split()
    for w in lt:
        if w not in jvocab:
            jvocab[w] = len(jvocab)
# Sentence terminator gets the last id.
jvocab['<eos>'] = len(jvocab)
jv = len(jvocab)
# Build the English word -> id vocabulary plus the reverse id -> word map
# (id2wd), which is needed to print decoded sentences.
evocab = {}
id2wd = {}
elines = open('eng.txt').read().split('\n')
for i in range(len(elines)):
    lt = elines[i].split()
    for w in lt:
        if w not in evocab:
            val = len(evocab)
            id2wd[val] = w
            evocab[w] = val
val = len(evocab)
id2wd[val] = '<eos>'
evocab['<eos>'] = val
ev = len(evocab)
class MyMT(chainer.Chain):
    """Encoder-decoder (sequence-to-sequence) translation model.

    A single shared LSTM `H` first consumes the source sentence word by
    word (via the source embedding `embedx`), then decodes the target
    sentence (via `embedy`); `W` projects the hidden state onto the
    English vocabulary.  Relies on the module-level `jvocab`/`evocab`
    dictionaries for word ids.
    """
    def __init__(self, jv, ev, k):
        # jv / ev: source / target vocabulary sizes; k: embedding and hidden size.
        super(MyMT, self).__init__(
            embedx = L.EmbedID(jv, k),
            embedy = L.EmbedID(ev, k),
            H = L.LSTM(k, k),
            W = L.Linear(k, ev),
        )
    def __call__(self, jline, eline):
        # Returns the accumulated softmax cross-entropy loss for one
        # (source sentence, target sentence) pair.
        self.H.reset_state()
        # Encode: push every source word through the LSTM; only the final
        # state is kept implicitly inside H.
        for i in range(len(jline)):
            wid = jvocab[jline[i]]
            x_k = self.embedx(Variable(np.array([wid], dtype=np.int32)))
            h = self.H(x_k)
        # Source-side <eos> marks the switch from encoding to decoding;
        # the first decoder target is the first English word.
        x_k = self.embedx(Variable(np.array([jvocab['<eos>']], dtype=np.int32)))
        tx = Variable(np.array([evocab[eline[0]]], dtype=np.int32))
        h = self.H(x_k)
        accum_loss = F.softmax_cross_entropy(self.W(h), tx)
        # Decode with teacher forcing: feed gold word i, predict word i+1
        # (or <eos> at the end of the sentence).
        for i in range(1,len(eline)):
            wid = evocab[eline[i]]
            x_k = self.embedy(Variable(np.array([wid], dtype=np.int32)))
            next_wid = evocab['<eos>'] if (i == len(eline) - 1) else evocab[eline[i+1]]
            tx = Variable(np.array([next_wid], dtype=np.int32))
            h = self.H(x_k)
            loss = F.softmax_cross_entropy(self.W(h), tx)
            accum_loss += loss
        return accum_loss
def mt(model, jline):
    """Translate one Japanese sentence and print the English words (Python 2).

    Greedy decoding: after encoding the source words, the argmax word is
    repeatedly fed back into the decoder until <eos> is produced or a
    30-word safety limit is hit.  Output goes to stdout.
    """
    model.H.reset_state()
    # Encode the source sentence.
    for i in range(len(jline)):
        wid = jvocab[jline[i]]
        x_k = model.embedx(Variable(np.array([wid], dtype=np.int32)))
        h = model.H(x_k)
    # <eos> triggers the first decoder prediction.
    x_k = model.embedx(Variable(np.array([jvocab['<eos>']], dtype=np.int32)))
    h = model.H(x_k)
    wid = np.argmax(F.softmax(model.W(h)).data[0])
    print id2wd[wid],
    loop = 0
    while (wid != evocab['<eos>']) and (loop <= 30):
        x_k = model.embedy(Variable(np.array([wid], dtype=np.int32)))
        h = model.H(x_k)
        wid = np.argmax(F.softmax(model.W(h)).data[0])
        # Fall back to the raw id if it is somehow not in the vocabulary map.
        if wid in id2wd:
            print id2wd[wid],
        else:
            print wid,
        loop += 1
    print
# Translate the held-out sentences with each saved epoch's model checkpoint.
jlines = open('jp-test.txt').read().split('\n')
demb = 100
for epoch in range(100):
    model = MyMT(jv, ev, demb)
    filename = "mt-" + str(epoch) + ".model"
    serializers.load_npz(filename, model)
    for i in range(len(jlines)-1):
        jln = jlines[i].split()
        # Feed the source words in reverse order.
        jlnr = jln[::-1]
        print epoch,": ",
        mt(model, jlnr)
|
[
"hideo.numata@toshiba.co.jp"
] |
hideo.numata@toshiba.co.jp
|
82d22436bd0102102ac1d19ba37f0005304c2064
|
3ba314ca88e89dded85a3448e730e215c47f3ceb
|
/allSkyImagingModule/skyImaging/rfiPlotter.py
|
38081ce03fd7f192a1d9ce79da8cfc9a93ddbbf7
|
[] |
no_license
|
David-McKenna/allSkyImaging
|
9f3dc5984541a6d39617dd5fd583654d0901d685
|
e20a4bb48cca7814c32326177c121e149664764c
|
refs/heads/master
| 2021-07-06T11:28:58.345222
| 2019-02-06T13:21:06
| 2019-02-06T13:21:06
| 166,459,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,970
|
py
|
"""RFI Interative plot handlers.
Originally ported to Python by Joe McCauley, slightly modified for this module. The source contained the following header:
@author: Joe McCauley (joe.mccauley@tcd.ie)
Written for Python 2.7
Based on a translated matlab script originally from ASTRON for processing
xst data from an international LOFAR station.
"""
import numpy as np
from skyPlotter import informationArr
# Store some variables on a global level to help deal with the fact that these events are isolated.
global informationArr
def updateAnnot( xdata, ydata, pixels, annot, rawdata, **kwargs):
    """Move the hover annotation to the clicked sky position and fill its text.

    The text shows azimuth/elevation in degrees plus the raw image intensity
    at the corresponding pixel.
    NOTE(review): ydata is treated as rho*180 (hence ydata/180) and elevation
    is recovered via arccos — confirm against the polar axes set up by the
    caller.
    """
    # Pixel indices of the click; note the deliberate (y, x) ordering used
    # for the row/column lookup into rawdata below.
    y, x = pol2cart( ydata/180, xdata, pixels )
    annot.xy = ( xdata, ydata )
    # Inconsistent wrapping; plot the right variable.
    if xdata < 0:
        xdata += 2 * np.pi
    text = 'Az=' + str( round( xdata * 180 / np.pi, 1 ) )+ ', El=' + str( round( np.arccos( ydata/180 ) * 180/np.pi, 1) ) + u'\xb0' + '\nInt.=' + '{:.3E}'.format((rawdata[int(y),int(x)]))
    annot.set_text( text )
    annot.get_bbox_patch().set_alpha( 0.66 )
    annot.set_color('black')
def onclick(event, annot, pltObj, pixels, rawdata, **kwargs):
    """Toggle the sky-position annotation on a mouse click.

    A click inside the plot axes shows the annotation (refreshed for the
    clicked coordinates) if it was hidden, or hides it if it was visible;
    clicks elsewhere are ignored.
    """
    if event.inaxes != pltObj:
        return
    if annot.get_visible():
        annot.set_visible( False )
    else:
        updateAnnot(event.xdata, event.ydata, pixels, annot, rawdata)
        annot.set_visible( True )
    event.canvas.draw()
def hover(event, pltObj, pixels, rawdata, axColorBar, cbCursor, **kwargs):
    """Handle cursor movement: mark the intensity under the cursor on the
    colour bar with a thick white line.
    """
    if event.xdata:
        if event.inaxes == pltObj:
            y,x = pol2cart( event.ydata / 180, event.xdata, pixels )
            z=rawdata[ int( y ), int( x ) ]
            # Normalise z into [0, 1] so it can be placed on the colour bar axis.
            zline = ( z - np.nanmin( rawdata ) ) / np.nanmax( rawdata-np.nanmin( rawdata ) ) # calculate where to put the z line
            axColorBar = cleanCb(axColorBar)
            cbCursor = axColorBar.plot( [ 0, 1 ], [ zline, zline ], 'w-', linewidth = 4 ) #plot the new one
            event.canvas.draw()
            # Event callbacks are isolated, so the fresh cursor handle is
            # published through the module-level informationArr dict.
            global informationArr
            informationArr['cbCursor'] = cbCursor
            #fig.canvas.draw_idle()
def onaxesleave(event, pltObj, axColorBar, cbCursor, **kwargs):
    """Handle the cursor leaving the plot: reset the colour-bar marker and
    install a fresh, hidden annotation, publishing both via informationArr.
    """
    cleanCb(axColorBar)
    # Park a black marker line at the bottom of the colour bar.
    cbCursor = axColorBar.plot([0, 1],[0, 0], 'k-')
    event.canvas.draw()
    # Recreate a hidden annotation, ready for the next click.
    annot = pltObj.annotate( "", xy = ( 0, 0 ), xytext = ( 15, 15 ), textcoords = "offset points", bbox = dict( boxstyle = "round", fc = "b" ), arrowprops = dict( arrowstyle = "->" ) )
    annot.set_visible( False )
    global informationArr
    informationArr['annot'] = annot
    informationArr['cbCursor'] = cbCursor
def cleanCb(axColorBar):
    """Remove every previously drawn cursor line from the colour bar axes.

    Iterates over a snapshot of the line list because Line2D.remove()
    mutates axColorBar.axes.lines in place; iterating the live list (as the
    original did) silently skipped every other line.

    Args:
        axColorBar: colour-bar axes-like object whose lines are cleared.

    Returns:
        The same axColorBar object, with all of its lines removed.
    """
    for line in list(axColorBar.axes.lines):
        line.remove()
    return axColorBar
def pol2cart( rho, phi, pixels ):
    """Convert polar (rho, phi) coordinates into pixel-grid coordinates.

    The cartesian point is scaled and shifted so the origin sits at the
    centre of a pixels-by-pixels image, with rho = 1 mapping to the edge.
    """
    half = ( pixels/2 )
    cart_x = rho * np.cos( phi )
    cart_y = rho * np.sin( phi )
    return ( half - half * cart_x, half - half * cart_y )
|
[
"mckennd2@tcd.ie"
] |
mckennd2@tcd.ie
|
a211852f23f82de502629246d40e8e38a13b64de
|
96fe253e9a740b51dcd7f83d6ab01bb248c2bf4b
|
/patrones_arquitectura/DDD/value_object/prueba_cuenta_bancaria.py
|
013e69c83e8ab92979a1390c08af8ed518910598
|
[] |
no_license
|
vvalotto/Patrones_Disenio_Python
|
7574470752a5f14214434a927c2c5e0faaa592ba
|
7ab6a74e9b008c3434af0a56d4c2b6b7de3617bf
|
refs/heads/master
| 2021-04-28T19:16:21.535998
| 2018-10-21T14:05:36
| 2018-10-21T14:05:36
| 121,891,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from DDD.value_object.cuenta_bancaria import *
# Demo: build a Dinero (money) value object of 100 pesos and show its parts.
mi_dinero = Dinero(100, Moneda.Pesos)
print(mi_dinero.moneda)
print(mi_dinero.monto)
# Wrap the money in a bank-account object and read the balance back out.
mi_cuenta = CuentaBancaria(1, mi_dinero)
print(mi_cuenta.balance.monto)
print(mi_cuenta.balance.moneda)
|
[
"vvalotto@gmail.com"
] |
vvalotto@gmail.com
|
c5e700d40f7b620b11f50fed1069337c0eda7ee2
|
07eb32060101b438934954d6f705305d54296380
|
/sorts.py
|
856dc7ab65436defd4947e49211a0610e84b7e39
|
[] |
no_license
|
pragmaticarun/algo_n_datastructures
|
e6742c48bbba22b2b1d2db52862c77d9c60566fc
|
bb359afb77df957dddfac9b476094e9ecb39e50e
|
refs/heads/master
| 2020-08-11T14:45:40.477662
| 2020-02-02T14:27:50
| 2020-02-02T14:27:50
| 214,582,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,539
|
py
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
# Insertion sort: bubble each element left until it meets a smaller one.
a = [4,9,1,-1,3,22,11,18]
for i in range(len(a)):
    for j in range(i,0,-1):
        if a[j] < a[j-1]:
            a[j-1],a[j] = a[j],a[j-1]
        else:
            break
#print(a)
# Shellsort with the Knuth gap sequence 1, 4, 13, 40, ...
b = [90,75,42,1,3,22,27,65,41,0,-1]
h = 0
while(h < len(b)/3): h = 3*h + 1
while h>=1:
    # h-sort: insertion sort over elements h apart.
    for i in range(h,len(b)):
        j = i
        while j >= h and b[j] < b[j-h]:
            b[j-h],b[j] = b[j],b[j-h]
            j -= h
    h = h//3
#print(b)
# Fresh unsorted input reused by the merge/partition demos below.
a = [4,9,1,-1,3,22,11,18]
def merge(a, aux, lo, mid, hi):
    """Merge the sorted runs a[lo..mid] and a[mid+1..hi] back into a.

    aux is a scratch list at least as long as a; the relevant slice is
    copied there first and the two runs are merged in place.  Ties favour
    the left run, keeping the merge stable.
    """
    aux[lo:hi + 1] = a[lo:hi + 1]
    left, right = lo, mid + 1
    for k in range(lo, hi + 1):
        if left > mid:                    # left run exhausted
            a[k] = aux[right]
            right += 1
        elif right > hi:                  # right run exhausted
            a[k] = aux[left]
            left += 1
        elif aux[right] < aux[left]:      # right head strictly smaller
            a[k] = aux[right]
            right += 1
        else:                             # take from the left on ties
            a[k] = aux[left]
            left += 1
def merge_helper(a):
    """Sort list a in place using bottom-up (iterative) merge sort.

    Repeatedly merges adjacent runs of size sz = 1, 2, 4, ... with the
    sibling merge() routine, reusing a single scratch buffer.

    Changes from the original: the debug print() statements that polluted
    stdout and the dead `j = 0` assignment were removed; the repeated
    upper-bound computation is hoisted into a local.
    """
    aux = a.copy()
    sz = 1
    while sz < len(a):
        for lo in range(0, len(a) - sz, sz + sz):
            hi = min(lo + sz + sz - 1, len(a) - 1)
            merge(a, aux, lo, lo + sz - 1, hi)
        sz += sz
# Sort b with the bottom-up merge sort defined above.
merge_helper(b)
print(f"Merge Result {b}")
# Fresh unsorted input for the partition/select demos below.
a = [4,9,1,-1,3,22,11,18]
def partition(a, lo, hi):
    """Partition a[lo..hi] around the pivot a[lo]; return the pivot's index.

    After the call every element left of the returned index is <= pivot and
    every element right of it is >= pivot.

    Bug fix: the original scheme never advanced either scan pointer when
    a[i] == v == a[j], so inputs with repeated pivot values (e.g. [2, 2, 2])
    spun forever.  The pointers are now moved past each swapped pair, the
    standard Hoare-style fix.
    """
    v = a[lo]
    i = lo + 1
    j = hi
    while True:
        # Scan right past elements smaller than the pivot.
        while i <= hi and a[i] < v:
            i += 1
        # Scan left past elements larger than the pivot.
        while j > lo and a[j] > v:
            j -= 1
        if i >= j:
            break
        a[i], a[j] = a[j], a[i]
        i += 1
        j -= 1
    # Put the pivot into its final slot.
    a[lo], a[j] = a[j], a[lo]
    return j
def select(a, k):
    """Return the k-th smallest element of a (1-based) via quickselect.

    The list is partially reordered in place by the sibling partition().

    Bug fix: the original computed the answer but only printed it and fell
    off the end (implicitly returning None); the value is now returned as
    well, while the print is kept for backward-compatible output.  Returns
    None when k is out of range.
    """
    k = k - 1
    lo = 0
    hi = len(a) - 1
    while lo <= hi:
        j = partition(a, lo, hi)
        if j > k:
            hi = j - 1
        elif j < k:
            lo = j + 1
        else:
            print(a[j])
            return a[j]
    return None
b = [90,75,42,1,3,22,27,65,41,0,-1]
# Commented-out demos: k-th smallest via quickselect and a single partition.
#select(b,1)
#select(b,len(b))
#partition(a,0,len(a)-1)
def three_way_quick_sort(a, lo, hi):
    """Sort a[lo..hi] in place with three-way (Dutch national flag) quicksort.

    Elements equal to the pivot are gathered into the middle band
    [less..greater] in a single pass, then only the strictly-smaller and
    strictly-larger bands are sorted recursively — efficient on inputs with
    many duplicate keys.
    """
    if hi <= lo:
        return
    pivot = a[lo]
    less, cur, greater = lo, lo, hi
    while cur <= greater:
        if a[cur] < pivot:
            a[less], a[cur] = a[cur], a[less]
            less += 1
            cur += 1
        elif a[cur] > pivot:
            a[cur], a[greater] = a[greater], a[cur]
            greater -= 1
        else:
            cur += 1
    three_way_quick_sort(a, lo, less - 1)
    three_way_quick_sort(a, greater + 1, hi)
# Sort b in place with the three-way quicksort above.
# NOTE: the printed label says "Merge result" but this is quicksort output.
three_way_quick_sort(b,0,len(b)-1)
print(f"Merge result Quick sort three way {b}")
|
[
"pragmaticarun@gmail.com"
] |
pragmaticarun@gmail.com
|
db55e22cfc6843e012dc04ca9c723fc8cb0b1c89
|
418e7f374c0b344cc8eb988eefda6e2a6e910251
|
/iss-rnns/ptb/ptb_word_lm.py
|
0dad35ef3346aaa52b21fd3f8d8656e4afabd589
|
[
"Apache-2.0"
] |
permissive
|
feiyuhug/SHS_LSTM
|
9e66d009ddb298d40df9f6704e9b410f1cc128f7
|
230888785c8c06733cac7649a162b505ea6bdc2c
|
refs/heads/master
| 2020-05-29T23:27:27.820331
| 2019-05-30T15:31:40
| 2019-05-30T15:31:40
| 189,436,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,327
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example / benchmark for building a PTB LSTM model.
Trains the model described in:
(Zaremba, et. al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329
There are 3 supported model configurations:
===========================================
| config | epochs | train | valid | test
===========================================
| small | 13 | 37.99 | 121.39 | 115.91
| medium | 39 | 48.45 | 86.16 | 82.07
| large | 55 | 37.87 | 82.62 | 78.29
The exact results may vary depending on the random initialization.
The hyperparameters used in the model:
- init_scale - the initial scale of the weights
- learning_rate - the initial value of the learning rate
- max_grad_norm - the maximum permissible norm of the gradient
- num_layers - the number of LSTM layers
- num_steps - the number of unrolled steps of LSTM
- hidden_size - the number of LSTM units
- max_epoch - the number of epochs trained with the initial learning rate
- max_max_epoch - the total number of epochs for training
- keep_prob - the probability of keeping weights in the dropout layer
- lr_decay - the decay of the learning rate for each epoch after "max_epoch"
- batch_size - the batch size
The data required for this example is in the data/ dir of the
PTB dataset from Tomas Mikolov's webpage:
$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
$ tar xvf simple-examples.tgz
To run:
$ python ptb_word_lm.py --data_path=simple-examples/data/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import time
import pylab
import json
import logging
import numpy as np
import tensorflow as tf
from tensorflow.core.framework import summary_pb2
from time import gmtime, strftime
import reader
import importlib
import os.path
import matplotlib.pyplot as plt
flags = tf.flags
# Weights with magnitude below this are treated as exactly zero when
# freezing gradients and measuring sparsity.
zero_threshold = 0.0001
# Command-line flags controlling model size, data/checkpoint paths and
# the sparsity-training options.
flags.DEFINE_string(
    "model", "small",
    "A type of model. Possible options are: small, medium, large, sparselarge, validtestlarge.")
flags.DEFINE_string("data_path", None,
                    "Where the training/test data is stored.")
flags.DEFINE_string("restore_path", None,
                    "Model input directory.")
flags.DEFINE_string("config_file", None,
                    "Parameter config file.")
flags.DEFINE_bool("use_fp16", False,
                  "Train using 16-bit floats instead of 32bit floats")
flags.DEFINE_bool("display_weights", False,
                  "Display weight matrix.")
flags.DEFINE_string("regularizer", 'l1_regularizer',
                    "Regularizer type.")
flags.DEFINE_string("optimizer", 'gd',
                    "Optimizer of sgd: gd and adam.")
flags.DEFINE_string("freeze_mode", None,
                    "How to freeze zero weights.")
FLAGS = flags.FLAGS
def add_dimen_grouplasso(var, axis=0):
  """Group-lasso penalty over slices of `var` along `axis`.

  Builds sum over groups of sqrt(sum-of-squares + 1e-8), i.e. the l2,1
  norm of the weight tensor with groups taken across the given axis
  (axis=0 penalises columns, axis=1 rows).
  """
  with tf.name_scope("DimenGroupLasso"):
    squared = tf.square(var)
    # 1e-8 keeps sqrt differentiable at exactly-zero groups.
    group_norms = tf.sqrt(tf.reduce_sum(squared, axis=axis) + tf.constant(1.0e-8))
    return tf.reduce_sum(group_norms)
def add_structure_grouplasso(var, coupled_var, couple_split_num=2):
  """Structured group-lasso term coupling `var` with a downstream matrix.

  For each hidden unit it groups together the unit's row in `var`, the
  four equal column blocks of `var` (presumably the packed LSTM gate
  blocks — TODO confirm), and the first row-split of `coupled_var`, so a
  whole hidden dimension can be zeroed out jointly.
  NOTE(review): `t2_row_sums = list(zip(tf.split(...)))` produces a list
  of 1-tuples, so `t2_row_sums[0]` is a tuple rather than a tensor —
  verify the broadcasted addition below does what was intended.
  """
  with tf.name_scope("StructureGroupLasso"):
    # Both arguments must be rank-2 weight matrices.
    with tf.control_dependencies([tf.assert_equal(tf.size(tf.shape(var)), tf.constant(2)),
                                  tf.assert_equal(tf.size(tf.shape(coupled_var)), tf.constant(2))]):
      t1 = tf.square(var)
      t1_col_sum = tf.reduce_sum(t1, axis=0)
      t1_col_sum1, t1_col_sum2, t1_col_sum3, t1_col_sum4 = tf.split(t1_col_sum, 4)
      t1_row_sum = tf.reduce_sum(t1, axis=1)
      # Only the second half of the rows participates in the group.
      _, t1_row_sum2 = tf.split(t1_row_sum, 2)
      t2 = tf.square(coupled_var)
      t2_row_sum = tf.reduce_sum(t2, axis=1)
      t2_row_sums = list(zip(tf.split(t2_row_sum, couple_split_num)))
      # 1e-8 keeps sqrt differentiable for all-zero groups.
      reg_sum = t1_row_sum2 + \
                t1_col_sum1 + t1_col_sum2 + t1_col_sum3 + t1_col_sum4 + \
                t2_row_sums[0]+ \
                tf.constant(1.0e-8)
      reg_sqrt = tf.sqrt(reg_sum)
      reg = tf.reduce_sum(reg_sqrt)
      return reg
def add_blockwise_grouplasso(t, block_row_size, block_col_size):
  """Blockwise group-lasso penalty — deliberately disabled.

  Raises NotImplementedError unconditionally; everything below the raise
  is unreachable and kept only as a sketch of the intended algorithm
  (extract non-overlapping blocks, sum their l2 norms, and report the
  fraction of all-zero blocks).
  """
  raise NotImplementedError('Not debugged. And the implementation is very slow when block is small.')
  with tf.name_scope("BlockGroupLasso"):
    t = tf.expand_dims(tf.expand_dims(t,0),-1)
    blocks = tf.extract_image_patches(t,
             ksizes=[1, block_row_size, block_col_size, 1],
             strides=[1, block_row_size, block_col_size, 1],
             rates=[1, 1, 1, 1],
             padding='VALID')
    reg_sum = tf.constant(0.0)
    zero_blocks = 0.0
    total_blocks = 0.0
    blocks = tf.unstack(blocks) # list of 3-D tensors
    for b in blocks: # for each 3-D tensor
      for bb in tf.unstack(b): # for each 2-D tensor
        for block in tf.unstack(bb): # for each block
          blk_len = tf.sqrt(tf.reduce_sum(tf.square(block))) + tf.constant(1.0e-8)
          reg_sum = reg_sum + tf.cond(blk_len < zero_threshold,
                lambda: tf.constant(0.0),
                lambda: blk_len)
          # set them to zeros and calculate sparsity
          #block = tf.assign(block, tf.cond(blk_len < zero_threshold,
          #                                 lambda: tf.zeros_like(block),
          #                                 lambda: block))
          zero_blocks = zero_blocks + tf.cond( tf.equal(tf.reduce_sum(tf.square(block)), 0.0),
                lambda: tf.constant(1.0),
                lambda: tf.constant(0.0))
          total_blocks = total_blocks + 1.0
    return reg_sum, zero_blocks/total_blocks
def plot_tensor_(t,title, coupled_t, coupled_iss=None):
  """Console-only variant of plot_tensor: report structured sparsity.

  For a 2-D weight matrix with twice as many columns as rows, returns the
  indices of hidden units that are zero across all six coupled groups
  (the unit's recurrent row, its matching row in coupled_t, and its four
  column blocks); returns None otherwise.
  NOTE(review): coupled_row_zero_idx is only bound when coupled_t is not
  None, but it is used unconditionally in the match computation — a call
  with coupled_t=None and 2*rows == cols would raise NameError.
  """
  if len(t.shape)==2:
    print(title)
    # Boolean masks of all-zero columns / rows.
    col_zero_idx = np.sum(np.abs(t), axis=0) == 0
    row_zero_idx = np.sum(np.abs(t), axis=1) == 0
    if coupled_t is not None:
      coupled_row_zero_idx = np.sum(np.abs(coupled_t), axis=1) == 0
    '''
    col_sparsity = (' column sparsity: %d/%d' % (sum(col_zero_idx), t.shape[1]) )
    row_sparsity = (' row sparsity: %d/%d' % (sum(row_zero_idx), t.shape[0]) )
    print(col_sparsity)
    print(row_sparsity)
    if coupled_t is not None:
      print('%d/ %d'%(sum(coupled_row_zero_idx), coupled_row_zero_idx.shape[0]))
    '''
    # Binarise: -1 where a weight is non-zero, 0 where it is zero.
    t = - (t != 0).astype(int)
    weight_scope = abs(t).max()
    #plt.title(title)
    # col_zero_map = np.tile(col_zero_idx, (t.shape[0], 1))
    # row_zero_map = np.tile(row_zero_idx.reshape((t.shape[0], 1)), (1, t.shape[1]))
    # zero_map = col_zero_map + row_zero_map
    # zero_map_cp = zero_map.copy()
    # plt.subplot(3,1,2)
    # plt.imshow(zero_map_cp,cmap=plt.get_cmap('gray'),interpolation='none')
    # plt.title(col_sparsity + row_sparsity)
    zero_map = - 128*np.ones_like(t)
    if coupled_iss is not None:
      zero_map[coupled_iss, :] = 0
    match_idx = None
    if 2*t.shape[0] == t.shape[1]:
      subsize = int(t.shape[0]/2)
      # A unit "matches" when all 6 of its coupled groups are zero:
      # its row, the coupled row, and its four column blocks.
      match_map = np.zeros(subsize,dtype=np.int)
      match_map = match_map + row_zero_idx[subsize:2 * subsize]
      match_map = match_map + coupled_row_zero_idx[0:subsize]
      for blk in range(0,4):
        match_map = match_map + col_zero_idx[blk*subsize : blk*subsize+subsize]
      match_idx = np.where(match_map == 6)[0]
      print(sum(match_map==6))
      zero_map[subsize+match_idx,:] = 0
      for blk in range(0, 4):
        zero_map[:,blk*subsize+match_idx] = 0
      #plt.title(' %d/%d matches' % (len(match_idx), sum(row_zero_idx[subsize:subsize*2])))
    return match_idx
  else:
    print ('ignoring %s' % title)
    return None
def plot_tensor(t,title, coupled_t, coupled_iss=None):
  """Visualise the zero pattern of a 2-D weight matrix and its coupled groups.

  Draws two subsampled images: the raw non-zero mask of t, and a map of
  hidden units whose six coupled groups are all zero.  Returns the indices
  of those fully-zero units (or None for non-2-D tensors).
  NOTE(review): as in plot_tensor_, coupled_row_zero_idx is only bound
  when coupled_t is not None but used unconditionally below.
  """
  if len(t.shape)==2:
    print(title)
    # Boolean masks of all-zero columns / rows.
    col_zero_idx = np.sum(np.abs(t), axis=0) == 0
    row_zero_idx = np.sum(np.abs(t), axis=1) == 0
    if coupled_t is not None:
      coupled_row_zero_idx = np.sum(np.abs(coupled_t), axis=1) == 0
    col_sparsity = (' column sparsity: %d/%d' % (sum(col_zero_idx), t.shape[1]) )
    row_sparsity = (' row sparsity: %d/%d' % (sum(row_zero_idx), t.shape[0]) )
    plt.figure()
    # Binarise: -1 where a weight is non-zero, 0 where it is zero.
    t = - (t != 0).astype(int)
    weight_scope = abs(t).max()
    plt.subplot(2, 1, 1)
    # Subsample every 10th row/column to keep the image a manageable size.
    plt.imshow(t.reshape((t.shape[0], -1))[::10,::10],
               vmin=-weight_scope,
               vmax=weight_scope,
               cmap=plt.get_cmap('bwr'),
               interpolation='none')
    plt.tick_params(top='off', bottom='off', left='off', right='off', labelleft='off', labelbottom='off')
    #plt.title(title)
    # col_zero_map = np.tile(col_zero_idx, (t.shape[0], 1))
    # row_zero_map = np.tile(row_zero_idx.reshape((t.shape[0], 1)), (1, t.shape[1]))
    # zero_map = col_zero_map + row_zero_map
    # zero_map_cp = zero_map.copy()
    # plt.subplot(3,1,2)
    # plt.imshow(zero_map_cp,cmap=plt.get_cmap('gray'),interpolation='none')
    # plt.title(col_sparsity + row_sparsity)
    zero_map = - 128*np.ones_like(t)
    if coupled_iss is not None:
      zero_map[coupled_iss, :] = 0
    match_idx = None
    if 2*t.shape[0] == t.shape[1]:
      subsize = int(t.shape[0]/2)
      # A unit "matches" when all 6 of its coupled groups are zero.
      match_map = np.zeros(subsize,dtype=np.int)
      match_map = match_map + row_zero_idx[subsize:2 * subsize]
      match_map = match_map + coupled_row_zero_idx[0:subsize]
      for blk in range(0,4):
        match_map = match_map + col_zero_idx[blk*subsize : blk*subsize+subsize]
      match_idx = np.where(match_map == 6)[0]
      print(sum(match_map==6))
      zero_map[subsize+match_idx,:] = 0
      for blk in range(0, 4):
        zero_map[:,blk*subsize+match_idx] = 0
      plt.subplot(2, 1, 2)
      plt.imshow(zero_map[::10,::10],
                 vmin=-128,
                 vmax=128,
                 cmap=plt.get_cmap('bwr'), interpolation='none')
      plt.tick_params(top='off', bottom='off', left='off', right='off', labelleft='off', labelbottom='off')
      #plt.title(' %d/%d matches' % (len(match_idx), sum(row_zero_idx[subsize:subsize*2])))
    return match_idx
  else:
    print ('ignoring %s' % title)
    return None
def zerout_gradients_for_zero_weights(grads_and_vars, mode='element'):
  """ zerout gradients for weights with zero values, so as to freeze zero weights
  Args:
    grads_and_vars: Lists of (gradient, variable).
    mode: the mode to freeze weights.
      'element': freeze all zero weights
      'group': freeze rows/columns that are fully zeros
  Returns:
    A list of (masked_gradient, variable) pairs; None gradients pass
    through unchanged.
  Raises:
    NotImplementedError: for mode == 'group'.
    ValueError: for any other unknown mode.
  """
  gradients, variables = zip(*grads_and_vars)
  zerout_gradients = []
  for gradient, variable in zip(gradients, variables):
    if gradient is None:
      zerout_gradients.append(None)
      continue
    if mode=='element':
      # |w| below zero_threshold counts as zero -> its gradient is masked out.
      where_cond = tf.less(tf.abs(variable), zero_threshold)
    elif mode=='group':
      raise NotImplementedError('Group wise freezing is not implemented yet.')
    else:
      raise ValueError('Unsupported mode == %s' % mode)
    zerout_gradient = tf.where(where_cond,
                               tf.zeros_like(gradient),
                               gradient)
    zerout_gradients.append(zerout_gradient)
  return list(zip(zerout_gradients, variables))
def data_type():
  """Return the floating-point dtype selected by the --use_fp16 flag."""
  if FLAGS.use_fp16:
    return tf.float16
  return tf.float32
class PTBInput(object):
  """The input data: batched (input, target) id tensors produced by the reader."""

  def __init__(self, config, data, name=None):
    self.batch_size = batch_size = config.batch_size
    self.num_steps = num_steps = config.num_steps
    # Number of whole (batch_size, num_steps) windows obtainable from `data`.
    self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
    self.input_data, self.targets = reader.ptb_producer(
        data, batch_size, num_steps, name=name)
class PTBModel(object):
  """The PTB model: a multi-layer LSTM language model with optional L1 and
  group-lasso sparsity regularization and sparsity statistics."""

  def __init__(self, is_training, config, input_, config_params = None):
    self._input = input_
    self.config_params = config_params
    batch_size = input_.batch_size
    num_steps = input_.num_steps
    size = config.hidden_size
    vocab_size = config.vocab_size
    # Slightly better results can be obtained with forget gate biases
    # initialized to 1 but the hyperparameters of the model would need to be
    # different than reported in the paper.
    def lstm_cell():
      # With the latest TensorFlow source code (as of Mar 27, 2017),
      # the BasicLSTMCell will need a reuse parameter which is unfortunately not
      # defined in TensorFlow 1.0. To maintain backwards compatibility, we add
      # an argument check here:
      if 'reuse' in inspect.getargspec(
          tf.contrib.rnn.BasicLSTMCell.__init__).args:
        return tf.contrib.rnn.BasicLSTMCell(
            size, forget_bias=0.0, state_is_tuple=True,
            reuse=tf.get_variable_scope().reuse)
      else:
        return tf.contrib.rnn.BasicLSTMCell(
            size, forget_bias=0.0, state_is_tuple=True)
    attn_cell = lstm_cell
    # Wrap cells with dropout during training only.
    if is_training and config.keep_prob < 1:
      def attn_cell():
        return tf.contrib.rnn.DropoutWrapper(
            lstm_cell(), output_keep_prob=config.keep_prob)
    cell = tf.contrib.rnn.MultiRNNCell(
        [attn_cell() for _ in range(config.num_layers)], state_is_tuple=True)
    self._initial_state = cell.zero_state(batch_size, data_type())
    # Embedding lookups are kept on CPU.
    with tf.device("/cpu:0"):
      embedding = tf.get_variable(
          "embedding", [vocab_size, size], dtype=data_type())
      inputs = tf.nn.embedding_lookup(embedding, input_.input_data)
    if is_training and config.keep_prob < 1:
      inputs = tf.nn.dropout(inputs, config.keep_prob)
    # Simplified version of models/tutorials/rnn/rnn.py's rnn().
    # This builds an unrolled LSTM for tutorial purposes only.
    # In general, use the rnn() or state_saving_rnn() from rnn.py.
    #
    # The alternative version of the code below is:
    #
    # inputs = tf.unstack(inputs, num=num_steps, axis=1)
    # outputs, state = tf.contrib.rnn.static_rnn(
    #     cell, inputs, initial_state=self._initial_state)
    outputs = []
    state = self._initial_state
    with tf.variable_scope("RNN"):
      for time_step in range(num_steps):
        if time_step > 0: tf.get_variable_scope().reuse_variables()
        # NOTE(review): the cell here returns a 3-tuple, unlike stock
        # BasicLSTMCell — presumably a project-modified cell; confirm.
        (cell_output, state, _) = cell(inputs[:, time_step, :], state)
        outputs.append(cell_output)
    output = tf.reshape(tf.stack(axis=1, values=outputs), [-1, size])
    softmax_w = tf.get_variable(
        "softmax_w", [size, vocab_size], dtype=data_type())
    softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
    logits = tf.matmul(output, softmax_w) + softmax_b
    loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
        [logits],
        [tf.reshape(input_.targets, [-1])],
        [tf.ones([batch_size * num_steps], dtype=data_type())])
    # L1 regularization
    # The regularizer named by FLAGS.regularizer is looked up dynamically in
    # tf.contrib.layers and applied to all trainables except the first
    # (the embedding).
    modname = importlib.import_module('tensorflow.contrib.layers')
    the_regularizer = getattr(modname, FLAGS.regularizer)(scale=config_params['weight_decay'], scope=FLAGS.regularizer)
    reg_loss = tf.contrib.layers.apply_regularization(the_regularizer, tf.trainable_variables()[1:])
    self._regularization = reg_loss
    sparsity = {}
    # Group Lasso regularization
    if config_params:
      glasso_params = config_params.get('grouplasso', None)
    else:
      glasso_params = None
    if glasso_params:
      for train_var in tf.trainable_variables():
        var_name = train_var.op.name
        glasso_param = glasso_params.get(var_name,None)
        if glasso_param:
          # column group lasso
          coef = glasso_params['global_decay'] * glasso_param.get('col_decay_multi', 0.0)
          if coef:
            glasso_reg = add_dimen_grouplasso(train_var, axis=0)
            self._regularization = self._regularization + glasso_reg * coef
          # row group lasso
          coef = glasso_params['global_decay']*glasso_param.get('row_decay_multi', 0.0)
          if coef:
            glasso_reg = add_dimen_grouplasso(train_var, axis=1)
            self._regularization = self._regularization + glasso_reg * coef
          # structure lasso
          coef = glasso_params['global_decay'] * glasso_param.get('structure_decay_multi', 0.0)
          if coef:
            # find the coupled layer/var
            coupled_train_var = None
            for _var in tf.trainable_variables():
              if _var.op.name == glasso_param['coupled_layer']:
                coupled_train_var = _var
                break
            couple_split_num = glasso_param.get('couple_split_num', 2)
            glasso_reg = add_structure_grouplasso(train_var, coupled_train_var, couple_split_num=couple_split_num)
            self._regularization = self._regularization + glasso_reg * coef
    if config_params['weight_decay'] > 0 or glasso_params:
      # sparsity statistcis
      for train_var in tf.trainable_variables():
        # zerout by small threshold to stablize the sparsity
        sp_name = train_var.op.name
        threshold = max(zero_threshold, 2*config_params['weight_decay'])
        where_cond = tf.less(tf.abs(train_var), threshold)
        train_var = tf.assign(train_var, tf.where(where_cond,
                                  tf.zeros(tf.shape(train_var)),
                                  train_var))
        # statistics
        s = tf.nn.zero_fraction(train_var)
        sparsity[sp_name + '_elt_sparsity'] = s
        # Column/row sparsity is only tracked for group-lasso'd variables.
        if glasso_params and glasso_params.get(sp_name,None):
          s = tf.nn.zero_fraction(tf.reduce_sum(tf.square(train_var), axis=0))
          sparsity[sp_name + '_col_sparsity'] = s
          s = tf.nn.zero_fraction(tf.reduce_sum(tf.square(train_var), axis=1))
          sparsity[sp_name + '_row_sparsity'] = s
    self._sparsity = sparsity
    self._cost = cost = tf.reduce_sum(loss) / batch_size
    self._final_state = state
    # Evaluation graphs stop here; the optimizer is training-only.
    if not is_training:
      return
    self._lr = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    # Gradients flow through cross-entropy plus all regularization terms.
    grads, _ = tf.clip_by_global_norm(tf.gradients(cost + self._regularization, tvars),
                                      config.max_grad_norm)
    if 'gd' == FLAGS.optimizer:
      optimizer = tf.train.GradientDescentOptimizer(self._lr)
    elif 'adam' == FLAGS.optimizer:
      optimizer = tf.train.AdamOptimizer(self._lr)
    else:
      raise ValueError("Wrong optimizer!")
    grads_vars = zip(grads, tvars)
    # Optionally mask gradients of (near-)zero weights to freeze them.
    if FLAGS.freeze_mode:
      grads_vars = zerout_gradients_for_zero_weights(grads_vars, FLAGS.freeze_mode)
    self._train_op = optimizer.apply_gradients(
        grads_vars,
        global_step=tf.contrib.framework.get_or_create_global_step())
    # Learning rate is updated via a placeholder + assign (see assign_lr).
    self._new_lr = tf.placeholder(
        tf.float32, shape=[], name="new_learning_rate")
    self._lr_update = tf.assign(self._lr, self._new_lr)

  def assign_lr(self, session, lr_value):
    # Push a new learning-rate value into the graph variable.
    session.run(self._lr_update, feed_dict={self._new_lr: lr_value})

  # Read-only accessors for the graph tensors built above.
  @property
  def input(self):
    return self._input
  @property
  def initial_state(self):
    return self._initial_state
  @property
  def cost(self):
    return self._cost
  @property
  def regularization(self):
    return self._regularization
  @property
  def sparsity(self):
    return self._sparsity
  @property
  def final_state(self):
    return self._final_state
  @property
  def lr(self):
    return self._lr
  @property
  def train_op(self):
    return self._train_op
class SmallConfig(object):
  """Small config."""

  def __init__(self):
    # Hyperparameters of the small PTB model (see the module docstring table).
    for name, value in {
        "init_scale": 0.1,
        "learning_rate": 1.0,
        "max_grad_norm": 5,
        "num_layers": 2,
        "num_steps": 20,
        "hidden_size": 200,
        "max_epoch": 4,
        "max_max_epoch": 13,
        "keep_prob": 1.0,
        "lr_decay": 0.5,
        "batch_size": 20,
        "vocab_size": 10000,
    }.items():
      setattr(self, name, value)
class MediumConfig(object):
  """Medium config."""

  def __init__(self):
    # Hyperparameters of the medium PTB model (see the module docstring table).
    for name, value in {
        "init_scale": 0.05,
        "learning_rate": 1.0,
        "max_grad_norm": 5,
        "num_layers": 2,
        "num_steps": 35,
        "hidden_size": 650,
        "max_epoch": 6,
        "max_max_epoch": 39,
        "keep_prob": 0.5,
        "lr_decay": 0.8,
        "batch_size": 20,
        "vocab_size": 10000,
    }.items():
      setattr(self, name, value)
class LargeConfig(object):
  """Large config."""

  def __init__(self):
    # Hyperparameters of the large PTB model (see the module docstring table).
    for name, value in {
        "init_scale": 0.04,
        "learning_rate": 1.0,
        "max_grad_norm": 10,
        "num_layers": 2,
        "num_steps": 35,
        "hidden_size": 1500,
        "max_epoch": 14,
        "max_max_epoch": 55,
        "keep_prob": 0.35,
        "lr_decay": 1 / 1.15,
        "batch_size": 20,
        "vocab_size": 10000,
    }.items():
      setattr(self, name, value)
class SparseLargeConfig(object):
  """Sparse Large config."""

  def __init__(self):
    # Large model with higher keep_prob and aggressive lr decay for
    # sparsity (ISS) training.
    for name, value in {
        "init_scale": 0.04,
        "learning_rate": 1.0,
        "max_grad_norm": 10,
        "num_layers": 2,
        "num_steps": 35,
        "hidden_size": 1500,
        "max_epoch": 14,
        "max_max_epoch": 55,
        "keep_prob": 0.60,
        "lr_decay": 0.1,
        "batch_size": 20,
        "vocab_size": 10000,
    }.items():
      setattr(self, name, value)
class ValidTestLargeConfig(object):
  """Large config."""

  def __init__(self):
    # Large model geometry with zero training epochs / learning rate:
    # used for validation/test-only runs.
    for name, value in {
        "init_scale": 0.04,
        "learning_rate": 0.0,
        "max_grad_norm": 10,
        "num_layers": 2,
        "num_steps": 35,
        "hidden_size": 1500,
        "max_epoch": 0,
        "max_max_epoch": 0,
        "keep_prob": 1.0,
        "lr_decay": 1.0,
        "batch_size": 20,
        "vocab_size": 10000,
    }.items():
      setattr(self, name, value)
class TestConfig(object):
  """Tiny config, for testing."""

  def __init__(self):
    # Minimal geometry so a full run finishes almost instantly.
    for name, value in {
        "init_scale": 0.1,
        "learning_rate": 1.0,
        "max_grad_norm": 1,
        "num_layers": 1,
        "num_steps": 2,
        "hidden_size": 2,
        "max_epoch": 1,
        "max_max_epoch": 1,
        "keep_prob": 1.0,
        "lr_decay": 0.5,
        "batch_size": 20,
        "vocab_size": 10000,
    }.items():
      setattr(self, name, value)
def fetch_sparsity(session, model, eval_op=None, verbose=False):
  """Evaluate and return the model's sparsity statistics.

  Runs the model's sparsity ops in the given session and returns them in a
  dict under the 'sparsity' key.  eval_op and verbose are accepted for
  signature parity with run_epoch but are unused here.
  """
  evaluated = session.run({"sparsity": model.sparsity})
  return {'sparsity': evaluated["sparsity"]}
def run_epoch(session, model, eval_op=None, verbose=False):
  """Runs the model on the given data.

  Returns a dict with the epoch perplexity, mean cross-entropy, mean
  regularization, their sum, and the most recently fetched sparsity
  statistics.  When eval_op is given (e.g. the train op) it is run with
  every step; verbose prints progress roughly every 10% of the epoch.
  """
  start_time = time.time()
  outputs = {}
  regularizations = 0.0
  sparsity = {}
  costs = 0.0
  iters = 0
  state = session.run(model.initial_state)
  fetches = {
      "cost": model.cost,
      "regularization": model.regularization,
      "final_state": model.final_state,
  }
  if eval_op is not None:
    fetches["eval_op"] = eval_op
  for step in range(model.input.epoch_size):
    feed_dict = {}
    # Carry the LSTM state across minibatches within the epoch.
    for i, (c, h) in enumerate(model.initial_state):
      feed_dict[c] = state[i].c
      feed_dict[h] = state[i].h
    vals = session.run(fetches, feed_dict)
    cost = vals["cost"]
    state = vals["final_state"]
    costs += cost
    regularizations += vals["regularization"]
    sparsity = session.run(model.sparsity)
    iters += model.input.num_steps
    if verbose and step % (model.input.epoch_size // 10) == 10:
      print("%.3f perplexity: %.3f cost: %.4f regularization: %.4f total_cost: %.4f speed: %.0f wps" %
            (step * 1.0 / model.input.epoch_size,
             np.exp(costs / iters),
             costs / iters,
             regularizations / iters,
             costs / iters + regularizations / iters,
             iters * model.input.batch_size / (time.time() - start_time)))
  # Perplexity is exp of the mean per-word cross-entropy.
  outputs['perplexity'] = np.exp(costs / iters)
  outputs['cross_entropy'] = costs / iters
  outputs['regularization'] = regularizations / iters
  outputs['total_cost'] = costs / iters + regularizations / iters
  outputs['sparsity'] = sparsity
  return outputs
def get_config():
  """Return a fresh configuration instance matching FLAGS.model.

  Raises:
    ValueError: if FLAGS.model does not name a known configuration.
  """
  configs = {
      "small": SmallConfig,
      "medium": MediumConfig,
      "large": LargeConfig,
      "sparselarge": SparseLargeConfig,
      "validtestlarge": ValidTestLargeConfig,
      "test": TestConfig,
  }
  try:
    return configs[FLAGS.model]()
  except KeyError:
    # Bug fix: the original passed FLAGS.model as a second positional
    # argument to ValueError, so "%s" was never interpolated into the
    # message; format the string explicitly instead.
    raise ValueError("Invalid model: %s" % FLAGS.model)
def restore_trainables(sess, path):
  """Restore all trainable variables from the latest checkpoint under path.

  A falsy path is a no-op.  If path is set but no usable checkpoint is
  found, a message is printed and the whole process exits.
  """
  if path:
    assert tf.gfile.Exists(path)
    ckpt = tf.train.get_checkpoint_state(path)
    if ckpt and ckpt.model_checkpoint_path:
      # Only trainables are restored; optimizer slots etc. start fresh.
      variables_to_restore = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
      restorer = tf.train.Saver(variables_to_restore)
      restorer.restore(sess, ckpt.model_checkpoint_path)
      print('Pre-trained model restored from %s' % path)
    else:
      print('Restoring pre-trained model from %s failed!' % path)
      exit()
def write_scalar_summary(summary_writer, tag, value, step):
  """Write a single scalar `value` to the summary writer under `tag` at `step`."""
  value = summary_pb2.Summary.Value(tag=tag, simple_value=float(value))
  summary = summary_pb2.Summary(value=[value])
  summary_writer.add_summary(summary, step)
def main(_):
    """Train, validate and test the PTB language model.

    Builds Train/Valid/Test graphs sharing one variable scope, optionally
    restores pre-trained weights, runs the epoch loop, and logs scalars to
    TensorBoard under a timestamped save directory.
    """
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")
    if not FLAGS.config_file:
        raise ValueError("Must set --config_file to configuration file")
    else:
        with open(FLAGS.config_file, 'r') as fi:
            config_params = json.load(fi)
    # get logger
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('ptb_rnn')
    logger.setLevel(logging.INFO)
    # saving path: one fresh timestamped subfolder per run; refuse to reuse.
    subfolder_name = strftime("%Y-%m-%d___%H-%M-%S", gmtime())
    config_params['save_path'] = os.path.join(config_params['save_path'], subfolder_name)
    if not os.path.exists(config_params['save_path']):
        os.mkdir(config_params['save_path'])
    else:
        raise IOError('%s exist!' % config_params['save_path'])
    log_file = os.path.join(config_params['save_path'], 'output.log')
    logger.addHandler(logging.FileHandler(log_file))
    logger.info('configurations in file:\n %s \n', config_params)
    logger.info('tf.FLAGS:\n %s \n', vars(FLAGS))
    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data
    # JSON config overrides the class defaults for dropout and learning rate.
    config = get_config()
    config.keep_prob = config_params.get('dropout_keep_prob', config.keep_prob)
    config.learning_rate = config_params.get('learning_rate', config.learning_rate)
    eval_config = get_config()
    eval_config.keep_prob = config_params.get('dropout_keep_prob', eval_config.keep_prob)
    eval_config.learning_rate = config_params.get('learning_rate', eval_config.learning_rate)
    # Evaluation feeds one token at a time (batch 1, single step).
    eval_config.batch_size = 1
    eval_config.num_steps = 1
    logger.info('network configurations: \n %s \n', vars(config))
    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        # Three model instances share the same "Model" variables
        # (reuse=True for Valid/Test).
        with tf.name_scope("Train"):
            train_input = PTBInput(config=config, data=train_data, name="TrainInput")
            with tf.variable_scope("Model", reuse=None, initializer=initializer):
                m = PTBModel(is_training=True, config=config, input_=train_input, config_params=config_params)
        with tf.name_scope("Valid"):
            valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
            with tf.variable_scope("Model", reuse=True, initializer=initializer):
                mvalid = PTBModel(is_training=False, config=config, input_=valid_input, config_params=config_params)
        with tf.name_scope("Test"):
            test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
            with tf.variable_scope("Model", reuse=True, initializer=initializer):
                mtest = PTBModel(is_training=False, config=eval_config,
                                 input_=test_input, config_params=config_params)
        saver = tf.train.Saver(tf.global_variables())
        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()
        config_proto = tf.ConfigProto()
        config_proto.gpu_options.allow_growth = True
        config_proto.log_device_placement = False
        with tf.Session(config=config_proto) as session:
            coord = tf.train.Coordinator()
            session.run(init)
            threads = tf.train.start_queue_runners(sess=session, coord=coord)
            if FLAGS.restore_path:
                restore_trainables(session, FLAGS.restore_path)
                if FLAGS.display_weights:
                    # Visualize sparsity/structure of the restored weights.
                    outputs = fetch_sparsity(session, mtest)
                    print("Sparsity: %s" % outputs['sparsity'])
                    #for train_var in tf.trainable_variables():
                    #    plot_tensor(train_var.eval(), train_var.op.name)
                    # NOTE(review): indices 1/3/5 presumably select the LSTM
                    # layer weight matrices in trainable-variable order --
                    # confirm against the PTBModel variable layout.
                    var_list = tf.trainable_variables()
                    coupled_iss = None
                    coupled_iss = plot_tensor_(var_list[1].eval(), var_list[1].op.name, var_list[3].eval(), coupled_iss)
                    coupled_iss = plot_tensor_(var_list[3].eval(), var_list[3].op.name, var_list[5].eval(), coupled_iss)
                    coupled_iss = plot_tensor_(var_list[5].eval(), var_list[5].op.name, None, coupled_iss)
                    '''
                    plt.show()
                    '''
                # Sanity-check the restored weights before continuing.
                outputs = run_epoch(session, mvalid)
                print("Restored model with Valid Perplexity: %.3f" % (outputs['perplexity']))
            summary_writer = tf.summary.FileWriter(
                config_params['save_path'],
                graph=tf.get_default_graph())
            for i in range(config.max_max_epoch):
                # Learning-rate schedule: step decay for plain SGD
                # (piecewise-constant thirds for the sparse model), none for Adam.
                if 'gd' == FLAGS.optimizer:
                    if FLAGS.model == "sparselarge":
                        lr_decay = config.lr_decay ** ( i // (config.max_max_epoch//3) )
                    else:
                        lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
                elif 'adam' == FLAGS.optimizer:
                    lr_decay = 1.0
                else:
                    raise ValueError("Wrong optimizer!")
                m.assign_lr(session, config.learning_rate * lr_decay)
                write_scalar_summary(summary_writer, 'learning_rate', config.learning_rate * lr_decay, i+1)
                print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
                outputs = run_epoch(session, m, eval_op=m.train_op,
                                    verbose=True)
                print("Epoch: %d Train Perplexity: %.3f regularization: %.4f " % (i + 1, outputs['perplexity'], outputs['regularization']))
                write_scalar_summary(summary_writer, 'TrainPerplexity', outputs['perplexity'], i + 1)
                write_scalar_summary(summary_writer, 'cross_entropy', outputs['cross_entropy'], i + 1)
                write_scalar_summary(summary_writer, 'regularization', outputs['regularization'], i + 1)
                write_scalar_summary(summary_writer, 'total_cost', outputs['total_cost'], i + 1)
                for key, value in outputs['sparsity'].items():
                    write_scalar_summary(summary_writer, key, value, i + 1)
                # Checkpoint every epoch.
                checkpoint_path = os.path.join(config_params['save_path'], 'model.ckpt')
                saver.save(session, checkpoint_path, global_step=i + 1)
                outputs = run_epoch(session, mvalid)
                print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, outputs['perplexity']))
                write_scalar_summary(summary_writer, 'ValidPerplexity', outputs['perplexity'], i + 1)
            # Final held-out evaluation after all epochs.
            outputs = run_epoch(session, mtest)
            print("Test Perplexity: %.3f" % outputs['perplexity'])
            write_scalar_summary(summary_writer, 'TestPerplexity', outputs['perplexity'], 0)
            coord.request_stop()
            coord.join(threads)
            plt.show()
# Standard TF1 launcher: parses FLAGS, then invokes main().
if __name__ == "__main__":
    tf.app.run()
|
[
"yuniange@gmail.com"
] |
yuniange@gmail.com
|
987b92e6df6ed00e5ca4fb6ce748e467405a8347
|
f4434c85e3814b6347f8f8099c081ed4af5678a5
|
/sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2019_03_22/aio/operations/_certificates_operations.py
|
4fdc763e3e5033ecae73c8337024f88ac2e80684
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
yunhaoling/azure-sdk-for-python
|
5da12a174a37672ac6ed8e3c1f863cb77010a506
|
c4eb0ca1aadb76ad892114230473034830116362
|
refs/heads/master
| 2022-06-11T01:17:39.636461
| 2020-12-08T17:42:08
| 2020-12-08T17:42:08
| 177,675,796
| 1
| 0
|
MIT
| 2020-03-31T20:35:17
| 2019-03-25T22:43:40
|
Python
|
UTF-8
|
Python
| false
| false
| 23,167
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
# Generic placeholder for the deserialized response type handled by a `cls` hook.
T = TypeVar('T')
# Signature of the optional per-call `cls` hook:
# (pipeline_response, deserialized_result, response_headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operations class (see file header); edits here are
# lost on regeneration. Each operation follows the same pattern: build URL ->
# query params -> headers -> run pipeline -> map errors -> deserialize.
class CertificatesOperations:
    """CertificatesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.iothub.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def list_by_iot_hub(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs
    ) -> "models.CertificateListDescription":
        """Get the certificate list.

        Returns the list of certificates.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CertificateListDescription, or the result of cls(response)
        :rtype: ~azure.mgmt.iothub.models.CertificateListDescription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.CertificateListDescription"]
        # Default ARM status-code -> exception mapping; callers may extend it
        # via the `error_map` kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-03-22"
        accept = "application/json"

        # Construct URL
        url = self.list_by_iot_hub.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.ErrorDetails, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('CertificateListDescription', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list_by_iot_hub.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates'}  # type: ignore

    async def get(
        self,
        resource_group_name: str,
        resource_name: str,
        certificate_name: str,
        **kwargs
    ) -> "models.CertificateDescription":
        """Get the certificate.

        Returns the certificate.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :param certificate_name: The name of the certificate.
        :type certificate_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CertificateDescription, or the result of cls(response)
        :rtype: ~azure.mgmt.iothub.models.CertificateDescription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.CertificateDescription"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-03-22"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.ErrorDetails, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('CertificateDescription', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}'}  # type: ignore

    async def create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        certificate_name: str,
        certificate_description: "models.CertificateBodyDescription",
        if_match: Optional[str] = None,
        **kwargs
    ) -> "models.CertificateDescription":
        """Upload the certificate to the IoT hub.

        Adds new or replaces existing certificate.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :param certificate_name: The name of the certificate.
        :type certificate_name: str
        :param certificate_description: The certificate body.
        :type certificate_description: ~azure.mgmt.iothub.models.CertificateBodyDescription
        :param if_match: ETag of the Certificate. Do not specify for creating a brand new certificate.
         Required to update an existing certificate.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CertificateDescription, or the result of cls(response)
        :rtype: ~azure.mgmt.iothub.models.CertificateDescription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.CertificateDescription"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-03-22"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        # If-Match is optional here: omitted for create, required for update.
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(certificate_description, 'CertificateBodyDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.ErrorDetails, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # 200 = updated existing certificate, 201 = created a new one.
        if response.status_code == 200:
            deserialized = self._deserialize('CertificateDescription', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('CertificateDescription', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}'}  # type: ignore

    async def delete(
        self,
        resource_group_name: str,
        resource_name: str,
        certificate_name: str,
        if_match: str,
        **kwargs
    ) -> None:
        """Delete an X509 certificate.

        Deletes an existing X509 certificate or does nothing if it does not exist.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :param certificate_name: The name of the certificate.
        :type certificate_name: str
        :param if_match: ETag of the Certificate.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-03-22"
        accept = "application/json"

        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.ErrorDetails, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}'}  # type: ignore

    async def generate_verification_code(
        self,
        resource_group_name: str,
        resource_name: str,
        certificate_name: str,
        if_match: str,
        **kwargs
    ) -> "models.CertificateWithNonceDescription":
        """Generate verification code for proof of possession flow.

        Generates verification code for proof of possession flow. The verification code will be used to
        generate a leaf certificate.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :param certificate_name: The name of the certificate.
        :type certificate_name: str
        :param if_match: ETag of the Certificate.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CertificateWithNonceDescription, or the result of cls(response)
        :rtype: ~azure.mgmt.iothub.models.CertificateWithNonceDescription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.CertificateWithNonceDescription"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-03-22"
        accept = "application/json"

        # Construct URL
        url = self.generate_verification_code.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.ErrorDetails, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('CertificateWithNonceDescription', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    generate_verification_code.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}/generateVerificationCode'}  # type: ignore

    async def verify(
        self,
        resource_group_name: str,
        resource_name: str,
        certificate_name: str,
        if_match: str,
        certificate_verification_body: "models.CertificateVerificationDescription",
        **kwargs
    ) -> "models.CertificateDescription":
        """Verify certificate's private key possession.

        Verifies the certificate's private key possession by providing the leaf cert issued by the
        verifying pre uploaded certificate.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :param certificate_name: The name of the certificate.
        :type certificate_name: str
        :param if_match: ETag of the Certificate.
        :type if_match: str
        :param certificate_verification_body: The name of the certificate.
        :type certificate_verification_body: ~azure.mgmt.iothub.models.CertificateVerificationDescription
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CertificateDescription, or the result of cls(response)
        :rtype: ~azure.mgmt.iothub.models.CertificateDescription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.CertificateDescription"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-03-22"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.verify.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(certificate_verification_body, 'CertificateVerificationDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.ErrorDetails, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('CertificateDescription', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    verify.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}/verify'}  # type: ignore
|
[
"noreply@github.com"
] |
noreply@github.com
|
8dff565e4a3145736f3847daf00e0a91801c7e21
|
a4275e529b564c3ec5c084fb360c2f4207068477
|
/src/montague/validation.py
|
9a20e422a8c17d2122978e27db86e67d8d0db92f
|
[
"MIT"
] |
permissive
|
rmoorman/montague
|
aacc11837016400e37b69e18b2461a3246c2052c
|
423a2a5a773e975fa27f7b61627cc706fb084984
|
refs/heads/master
| 2020-12-29T01:41:17.262828
| 2015-06-18T00:46:45
| 2015-06-18T00:46:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,702
|
py
|
from __future__ import absolute_import
import collections
import types
def validate_montague_standard_format(config):
    """Assert that ``config`` follows the Montague standard format.

    The standard format requires every top-level section key to be present
    and each section value to be a mapping.

    Raises:
        AssertionError: if a section key is missing or a value is not a Mapping.
    """
    # BUG FIX: `collections.Mapping` was a deprecated alias removed in
    # Python 3.10; the ABC lives in collections.abc.
    from collections.abc import Mapping
    for key in ('globals', 'application', 'composite', 'filter', 'server', 'logging'):
        assert key in config
        assert isinstance(config[key], Mapping)
def validate_config_loader_methods(config_loader):
    """Assert that ``config_loader`` satisfies the Montague loader protocol.

    A loader must expose a bound ``config()`` method. If ``config()`` raises
    NotImplementedError, the loader must instead implement every per-section
    accessor (``app_config``, ``filter_config``, ``server_config``,
    ``logging_config``); otherwise those accessors are optional.
    """
    assert hasattr(config_loader, 'config')
    assert isinstance(config_loader.config, types.MethodType)
    # Flag flips to True when config() is not implemented, making the
    # per-section accessors mandatory below.
    specific_methods_required = False
    try:
        result = config_loader.config()
        validate_montague_standard_format(result)
    except NotImplementedError:
        # config loaders can fail to implement config() as long as they implement the other methods
        specific_methods_required = True
    for method in ('app_config', 'filter_config', 'server_config', 'logging_config'):
        if specific_methods_required:
            # If you don't implement .config(), you have to implement these
            assert hasattr(config_loader, method)
            assert isinstance(getattr(config_loader, method), types.MethodType)
        # We don't know the names of actual apps/filters/etc to load, but we do know
        # the loader shouldn't raise NotImplementedError if it has actually implemented them,
        # so we can try that.
        try:
            getattr(config_loader, method)('__bogus__')
        except NotImplementedError:
            # Only fatal when the accessor is mandatory.
            if specific_methods_required:
                raise
        except Exception:
            # any other exception is fine here, because we don't know exactly what happens
            # with a bogus name. usually KeyError, but maybe something else would be raised
            pass
|
[
"jon@inklesspen.com"
] |
jon@inklesspen.com
|
7c378f7c5a01b084095dc622f0eca72997914d5e
|
b3b3ac6bef90834d3c02858b243afc84a30f35ab
|
/api/migrations/0003_container_prev_art.py
|
b71622eb2aef9f4cb99810b841bd3e3859fba695
|
[
"MIT"
] |
permissive
|
sinofeng/edgy-controller
|
ce17eff41a75e099260d58e5c2fe5f685fb1d197
|
140bf952fcc43bbba344f169c782132d500e1343
|
refs/heads/master
| 2023-07-25T00:18:31.879146
| 2020-12-31T10:45:42
| 2020-12-31T10:45:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-04-24 14:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``prev_art`` decimal field to the ``container`` model."""

    dependencies = [
        ('api', '0002_auto_20180424_1428'),
    ]

    operations = [
        migrations.AddField(
            model_name='container',
            name='prev_art',
            # Up to 6 digits total, 3 after the decimal point; existing rows
            # are backfilled with 0.0.
            field=models.DecimalField(decimal_places=3, default=0.0, max_digits=6),
        ),
    ]
|
[
"avgeris.marios@gmail.com"
] |
avgeris.marios@gmail.com
|
a1b6fb392741e41deadbff3e6f9ad7e1f8c4f790
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/63/usersdata/158/31530/submittedfiles/swamee.py
|
2c5e1ac2906cab6729e807c9e54f3c38de172e65
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
# -*- coding: utf-8 -*-
"""Pipe sizing from head loss: diameter, Reynolds number and loss coefficient."""
import math

g = 9.81        # gravitational acceleration [m/s^2]
E = 0.000002    # pipe roughness used in the Swamee-Jain friction formula


def swamee(f, L, Q, deltaH, v):
    """Return (D, Rey, K) for the given flow parameters.

    Args:
        f: friction factor (dimensionless).
        L: pipe length.
        Q: volumetric flow rate.
        deltaH: available head loss.
        v: kinematic viscosity.
    """
    # BUG FIX: the original wrote `** 1/5`, which Python parses as
    # (x ** 1) / 5; the Darcy-Weisbach relation D^5 = 8 f L Q^2 / (pi^2 g dH)
    # requires the fifth ROOT, i.e. ** (1/5).
    D = ((8 * f * L * Q ** 2) / (math.pi ** 2 * g * deltaH)) ** (1.0 / 5.0)
    Rey = (4 * Q) / (math.pi * D * v)
    # Swamee-Jain explicit friction-factor expression.
    K = 0.25 / (math.log10((E / (3.7 * D)) + (5.74) / (Rey ** 0.9))) ** 2
    return D, Rey, K


def main():
    """Read the parameters interactively and print the three results."""
    f = float(input('digite f:'))
    L = float(input('digite L:'))
    Q = float(input('digite Q:'))
    deltaH = float(input('digite deltaH:'))
    v = float(input('digite v:'))
    D, Rey, K = swamee(f, L, Q, deltaH, v)
    print('D é:%.4f' % D)
    print('Rey é:%.4f' % Rey)
    print('K é:%.4f' % K)


if __name__ == '__main__':
    main()
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
fca6944ac17fdb848882685b614c20e6f19803df
|
fb594bf2cd33157a1d1d486e25c8014d6463fe9b
|
/project-1/expriments/plot-hidden-num.py
|
c3c6e7a2d5a09c0a704fe067d167cd044bdddd80
|
[] |
no_license
|
ShirleyHan6/CZ4042-Assignments
|
6469d2977b07302c5496eb9f83b360e6e5f06500
|
915b2e53008575741627333103bafbef1a91d600
|
refs/heads/master
| 2020-08-14T00:01:36.843411
| 2019-11-16T05:57:25
| 2019-11-16T05:57:25
| 215,060,266
| 0
| 2
| null | 2019-11-16T05:57:27
| 2019-10-14T14:05:58
|
Python
|
UTF-8
|
Python
| false
| false
| 489
|
py
|
import pickle
from utils.utils import plot_batched_accuracies, plot_train_val_accuracies

# Validation-accuracy curves recorded for each hidden-unit count that was tried.
with open('val-accs-hidden-num.pickle', 'rb') as f:
    val_accs_dict: dict = pickle.load(f)
# Training and validation curves for the chosen (optimal) hidden-unit setting.
with open('train-accs-opt-hidden-num.pickle', 'rb') as f:
    train_accs: list = pickle.load(f)
with open('val-accs-opt-hidden-num.pickle', 'rb') as f:
    val_accs: list = pickle.load(f)

# One figure comparing all hidden-unit settings, one train-vs-validation figure.
plot_batched_accuracies(val_accs_dict, label_base='hidden num = ')
plot_train_val_accuracies(train_accs, val_accs)
|
[
"yli056@e.ntu.edu.sg"
] |
yli056@e.ntu.edu.sg
|
a09754bd46ba1e705aa2818cb0065461ffc04a4b
|
77088306e65adb0ae131b51a9c6acf799ec669bc
|
/train.py
|
922a433af50334f9d448a3c2ec1397361f4ddead
|
[] |
no_license
|
NeerajTulsani/ChatBot
|
452285c979957c3788684956b796b85351e1a835
|
0db1f6c908d032ee15f2abf013af55ce4f78b5d4
|
refs/heads/main
| 2023-04-26T23:35:51.674944
| 2021-05-25T05:13:00
| 2021-05-25T05:13:00
| 328,322,624
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,561
|
py
|
import numpy as np
import json
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader

from nltk_utils import bag_of_words, tokenize, lem
from model import NeuralNet

# Chatbot intents definition: each intent has a tag and example patterns.
with open('intents.json', 'r') as f:
    intents = json.load(f)

all_words = []   # running vocabulary gathered from every pattern
tags = []        # intent tags (de-duplicated and sorted below)
ab = []          # (tokenized pattern, tag) pairs
for intent in intents['intents']:
    tag = intent['tag']
    tags.append(tag)
    for pattern in intent['patterns']:
        w = tokenize(pattern)
        all_words.extend(w)
        ab.append((w, tag))

# Lemmatize and drop punctuation tokens before deduplicating the vocabulary.
ignore_words = ['?', '.', '!']
all_words = [lem(w) for w in all_words if w not in ignore_words]
all_words = sorted(set(all_words))
tags = sorted(set(tags))

print(len(ab), "patterns")
print(len(tags), "tags:", tags)
print(len(all_words), "unique stemmed words:", all_words)

# Features: bag-of-words vector per pattern; labels: index of the tag.
X_train = []
y_train = []
for (pattern_sentence, tag) in ab:
    bag = bag_of_words(pattern_sentence, all_words)
    X_train.append(bag)
    label = tags.index(tag)
    y_train.append(label)
X_train = np.array(X_train)
y_train = np.array(y_train)

# Hyper-parameters for the feed-forward classifier trained below.
num_epochs = 1000
batch_size = 8
learning_rate = 0.001
input_size = len(X_train[0])
hidden_size = 8
output_size = len(tags)
print(input_size, output_size)
class ChatDataset(Dataset):
def __init__(self):
self.n_samples = len(X_train)
self.x_data = X_train
self.y_data = y_train
def __getitem__(self, index):
return self.x_data[index], self.y_data[index]
def __len__(self):
return self.n_samples
dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=True,
num_workers=0)
model = NeuralNet(input_size, hidden_size, output_size)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for epoch in range(num_epochs):
for (words, labels) in train_loader:
words = words
labels = labels.to(dtype=torch.long)
outputs = model(words)
loss = criterion(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch+1) % 100 == 0:
print (f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
print(f'final loss: {loss.item():.4f}')
data = {
"model_state": model.state_dict(),
"input_size": input_size,
"hidden_size": hidden_size,
"output_size": output_size,
"all_words": all_words,
"tags": tags
}
FILE = "data.pth"
torch.save(data, FILE)
print(f'training complete. file saved to {FILE}')
|
[
"noreply@github.com"
] |
noreply@github.com
|
434f569b0c237c070439c7b30b69de0e7e04c549
|
21f3162b80f5edd4e0be3f5bee04142f397b8e26
|
/tutorial/tutorial/spiders/img_spider.py
|
65a56601eecaf68bc2181c23e7cc2941f24fdb37
|
[] |
no_license
|
jiangbingo/scrapy-house
|
e599e02f2e42d598d3bce1e052d55d68211b9379
|
533e98a56a5c869f45928ded512b0a12f4894965
|
refs/heads/master
| 2020-04-09T17:32:50.701832
| 2016-12-01T14:25:23
| 2016-12-01T14:25:23
| 67,988,718
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,865
|
py
|
#!-*-coding:utf-8-*-
__author__ = 'PD-002'
import os
import json
import scrapy
from hashlib import md5
from tutorial.items import DmozItem, TestItem
class DmozSpider(scrapy.Spider):
"""
for test
"""
name = "img"
allowed_domains = ["img.org"]
start_urls = ["http://newhouse.fang.com/house/s/"]
def parse(self, response):
"""
:param response:
:return:
"""
# 查询到每个城市的url
city_names = response.xpath("//div[@class='city20141104nr']//a/text()").extract()
city_urls = response.xpath("//div[@class='city20141104nr']//a/@href").extract()
for i in range(len(city_names)):
sel = city_urls[i]
if sel != self.start_urls[0] and sel[-3:] != "txt":
response.meta["city"] = city_names[i].strip()
# import ipdb;ipdb.set_trace()
yield scrapy.Request(sel.strip(), callback=self.second_parse, dont_filter=True,
meta=response.meta.copy())
def second_parse(self, response):
"""
:param response:
:return:
"""
div_list = response.xpath("//div[@class='newnav20141104nr']/div")
if len(div_list) <= 0:
return
# 新房分类url
type_urls = div_list[3].xpath("div[@class='listBox']/ul//li/a/@href").extract()[:5]
type_names = div_list[3].xpath("div[@class='listBox']/ul//li/a/text()").extract()[:5]
# import ipdb;ipdb.set_trace()
for i in range(len(type_names)):
url = type_urls[i]
response.meta["type"] = type_names[i].strip()
if url != response.url and url[-3:] != "txt":
yield scrapy.Request(url.strip(), callback=self.page_parse, dont_filter=True, meta=response.meta.copy())
# # 二手房url
# for url in div_list[4].xpath("div[@class='listBox']/ul//li/a/@href").extract()[:2]:
# item = TestItem()
# item["url"] = url
# if url != response.url and url[-3:] != "txt":
# # yield item
# yield scrapy.Request(url, callback=self.page_parse, dont_filter=True)
# 写字楼url
# for url in div_list[8].xpath("div[@class='listBox']/ul//li/a/@href").extract():
# item = TestItem()
# item["url"] = url
# if url != response.url and url[-3:] != "txt":
# # yield item
# yield scrapy.Request(url, callback=self.page_parse, dont_filter=True)
def page_parse(self, response):
"""
:param response:
:return:
"""
# 分页
a_list = response.xpath("//a")
page_url = None
for a in a_list:
text = a.xpath("text()").extract()
if len(text) > 0 and ("末页".decode("utf-8") in text[0] or "尾页".decode("utf-8") in text[0]):
page_url = self._find_page_url(a.xpath("@href").extract()[0])
break
if page_url:
for a in a_list:
hrefs = a.xpath("@href").extract()
if len(hrefs) > 0:
href = hrefs[0]
if page_url in href:
new_page_url = response.urljoin(href)
if new_page_url[-3:] != "txt":
yield scrapy.Request(new_page_url.strip(), self.detail_page_parse,
dont_filter=True,
meta=response.meta.copy())
def detail_page_parse(self, response):
"""
:param response:
:return:
"""
url_list = response.xpath("//strong[@class='f14 fb_blue']/a/@href").extract()
if len(url_list) <= 0:
url_list = response.xpath("//div[@class='nlcd_name']/a/@href").extract()
if len(url_list) <= 0:
cache_list = response.xpath("//dd[@class='info rel floatr']/p[@class='title']/a/@href").extract()
for cache in cache_list:
url_list.append(response.urljoin(cache.strip()))
if len(url_list) > 0:
for url in url_list:
if "http" not in url:
url = response.urljoin(url)
url = url.strip()
city = response.meta["city"].replace(" ", "")
type_name = response.meta["type"].replace(" ", "")
dir_name = os.path.join(os.path.dirname(os.path.abspath("img_spiders.py")), "imgs", type_name,
city, url.split("/")[2])
if not os.path.exists(dir_name):
response.meta["dir_name"] = dir_name
yield scrapy.Request(url, self.find_img_page_parse, dont_filter=True,
meta=response.meta.copy())
def find_img_page_parse(self, response):
"""
:param response:
:return:
"""
texts = response.xpath("//div[@class='navleft tf']//a[5]/text()").extract()
if len(texts) > 0:
if "户型图".decode("utf-8") == texts[0]:
page_urls = response.xpath("//div[@class='navleft tf']//a[5]/@href").extract()
else:
page_urls = response.xpath("//div[@class='navleft tf']//a[6]/@href").extract()
if len(page_urls) > 0:
page_url = page_urls[0]
yield scrapy.Request(page_url.strip(), self.find_img_url_parse, dont_filter=True,
meta=response.meta.copy())
else:
return
def find_img_url_parse(self, response):
"""
:param response:
:return:
"""
img_urls = response.xpath("//ul[@class='by_img_list my_ul clearfix']//li/a/img/@src").extract()
names = response.xpath("//ul[@class='by_img_list my_ul clearfix']//li/a/p/text()").extract()
msgs = response.xpath("//li[@class='xx_list']")
if len(msgs) == 6:
if len(names) <= 0:
return
try:
mo = md5()
mo.update(names[0].encode("utf-8"))
img_name = mo.hexdigest()
dir_name = response.meta["dir_name"]
if not os.path.exists(dir_name):
os.makedirs(dir_name)
config = {"img_name": img_name}
types = msgs[0].xpath("em/text()").extract()
if len(types) > 0:
config["type"] = types[0].strip()
hxfbs = msgs[1].xpath("em/text()").extract()
if len(hxfbs) > 0:
config["hxfb"] = hxfbs[0].strip()
jzmjs = msgs[2].xpath("em/i/text()").extract()
if len(jzmjs) > 0:
config["jzmj"] = jzmjs[0].strip()
jjs = msgs[3].xpath("em/i/text()").extract()
if len(jjs) > 0:
config["jj"] = jjs[0].strip()
zjs = msgs[5].xpath("em/i/text()").extract()
if len(zjs) > 0:
config["zj"] = zjs[0].strip()
apartment_names = response.xpath("//div[@class='img_num fl']/strong/text()").extract()
if len(apartment_names) > 0:
config["apartment_name"] = apartment_names[0].strip()
file_name = os.path.join(dir_name, "config.txt")
fd = open(file_name, "w+")
fd.write(json.dumps(config))
fd.close()
url = img_urls[0].replace("124x82", "880x578")
response.meta["img_name"] = img_name
yield scrapy.Request(url.strip(), self.load_img,
dont_filter=True, meta=response.meta.copy())
except:
import ipdb;ipdb.set_trace()
else:
print "find_img_url_parse error", response.url
def load_img(self, response):
"""
:param response:
:return:
"""
dir_name = response.meta["dir_name"]
file_name = os.path.join(dir_name, response.meta["img_name"] + ".jpg")
with open(file_name, "wb+") as fd:
fd.write(response.body)
def _find_page_url(self, url):
"""
:param url:
:return:
"""
flag = "?page="
if url.find(flag) > 0:
return url.split("?page=")[0]
else:
a_list = url.split("/")[1:-2]
new_url = "/"
for i in a_list:
new_url = new_url + i + "/"
return new_url
|
[
"jiangbingo@hotmail.com"
] |
jiangbingo@hotmail.com
|
501ed856db70e2877ac34046453bf4981788b2b1
|
4c6c1e729e5d9ae2908ee2d580d91c60fc6b76ff
|
/accounts/urls.py
|
dafa5bc491d2bf87f8b6421b49992da951d61faa
|
[] |
no_license
|
vcrl/P13
|
6e098733bdd06a2c0a97835570d314ba9e758587
|
365c973a07326b71bf5ef9d16cbb9770edcfd415
|
refs/heads/master
| 2023-06-17T06:37:33.381796
| 2021-07-16T13:37:33
| 2021-07-16T13:37:33
| 364,944,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('signup/', views.signup, name="signup"),
path('signout/', views.signout, name="signout"),
path('signin/', views.loginuser, name="loginuser"),
path('profile/', views.displayprofile, name="profile"),
path('', views.return_to_frontpage, name="frontpage")
]
|
[
"vruel@pm.me"
] |
vruel@pm.me
|
b0e19c15084f07ee5e146abca8e1ab776d778910
|
57e8a41e63af3ef94001c03f07beacf39e340f0d
|
/cse_udub/cse_415_AI/hw3/ui.py
|
ae82ccb2a4d1982121f085a48ddff517f32e6a15
|
[] |
no_license
|
zhy9036/cs_2016_fall
|
0e5f9f1f84777729b1761675cfb27086269c2358
|
c35c0cf64038dda3888e48ca63e92c67dc9ad346
|
refs/heads/master
| 2020-04-10T03:55:45.688046
| 2018-02-17T19:26:49
| 2018-02-17T19:26:49
| 68,164,378
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,911
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'all.ui'
#
# Created: Sat Apr 26 22:09:41 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(542, 226)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
Dialog.setSizePolicy(sizePolicy)
Dialog.setMinimumSize(QtCore.QSize(420, 226))
Dialog.setMaximumSize(QtCore.QSize(950, 226))
self.wMain = QtGui.QWidget(Dialog)
self.wMain.setGeometry(QtCore.QRect(10, 10, 521, 211))
self.wMain.setObjectName(_fromUtf8("wMain"))
self.pbB1 = QtGui.QPushButton(self.wMain)
self.pbB1.setGeometry(QtCore.QRect(380, 40, 61, 61))
self.pbB1.setObjectName(_fromUtf8("pbB1"))
self.pbB = QtGui.QPushButton(self.wMain)
self.pbB.setEnabled(True)
self.pbB.setGeometry(QtCore.QRect(0, 40, 71, 141))
self.pbB.setAutoDefault(False)
self.pbB.setObjectName(_fromUtf8("pbB"))
self.pbA2 = QtGui.QPushButton(self.wMain)
self.pbA2.setGeometry(QtCore.QRect(140, 120, 61, 61))
self.pbA2.setObjectName(_fromUtf8("pbA2"))
self.pbB5 = QtGui.QPushButton(self.wMain)
self.pbB5.setGeometry(QtCore.QRect(140, 40, 61, 61))
self.pbB5.setObjectName(_fromUtf8("pbB5"))
self.pbA = QtGui.QPushButton(self.wMain)
self.pbA.setGeometry(QtCore.QRect(450, 40, 71, 141))
self.pbA.setObjectName(_fromUtf8("pbA"))
self.pbA1 = QtGui.QPushButton(self.wMain)
self.pbA1.setGeometry(QtCore.QRect(80, 120, 61, 61))
self.pbA1.setObjectName(_fromUtf8("pbA1"))
self.pbA5 = QtGui.QPushButton(self.wMain)
self.pbA5.setGeometry(QtCore.QRect(320, 120, 61, 61))
self.pbA5.setObjectName(_fromUtf8("pbA5"))
self.pbA6 = QtGui.QPushButton(self.wMain)
self.pbA6.setGeometry(QtCore.QRect(380, 120, 61, 61))
self.pbA6.setObjectName(_fromUtf8("pbA6"))
self.lStatus = QtGui.QLabel(self.wMain)
self.lStatus.setGeometry(QtCore.QRect(10, 190, 501, 16))
self.lStatus.setText(_fromUtf8(""))
self.lStatus.setObjectName(_fromUtf8("lStatus"))
self.pbB6 = QtGui.QPushButton(self.wMain)
self.pbB6.setGeometry(QtCore.QRect(80, 40, 61, 61))
self.pbB6.setCheckable(False)
self.pbB6.setObjectName(_fromUtf8("pbB6"))
self.pbB2 = QtGui.QPushButton(self.wMain)
self.pbB2.setGeometry(QtCore.QRect(320, 40, 61, 61))
self.pbB2.setObjectName(_fromUtf8("pbB2"))
self.pbA3 = QtGui.QPushButton(self.wMain)
self.pbA3.setGeometry(QtCore.QRect(200, 120, 61, 61))
self.pbA3.setObjectName(_fromUtf8("pbA3"))
self.pbB4 = QtGui.QPushButton(self.wMain)
self.pbB4.setGeometry(QtCore.QRect(200, 40, 61, 61))
self.pbB4.setObjectName(_fromUtf8("pbB4"))
self.pbA4 = QtGui.QPushButton(self.wMain)
self.pbA4.setGeometry(QtCore.QRect(260, 120, 61, 61))
self.pbA4.setObjectName(_fromUtf8("pbA4"))
self.pbB3 = QtGui.QPushButton(self.wMain)
self.pbB3.setGeometry(QtCore.QRect(260, 40, 61, 61))
self.pbB3.setObjectName(_fromUtf8("pbB3"))
self.wStart = QtGui.QWidget(Dialog)
self.wStart.setGeometry(QtCore.QRect(0, 10, 411, 211))
self.wStart.setObjectName(_fromUtf8("wStart"))
self.tabWidget = QtGui.QTabWidget(self.wStart)
self.tabWidget.setGeometry(QtCore.QRect(10, 10, 391, 191))
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.pbPlayHuman = QtGui.QPushButton(self.tab)
self.pbPlayHuman.setEnabled(True)
self.pbPlayHuman.setGeometry(QtCore.QRect(140, 120, 101, 21))
self.pbPlayHuman.setObjectName(_fromUtf8("pbPlayHuman"))
self.label_2 = QtGui.QLabel(self.tab)
self.label_2.setGeometry(QtCore.QRect(140, 70, 101, 16))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.pbSelectFile = QtGui.QPushButton(self.tab)
self.pbSelectFile.setGeometry(QtCore.QRect(140, 20, 101, 23))
self.pbSelectFile.setObjectName(_fromUtf8("pbSelectFile"))
self.cbTime = QtGui.QComboBox(self.tab)
self.cbTime.setGeometry(QtCore.QRect(140, 90, 101, 20))
self.cbTime.setObjectName(_fromUtf8("cbTime"))
self.cbTime.addItem(_fromUtf8(""))
self.cbTime.addItem(_fromUtf8(""))
self.cbTime.addItem(_fromUtf8(""))
self.cbTime.addItem(_fromUtf8(""))
self.cbTime.addItem(_fromUtf8(""))
self.lbFile = QtGui.QLabel(self.tab)
self.lbFile.setGeometry(QtCore.QRect(140, 50, 101, 16))
self.lbFile.setObjectName(_fromUtf8("lbFile"))
self.tabWidget.addTab(self.tab, _fromUtf8(""))
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName(_fromUtf8("tab_2"))
self.pbCreate = QtGui.QPushButton(self.tab_2)
self.pbCreate.setGeometry(QtCore.QRect(220, 20, 75, 23))
self.pbCreate.setObjectName(_fromUtf8("pbCreate"))
self.tbHostName = QtGui.QPlainTextEdit(self.tab_2)
self.tbHostName.setGeometry(QtCore.QRect(20, 20, 191, 25))
self.tbHostName.setObjectName(_fromUtf8("tbHostName"))
self.label_4 = QtGui.QLabel(self.tab_2)
self.label_4.setGeometry(QtCore.QRect(20, 0, 81, 20))
self.label_4.setFrameShape(QtGui.QFrame.NoFrame)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.label_3 = QtGui.QLabel(self.tab_2)
self.label_3.setGeometry(QtCore.QRect(20, 50, 71, 20))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.lvHosts = QtGui.QListWidget(self.tab_2)
self.lvHosts.setGeometry(QtCore.QRect(20, 70, 191, 91))
self.lvHosts.setObjectName(_fromUtf8("lvHosts"))
self.pbGo = QtGui.QPushButton(self.tab_2)
self.pbGo.setGeometry(QtCore.QRect(220, 140, 161, 23))
self.pbGo.setObjectName(_fromUtf8("pbGo"))
self.pbCancel = QtGui.QPushButton(self.tab_2)
self.pbCancel.setGeometry(QtCore.QRect(300, 20, 75, 23))
self.pbCancel.setObjectName(_fromUtf8("pbCancel"))
self.cbInternetOption = QtGui.QComboBox(self.tab_2)
self.cbInternetOption.setGeometry(QtCore.QRect(220, 50, 161, 20))
self.cbInternetOption.setObjectName(_fromUtf8("cbInternetOption"))
self.cbInternetOption.addItem(_fromUtf8(""))
self.cbInternetOption.addItem(_fromUtf8(""))
self.tabWidget.addTab(self.tab_2, _fromUtf8(""))
self.retranslateUi(Dialog)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Kalah - CSE415 - Spring 2014", None))
self.pbB1.setText(_translate("Dialog", "0", None))
self.pbB.setText(_translate("Dialog", "0", None))
self.pbA2.setText(_translate("Dialog", "0", None))
self.pbB5.setText(_translate("Dialog", "0", None))
self.pbA.setText(_translate("Dialog", "0", None))
self.pbA1.setText(_translate("Dialog", "0", None))
self.pbA5.setText(_translate("Dialog", "0", None))
self.pbA6.setText(_translate("Dialog", "0", None))
self.pbB6.setText(_translate("Dialog", "0", None))
self.pbB2.setText(_translate("Dialog", "0", None))
self.pbA3.setText(_translate("Dialog", "0", None))
self.pbB4.setText(_translate("Dialog", "0", None))
self.pbA4.setText(_translate("Dialog", "0", None))
self.pbB3.setText(_translate("Dialog", "0", None))
self.pbPlayHuman.setText(_translate("Dialog", "Play with AI", None))
self.label_2.setText(_translate("Dialog", "Time limit (ms):", None))
self.pbSelectFile.setText(_translate("Dialog", "Open AI File", None))
self.cbTime.setItemText(0, _translate("Dialog", "100", None))
self.cbTime.setItemText(1, _translate("Dialog", "500", None))
self.cbTime.setItemText(2, _translate("Dialog", "1000", None))
self.cbTime.setItemText(3, _translate("Dialog", "2000", None))
self.cbTime.setItemText(4, _translate("Dialog", "5000", None))
self.lbFile.setText(_translate("Dialog", "No file selected", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("Dialog", "Local AI", None))
self.pbCreate.setText(_translate("Dialog", "Create", None))
self.tbHostName.setPlainText(_translate("Dialog", "Game Name", None))
self.label_4.setText(_translate("Dialog", "Create game:", None))
self.label_3.setText(_translate("Dialog", "Or join game:", None))
self.pbGo.setText(_translate("Dialog", "Play with Internet", None))
self.pbCancel.setText(_translate("Dialog", "Cancel", None))
self.cbInternetOption.setItemText(0, _translate("Dialog", "Play with human", None))
self.cbInternetOption.setItemText(1, _translate("Dialog", "Play with AI", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("Dialog", "Internet", None))
|
[
"zhangyang9036@gmail.com"
] |
zhangyang9036@gmail.com
|
185d7e4291d29d014020b6b40ebfb2d8398b5f8c
|
cf444d07d8056416dfba34e73bba128567b7c692
|
/readandloadperfpadbenchasof.py
|
fd9df58a4e0df4d295b906d84dace758c8374d5d
|
[] |
no_license
|
rtstock/scriptsvapp01
|
cf9e993e5253e9a60dc191cca5e34532fa559ee1
|
7c2db888f0dcd92de62c031f9867e1c5cb4cbc0e
|
refs/heads/master
| 2021-01-23T00:29:11.267852
| 2017-03-21T19:24:34
| 2017-03-21T19:24:34
| 85,737,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,756
|
py
|
import ftplib
class perform:
#def __init__(self):
# print 'class initialized...'
# #self.DataFilePathName = []
# #self.BuildFilepaths()
def __init__(self,p_datafilepathname):
print 'initialized readandloadperfpadbenchasof.py'
self.DataFilePathName = p_datafilepathname
self.ReadAndLoad()
print 'exiting readandloadperfpadbenchasof.py'
def set_DataFilePathName(self,DataFilePathName):
self._DataFilePathName = DataFilePathName
def get_DataFilePathName(self):
return self._DataFilePathName
DataFilePathName = property(get_DataFilePathName, set_DataFilePathName)
def set_Results(self,Results):
self._Results = Results
def get_Results(self):
return self._Results
Results = property(get_Results, set_Results)
def xstr(self,s):
try:
return '' if s is None else str(s)
except:
return ''
def ReadAndLoad(self):
procresults = {}
try:
my_datafilepathname = self.DataFilePathName
# get and format the modified date
import os.path, time
print 'got here !', my_datafilepathname
filedatetime = os.path.getmtime(my_datafilepathname)
from datetime import datetime
filedatetime_forsql = datetime.fromtimestamp(filedatetime).strftime('%Y-%m-%d %H:%M:%S')
import bs4, sys
with open(my_datafilepathname, 'r') as f:
webpage = f.read().decode('utf-8')
soup = bs4.BeautifulSoup(webpage, "lxml")
market_index_ext = ''
fieldnames = {}
is_dataline = 0
total_deleted = 0
total_inserted = 0
for node in soup.find_all('th', attrs={}): #'style':'display: table-header-group; mso-number-format:\@;'
if node.attrs['class'][0] in ['HeaderCellNumeric','HeaderCellString']:
fieldnames[len(fieldnames)] = node.string
#print node.string
for nodeA in soup.find_all('tr', attrs={}):
print '-----------------------'
is_dataline = 0
fieldvalues = {}
for nodeB in nodeA.find_all('td', attrs={}):
#print 'got here!!'
#print nodeB.attrs['class'][0]
if nodeB.attrs['class'][0] in ['DataCellNumeric','DataCellString']:
#print 'got here!!!'
if fieldnames[len(fieldvalues)] == 'market_index_ext':
is_dataline = 1
market_index_ext = nodeB.string
#print market_index_ext, fieldnames[len(fieldvalues)],'=', nodeB.string
#print fieldnames[len(fieldvalues)]
#print ' ',nodeB.string
fieldvalues[fieldnames[len(fieldvalues)]] = nodeB.string
print 'got here xxxxxx'
if is_dataline == 1:
#print 'got here !@'
fieldnames_string = ''
fieldvalues_string = ''
for k,v in fieldvalues.items():
#print 'fieldvalues:',k, v
if v == None:
goodvaluestring = ''
else:
goodvaluestring = v
print 'fieldvalues:',k, goodvaluestring
fieldnames_string = fieldnames_string + k + ','
fieldvalues_string = fieldvalues_string + "'" + goodvaluestring + "',"
fieldnames_string = fieldnames_string[:-1]
fieldvalues_string = fieldvalues_string[:-1]
print 'fieldnames_string....................'
print fieldnames_string
print 'fieldvalues_string.............................'
print fieldvalues_string
print market_index_ext
#print fieldvalues[fieldnames[0]],fieldvalues[fieldnames[1]],fieldvalues[fieldnames[2]]
import pyodbc
cnxn = pyodbc.connect(r'DRIVER={SQL Server};SERVER=ipc-vsql01;DATABASE=DataAgg;Trusted_Connection=True;')
cursor = cnxn.cursor()
#print 'got here !@'
#sql_delete = "delete from dbo.xanalysisofbenchmarks_padbenchasof_imported where market_node_last_invested_date = ? and market_index_ext = ?", fieldvalues['market_node_last_invested_date'],fieldvalues['market_index_ext']
#print sql_delete
cursor.execute( "delete from dbo.xanalysisofbenchmarks_padbenchasof_imported where market_node_last_invested_date = ? and market_index_ext = ?", fieldvalues['market_node_last_invested_date'],fieldvalues['market_index_ext'] )
total_deleted = total_deleted + cursor.rowcount
print ' ',cursor.rowcount, 'records deleted'
cnxn.commit()
insert_sql = "insert into xanalysisofbenchmarks_padbenchasof_imported("+fieldnames_string+") values ("+fieldvalues_string+")"
#print insert_sql
cursor.execute(insert_sql)
procresults['records inserted'] = cursor.rowcount
total_inserted = total_inserted + cursor.rowcount
print ' ',cursor.rowcount, 'records inserted'
cnxn.commit()
procresults['resultvalue1'] = 'success'
procresults['total_deleted'] = total_deleted
procresults['total_inserted'] = total_inserted
except Exception,e:
print type(e)
print 'there was an error on ' + self.DataFilePathName
self.Results = procresults
if __name__=='__main__':
print 'running ___name___'
myDataFilePathName = r"//Ipc-vsql01/data/Batches/prod/WatchFolder/incoming/PagesOutput_GetPBAsOf_2016-11-30 132015270.xls"
o = perform(myDataFilePathName)
#o.DataFilePathName = r"//Ipc-vsql01/data/Batches/prod/WatchFolder/incoming/PagesOutput_GetPadPortBenchAsOf_20161124_ADAKAT.xls"
#o.ReadAndLoad()
print o.Results
|
[
"noreply@github.com"
] |
noreply@github.com
|
553ab201ccf7e1ce364dc3f6956d2b9849fcd972
|
b341adbf938239c37c7c2cfe1c295ad1396d339b
|
/week06/geojson_worked_example.py
|
562600cf296ecf90f15138f9bdce5102d79e63d1
|
[] |
no_license
|
sunda-y/UsingPythontoAccessWebData
|
c84025f1b724a2c10a5932c9a574ddde0079715f
|
964deed29b531a8a5a79f24d234ae5329b25dd44
|
refs/heads/master
| 2021-01-24T08:11:54.277570
| 2018-03-04T15:37:27
| 2018-03-04T15:37:27
| 122,972,524
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 951
|
py
|
import urllib.request, urllib.parse, urllib.error
import json
serviceurl = "http://maps.googleapis.com/maps/api/geocode/json?"
while True:
address = input("Please enter location: ")
if (len(address) < 1):
break
url = serviceurl + urllib.parse.urlencode(
{"address": address}
)
print("Retrieving:", url)
data = urllib.request.urlopen(url).read().decode()
print("Retrieving", len(data), "characters")
try:
js = json.loads(data)
except:
js = None
if js is None or "status" not in js or js["status"] != "OK":
print("Retrieving failed")
print(js)
continue
print(json.dumps(js, indent=4))
lat = js["results"][0]["geometry"]["location"]["lat"]
lng = js["results"][0]["geometry"]["location"]["lng"]
print("lat:", lat, "lng:", lng)
location = js["results"][0]["formatted_address"]
print("location:", location)
|
[
"SundaySunxiran@gmail.com"
] |
SundaySunxiran@gmail.com
|
a1f08199e9d65120982277c5b73430048437c363
|
e751c59ca3c98c8f6a98b7c6fc7167fe615aa1b0
|
/streamz/orderedweakset.py
|
82136ecdea1138314b8cd2277154f13f468712af
|
[
"BSD-3-Clause"
] |
permissive
|
yutiansut/streamz
|
a10e0d2beefd450b5d19cb7d78b4c8a333ebcd48
|
e51f0397d27957f8b3bfc78ecdb946cbfbac21b6
|
refs/heads/master
| 2020-07-10T15:23:35.567092
| 2019-12-24T07:07:43
| 2019-12-24T07:07:43
| 204,297,562
| 1
| 0
|
NOASSERTION
| 2019-12-24T07:07:44
| 2019-08-25T13:24:35
|
Python
|
UTF-8
|
Python
| false
| false
| 977
|
py
|
# -*- coding: utf8 -*-
# This is a copy from Stack Overflow
# https://stackoverflow.com/questions/7828444/indexable-weak-ordered-set-in-python
# Asked by Neil G https://stackoverflow.com/users/99989/neil-g
# Answered/edited by https://stackoverflow.com/users/1001643/raymond-hettinger
import collections
import weakref
class OrderedSet(collections.MutableSet):
def __init__(self, values=()):
self._od = collections.OrderedDict().fromkeys(values)
def __len__(self):
return len(self._od)
def __iter__(self):
return iter(self._od)
def __contains__(self, value):
return value in self._od
def add(self, value):
self._od[value] = None
def discard(self, value):
self._od.pop(value, None)
class OrderedWeakrefSet(weakref.WeakSet):
def __init__(self, values=()):
super(OrderedWeakrefSet, self).__init__()
self.data = OrderedSet()
for elem in values:
self.add(elem)
|
[
"mrocklin@gmail.com"
] |
mrocklin@gmail.com
|
04a6fc2e76234c2cc09fd5a634d68cf9561700ee
|
9cd8c0e01d6197c77b15d24ea46be2c4c5ea2e73
|
/src/justchat/urls.py
|
0b81a21cf0a8ec353944f20feceae33376d0644b
|
[
"MIT"
] |
permissive
|
vaibhav0000patel/Chat-Application-Django-Channels
|
6a0ecfb7b80505ed452ac2e8336aa2d3adbcf72d
|
7470ecf8e22951e72081d344d8123e768a75b0b0
|
refs/heads/main
| 2023-03-06T20:13:15.323707
| 2021-02-23T14:08:17
| 2021-02-23T14:08:17
| 341,572,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('chat/', include('chat.urls',namespace='chat')),
]
|
[
"vaibhav0000patel@gmail.com"
] |
vaibhav0000patel@gmail.com
|
275a0a4f256f1d54872f57ef1f6b4f51287f4070
|
38a868d3a4605e32739efe49343642c1bb84d924
|
/data_loader/unet_data_loader.py
|
4e69011b1a973a87589d5655d3b556dc71b29f19
|
[] |
no_license
|
ferraric/Semantic-Segmentation-of-Histopathological-Slides
|
c505c7f9e0710f04f23b50b0a3d563c979e1bef6
|
5461ec858c875f1e5463193466c82c48358cba9c
|
refs/heads/master
| 2023-07-03T15:33:56.048279
| 2021-07-30T11:18:13
| 2021-07-30T11:18:13
| 391,037,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,264
|
py
|
import tensorflow as tf
import os
import math
import numpy as np
from PIL import Image
AUTOTUNE = tf.data.experimental.AUTOTUNE
class UnetDataLoader:
def __init__(self, config, validation=False, preprocessing=None, use_image_augmentations=False):
self.use_image_augmentations = use_image_augmentations
self.preprocessing = preprocessing
self.config = config
if (validation):
dataset_path = config.validation_dataset_path
else:
dataset_path = config.train_dataset_path
# create list of image and annotation paths
all_files = os.listdir(dataset_path)
self.slide_paths = []
self.annotation_paths = []
for file in all_files:
if "slide" in file:
self.slide_paths.append(os.path.join(dataset_path, file))
elif "annotation" in file:
self.annotation_paths.append(os.path.join(dataset_path, file))
self.slide_paths.sort()
self.annotation_paths.sort()
self.image_count = len(self.slide_paths)
annotation_count = len(self.annotation_paths)
assert self.image_count == annotation_count, (
"The image count is {} and the annotation count is {}, but they should be"
"equal".format(self.image_count, annotation_count)
)
for i, slide_path in enumerate(self.slide_paths):
slide_name = os.path.split(slide_path)[1]
annotation_name = os.path.split(self.annotation_paths[i])[1]
assert slide_name.replace("slide", "") == annotation_name.replace(
"annotation", ""
), (
"Path names of slide {} and annotation {}"
"do not match".format(slide_name, annotation_name)
)
print("We found {} images and annotations".format(self.image_count))
dataset = tf.data.Dataset.from_tensor_slices({
'image_paths': self.slide_paths,
'labels': self.annotation_paths
})
dataset = dataset.map(lambda x: (
tf.py_function(self.parse_image_and_label, [x['image_paths'], x['labels'], False],
[tf.float32, tf.float32])))
dataset = dataset.map(self._fixup_shape)
if (validation):
self.dataset = dataset.repeat(-1).batch(self.config.batch_size)
else:
self.dataset = dataset.shuffle(buffer_size=self.config.shuffle_buffer_size).repeat(-1).batch(
self.config.batch_size)
def __len__(self):
return math.ceil(self.image_count / self.config.batch_size)
def parse_image_and_label(self, image, label, is_norwegian_data):
image_path = image.numpy().decode('UTF-8')
label_path = label.numpy().decode('UTF-8')
image_path_tensor = tf.io.read_file(image_path)
img = tf.image.decode_png(image_path_tensor, channels=3)
# Load image with Pillow to make sure we lod it in palette mode.
label = np.expand_dims(np.array(Image.open(label_path)), -1)
assert label.shape[2] == 1, "label should have 1 channel but has {}".format(label.shape[2])
if (is_norwegian_data):
# somehow the anotations are loaded as 0 and 255 instead of 0 and 1, thus we just divide by 255
label = np.divide(label, 255)
label = tf.keras.utils.to_categorical(label, num_classes=self.config.number_of_classes)
assert img.shape[2] == 3, "image should have 3 channels but has {}".format(img.shape[2])
assert label.shape[2] == self.config.number_of_classes, "label should have {} channels but has {}".format(
self.config.number_of_classes, label.shape[2])
img = tf.cast(img, tf.float32)
label = tf.cast(label, tf.float32)
if self.use_image_augmentations:
n_rotations = np.random.choice(4)
img = tf.image.rot90(img, n_rotations)
label = tf.image.rot90(label, n_rotations)
if (np.random.rand(1) > 0.5):
img = tf.image.flip_left_right(img)
label = tf.image.flip_left_right(label)
if (np.random.rand(1) > 0.5):
img = tf.image.flip_up_down(img)
label = tf.image.flip_up_down(label)
if self.preprocessing:
img = self.preprocessing(img)
return img, label
def _fixup_shape(self, images, labels):
images.set_shape([None, None, 3])
labels.set_shape([None, None, self.config.number_of_classes])
return images, labels
class NorwayUnetDataLoader(UnetDataLoader):
    """UnetDataLoader for the Norwegian dataset layout.

    Expects `<dataset_path>/patches` and `<dataset_path>/annotations`
    directories whose file names pair up after stripping the "image" /
    "annotation" substrings. Builds an infinitely repeating, batched
    tf.data pipeline on `self.dataset`.
    """
    def __init__(self, config, validation=False, preprocessing=None, use_image_augmentations=False):
        """Scan the dataset directory and build the tf.data pipeline.

        :param config: object providing train/validation paths, batch_size,
            shuffle_buffer_size and number_of_classes.
        :param validation: use the validation path and skip shuffling.
        :param preprocessing: optional callable applied to each image.
        :param use_image_augmentations: enable random rot90/flips.
        """
        self.use_image_augmentations = use_image_augmentations
        self.preprocessing = preprocessing
        self.config = config
        if (validation):
            dataset_path = config.validation_dataset_path
            print("Validating on the path {}".format(dataset_path))
        else:
            dataset_path = config.train_dataset_path
            print("Training on the path {}".format(dataset_path))
        # create list of image and annotation paths
        self.slide_paths = []
        self.annotation_paths = []
        for slide_file in os.listdir(os.path.join(dataset_path, "patches")):
            self.slide_paths.append(os.path.join(os.path.join(dataset_path, "patches"), slide_file))
        for annotation_file in os.listdir(os.path.join(dataset_path, "annotations")):
            self.annotation_paths.append(os.path.join(os.path.join(dataset_path, "annotations"), annotation_file))
        # Sorting both lists aligns image i with annotation i (verified below).
        self.slide_paths.sort()
        self.annotation_paths.sort()
        self.image_count = len(self.slide_paths)
        annotation_count = len(self.annotation_paths)
        assert self.image_count == annotation_count, (
            "The image count is {} and the annotation count is {}, but they should be"
            "equal".format(self.image_count, annotation_count)
        )
        # Sanity-check the pairing: names must match once the "image"/"annotation"
        # markers are removed.
        for i, slide_path in enumerate(self.slide_paths):
            slide_name = os.path.split(slide_path)[1]
            annotation_name = os.path.split(self.annotation_paths[i])[1]
            assert slide_name.replace("image", "") == annotation_name.replace(
                "annotation", ""
            ), (
                "Path names of slide {} and annotation {}"
                "do not match".format(slide_name, annotation_name)
            )
        print("We found {} images and annotations".format(self.image_count))
        dataset = tf.data.Dataset.from_tensor_slices({
            'image_paths': self.slide_paths,
            'labels': self.annotation_paths
        })
        # True -> is_norwegian_data: annotations are 0/255 and get rescaled.
        dataset = dataset.map(lambda x: (
            tf.py_function(self.parse_image_and_label, [x['image_paths'], x['labels'], True],
                           [tf.float32, tf.float32])))
        dataset = dataset.map(self._fixup_shape)
        if (validation):
            self.dataset = dataset.repeat(-1).batch(self.config.batch_size, drop_remainder=True)
        else:
            self.dataset = dataset.shuffle(buffer_size=self.config.shuffle_buffer_size).repeat(-1).batch(
                self.config.batch_size, drop_remainder=True)
|
[
"ferraric@student.ethz.ch"
] |
ferraric@student.ethz.ch
|
8efe16a04e801c369c14cae73a57768ce018fca8
|
7ada9e1ede668f402ae9598bc1e5c3caa17658d5
|
/app.py
|
8a289bef4395897b72277d2ebdd8646b27963cf4
|
[] |
no_license
|
nikolaskarta/Information_Systems_Project1
|
a2db2e838a824c2b0b62a340a782bd20b9ba9c5c
|
d6f325c73af0a3b835827da07ed78941bd36e100
|
refs/heads/main
| 2023-05-05T09:14:22.117293
| 2021-05-16T20:43:26
| 2021-05-16T20:43:26
| 367,971,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,748
|
py
|
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
from flask import Flask, request, jsonify, redirect, Response
import json
from bson import json_util
import uuid
import time
# Connect to our local MongoDB
client = MongoClient('mongodb://localhost:27017/')
# Choose database
db = client['InfoSys']
# Choose collections: student records and API user accounts
students = db['Students']
users = db['Users']
# Initiate Flask App
app = Flask(__name__)
# In-memory session store: uuid token -> (username, creation timestamp).
# NOTE(review): entries are never expired even though a timestamp is kept.
users_sessions = {}
def create_session(username):
    """Open a session for *username* and return its uuid token."""
    token = str(uuid.uuid1())
    users_sessions[token] = (username, time.time())
    return token
def is_session_valid(user_uuid):
    """Return True when *user_uuid* identifies a known session."""
    return users_sessions.get(user_uuid) is not None
# QUESTION 1: Create a user
@app.route('/createUser', methods=['POST'])
def create_user():
    """Create an API user from JSON {"username", "password"}.

    Returns 200 on success, 400 when the username is taken, 500 on
    malformed or incomplete input.
    """
    # Request JSON data
    data = None
    try:
        data = json.loads(request.data)
    except Exception as e:
        return Response("bad json content",status=500,mimetype='application/json')
    if data == None:
        return Response("bad request",status=500,mimetype='application/json')
    if not "username" in data or not "password" in data:
        return Response("Information incomplete",status=500,mimetype="application/json")
    # NOTE(review): find().count()/insert() are deprecated pymongo APIs;
    # the check-then-insert is also racy (no unique index on username).
    if users.find({"username":f"{data['username']}"}).count() == 0 :
        users.insert({"username":f"{data['username']}", "password":f"{data['password']}"})
        return Response(data['username']+" was added to the MongoDB", mimetype='application/json', status=200)
    else:
        # FIX: the duplicate check is on username, but the old message said "email".
        return Response("A user with the given username already exists", mimetype='application/json', status=400)
# QUESTION 2: Log in to the system
@app.route('/login', methods=['POST'])
def login():
    """Authenticate {"username", "password"} and return a session uuid."""
    # Request JSON data
    data = None
    try:
        data = json.loads(request.data)
    except Exception as e:
        return Response("bad json content",status=500,mimetype='application/json')
    if data == None:
        return Response("bad request",status=500,mimetype='application/json')
    if not "username" in data or not "password" in data:
        return Response("Information incomplete",status=500,mimetype="application/json")
    # Exactly one matching user means valid credentials.
    # NOTE(review): passwords are stored and compared in plain text.
    if (users.find({"username":f"{data['username']}","password":f"{data['password']}"}).count() == 1):
        user_uuid=create_session(data['username'])
        res = {"uuid": user_uuid, "username": data['username']}
        return Response(json.dumps(res), mimetype='application/json', status=200)
    else:
        return Response("Wrong username or password.",mimetype='application/json', status=400)
# QUESTION 3: Return a student by email
@app.route('/getStudent', methods=['GET'])
def get_student():
    """Return the student document matching {"email"}; requires a valid
    session uuid in the 'authorization' header."""
    # Request JSON data
    data = None
    try:
        data = json.loads(request.data)
    except Exception as e:
        return Response("bad json content",status=500,mimetype='application/json')
    if data == None:
        return Response("bad request",status=500,mimetype='application/json')
    if not "email" in data:
        return Response("Information incomplete",status=500,mimetype="application/json")
    # Session token travels in the 'authorization' header.
    uuid = request.headers.get('authorization')
    if is_session_valid(uuid):
        studentcursor = students.find_one({"email" : f"{data['email']}"})
        # json_util round-trip converts BSON types (ObjectId, dates) to JSON-safe values.
        student = json.loads(json_util.dumps(studentcursor))
        if (student):
            return Response(json.dumps(student), status=200, mimetype='application/json')
        else:
            return Response("Student not found", status=400, mimetype="application/json")
    else:
        return Response("Not authorized", status=401, mimetype='application/json')
# QUESTION 4: Return all students who are 30 years old
@app.route('/getStudents/thirties', methods=['GET'])
def get_students_thirty():
    """Return all students born in 1991 (age 30 at the time of writing).

    NOTE(review): the birth year is hardcoded, so the "30 years old"
    semantics drift every calendar year. The query also runs before the
    session check; cheap, but wasted work for unauthorized callers.
    """
    thirtiescursor = students.find({"yearOfBirth": 1991})
    Students = json.loads(json_util.dumps(thirtiescursor))
    uuid = request.headers.get('authorization')
    if is_session_valid(uuid):
        if Students:
            return Response(json_util.dumps(Students), status=200, mimetype='application/json')
        else:
            return Response("No students found.", status=400, mimetype="application/json")
    else:
        return Response("Not authorized", status=401, mimetype='application/json')
# QUESTION 5: Return all students who are at least 30 years old
@app.route('/getStudents/oldies', methods=['GET'])
def get_students_oldies():
    """Return all students born in or before 1991 (>= 30 years old).

    NOTE(review): hardcoded year — see get_students_thirty.
    """
    oldiescursor = students.find({"yearOfBirth": {'$lte' : 1991}})
    Students = json.loads(json_util.dumps(oldiescursor))
    uuid = request.headers.get('authorization')
    if is_session_valid(uuid):
        if Students:
            return Response(json_util.dumps(Students), status=200, mimetype='application/json')
        else:
            return Response("No students found.", status=400, mimetype="application/json")
    else:
        return Response("Not authorized", status=401, mimetype='application/json')
# QUESTION 6: Return a student who has declared a home address, by email
@app.route('/getStudentAddress', methods=['GET'])
def get_studentAddress():
    """Return {name, street, postcode} for the student matching {"email"}
    that has an 'address' field; requires a valid session."""
    # Request JSON data
    data = None
    try:
        data = json.loads(request.data)
    except Exception as e:
        return Response("bad json content",status=500,mimetype='application/json')
    if data == None:
        return Response("bad request",status=500,mimetype='application/json')
    if not "email" in data:
        return Response("Information incomplete",status=500,mimetype="application/json")
    uuid = request.headers.get('authorization')
    if is_session_valid(uuid):
        addresscursor = students.find_one({"email" : f"{data['email']}", "address": {"$exists": 1}})
        addressdict = json.loads(json_util.dumps(addresscursor))
        if addressdict:
            # Only the first address entry is reported.
            student = {
                "name" : addressdict['name'],
                "street": addressdict['address'][0]['street'],
                "postcode": addressdict['address'][0]['postcode']
            }
            return Response(json.dumps(student), status=200, mimetype='application/json')
        else:
            return Response("No student found", status=400, mimetype="application/json")
    else:
        return Response("Not authorized", status=401, mimetype='application/json')
# QUESTION 7: Delete a student by email
@app.route('/deleteStudent', methods=['DELETE'])
def delete_student():
    """Delete the student matching {"email"}; requires a valid session.

    Returns 200 with a confirmation message, 400 when no student matches.
    """
    # Request JSON data
    data = None
    try:
        data = json.loads(request.data)
    except Exception as e:
        return Response("bad json content",status=500,mimetype='application/json')
    if data == None:
        return Response("bad request",status=500,mimetype='application/json')
    if not "email" in data:
        return Response("Information incomplete",status=500,mimetype="application/json")
    uuid = request.headers.get('authorization')
    if is_session_valid(uuid):
        delcursor = students.find_one({"email" : f"{data['email']}"})
        deldict = json.loads(json_util.dumps(delcursor))
        if deldict:
            students.delete_one({"email" : f"{data['email']}"})
            # FIX: the confirmation message was missing the space before "was".
            msg = deldict['name'] + " was deleted."
            return Response(msg, status=200, mimetype='application/json')
        else:
            msg = "Student not found"
            return Response(msg, status=400, mimetype="application/json")
    else:
        return Response("Not authorized", status=401, mimetype='application/json')
# QUESTION 8: Add courses to a student by email
@app.route('/addCourses', methods=['PATCH'])
def add_courses():
    """Set the 'courses' field of the student matching {"email"} from the
    JSON {"courses"} payload; requires a valid session."""
    # Request JSON data
    data = None
    try:
        data = json.loads(request.data)
    except Exception as e:
        return Response("bad json content",status=500,mimetype='application/json')
    if data == None:
        return Response("bad request",status=500,mimetype='application/json')
    if not "email" in data or not "courses" in data:
        return Response("Information incomplete",status=500,mimetype="application/json")
    uuid = request.headers.get('authorization')
    if is_session_valid(uuid):
        courses = data['courses']
        query = {"email": f"{data['email']}"}
        # FIX: previously stored f"{courses}" — the Python repr of the list as
        # one string — which corrupted the field getPassedCourses reads back.
        # Store the structured value itself.
        newvalues = { "$set": { "courses": courses } }
        if (students.find({"email" : f"{data['email']}"}).count()==1):
            students.update_one(query, newvalues)
            return Response("Courses added succesfully", status=200, mimetype='application/json')
        else:
            return Response("Student not found", status=400, mimetype="application/json")
    else:
        return Response("Not authorized", status=401, mimetype='application/json')
# QUESTION 9: Return the passed courses of a student by email
@app.route('/getPassedCourses', methods=['GET'])
def get_courses():
    """Return the 'courses' field of the student matching {"email"};
    requires a valid session."""
    # Request JSON data
    data = None
    try:
        data = json.loads(request.data)
    except Exception as e:
        return Response("bad json content",status=500,mimetype='application/json')
    if data == None:
        return Response("bad request",status=500,mimetype='application/json')
    if not "email" in data:
        return Response("Information incomplete",status=500,mimetype="application/json")
    uuid = request.headers.get('authorization')
    if is_session_valid(uuid):
        # Only match students that actually have a 'courses' field.
        coursescursor = students.find_one({"email" : f"{data['email']}", "courses": {"$exists": 1}})
        coursesdict = json.loads(json_util.dumps(coursescursor))
        student = {}
        if coursesdict:
            student = coursesdict['courses']
            return Response(json.dumps(student), status=200, mimetype='application/json')
        else:
            return Response("No student found", status=400, mimetype="application/json")
    else:
        return Response("Not authorized", status=401, mimetype='application/json')
# Run the Flask service in debug mode on port 5000.
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=5000)
|
[
"noreply@github.com"
] |
noreply@github.com
|
ffc7cf8cd00d31e3bf1693cd8ac482e0f6bdc78e
|
7dab00e63b7193010344a0f05e0cc641d7091f5f
|
/students/Zhengtang_Yang/lesson07/Activity/populate_job.py
|
b7cce248e766d043f66bf5f3457c618ce6bd910c
|
[] |
no_license
|
aurel1212/Sp2018-Online
|
9307e872c14c5ddd795bdc738b325de087895d55
|
263685ca90110609bfd05d621516727f8cd0028f
|
refs/heads/master
| 2020-04-05T18:35:49.761140
| 2018-06-19T18:24:27
| 2018-06-19T18:24:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,124
|
py
|
"""
Learning persistence with Peewee and sqlite
delete the database file to start over
(but running this program does not require it)
populate the DB with data
"""
from peewee import *
from v00_personjob_model import Person, Job, Department
import logging
def populate_db():
    """
    Add job data to the personjob database.

    Inserts a fixed set of Job rows (each in its own transaction, with
    foreign keys enforced) and then prints every Job row back out.
    """
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    database = SqliteDatabase('../data/personjob.db')
    logger.info('Working with Job class')
    logger.info('Creating Job records: just like Person. We use the foreign key')
    # Column indices into each tuple below.
    job_name = 0
    start_date = 1
    end_date = 2
    salary = 3
    person_employed = 4
    department = 5
    # FIX: two end/start dates used commas instead of dashes
    # ('2014-11,10', '2018-01,05'), producing invalid ISO dates.
    jobs = [
        ('Analyst', '2001-09-22', '2003-01-30', 65500, 'Andrew', 'A101'),
        ('Senior analyst', '2003-02-01', '2006-10-22', 70000, 'Andrew', 'A201'),
        ('Senior business analyst', '2006-10-23', '2016-12-24', 80000, 'Andrew', 'A301'),
        ('Admin supervisor', '2012-10-01', '2014-11-10', 45900, 'Peter', '401'),
        ('Admin manager', '2014-11-14', '2018-01-05', 45900, 'Peter', '501')
    ]
    try:
        database.connect()
        database.execute_sql('PRAGMA foreign_keys = ON;')
        for job in jobs:
            with database.transaction():
                # Job.create() persists the row; the old extra save() was redundant.
                Job.create(
                    job_name = job[job_name],
                    start_date = job[start_date],
                    end_date = job[end_date],
                    salary = job[salary],
                    person_employed = job[person_employed],
                    department = job[department])
        logger.info('Reading and print all Job rows (note the value of person)...')
        for job in Job:
            logger.info(f'{job.job_name} : {job.start_date} to {job.end_date} for {job.person_employed}')
    except Exception as e:
        logger.info(f'Error creating = {job[job_name]}')
        logger.info(e)
    finally:
        logger.info('database closes')
        database.close()
# Populate the Job table when run as a script.
if __name__ == '__main__':
    populate_db()
|
[
"zyang888@uw.edu"
] |
zyang888@uw.edu
|
912732738030d84355aa57768facc7293bf43a88
|
187a6558f3c7cb6234164677a2bda2e73c26eaaf
|
/jdcloud_sdk/services/clouddnsservice/models/HostRRlb.py
|
a70c1c23618e54df92e3c0396719c63ee09311d7
|
[
"Apache-2.0"
] |
permissive
|
jdcloud-api/jdcloud-sdk-python
|
4d2db584acc2620b7a866af82d21658cdd7cc227
|
3d1c50ed9117304d3b77a21babe899f939ae91cd
|
refs/heads/master
| 2023-09-04T02:51:08.335168
| 2023-08-30T12:00:25
| 2023-08-30T12:00:25
| 126,276,169
| 18
| 36
|
Apache-2.0
| 2023-09-07T06:54:49
| 2018-03-22T03:47:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class HostRRlb(object):
    """Weighted load-balancing entry of a DNS host resolution record."""
    def __init__(self, hostValue=None, id=None, weight=None, rate=None):
        """
        :param hostValue: (Optional) value of the resolution record
        :param id: (Optional) ID of the resolution record
        :param weight: (Optional) weight of the resolution record
        :param rate: (Optional) 100x this record's share in overall balancing
        """
        self.hostValue = hostValue
        self.id = id
        self.weight = weight
        self.rate = rate
|
[
"oulinbao@jd.com"
] |
oulinbao@jd.com
|
4599ace703ad37d50d4e3e1ee36e57491d12aebf
|
6b5b3396ebf5c1af4f6f14cbeb03fc9bc4570a23
|
/www/Webserver.py
|
f703d55dccf3102a6d3ac6083df3373cec870017
|
[
"CC-BY-3.0",
"OFL-1.1",
"MIT"
] |
permissive
|
william-stearns/E_ink_dashboard
|
0f5b03ec2da1a54a5941a35f2b957b7e3d62abb0
|
1625b213baef336833497c4593157485cfffdad4
|
refs/heads/main
| 2023-02-03T21:20:00.375604
| 2020-12-13T10:36:02
| 2020-12-13T10:36:02
| 323,455,960
| 0
| 0
|
MIT
| 2020-12-21T21:52:08
| 2020-12-21T21:52:07
| null |
UTF-8
|
Python
| false
| false
| 9,334
|
py
|
from flask import Flask, render_template, request
from dashboard_forms import Dashform
#import create_pickle as p_j
import json
import os
app = Flask(__name__)
app.secret_key = 'dash_flask_key'
creddir = os.path.join(os.path.dirname(
os.path.dirname(os.path.realpath(__file__))), 'credentials/dash_id.json')
# creddir_2 = os.path.join(os.path.dirname(
# os.path.dirname(os.path.realpath(__file__))), 'credentials')
tempdir = os.path.join(os.path.dirname(
os.path.dirname(os.path.realpath(__file__))), 'www/templates/dash_id_template.json')
def Convert(string):
    """Split a comma-separated *string* into a list, removing spaces.

    FIX: the original called str(i).replace(' ', '') and discarded the
    result (strings are immutable), appending the unstripped element —
    clearly contrary to the intent of the call.
    """
    return [part.replace(' ', '') for part in string.split(",")]
def formatting(string):
    """Strip list-literal punctuation ('[', ']', quotes, spaces) from *string*."""
    for char in ("[", "]", "'", " "):
        string = string.replace(char, "")
    return string
def json_exists(file_name):
    """Return True when *file_name* exists on disk (file or directory)."""
    exists = os.path.exists(file_name)
    return exists
def getinfo():
    """Load the dashboard settings JSON, seeding it from the template first.

    If the credentials file exists it is parsed and returned; otherwise the
    bundled template is copied into place and then parsed.
    """
    if json_exists(creddir):
        with open(creddir, "r") as rdash_id:
            return json.load(rdash_id)
    # First run: seed the credentials file from the shipped template.
    # FIX: removed the bare `f2.close` (missing parentheses — a no-op
    # expression); the `with` block already closes both files.
    with open(tempdir, "r") as f1, open(creddir, "w+") as f2:
        f2.write(f1.read())
    with open(creddir, "r") as rdash_id:
        return json.load(rdash_id)
def save_json(res):
    """Merge submitted form values *res* into the settings JSON on disk.

    Reads the current file, overwrites every known key from the form
    mapping, then rewrites the file. Comma-separated text fields are split
    back into lists via Convert().
    """
    with open(creddir, 'r') as f:
        data = json.load(f)
    data["Transit"]["T_URL"] = res["T_URL"]
    data["Transit"]["T_API_KEY"] = res["T_API_KEY"]
    data["Transit"]["Stops"] = Convert(res["Stops"])
    data["Transit"]["T_BUS"] = res["T_BUS"]
    data["Transit"]["T_BUS_TIME"] = res["T_BUS_TIME"]
    data["Weather"]["W_URL"] = res["W_URL"]
    data["Weather"]["UNITS"] = res["UNITS"]
    data["Weather"]["W_API_KEY"] = res["W_API_KEY"]
    data["Geolocation"]["G_URL"] = res["G_URL"]
    data["Geolocation"]["G_API_KEY"] = res["G_API_KEY"]
    data["Currency"]["C_URL_1"] = res["C_URL_1"]
    data["Currency"]["C_API_KEY_1"] = res["C_API_KEY_1"]
    data["Currency"]["C_URL_3"] = res["C_URL_3"]
    data["Currency"]["C_URL_4"] = res["C_URL_4"]
    data["Currency"]["CURR_CHECK"] = Convert(res["CURR_CHECK"])
    data["Stocks"]["STOCK_W_URL"] = res["STOCK_W_URL"]
    data["Stocks"]["STOCK_WE_URL"] = res["STOCK_WE_URL"]
    data["Stocks"]["STOCK_API"] = res["STOCK_API"]
    data["Stocks"]["STOCK_CHECK"] = Convert(res["STOCK_CHECK"])
    data["Tasklist"]["gsheet_json"] = res["gsheet_json"]
    data["Tasklist"]["sheetname"] = res["sheetname"]
    data["G_Meetings"]["CREDENTIALS_FILE"] = res["CREDENTIALS_FILE"]
    data["News"]["NEWS_URL"] = res["NEWS_URL"]
    data["News"]["NEWS_API"] = res["NEWS_API"]
    # NEWS_SOURCES stays a comma-separated string; only spaces are removed.
    data["News"]["NEWS_SOURCES"] = str(res["NEWS_SOURCES"]).replace(' ', '')
    data["System"]["waking_time"] = res["waking_time"]
    data["System"]["sleeping_time"] = res["sleeping_time"]
    data["System"]["mod_1_choice"] = res["mod_1_choice"]
    data["System"]["mod_2_choice"] = res["mod_2_choice"]
    data["System"]["mod_3_choice"] = res["mod_3_choice"]
    data["System"]["mod_4_choice"] = res["mod_4_choice"]
    data["System"]["refresh_time"] = res["refresh_time"]
    data["System"]["awake"] = res["awake"]
    # NOTE(review): os.remove before rewriting looks redundant — opening
    # with 'w+' truncates the file anyway.
    os.remove(creddir)
    with open(creddir, 'w+') as f:
        json.dump(data, f, indent=4)
def _fill_settings_form(form, d_data):
    """Copy every settings value from dict *d_data* into the form fields.

    Extracted because the POST and GET branches of login() previously
    duplicated these ~35 assignments verbatim.
    """
    form.T_URL.data = str(d_data["Transit"]["T_URL"])
    form.T_API_KEY.data = str(d_data["Transit"]["T_API_KEY"])
    form.Stops.data = formatting(str(d_data["Transit"]["Stops"]))
    form.T_BUS.data = str(d_data["Transit"]["T_BUS"])
    form.T_BUS_TIME.data = str(d_data["Transit"]["T_BUS_TIME"])
    form.W_URL.data = str(d_data["Weather"]["W_URL"])
    form.W_API_KEY.data = str(d_data["Weather"]["W_API_KEY"])
    form.UNITS.data = str(d_data["Weather"]["UNITS"])
    form.C_URL_1.data = str(d_data["Currency"]["C_URL_1"])
    form.C_API_KEY_1.data = str(d_data["Currency"]["C_API_KEY_1"])
    form.C_URL_3.data = str(d_data["Currency"]["C_URL_3"])
    form.C_URL_4.data = str(d_data["Currency"]["C_URL_4"])
    form.CURR_CHECK.data = formatting(str(d_data["Currency"]["CURR_CHECK"]))
    form.STOCK_W_URL.data = str(d_data["Stocks"]["STOCK_W_URL"])
    form.STOCK_WE_URL.data = str(d_data["Stocks"]["STOCK_WE_URL"])
    form.STOCK_API.data = str(d_data["Stocks"]["STOCK_API"])
    form.STOCK_CHECK.data = formatting(str(d_data["Stocks"]["STOCK_CHECK"]))
    form.G_URL.data = str(d_data["Geolocation"]["G_URL"])
    form.G_API_KEY.data = str(d_data["Geolocation"]["G_API_KEY"])
    form.gsheet_json.data = str(d_data["Tasklist"]["gsheet_json"])
    form.sheetname.data = str(d_data["Tasklist"]["sheetname"])
    form.CREDENTIALS_FILE.data = str(d_data["G_Meetings"]["CREDENTIALS_FILE"])
    form.NEWS_URL.data = str(d_data["News"]["NEWS_URL"])
    form.NEWS_API.data = str(d_data["News"]["NEWS_API"])
    form.NEWS_SOURCES.data = formatting(str(d_data["News"]["NEWS_SOURCES"]))
    form.waking_time.data = str(d_data["System"]["waking_time"])
    form.sleeping_time.data = str(d_data["System"]["sleeping_time"])
    form.mod_1_choice.data = str(d_data["System"]["mod_1_choice"])
    form.mod_2_choice.data = str(d_data["System"]["mod_2_choice"])
    form.mod_3_choice.data = str(d_data["System"]["mod_3_choice"])
    form.mod_4_choice.data = str(d_data["System"]["mod_4_choice"])
    form.refresh_time.data = str(d_data["System"]["refresh_time"])
    form.awake.data = str(d_data["System"]["awake"])
@app.route('/', methods=['POST', 'GET'])
def login():
    """Render the settings page; on POST 'Submit', persist the form first.

    In both branches the form is (re)populated from the settings file so
    the page always reflects what is on disk.
    """
    form = Dashform()
    d_data = getinfo()
    form.res_msg.label = ""
    if request.method == 'POST':
        form.res_msg.label = ""
        if request.form['btn'] == 'Submit':
            results = request.form
            save_json(results)
            form.res_msg.label = "Information saved successfully"
        # Re-read so the form shows exactly what was persisted.
        d_data = getinfo()
        _fill_settings_form(form, d_data)
        return render_template('Settings.html', form=form)
    elif request.method == 'GET':
        # Populate the form on start.
        d_data = getinfo()
        form.res_msg.label = ""
        _fill_settings_form(form, d_data)
        return render_template('Settings.html', form=form)
def shutdown_server():
    """Stop the development server via Werkzeug's shutdown hook.

    Raises RuntimeError when not running under the Werkzeug dev server.
    NOTE(review): this hook was removed in newer Werkzeug releases — verify
    against the pinned version.
    """
    func = request.environ.get('werkzeug.server.shutdown')
    if func is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    func()
# Unauthenticated shutdown endpoint for the dashboard's dev server.
@ app.route('/shutdown', methods=['GET'])
def shutdown():
    shutdown_server()
    return 'Server shutting down...'
# Serve on all interfaces with Flask's default port.
if __name__ == '__main__':
    app.run(host='0.0.0.0')
|
[
"noreply@github.com"
] |
noreply@github.com
|
308b48444a600edc33da2bb114ac11b915fe08b0
|
07009e377963302aaebb44861d1e96c4de359b7c
|
/maio/management/commands/maio_getimages.py
|
cb4597b04599b7b0001a3706b628a8d80a9e8bc4
|
[
"MIT"
] |
permissive
|
jonmsawyer/maio_old
|
3489307ecb02b54317ec7e88554b0acb973434ec
|
3670d0fe2a4e9173ac3d02fe7b629f563e935894
|
refs/heads/master
| 2021-06-21T09:00:18.703397
| 2017-07-08T20:54:06
| 2017-07-08T20:54:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,306
|
py
|
import os
import sys
import hashlib
import magic
from PIL import Image
from django.core.management.base import CommandError
import django.db.utils
from maio_core.models import File
from maio.management.commands._base import MaioBaseCommand
class Command(MaioBaseCommand):
    """Management command: recursively scan directories for images.

    For every readable image found it computes an MD5, writes a 128x128
    JPEG thumbnail into an md5-sharded directory tree, and upserts a File
    row keyed by the md5 of the file path.

    NOTE(review): the `help` string below describes SECRET_KEY generation
    — it looks copied from another command and does not match what
    handle() does. The code is also Python-2 era (`iteritems`,
    `str.decode('utf-8')` on paths) — confirm the target interpreter.
    """
    args = '<None>'
    help = 'Generates a pseudorandom SECRET_KEY for use in conf/site_settings.py'
    def add_arguments(self, parser):
        # Positional arguments: one or more directories to walk.
        parser.add_argument('directory', nargs='+', type=str)
    def handle(self, *args, **kwargs):
        """Walk each given directory and ingest every image found."""
        from django.conf import settings
        BASE_DIR = settings.BASE_DIR
        MAIO_SETTINGS = settings.MAIO_SETTINGS
        # Accepted image mime types mapped to canonical file extensions
        # (only the keys are actually consulted, by is_image()).
        mimetype_extension = {
            'image': {
                'image/gif': '.gif',
                'image/jpeg': '.jpg',
                'image/pjpeg': '.jpg',
                'image/png': '.png',
                'image/svg+xml': '.svg',
                'image/tiff': '.tiff',
                'image/bmp': '.bmp',
                'image/x-windows-bmp': '.bmp',
                'image/x-tiff': '.tiff',
            }
        }
        def usage():
            # Print command-line usage to the command's output stream.
            self.out("Usage:")
            self.out("")
            self.out("%s DIR" % (sys.argv[0],))
            self.out("")
            self.out(" DIR")
            self.out(" The directory to recursively walk for images to store in the database.")
            self.out("")
        def mk_md5_dir(md5, root):
            # Build (and create if needed) root/aa/bb/cc from the first
            # six hex digits of the md5; returns the directory path.
            if len(md5) == 32:
                part1 = md5[0:2]
                part2 = md5[2:4]
                part3 = md5[4:6]
                dirtomake = os.path.join(root, part1, part2, part3)
                if os.path.isdir(dirtomake):
                    return dirtomake
                if os.path.isdir(root):
                    os.makedirs(dirtomake)
                return dirtomake
        def is_image(mimetype):
            # True when mimetype is one of the accepted image types.
            # NOTE(review): dict.iteritems() is Python 2 only; a simple
            # membership test on the dict would do the same job.
            for key, value in mimetype_extension['image'].iteritems():
                if mimetype == key:
                    return True
            return False
        #directory = sys.argv[1]
        directory = kwargs.get('directory', '')
        self.out('Directory is:', str(directory))
        if len(directory) == 0:
            self.out("Please provide a directory to recursively walk for pictures.")
            self.out("")
            usage()
            exit(1)
        # Only the first of the given directories is walked.
        if not os.path.isdir(directory[0]):
            self.out("\"%s\" is not a valid directory." % (directory,))
            self.out("")
            usage()
            exit(1)
        mime = magic.Magic(mime=True)
        for root, subdirs, files in os.walk(directory[0]):
            for filename in files:
                # Skip files whose names cannot be decoded as UTF-8.
                try:
                    file_path = os.path.join(root, filename).decode('utf-8')
                except UnicodeDecodeError as e:
                    if "'utf8' codec can't decode bytes" in str(e):
                        self.out("Error processing %s, unreadable file name ..." % (os.path.join(root, filename),))
                        continue
                    else:
                        raise
                except:
                    raise
                # get mime type
                try:
                    mimetype = mime.from_file(file_path)
                except IOError as e:
                    if 'File does not exist' in str(e):
                        self.out('file %s does not exist' % (file_path,))
                        continue
                    else:
                        raise
                except UnicodeDecodeError as e:
                    self.out("File: ", file_path)
                    raise
                except:
                    raise
                if not is_image(mimetype):
                    self.out('%s is not a valid image type... (it might be a symlink?)' % (file_path,))
                    continue
                # stat file
                sfile = os.stat(file_path)
                # open image
                truncated = False
                try:
                    im = Image.open(file_path)
                    # Enforce minimum dimensions: 'and' requires both to
                    # pass, 'or' requires at least one.
                    if MAIO_SETTINGS.get('images_min_inclusive', '').lower() == 'and':
                        if im.size[0] < MAIO_SETTINGS.get('images_min_width', 0) or \
                           im.size[1] < MAIO_SETTINGS.get('images_min_height', 0):
                            continue
                    elif MAIO_SETTINGS.get('images_min_inclusive', '').lower() == 'or':
                        if im.size[0] < MAIO_SETTINGS.get('images_min_width', 0) and \
                           im.size[1] < MAIO_SETTINGS.get('images_min_height', 0):
                            continue
                    else:
                        pass
                    im.load()
                    if im.mode != "RGB":
                        im = im.convert("RGB")
                except IOError as e:
                    self.out('Error in processing %s ...' % (file_path,),)
                    if 'truncated' in str(e):
                        # Truncated images are flagged but still processed.
                        self.out('truncated')
                        truncated = True
                        pass
                    elif 'cannot identify image file' in str(e):
                        self.out('invalid image file')
                        continue
                    elif 'No such file or directory' in str(e):
                        self.out('no such file or directory')
                        continue
                    else:
                        raise
                # get md5sum
                md5sum = hashlib.md5()
                with open(file_path, 'rb') as fh:
                    md5sum.update(fh.read())
                md5 = md5sum.hexdigest()
                # process thumbnail
                thumb_dir = mk_md5_dir(md5, settings.MAIO_SETTINGS['thumbnail_directory'])
                thumb = os.path.join(thumb_dir,
                                     md5 + '.jpg')
                if not os.path.isfile(thumb):
                    im.thumbnail((128, 128), Image.ANTIALIAS)
                    im.save(thumb)
                self.out(md5sum.hexdigest(), mimetype, file_path)
                # save file information to the database
                try:
                    # Rows are keyed by the md5 of the *path*, so a file that
                    # already exists triggers the IntegrityError update path.
                    file_path_hash = hashlib.md5()
                    file_path_hash.update(file_path.encode('utf-8'))
                    fph = file_path_hash.hexdigest()
                    f = File(mime_type=mimetype, size=sfile.st_size, mtime=sfile.st_mtime,
                             md5sum=md5, tn_path=thumb, file_path=file_path, file_path_hash=fph)
                    f.save()
                except django.db.utils.IntegrityError:
                    f = File.objects.get(file_path_hash=fph)
                    if sfile.st_mtime == f.mtime:
                        self.out("Already in database and up-to-date, skipping %s ..." % (file_path,))
                        continue
                    f.mime_type = mimetype
                    f.size = sfile.st_size
                    f.mtime = sfile.st_mtime
                    f.md5sum = md5
                    f.tn_path = thumb
                    f.save()
                except:
                    raise
|
[
"jon@jonmsawyer.com"
] |
jon@jonmsawyer.com
|
226f2dde2e7f83c6d967dd639b88280ff249582c
|
661c0a56baddaa344c9c06cfd70ad60e776efe41
|
/textminer/extractor.py
|
179f5e65114a098feb217ba40014a2dd301cf8de
|
[] |
no_license
|
lancekrogers/textminer
|
c6426bb519ac3d46c3fd6ec2dc2ac9ffa30562f9
|
21a24dc17e06d90d8df4ae785c72cbdc2a768dd3
|
refs/heads/master
| 2021-01-14T08:51:09.045553
| 2015-07-22T20:09:36
| 2015-07-22T20:09:36
| 37,545,282
| 0
| 0
| null | 2015-06-16T17:29:19
| 2015-06-16T17:29:19
| null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
import re
def phone_numbers(text):
    """Return all (NNN) NNN-NNNN style phone numbers found in *text*.

    FIX: the original pattern used unescaped `.` wildcards, which both
    matched any character as a "separator" and *required* one; now the
    separators are restricted to space/dash/dot and are optional, so
    "(555)1234567" also matches.
    """
    return re.findall(r"\(\d{3}\)[\s.-]?\d{3}[\s.-]?\d{4}", text)
# hard mode
# hard mode
def emails(text):
    """Return all email addresses found in *text*.

    FIX: the original was an unimplemented stub returning None.
    """
    return re.findall(r"[\w.+-]+@[\w-]+(?:\.[\w-]+)+", text)
|
[
"lancekincaid1994@gmail.com"
] |
lancekincaid1994@gmail.com
|
58ecd20ff1982f1c16f54d7e4aab822e9e33c351
|
b7a8cedcd7df9a70eda8432866bdbe8a33833dab
|
/Day 54 Web Dev/decorator.py
|
ca01adc8d724b84b07f1ce8739267057526e763c
|
[] |
no_license
|
shidoken/100DaysofPython
|
55bcd703382fa2ca875e428aee902652efaf4d27
|
3808560893dc7097d82fd59c545d647278415c8f
|
refs/heads/main
| 2023-04-03T18:10:08.202986
| 2021-04-16T07:12:16
| 2021-04-16T07:12:16
| 329,128,523
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 910
|
py
|
import time
def c_time():
    """Return the current wall-clock time in seconds since the epoch."""
    return time.time()
def speed_calc_decorator(function):
    """Decorator: time one call of *function* and print its duration.

    FIX: the original wrapper called function() once before starting the
    clock and again inside it, so every decorated call executed the
    function twice. It now runs exactly once, inside the timed window.
    """
    def wrapper_function():
        start = time.time()
        function()
        difference_time = time.time() - start
        print(f"{function.__name__} run speed: {difference_time}s")
    return wrapper_function
def delay_decorator(function):
    """Decorator: sleep two seconds, then invoke *function* once.

    FIX: the original wrapper called function() twice back to back.
    """
    def wrapper_function():
        time.sleep(2)
        function()
    return wrapper_function
# Demo: two decorated busy loops of different sizes, run at import time.
@speed_calc_decorator
def fast_function():
    for i in range(10000000):
        i * i
@speed_calc_decorator
def slow_function():
    for i in range(100000000):
        i * i
# NOTE(review): these execute on import and together take several seconds.
fast_function()
slow_function()
|
[
"noreply@github.com"
] |
noreply@github.com
|
9daa5c6e9687c22774d2f1effd501abdaf003b84
|
2412ab2a7fdefe1d165a6ec63292fdbe965c5131
|
/run_GOseq.py
|
0cd6278fe2ee1279bbe8a15bc3b07ebb23156e13
|
[] |
no_license
|
cfc424/NGS
|
e7651d9cbd1309821f5251c3ab3c9c7ba9e19352
|
a7fd0631adc7365d358e49f407e2ebe796163443
|
refs/heads/master
| 2023-03-21T14:36:05.877307
| 2019-11-13T01:09:13
| 2019-11-13T01:09:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,447
|
py
|
#!/usr/bin/env perl
# Generates a one-off R script (__runGOseq.R) that runs GOseq GO-term
# enrichment/depletion tests (corrected for gene-length selection bias)
# for each gene category, then executes it via "R --vanilla".
use strict;
use warnings;
use Getopt::Long qw(:config no_ignore_case bundling pass_through);
my $usage = <<__EOUSAGE__;
###############################################################################################
#
# --factor_labeling <string> tab delimited file with format: factor<tab>feature_id
# or
# --genes_single_factor <string> list of genes to test (can be a matrix, only the first column is used for gene IDs)
#
# --GO_assignments <string> extracted GO assignments with format: feature_id <tab> GO:000001,GO:00002,...
#
# --lengths <string> feature lengths with format: feature_id <tab> length
#
###############################################################################################
__EOUSAGE__
;
my ($factor_labeling, $GO_file, $help_flag, $lengths_file, $genes_single_factor_file);
&GetOptions("factor_labeling=s" => \$factor_labeling,
            "GO_assignments=s" => \$GO_file,
            "lengths=s" => \$lengths_file,
            "genes_single_factor=s" => \$genes_single_factor_file,
            "help|h" => \$help_flag,
    );
if ($help_flag) {
    die $usage;
}
# Either a multi-factor labeling file or a single custom gene list is required,
# plus the GO assignments and the feature-length table.
unless (($factor_labeling || $genes_single_factor_file) && $GO_file && $lengths_file) {
    die $usage;
}
main: {
    my $Rscript = "__runGOseq.R";
    open (my $ofh, ">$Rscript") or die $!;
    # Load gene -> category table; a plain gene list becomes one 'custom_list' category.
    if ($genes_single_factor_file) {
        print $ofh "factor_labeling = read.table(\"$genes_single_factor_file\", row.names=1)\n";
        print $ofh "factor_labeling[,1] = rep('custom_list', dim(factor_labeling)[1])\n";
        print $ofh "factor_labeling = factor_labeling[,1,drop=F]\n";
    }
    else {
        print $ofh "factor_labeling = read.table(\"$factor_labeling\", row.names=1, header=F)\n";
    }
    print $ofh "colnames(factor_labeling) = c('type')\n";
    print $ofh "factor_list = unique(factor_labeling[,1])\n";
    # Feature lengths and GO assignments (comma-separated GO id list per feature).
    print $ofh "gene_lengths = read.table(\"$lengths_file\", header=T, row.names=1)\n";
    print $ofh "gene_lengths = as.matrix(gene_lengths[,1,drop=F])\n";
    print $ofh "GO_info = read.table(\"$GO_file\", header=F, row.names=1,stringsAsFactors=F)\n";
    print $ofh "GO_info_listed = apply(GO_info, 1, function(x) unlist(strsplit(x,',')))\n";
    print $ofh "names(GO_info_listed) = rownames(GO_info)\n";
    print $ofh "features_with_GO = rownames(GO_info)\n";
    print $ofh "lengths_features_with_GO = gene_lengths[features_with_GO,]\n";
    # R helper: look up "<ontology> <term>" description for a GO id via GO.db.
    print $ofh "get_GO_term_descr = function(x) {\n";
    print $ofh " d = 'none';\n"
        . " go_info = GOTERM[[x]];\n"
        . " if (length(go_info) >0) { d = paste(Ontology(go_info), Term(go_info), sep=' ');}\n"
        . " return(d);\n"
        . "}\n";
    print $ofh "# build pwf based on ALL DE features\n";
    print $ofh "cat_genes_vec = as.integer(features_with_GO %in% rownames(factor_labeling))\n";
    print $ofh "library(goseq)\n";
    print $ofh "library(GO.db)\n";
    print $ofh "library(qvalue)\n";
    # nullp() builds the probability-weighting function correcting length bias.
    print $ofh "pwf=nullp(cat_genes_vec,bias.data=lengths_features_with_GO)\n";
    print $ofh "rownames(pwf) = names(GO_info_listed)\n";
    # One enrichment + depletion pass per category.
    print $ofh "for (feature_cat in factor_list) {\n";
    print $ofh " message('Processing category: ', feature_cat)\n";
    print $ofh " cat_genes_vec = as.integer(features_with_GO %in% rownames(factor_labeling)[factor_labeling\$type == feature_cat])\n";
    #print $ofh " names(cat_genes_vec) = features_with_GO\n";
    print $ofh " pwf\$DEgenes = cat_genes_vec\n";
    print $ofh " res = goseq(pwf,gene2cat=GO_info_listed)\n";
    ## Process the over-represented
    print $ofh " ## over-represented categories:\n";
    #print $ofh " res\$over_represented_FDR = p.adjust(res\$over_represented_pvalue, method='BH')\n";
    print $ofh " pvals = res\$over_represented_pvalue\n";
    # Cap p-values just under 1 so qvalue() does not fail on exact 1s.
    print $ofh " pvals[pvals > 1 -1e-10] = 1-1e-10\n";
    print $ofh " q = qvalue(pvals)\n";
    print $ofh " res\$over_represented_FDR = q\$qvalues\n";
    if ($genes_single_factor_file) {
        print $ofh "go_enrich_filename = paste(\"$genes_single_factor_file\", '.GOseq.enriched', sep='')\n";
    }
    else {
        print $ofh " go_enrich_filename = paste(feature_cat,'.GOseq.enriched', sep='')\n";
    }
    print $ofh " result_table = res[res\$over_represented_pvalue<=0.05,]\n";
    print $ofh " descr = unlist(lapply(result_table\$category, get_GO_term_descr))\n";
    print $ofh " result_table\$go_term = descr;\n";
    print $ofh " write.table(result_table[order(result_table\$over_represented_pvalue),], file=go_enrich_filename, sep='\t', quote=F, row.names=F)\n";
    ## Process the under-represented
    print $ofh " ## under-represented categories:\n";
    print $ofh " pvals = res\$under_represented_pvalue\n";
    print $ofh " pvals[pvals>1-1e-10] = 1 - 1e-10\n";
    print $ofh " q = qvalue(pvals)\n";
    print $ofh " res\$under_represented_FDR = q\$qvalues\n";
    if ($genes_single_factor_file) {
        print $ofh " go_depleted_filename = paste(\"$genes_single_factor_file\", '.GOseq.depleted', sep='')\n";
    }
    else {
        print $ofh " go_depleted_filename = paste(feature_cat,'.GOseq.depleted', sep='')\n";
    }
    print $ofh " result_table = res[res\$under_represented_pvalue<=0.05,]\n";
    print $ofh " descr = unlist(lapply(result_table\$category, get_GO_term_descr))\n";
    print $ofh " result_table\$go_term = descr;\n";
    print $ofh " write.table(result_table[order(result_table\$under_represented_pvalue),], file=go_depleted_filename, sep='\t', quote=F, row.names=F)\n";
    print $ofh "}\n";
    close $ofh;
    # Run the generated R script; abort loudly on a non-zero exit status.
    my $cmd = "R --vanilla -q < $Rscript";
    my $ret = system($cmd);
    if ($ret) {
        die "Error, cmd: $cmd died with ret $ret";
    }
    else {
        print STDERR "\n\nDone.\n\n";
    }
    exit(0);
}
__END__
Notes:
1. Get the transcript GO annotation by running Trinotate, getting a trinotate.xls report file, and then running:
trinotate-code/util/extract_GO_assignments_from_Trinotate_xls.pl --Trinotate_xls trinotate.xls -G --include_ancestral_terms > go_annotations.txt
# use -T instead of -G in above to get transcript instead of gene-level annotations.
2. Run GO-Seq like so, using this script 'run_GOseq.pl' included in Trinity:
TRINITY_HOME/Analysis/DifferentialExpression/run_GOseq.pl --factor_labeling factor_labeling.txt --GO_assignments go_annotations.txt --lengths gene.lengths.txt
The 'factor_labeling.txt' file should be of format:
gene_id (tab) factor
where factor is a string describing that subset of genes.
For example:
my_gene_A (tab) diff_expressed_cond_X_Y
my_gene_B (tab) diff_expressed_cond_X_Y
...
my_gene_M (tab) diff_cond_W_Z
my_gene_N (tab) diff_cond_W_Z
...
You can come up with whatever gene subset you want and call it whatever you want. The enrichment tests will be performed separately for
each factor defined.
The gene.lengths.txt file has the format
gene (tab) length
and you can use the same file you used earlier as part of doing the TMM normalization step and generating your FPKM matrix.
|
[
"root@zinc.(none)"
] |
root@zinc.(none)
|
20b8d239d8fca159e12fc334016dc66ab721cc89
|
d3092656c078cc461f7d80f74bfc51340be98bed
|
/KerasFuzzy/experiments/digit-recognizer-2.py
|
b2261336559d2aea92172341a6913acfdb491f0a
|
[] |
no_license
|
kenoma/KerasFuzzy
|
d7beadfe770bf93c20a9b8f6f561b31b4e54fe94
|
679db95dc74af91175f811c0bf21af880213e2a4
|
refs/heads/master
| 2023-01-11T06:02:19.772627
| 2023-01-07T08:14:23
| 2023-01-07T08:14:23
| 126,618,152
| 21
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,144
|
py
|
#%%
# Environment setup: make the project-local fuzzy layers importable, then pull
# in the scientific stack.  Seeds are fixed for reproducible shuffles/inits.
import sys
sys.path.insert(0, 'D:/projects/KerasFuzzy/KerasFuzzy/layers')
from fuzzy_layer_2 import FuzzyLayer2
from defuzzy_layer_2 import DefuzzyLayer2
from defuzzy_layer import DefuzzyLayer
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
np.random.seed(2)
random_seed = 2
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from keras.utils.np_utils import to_categorical
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras import backend as K
import random
import itertools
#%%
# Print the devices TensorFlow can see (sanity check for GPU availability).
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
#%%
# Load the Kaggle digit-recognizer CSVs: each row is a flattened 28x28 image,
# 'label' is the digit class for the training set.
train = pd.read_csv("./digit-recognizer/train.csv")
test = pd.read_csv("./digit-recognizer/test.csv")
Y_train = train["label"]
X_train = train.drop(labels = ["label"], axis = 1)
del train
#%%
# Scale pixel values to [0, 1] and reshape to (N, 28, 28, 1) image tensors.
X_train = X_train / 255.0
test = test / 255.0
X_train = X_train.values.reshape(-1,28,28,1)
test = test.values.reshape(-1,28,28,1)
#%%
# One-hot encode the labels and hold out 10% of the data for validation.
Y_train = to_categorical(Y_train, num_classes = 10)
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=random_seed)
#%%
# Quick visual sanity check of the first training image.
g = plt.imshow(X_train[0][:,:,0])
#%%
class Sampling(layers.Layer):
    """VAE reparameterization trick: draw z ~ N(z_mean, exp(z_log_var))."""

    def call(self, inputs):
        mean, log_var = inputs
        # One Gaussian noise sample per latent component, per batch element.
        shape = (tf.shape(mean)[0], tf.shape(mean)[1])
        noise = tf.keras.backend.random_normal(shape=shape)
        return mean + tf.exp(0.5 * log_var) * noise
#%%
# --- VAE encoder: 28x28x1 image -> 3-dimensional latent Gaussian -------------
latent_dim = 3
encoder_inputs = keras.Input(shape=(28, 28, 1))
x = layers.Conv2D(64, 3, activation="relu", padding="same")(encoder_inputs)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
x = layers.Conv2D(32, 5, activation="relu", strides=2, padding="same")(x)
# Remember the conv output shape so the decoder can mirror it exactly.
shape_before_flattening = K.int_shape(x)
x = layers.Flatten()(x)
z_mean = layers.Dense(latent_dim)(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
z = Sampling()([z_mean, z_log_var])
encoder = keras.Model(encoder_inputs, [z_mean, z_log_var, z], name="encoder")
encoder.summary()
#%%
# --- VAE decoder: latent vector -> reconstructed 28x28x1 image ---------------
latent_inputs = keras.Input(shape=(latent_dim,))
x = layers.Dense(np.prod(shape_before_flattening[1:]), activation="relu")(latent_inputs)
x = layers.Reshape(shape_before_flattening[1:])(x)
x = layers.Conv2DTranspose(32, 5, activation="relu", strides=2, padding="same")(x)
x = layers.Conv2DTranspose(64, 3, activation="relu", padding="same")(x)
x = layers.Conv2DTranspose(64, 3, activation="relu", padding="same")(x)
decoder_outputs = layers.Conv2DTranspose(1, 3, activation="sigmoid", padding="same")(x)
decoder = keras.Model(latent_inputs, decoder_outputs, name="decoder")
decoder.summary()
#%%
class VAE(keras.Model):
    """Variational autoencoder with a custom training step.

    Loss = pixelwise binary cross-entropy reconstruction term
         + KL divergence of the latent posterior from the unit Gaussian.
    """
    def __init__(self, encoder, decoder, **kwargs):
        super(VAE, self).__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder
        # Running means so Keras reports per-epoch averages of each loss term.
        self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
        self.reconstruction_loss_tracker = keras.metrics.Mean(
            name="reconstruction_loss"
        )
        self.kl_loss_tracker = keras.metrics.Mean(name="kl_loss")
    @property
    def metrics(self):
        # Listing the trackers here lets Keras reset them between epochs.
        return [
            self.total_loss_tracker,
            self.reconstruction_loss_tracker,
            self.kl_loss_tracker,
        ]
    def train_step(self, data):
        with tf.GradientTape() as tape:
            z_mean, z_log_var, z = self.encoder(data)
            reconstruction = self.decoder(z)
            # Sum BCE over image height/width, then average over the batch.
            reconstruction_loss = tf.reduce_mean(
                tf.reduce_sum(
                    keras.losses.binary_crossentropy(data, reconstruction), axis=(1, 2)
                )
            )
            # Closed-form KL(N(mean, var) || N(0, 1)) per latent dimension.
            kl_loss = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))
            kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1))
            total_loss = reconstruction_loss + kl_loss
        grads = tape.gradient(total_loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        self.total_loss_tracker.update_state(total_loss)
        self.reconstruction_loss_tracker.update_state(reconstruction_loss)
        self.kl_loss_tracker.update_state(kl_loss)
        return {
            "loss": self.total_loss_tracker.result(),
            "reconstruction_loss": self.reconstruction_loss_tracker.result(),
            "kl_loss": self.kl_loss_tracker.result(),
        }
#%%
# Train the VAE on the full (train + validation) image set; labels are unused
# here since the VAE is unsupervised.  Progress is logged to TensorBoard.
vae = VAE(encoder, decoder, name="vae")
vae.compile(optimizer=keras.optimizers.Adam())
log_dir = "d:/projects/KerasFuzzy/logs/vae_" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
mnist_digits = np.concatenate([X_train, X_val], axis=0)
vae.fit(mnist_digits, epochs=40, batch_size=140, callbacks=[tensorboard_callback])
#%%
def plot_label_clusters(vae, data, labels):
    """Scatter the latent means of *data* in the (z0, z1) and (z2, z1) planes,
    colored by class label.

    Fix: the original called ``set_xlabel`` twice per axis, so the second call
    overwrote the x label and the y label was never set; the second call on
    each axis is now ``set_ylabel``.
    """
    z_means, _, _ = vae.encoder.predict(data)
    fig, ax = plt.subplots(ncols=2, figsize=(12, 6))
    ax[0].scatter(z_means[:, 0], z_means[:, 1], c=labels)
    ax[0].set_xlabel("z[0]")
    ax[0].set_ylabel("z[1]")
    ax[1].scatter(z_means[:, 2], z_means[:, 1], c=labels)
    ax[1].set_xlabel("z[2]")
    ax[1].set_ylabel("z[1]")
    plt.show()


plot_label_clusters(vae, X_train, [np.argmax(a) for a in Y_train])
plot_label_clusters(vae, X_val, [np.argmax(a) for a in Y_val])
# %%
# --- Phase 1: train fuzzy classifier head on top of the frozen VAE encoder ---
# The encoder (up to z_mean) becomes a fixed feature extractor; a fuzzy
# membership layer + defuzzification + softmax head maps 3-D latents to digits.
base_model = keras.Model(encoder_inputs, z_mean)
base_model.trainable = False
fuzzy_centroids = 81
# Initialize fuzzy centroids from latent means of real training samples.
z_means, _, _ = vae.encoder.predict(X_train)
init_c = random.sample(list(z_means), fuzzy_centroids)
init_s = np.empty((fuzzy_centroids, latent_dim))
init_s.fill(0.1)
x = base_model(encoder_inputs, training = False)
x = FuzzyLayer2(fuzzy_centroids, initial_centers=init_c, initial_scales = init_s, name="fuzzy")(x)
x = DefuzzyLayer(fuzzy_centroids, name="defuzzy")(x)
x = layers.Dense(10, activation="softmax")(x)
model = keras.Model(encoder_inputs, x)
optimizer = keras.optimizers.RMSprop(learning_rate=0.003, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])
model.summary()
# Shrink the learning rate when validation loss plateaus.
learning_rate_reduction = keras.callbacks.ReduceLROnPlateau(monitor="val_loss",
                                            patience=3,
                                            verbose=1,
                                            factor=0.8,
                                            min_lr=0.000001)
epochs = 2
batch_size = 86
# Light augmentation: small rotations, zooms and shifts; no flips (digits).
datagen = keras.preprocessing.image.ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=13,
        zoom_range = 0.05,
        width_shift_range=0.05,
        height_shift_range=0.05,
        horizontal_flip=False,
        vertical_flip=False)
datagen.fit(X_train)
log_dir = "d:/projects/KerasFuzzy/logs/main_phase_1_" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
history = model.fit(datagen.flow(X_train,Y_train, batch_size=batch_size),
                    epochs = epochs,
                    validation_data = (X_val,Y_val),
                    verbose = 1,
                    steps_per_epoch=X_train.shape[0] // batch_size,
                    callbacks=[learning_rate_reduction, tensorboard_callback])
#%%
# --- Phase 2: unfreeze the encoder and fine-tune the whole network -----------
base_model.trainable = True
optimizer = keras.optimizers.RMSprop(learning_rate=0.003, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])
model.summary()
epochs = 200
batch_size = 86
checkpoint_filepath = 'weights.{epoch:02d}-{val_loss:.2f}.h5'
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_filepath,
    monitor='val_accuracy',
    mode='max',
    save_best_only=True)
# Stop after 40 epochs with no val_accuracy improvement; keep the best weights.
callback=keras.callbacks.EarlyStopping(
    monitor='val_accuracy', min_delta=0, patience=40, verbose=2, mode='auto',
    baseline=None, restore_best_weights=True)
log_dir = "d:/projects/KerasFuzzy/logs/main_phase_2_" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
history = model.fit(datagen.flow(X_train,Y_train, batch_size=batch_size),
                    epochs = epochs,
                    validation_data = (X_val,Y_val),
                    verbose = 1,
                    steps_per_epoch=X_train.shape[0] // batch_size,
                    callbacks=[
                        learning_rate_reduction,
                        tensorboard_callback,
                        #model_checkpoint_callback,
                        callback])
#%%
# Re-plot latent clusters after fine-tuning (the encoder has changed).
plot_label_clusters(vae, X_train, [np.argmax(a) for a in Y_train])
plot_label_clusters(vae, X_val, [np.argmax(a) for a in Y_val])
#%%
# Visualize the learned fuzzy-layer transforms: each weights[0][odim] appears
# to be a 3x4 affine transform in homogeneous coordinates; applying it to the
# origin and the +/- unit axes draws a small cross per centroid.
# NOTE(review): axis semantics assumed from the [x, y, z, 1] vectors — confirm
# against FuzzyLayer2's weight layout.
learned_centroids = []
weights = model.get_layer('fuzzy').get_weights()
for odim in range(fuzzy_centroids):
    origin = np.dot(np.vstack([weights[0][odim], np.array([0,0,0, 1])]), np.array([0,0,0, 1]))
    e1 = np.dot(np.vstack([weights[0][odim], np.array([0,0,0, 1])]), np.array([1,0,0, 1]))
    e2 = np.dot(np.vstack([weights[0][odim], np.array([0,0,0, 1])]), np.array([0,1,0, 1]))
    e3 = np.dot(np.vstack([weights[0][odim], np.array([0,0,0, 1])]), np.array([0,0,1, 1]))
    me1 = np.dot(np.vstack([weights[0][odim], np.array([0,0,0, 1])]), np.array([-1,0,0, 1]))
    me2 = np.dot(np.vstack([weights[0][odim], np.array([0,0,0, 1])]), np.array([0,-1,0, 1]))
    me3 = np.dot(np.vstack([weights[0][odim], np.array([0,0,0, 1])]), np.array([0,0,-1, 1]))
    plt.plot([-origin[0], -e1[0]], [-origin[1], -e1[1]], c = 'b', linewidth=2)
    plt.plot([-origin[0], -e2[0]], [-origin[1], -e2[1]], c = 'b',linewidth=2)
    plt.plot([-origin[0], -e3[0]], [-origin[1], -e3[1]], c = 'b',linewidth=2)
    plt.plot([-origin[0], -me1[0]], [-origin[1], -me1[1]], c = 'b',linewidth=2)
    plt.plot([-origin[0], -me2[0]], [-origin[1], -me2[1]], c = 'b',linewidth=2)
    plt.plot([-origin[0], -me3[0]], [-origin[1], -me3[1]], c = 'b',linewidth=2)
    learned_centroids.append(origin)
plt.scatter([a[0] for a in learned_centroids], [a[1] for a in learned_centroids], alpha=0.9, s=2)
#%%
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Fix: normalization is now applied *before* plotting — the original
    normalized `cm` only after `plt.imshow`, so the rendered image ignored
    `normalize=True` while the cell text used the normalized values.
    """
    if normalize:
        # Row-normalize: each row sums to 1 (per-true-class proportions).
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Print each cell's value, white on dark cells and black on light ones.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Evaluate on the validation split: confusion matrix plus the set of
# misclassified samples for later inspection.
Y_pred = model.predict(X_val)
Y_pred_classes = np.argmax(Y_pred,axis = 1)
Y_true = np.argmax(Y_val,axis = 1)
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
plot_confusion_matrix(confusion_mtx, classes = range(10))
# Boolean mask of wrongly-classified validation samples.
errors = (Y_pred_classes - Y_true != 0)
Y_pred_classes_errors = Y_pred_classes[errors]
Y_pred_errors = Y_pred[errors]
Y_true_errors = Y_true[errors]
X_val_errors = X_val[errors]
def display_errors(errors_index, img_errors, pred_errors, obs_errors):
    """Show six misclassified digits with their predicted and true labels."""
    nrows, ncols = 2, 3
    fig, axes = plt.subplots(nrows, ncols, sharex=True, sharey=True)
    # axes.flat iterates row-major, matching errors_index[0..5].
    for idx, axis in enumerate(axes.flat):
        err = errors_index[idx]
        axis.imshow(img_errors[err].reshape((28, 28)))
        axis.set_title("Predicted label :{}\nTrue label :{}".format(pred_errors[err], obs_errors[err]))
# Rank errors by (confidence in wrong class) - (confidence in true class);
# the six largest gaps are the "most important" mistakes to inspect.
Y_pred_errors_prob = np.max(Y_pred_errors,axis = 1)
true_prob_errors = np.diagonal(np.take(Y_pred_errors, Y_true_errors, axis=1))
delta_pred_true_errors = Y_pred_errors_prob - true_prob_errors
sorted_dela_errors = np.argsort(delta_pred_true_errors)
most_important_errors = sorted_dela_errors[-6:]
display_errors(most_important_errors, X_val_errors, Y_pred_classes_errors, Y_true_errors)
# Predict the Kaggle test set and write the submission CSV (ImageId is 1-based).
results = model.predict(test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label")
submission = pd.concat([pd.Series(range(1,28001), name = "ImageId"), results],axis = 1)
submission.to_csv("cnn_mnist_fuzzy_b.csv", index=False)
# %%
|
[
"bghati@yandex.ru"
] |
bghati@yandex.ru
|
3b3b4383ef951f3c0055d62bc0357e73dea7e020
|
c85a766dca16877993cc4477e5da3c117851aa1d
|
/src/recaptcha/client/captcha.py
|
7a54cbad28f130fe454ad497335e9b257cdf4ae8
|
[] |
no_license
|
iguzu/banian
|
467ddebbb65126b3611dc9a7d4acf6a03c1d0287
|
9158d7036061400f262ddfa79210fe9f5710097a
|
refs/heads/master
| 2021-05-28T10:53:07.819656
| 2010-03-09T03:55:24
| 2010-03-09T03:55:24
| 532,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,653
|
py
|
import urllib
from google.appengine.api import urlfetch
"""
Adapted from http://pypi.python.org/pypi/recaptcha-client
to use with Google App Engine
by Joscha Feth <joscha@feth.com>
Version 0.1
"""
# reCAPTCHA endpoints: challenge widget servers (http/https) and the
# verification host the form response is POSTed to.
API_SSL_SERVER ="https://api-secure.recaptcha.net"
API_SERVER ="http://api.recaptcha.net"
VERIFY_SERVER ="api-verify.recaptcha.net"
class RecaptchaResponse(object):
    """Outcome of a reCAPTCHA verification attempt."""
    def __init__(self, is_valid, error_code=None):
        # error_code holds the server-supplied reason when is_valid is False.
        self.is_valid = is_valid
        self.error_code = error_code
def displayhtml (public_key,
                 use_ssl = False,
                 error = None):
    """Gets the HTML to display for reCAPTCHA
    public_key -- The public api key
    use_ssl -- Should the request be sent over ssl?
    error -- An error message to display (from RecaptchaResponse.error_code)"""
    # Optional error code is forwarded to the widget as a query parameter.
    error_param = ''
    if error:
        error_param = '&error=%s' % error
    # Pick the https or plain-http challenge server.
    if use_ssl:
        server = API_SSL_SERVER
    else:
        server = API_SERVER
    # The <noscript> branch is the fallback for browsers without JavaScript.
    return """<script type= "text/javascript">var RecaptchaOptions = {theme: 'white'};</script>
<script type="text/javascript" src="%(ApiServer)s/challenge?k=%(PublicKey)s%(ErrorParam)s"></script>
<noscript>
 <iframe src="%(ApiServer)s/noscript?k=%(PublicKey)s%(ErrorParam)s" height="300" width="500" frameborder="0"></iframe><br />
 <textarea name="recaptcha_challenge_field" rows="3" cols="40"></textarea>
 <input type='hidden' name='recaptcha_response_field' value='manual_challenge' />
</noscript>
""" % {
        'ApiServer' : server,
        'PublicKey' : public_key,
        'ErrorParam' : error_param,
        }
def submit (recaptcha_challenge_field,
            recaptcha_response_field,
            private_key,
            remoteip):
    """
    Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
    for the request

    recaptcha_challenge_field -- The value of recaptcha_challenge_field from the form
    recaptcha_response_field -- The value of recaptcha_response_field from the form
    private_key -- your reCAPTCHA private key
    remoteip -- the user's ip address
    """
    # Empty challenge/response means the user did not fill in the captcha.
    if not (recaptcha_response_field and recaptcha_challenge_field and
            len (recaptcha_response_field) and len (recaptcha_challenge_field)):
        return RecaptchaResponse (is_valid = False, error_code = 'incorrect-captcha-sol')
    headers = {
        'Content-type': 'application/x-www-form-urlencoded',
        "User-agent" : "reCAPTCHA GAE Python"
        }
    params = urllib.urlencode ({
            'privatekey': private_key,
            'remoteip' : remoteip,
            'challenge': recaptcha_challenge_field,
            'response' : recaptcha_response_field,
            })
    # NOTE(review): verification is POSTed over plain HTTP — the private key
    # travels unencrypted; consider an https endpoint if the service allows.
    httpresp = urlfetch.fetch(
        url = "http://%s/verify" % VERIFY_SERVER,
        payload = params,
        method = urlfetch.POST,
        headers = headers
        )
    if httpresp.status_code == 200:
        # response was fine
        # get the return values: line 1 is "true"/"false", line 2 an error code
        return_values = httpresp.content.splitlines();
        # get the return code (true/false)
        return_code = return_values[0]
        if return_code == "true":
            # yep, filled perfectly
            return RecaptchaResponse (is_valid=True)
        else:
            # nope, something went wrong
            return RecaptchaResponse (is_valid=False, error_code = return_values [1])
    else:
        # recaptcha server was not reachable
        return RecaptchaResponse (is_valid=False, error_code = "recaptcha-not-reachable")
|
[
"sbl@iguzu.com"
] |
sbl@iguzu.com
|
93111d9e9273607e9c92ad7799985e969cb48408
|
e97434a363fc559d070ba7439abf07ee975e3415
|
/IPL/pointstable/serializers.py
|
e86338f7da56852b5574166e3f0737de70b88165
|
[] |
no_license
|
vipulpathak113/iplbackend
|
2c9311b5869018be9b4fc8e1c50f66d8d444e0f6
|
7be4c3cfe2c21ed4aef2634de0fbeb4a2e85142c
|
refs/heads/master
| 2020-07-30T21:30:23.578815
| 2019-09-23T13:38:22
| 2019-09-23T13:38:22
| 210,364,946
| 0
| 1
| null | 2020-07-21T22:00:00
| 2019-09-23T13:40:32
|
Python
|
UTF-8
|
Python
| false
| false
| 265
|
py
|
from rest_framework import serializers
from .models import PointsTable
class TeamlistSerializer(serializers.ModelSerializer):
    """Serializes a PointsTable row (one IPL team's standings) to/from JSON."""
    class Meta:
        model = PointsTable
        # nrr = net run rate; the rest mirror a standard league points table.
        fields = ('id', 'team_name', 'played', 'won', 'lost', 'no_result', 'points', 'nrr')
|
[
"vipulkumarpathak@ASDP1740PO02529.qait.com"
] |
vipulkumarpathak@ASDP1740PO02529.qait.com
|
b9c66f7ddec07dd1b3c1c83bccd25902d510d6a9
|
d149fc886482603e0cb3c7788284e9de16cc6edd
|
/DecisionTree/studentMain.py
|
e478aded910070a35d93dd1d66ab7e69c31be287
|
[] |
no_license
|
lishengxu/ud120-sklearn
|
12451eac1ad06ef0999980182ed7577db1c2e5bf
|
b831a944410a5e503255cba524499d87b1f9e820
|
refs/heads/master
| 2020-03-28T13:38:15.602433
| 2018-10-15T12:12:35
| 2018-10-15T12:12:35
| 148,413,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,239
|
py
|
#!/usr/bin/python

""" lecture and example code for decision tree unit """

# NOTE: Python 2 script (print statements); project-local helpers come from
# the Udacity ud120 course materials (class_vis, prep_terrain_data, classifyDT).
import sys
from class_vis import prettyPicture, output_image
from prep_terrain_data import makeTerrainData

import matplotlib.pyplot as plt
import numpy as np
import pylab as pl
from classifyDT import classify

# Synthetic 2-D terrain data split into train/test sets.
features_train, labels_train, features_test, labels_test = makeTerrainData()

### the classify() function in classifyDT is where the magic
### happens--fill in this function in the file 'classifyDT.py'!
clf = classify(features_train, labels_train)

#### grader code, do not modify below this line
prettyPicture(clf, features_test, labels_test)
output_image("test.png", "png", open("test.png", "rb").read())
#1  accuracy of the classifier built in classifyDT
print clf.score(features_test, labels_test)
#2  same accuracy computed via sklearn.metrics
pred = clf.predict(features_test)
from sklearn.metrics import accuracy_score
print accuracy_score(pred, labels_test)
#3  compare min_samples_split=2 (prone to overfit) vs 50 (smoother boundary)
from sklearn import tree
clf2 = tree.DecisionTreeClassifier(min_samples_split = 2)
clf2.fit(features_train, labels_train)
score2 = clf2.score(features_test, labels_test)
print "score2:", score2
clf50 = tree.DecisionTreeClassifier(min_samples_split = 50)
clf50.fit(features_train, labels_train)
score50 = clf50.score(features_test, labels_test)
print "score50:", score50
|
[
"lsx1@meitu.com"
] |
lsx1@meitu.com
|
afe7b68eebc859166be1c5e13503095b75df042c
|
3527ff6346f98a5b7c51ce3c58428227f4bc8617
|
/acwing/800.py
|
3e10fbf147a1197999a55e116c697baa1c94510e
|
[] |
no_license
|
ShawnDong98/Algorithm-Book
|
48e2c1158d6e54d4652b0791749ba05a4b85f96d
|
f350b3d6e59fd5771e11ec0b466f9ba5eeb8e927
|
refs/heads/master
| 2022-07-17T04:09:39.559310
| 2022-07-13T15:46:37
| 2022-07-13T15:46:37
| 242,317,482
| 0
| 0
| null | 2020-10-11T14:50:48
| 2020-02-22T09:53:41
|
C++
|
UTF-8
|
Python
| false
| false
| 277
|
py
|
# AcWing 800: given ascending arrays A (len n) and B (len m), find the unique
# pair (i, j) with A[i] + B[j] == x using the two-pointer technique in O(n+m).
n, m, x = map(int, input().split())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
# i walks A left-to-right; j walks B right-to-left and never moves back right,
# which is valid because both arrays are sorted ascending.
i = 0
j = m -1
while i< n:
    # Shrink j while the pair sum overshoots the target.
    while j >= 0 and A[i] + B[j] > x:
        j -= 1
    if j >= 0 and A[i] + B[j] == x:
        print(f'{i} {j}')
        break
    i += 1
|
[
"ShawnDong98@gmail.com"
] |
ShawnDong98@gmail.com
|
64a3215ec3906affa3702053fa372ce9684ba680
|
60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24
|
/IronPythonStubs/release/stubs.min/System/Windows/Media/Animation_parts/Timeline.py
|
fc7d6ac0df9229690da3777f143d98381abde625
|
[
"MIT"
] |
permissive
|
shnlmn/Rhino-Grasshopper-Scripts
|
a9411098c5d1bbc55feb782def565d535b27b709
|
0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823
|
refs/heads/master
| 2020-04-10T18:59:43.518140
| 2020-04-08T02:49:07
| 2020-04-08T02:49:07
| 161,219,695
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,492
|
py
|
class Timeline(Animatable,ISealable,IAnimatable,IResource):
""" Defines a segment of time. """
def AllocateClock(self,*args):
"""
AllocateClock(self: Timeline) -> Clock
Creates a System.Windows.Media.Animation.Clock for this System.Windows.Media.Animation.Timeline.
Returns: A clock for this System.Windows.Media.Animation.Timeline.
"""
pass
def Clone(self):
"""
Clone(self: Timeline) -> Timeline
Creates a modifiable clone of this System.Windows.Media.Animation.Timeline,making deep copies
of this object's values.
Returns: A modifiable clone of the current object. The cloned object's System.Windows.Freezable.IsFrozen
property is false even if the source's System.Windows.Freezable.IsFrozen property is true.
"""
pass
def CloneCore(self,*args):
"""
CloneCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a clone (deep copy) of the specified System.Windows.Freezable using base
(non-animated) property values.
sourceFreezable: The object to clone.
"""
pass
def CloneCurrentValue(self):
"""
CloneCurrentValue(self: Timeline) -> Timeline
Creates a modifiable clone of this System.Windows.Media.Animation.Timeline object,making deep
copies of this object's current values.
Returns: A modifiable clone of the current object. The cloned object's System.Windows.Freezable.IsFrozen
property is false even if the source's System.Windows.Freezable.IsFrozen property is true.
"""
pass
def CloneCurrentValueCore(self,*args):
"""
CloneCurrentValueCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a modifiable clone (deep copy) of the specified System.Windows.Freezable
using current property values.
sourceFreezable: The System.Windows.Freezable to be cloned.
"""
pass
def CreateClock(self,hasControllableRoot=None):
"""
CreateClock(self: Timeline,hasControllableRoot: bool) -> Clock
Creates a new System.Windows.Media.Animation.Clock from this
System.Windows.Media.Animation.Timeline and specifies whether the new
System.Windows.Media.Animation.Clock is controllable. If this
System.Windows.Media.Animation.Timeline has children,a tree of clocks is created with this
System.Windows.Media.Animation.Timeline as the root.
hasControllableRoot: true if the root System.Windows.Media.Animation.Clock returned should return a
System.Windows.Media.Animation.ClockController from its
System.Windows.Media.Animation.Clock.Controller property so that the
System.Windows.Media.Animation.Clock tree can be interactively controlled; otherwise,false.
Returns: A new System.Windows.Media.Animation.Clock constructed from this
System.Windows.Media.Animation.Timeline. If this System.Windows.Media.Animation.Timeline is a
System.Windows.Media.Animation.TimelineGroup that contains child timelines,a tree of
System.Windows.Media.Animation.Clock objects is created with a controllable
System.Windows.Media.Animation.Clock created from this System.Windows.Media.Animation.Timeline
as the root.
CreateClock(self: Timeline) -> Clock
Creates a new,controllable System.Windows.Media.Animation.Clock from this
System.Windows.Media.Animation.Timeline. If this System.Windows.Media.Animation.Timeline has
children,a tree of clocks is created with this System.Windows.Media.Animation.Timeline as the
root.
Returns: A new,controllable System.Windows.Media.Animation.Clock constructed from this
System.Windows.Media.Animation.Timeline. If this System.Windows.Media.Animation.Timeline is a
System.Windows.Media.Animation.TimelineGroup that contains child timelines,a tree of
System.Windows.Media.Animation.Clock objects is created with a controllable
System.Windows.Media.Animation.Clock created from this System.Windows.Media.Animation.Timeline
as the root.
"""
pass
def CreateInstance(self,*args):
"""
CreateInstance(self: Freezable) -> Freezable
Initializes a new instance of the System.Windows.Freezable class.
Returns: The new instance.
"""
pass
def CreateInstanceCore(self,*args):
"""
CreateInstanceCore(self: Freezable) -> Freezable
When implemented in a derived class,creates a new instance of the System.Windows.Freezable
derived class.
Returns: The new instance.
"""
pass
def FreezeCore(self,*args):
"""
FreezeCore(self: Timeline,isChecking: bool) -> bool
Makes this System.Windows.Media.Animation.Timeline unmodifiable or determines whether it can be
made unmodifiable.
isChecking: true to check if this instance can be frozen; false to freeze this instance.
Returns: If isChecking is true,this method returns true if this instance can be made read-only,or false
if it cannot be made read-only. If isChecking is false,this method returns true if this
instance is now read-only,or false if it cannot be made read-only,with the side effect of
having begun to change the frozen status of this object.
"""
pass
def GetAsFrozenCore(self,*args):
"""
GetAsFrozenCore(self: Timeline,sourceFreezable: Freezable)
Makes this instance a clone of the specified System.Windows.Media.Animation.Timeline object.
sourceFreezable: The System.Windows.Media.Animation.Timeline instance to clone.
"""
pass
def GetCurrentValueAsFrozenCore(self,*args):
"""
GetCurrentValueAsFrozenCore(self: Timeline,sourceFreezable: Freezable)
Makes this instance a frozen clone of the specified System.Windows.Media.Animation.Timeline.
Resource references,data bindings,and animations are not copied,but their current values are.
sourceFreezable: The System.Windows.Media.Animation.Timeline to copy and freeze.
"""
pass
@staticmethod
def GetDesiredFrameRate(timeline):
"""
GetDesiredFrameRate(timeline: Timeline) -> Nullable[int]
Gets the desired frame rate of the specified System.Windows.Media.Animation.Timeline.
timeline: The timeline from which to retrieve the desired frame rate.
Returns: The desired frame rate of this timeline. The default value is null.
"""
pass
def GetNaturalDuration(self,*args):
"""
GetNaturalDuration(self: Timeline,clock: Clock) -> Duration
Returns the length of a single iteration of this System.Windows.Media.Animation.Timeline.
clock: The System.Windows.Media.Animation.Clock that was created for this
System.Windows.Media.Animation.Timeline.
Returns: The length of a single iteration of this System.Windows.Media.Animation.Timeline,or
System.Windows.Duration.Automatic if the natural duration is unknown.
"""
pass
def GetNaturalDurationCore(self,*args):
"""
GetNaturalDurationCore(self: Timeline,clock: Clock) -> Duration
Returns the length of a single iteration of this System.Windows.Media.Animation.Timeline. This
method provides the implementation for
System.Windows.Media.Animation.Timeline.GetNaturalDuration(System.Windows.Media.Animation.Clock).
clock: The System.Windows.Media.Animation.Clock that was created for this
System.Windows.Media.Animation.Timeline.
Returns: The length of a single iteration of this System.Windows.Media.Animation.Timeline,or
System.Windows.Duration.Automatic if the natural duration is unknown.
"""
pass
def OnChanged(self,*args):
"""
OnChanged(self: Freezable)
Called when the current System.Windows.Freezable object is modified.
"""
pass
def OnFreezablePropertyChanged(self,*args):
"""
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)
This member supports the Windows Presentation Foundation (WPF) infrastructure and is not
intended to be used directly from your code.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
property: The property that changed.
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)
Ensures that appropriate context pointers are established for a
System.Windows.DependencyObjectType data member that has just been set.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
"""
pass
def OnPropertyChanged(self,*args):
"""
OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
Overrides the System.Windows.DependencyObject implementation of
System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPropertyChangedEventAr
gs) to also invoke any System.Windows.Freezable.Changed handlers in response to a changing
dependency property of type System.Windows.Freezable.
e: Event data that contains information about which property changed,and its old and new values.
"""
pass
def ReadPreamble(self,*args):
"""
ReadPreamble(self: Freezable)
Ensures that the System.Windows.Freezable is being accessed from a valid thread. Inheritors of
System.Windows.Freezable must call this method at the beginning of any API that reads data
members that are not dependency properties.
"""
pass
@staticmethod
def SetDesiredFrameRate(timeline,desiredFrameRate):
""" SetDesiredFrameRate(timeline: Timeline,desiredFrameRate: Nullable[int]) """
pass
def ShouldSerializeProperty(self,*args):
"""
ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
Returns a value that indicates whether serialization processes should serialize the value for
the provided dependency property.
dp: The identifier for the dependency property that should be serialized.
Returns: true if the dependency property that is supplied should be value-serialized; otherwise,false.
"""
pass
def WritePostscript(self,*args):
"""
WritePostscript(self: Freezable)
Raises the System.Windows.Freezable.Changed event for the System.Windows.Freezable and invokes
its System.Windows.Freezable.OnChanged method. Classes that derive from System.Windows.Freezable
should call this method at the end of any API that modifies class members that are not stored as
dependency properties.
"""
pass
def WritePreamble(self,*args):
"""
WritePreamble(self: Freezable)
Verifies that the System.Windows.Freezable is not frozen and that it is being accessed from a
valid threading context. System.Windows.Freezable inheritors should call this method at the
beginning of any API that writes to data members that are not dependency properties.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*args): #cannot find CLR constructor
"""
__new__(cls: type)
__new__(cls: type,beginTime: Nullable[TimeSpan])
__new__(cls: type,beginTime: Nullable[TimeSpan],duration: Duration)
__new__(cls: type,beginTime: Nullable[TimeSpan],duration: Duration,repeatBehavior: RepeatBehavior)
"""
pass
AccelerationRatio=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value specifying the percentage of the timeline's System.Windows.Media.Animation.Timeline.Duration spent accelerating the passage of time from zero to its maximum rate.
Get: AccelerationRatio(self: Timeline) -> float
Set: AccelerationRatio(self: Timeline)=value
"""
AutoReverse=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value that indicates whether the timeline plays in reverse after it completes a forward iteration.
Get: AutoReverse(self: Timeline) -> bool
Set: AutoReverse(self: Timeline)=value
"""
BeginTime=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the time at which this System.Windows.Media.Animation.Timeline should begin.
Get: BeginTime(self: Timeline) -> Nullable[TimeSpan]
Set: BeginTime(self: Timeline)=value
"""
DecelerationRatio=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value specifying the percentage of the timeline's System.Windows.Media.Animation.Timeline.Duration spent decelerating the passage of time from its maximum rate to zero.
Get: DecelerationRatio(self: Timeline) -> float
Set: DecelerationRatio(self: Timeline)=value
"""
Duration=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the length of time for which this timeline plays,not counting repetitions.
Get: Duration(self: Timeline) -> Duration
Set: Duration(self: Timeline)=value
"""
FillBehavior=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value that specifies how the System.Windows.Media.Animation.Timeline behaves after it reaches the end of its active period.
Get: FillBehavior(self: Timeline) -> FillBehavior
Set: FillBehavior(self: Timeline)=value
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the name of this System.Windows.Media.Animation.Timeline.
Get: Name(self: Timeline) -> str
Set: Name(self: Timeline)=value
"""
RepeatBehavior=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the repeating behavior of this timeline.
Get: RepeatBehavior(self: Timeline) -> RepeatBehavior
Set: RepeatBehavior(self: Timeline)=value
"""
SpeedRatio=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the rate,relative to its parent,at which time progresses for this System.Windows.Media.Animation.Timeline.
Get: SpeedRatio(self: Timeline) -> float
Set: SpeedRatio(self: Timeline)=value
"""
AccelerationRatioProperty=None
AutoReverseProperty=None
BeginTimeProperty=None
Completed=None
CurrentGlobalSpeedInvalidated=None
CurrentStateInvalidated=None
CurrentTimeInvalidated=None
DecelerationRatioProperty=None
DesiredFrameRateProperty=None
DurationProperty=None
FillBehaviorProperty=None
NameProperty=None
RemoveRequested=None
RepeatBehaviorProperty=None
SpeedRatioProperty=None
|
[
"magnetscoil@gmail.com"
] |
magnetscoil@gmail.com
|
f889324acec930ae67fcd208eae5a87585845cd0
|
2cda9caaa7a0f070139e39cae13744b11e0066ec
|
/Day5Class/NSAEncoder3.py
|
61b8b4c2abf814d71896f4ba16966cabb79187de
|
[] |
no_license
|
sc137/WASTC_Programming_Concepts_Python
|
68df4e5cf016c56c571cc2ef51f1919e6140bf4d
|
3f8d2ed614d04087e2211fa7a603c1b5a1e12c79
|
refs/heads/master
| 2021-01-19T00:31:29.481577
| 2016-07-05T19:05:35
| 2016-07-05T19:05:35
| 61,576,736
| 1
| 1
| null | 2016-06-24T20:54:59
| 2016-06-20T20:06:01
|
Python
|
UTF-8
|
Python
| false
| false
| 729
|
py
|
# version 1.1
#
# Caesar-style shift codec:
#   * reads input.txt,
#   * shifts every character (newlines included) DOWN one code point,
#   * writes each encoded character on its own line to output.txt
#     (one char per line, matching the original output format),
#   * then shifts back UP and prints the decoded text to stdout.
with open('input.txt') as fin:
    text = fin.read()

# Encode: shift every character down one code point.
encoded = [chr(ord(ch) - 1) for ch in text]

with open('output.txt', 'w') as fout:
    for ch in encoded:
        fout.write(ch + '\n')

print("\n")

# Decode: shift back up; joining reproduces the original input text.
decoded = [chr(ord(ch) + 1) for ch in encoded]
print("".join(decoded), end="")
|
[
"sales@dansid.com"
] |
sales@dansid.com
|
dfa597d173fb2935cab5373869ba275323196172
|
7b1119c13af2a2c165d860123499ea1eba02b0f2
|
/Own Implementation Final/activation.py
|
932da22c017c53272c91e249e28f8fab421cee30
|
[] |
no_license
|
mohamed-minawi/RNN
|
232adf641b9037357d680ff968d18e87d41bfdab
|
bfc452a650153967189139c51ed289a70e14b5ad
|
refs/heads/master
| 2020-03-22T13:10:43.867256
| 2018-07-07T14:13:54
| 2018-07-07T14:13:54
| 140,088,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
import numpy as np
class Sigmoid:
    """Logistic sigmoid activation with its local gradient."""

    def forward(self, x):
        """Return sigmoid(x) = 1 / (1 + e^-x), elementwise for arrays."""
        neg_exp = np.exp(-x)
        return 1.0 / (1.0 + neg_exp)

    def backward(self, x, top_diff):
        """Return the upstream gradient scaled by d(sigmoid)/dx = s*(1-s)."""
        activ = self.forward(x)
        return (1.0 - activ) * activ * top_diff
class Tanh:
    """Hyperbolic-tangent activation with its local gradient."""

    def forward(self, x):
        """Return tanh(x), elementwise for arrays."""
        return np.tanh(x)

    def backward(self, x, top_diff):
        """Return the upstream gradient scaled by d(tanh)/dx = 1 - tanh^2."""
        activ = self.forward(x)
        return (1.0 - np.square(activ)) * top_diff
|
[
"mohamed-minawi@aucegypt.edu"
] |
mohamed-minawi@aucegypt.edu
|
25c35446e2c1e506709da0324a51c48114a589fb
|
fab8b89c723caf54ed49e2f0da8be592243d21ee
|
/Matlab/Stepping-Stones_msg_gen/catkin_ws/src/morpheus_skates/src/calibration_arduino.py
|
98f7a24660c9f498b08d643c37d9f31ccb94fcf0
|
[] |
no_license
|
saunair/Stepping-Stones
|
97752f5b9dad10cbe8cb4a208b99fefe99b7be05
|
177c9e884779227beaa725efb1f061d1a52db751
|
refs/heads/master
| 2021-06-20T16:12:46.651961
| 2017-07-27T16:46:46
| 2017-07-27T16:46:46
| 72,512,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,395
|
py
|
#!/usr/bin/env python
# license removed for brevity
import rospy
import time
from std_msgs.msg import UInt16
import yaml
from morpheus_skates.msg import skate_feedback
import os
import rospkg
#config_file = '~/catkin_ws/src/motor/config/calibration_values.yaml'
#config_file = '/home/stepping/mrsd_team_H/Stepping-Stones/catkin_ws/src/morpheus_skates/config/calibration_values.yaml'
# Resolve the calibration YAML inside the morpheus_skates package so the
# script works regardless of where the catkin workspace is checked out
# (the hard-coded paths above are kept only for reference).
rospack = rospkg.RosPack()
morpheus_path = rospack.get_path('morpheus_skates')
config_file = morpheus_path + '/config/calibration_values.yaml'
class skate(object):
def __init__(self, name):
self.name = name
########### set appropriate values ########
self.MAX_preload_F1 = 20
self.MAX_preload_F2 = 20
self.MAX_preload_F3 = 20
########### initialize variables ##########
self.w = -1
self.bias_front_outer = -1
self.bias_front_inner = -1
self.bias_rear = -1
self.gain_front_outer = []
self.gain_front_inner = []
self.gain_rear = []
self.count = 0
self.sensor_no = -1
self.preload_front_outer = []
self.preload_front_inner = []
self.preload_rear = []
self.d = {}
###### restart routine for new sensors ############
def restart_routine(self):
self.w = -1
self.bias_front_outer = -1
self.bias_front_inner = -1
self.bias_rear = -1
self.gain_front_outer = []
self.gain_front_inner = []
self.gain_rear = []
self.count = 0
self.sensor_no = -1
self.preload_front_outer = []
self.preload_front_inner = []
self.preload_rear = []
def values(self,data):
print "repeat"
if self.count==0:
self.bias_front_outer = 0
self.bias_front_inner = 0
self.bias_rear = 0
self.count += 1
print data.header.stamp
if self.count<=200:
self.bias_front_outer += data.force_front_outer
self.bias_front_inner += data.force_front_inner
self.bias_rear += data.force_rear
self.count += 1
elif self.count==201:
self.bias_front_outer/=200
self.bias_front_inner/=200
self.bias_rear/=200
self.count += 1
self.w = -1
elif self.count == 202:
self.sensor_number = input("enter skate sensor number for gain calculation")
self.w = input("input weight here")
self.count += 1
####### add the skate name in the input
if self.w>0:
###### ignore these values for sync!!!! #########
if self.count<1200:
self.count += 1
elif self.count==1200:
if self.sensor_number == 1:
self.gain_front_outer = []
elif self.sensor_number == 2:
self.gain_front_inner = []
elif self.sensor_number == 3:
self.gain_rear = []
self.count += 1
#keep appending for another n values
elif self.count>1200 and self.count<1400:
if self.sensor_number == 1:
self.gain_front_outer.append(float(data.force_front_outer - self.bias_front_outer)/self.w)
print "check for sanjay", (data.force_front_outer - self.bias_front_outer), self.w
elif self.sensor_number == 2:
self.gain_front_inner.append(float(data.force_front_inner - self.bias_front_inner)/self.w)
elif self.sensor_number == 3:
self.gain_rear.append(float(data.force_rear - self.bias_rear)/self.w)
self.count += 1
###### end of routine for this sensor number##########
elif self.count==1400:
if self.sensor_number == 1:
print self.gain_front_outer
self.gain_front_outer = float(sum(self.gain_front_outer))/len(self.gain_front_outer)
print "check", self.gain_front_outer
elif self.sensor_number == 2:
self.gain_front_inner = float(sum(self.gain_front_inner))/len(self.gain_front_inner)
elif self.sensor_number == 3:
self.gain_rear = float(sum(self.gain_rear))/len(self.gain_rear)
self.count +=1
elif self.count == 1401:
if(self.gain_rear==[] or self.gain_front_inner==[] or self.gain_front_outer==[]):
##### go back to gain calculations
self.count = 202
else:
self.count += 1
####label: sync
###### ignore these values for sync!!!! #########
elif self.count <1800 and self.count>1401:
self.preload_front_outer = []
self.preload_front_inner = []
self.preload_rear = []
self.count += 1
elif self.count == 1800:
g = input("press a key if ready to test mechanical preload")
self.count += 1
############ mechanical preload code ################
elif self.count > 1800 and self.count < 2200:
self.preload_front_outer.append(float(data.force_front_outer - self.bias_front_outer)/self.gain_front_outer)
self.preload_front_inner.append(float(data.force_front_inner - self.bias_front_inner)/self.gain_front_inner)
self.preload_rear.append((float(data.force_rear - self.bias_front_outer)/self.gain_rear))
self.count += 1
elif self.count == 2200:
self.preload_front_outer = float(sum(self.preload_front_outer))/len(self.preload_front_outer)
self.preload_front_inner = float(sum(self.preload_front_inner))/len(self.preload_front_inner)
self.preload_rear = float(sum(self.preload_rear))/len(self.preload_rear)
#### go to sync if preload is above the threshold
if self.preload_front_outer > self.MAX_preload_F1:
print "Front outer load not corrected"
self.count = 1401
if self.preload_front_inner > self.MAX_preload_F2:
print "Front inner load not corrected"
self.count = 1401
if self.preload_rear > self.MAX_preload_F3:
print "Rear load not corrected"
self.count = 1401
if (self.preload_front_outer < self.MAX_preload_F1 and self.preload_front_inner < self.MAX_preload_F2 and self.preload_rear < self.MAX_preload_F3):
self.count += 1
elif self.count == 2201:
self.count = 0
if self.preload_front_outer < self.MAX_preload_F1 and self.preload_front_inner < self.MAX_preload_F2 and self.preload_rear < self.MAX_preload_F3:
self.data_update(self.sensor_number)
self.restart_routine()
else:
#### go back to preload routine
print "Wrong bias: Go though preloading routine again"
self.count = 1401
#self.restart_routine()
#elif self.count > 2201:
#self.restart_routine()
########### update the dictionary for this particular skate
elif self.w==-2:
### go to this preload directly ###
self.count = 1700
#### to get into the loop ####
self.w = 1
self.bias_front_outer = rospy.get_param(self.name + "_bias_front_outer")
self.bias_front_inner = rospy.get_param(self.name + "_bias_front_inner")
self.bias_rear = rospy.get_param(self.name + "_bias_rear")
self.gain_front_outer = rospy.get_param(self.name + "_gain_front_outer")
self.gain_front_inner = rospy.get_param(self.name + "_gain_front_inner")
self.gain_rear = rospy.get_param(self.name + "_gain_rear")
def data_update(self, sensor_number):
bias_front_outer = self.name + "_bias_front_outer"
bias_front_inner = self.name + "_bias_front_inner"
bias_rear = self.name + "_bias_rear"
gain_front_outer = self.name + "_gain_front_outer"
gain_front_inner = self.name + "_gain_front_inner"
gain_rear = self.name + "_gain_rear"
preload_front_outer = self.name + "_preload_front_outer"
preload_front_inner = self.name + "_preload_front_inner"
preload_rear = self.name + "_preload_rear"
if self.bias_front_outer != -1:
self.d[bias_front_outer] = self.bias_front_outer
self.d[bias_front_inner] = self.bias_front_inner
self.d[bias_rear] = self.bias_rear
self.d[gain_front_outer] = self.gain_front_outer
self.d[gain_front_inner] = self.gain_front_inner
self.d[gain_rear] = self.gain_rear
self.d[preload_front_outer] = self.preload_front_outer
self.d[preload_front_inner] = self.preload_front_inner
self.d[preload_rear] = self.preload_rear
else:
self.d[bias_front_outer] = rospy.get_param(bias_front_outer)
self.d[bias_front_inner] = rospy.get_param(bias_front_inner)
self.d[bias_rear] = rospy.get_param(bias_rear)
self.d[gain_front_outer] = rospy.get_param(gain_front_outer)
self.d[gain_front_inner] = rospy.get_param(gain_front_inner)
self.d[gain_rear] = rospy.get_param(gain_rear)
self.d[preload_front_outer] = rospy.get_param(preload_front_outer)
self.d[preload_front_inner] = rospy.get_param(preload_front_inner)
self.d[preload_rear] = rospy.get_param(preload_rear)
rospy.set_param(bias_front_outer ,self.d[bias_front_outer])
rospy.set_param(bias_front_inner ,self.d[bias_front_inner])
rospy.set_param(bias_rear ,self.d[bias_rear])
rospy.set_param(gain_front_outer ,self.d[gain_front_outer])
rospy.set_param(gain_front_inner ,self.d[gain_front_inner])
rospy.set_param(gain_rear ,self.d[gain_rear])
rospy.set_param(preload_front_outer,self.d[preload_front_outer])
rospy.set_param(preload_front_inner,self.d[preload_front_inner])
rospy.set_param(preload_rear ,self.d[preload_rear])
##### call the function that dumps the dictionary into a yaml file which is our config file###
self.write_into_file()
def write_into_file(self):
global config_file
if(self.name =='right'):
self.d['left_bias_front_outer'] = rospy.get_param('left_bias_front_outer')
self.d['left_bias_front_inner'] = rospy.get_param('left_bias_front_inner')
self.d['left_bias_rear'] = rospy.get_param('left_bias_rear')
self.d['left_preload_front_outer'] = rospy.get_param('left_preload_front_outer')
self.d['left_preload_front_inner'] = rospy.get_param('left_preload_front_inner')
self.d['left_gain_rear'] = rospy.get_param('left_gain_rear')
self.d['left_gain_front_outer'] = rospy.get_param('left_gain_front_outer')
self.d['left_gain_front_inner'] = rospy.get_param('left_gain_front_inner')
elif(self.name == 'left'):
self.d['right_bias_front_outer'] = rospy.get_param('right_bias_front_outer')
self.d['right_bias_front_inner'] = rospy.get_param('right_bias_front_inner')
self.d['right_bias_rear'] = rospy.get_param('right_bias_rear')
self.d['right_preload_front_outer'] = rospy.get_param('right_preload_front_outer')
self.d['right_preload_front_inner'] = rospy.get_param('right_preload_front_inner')
self.d['right_preload_rear'] = rospy.get_param('right_preload_rear')
self.d['right_gain_rear'] = rospy.get_param('right_gain_rear')
self.d['right_gain_front_outer'] = rospy.get_param('right_gain_front_outer')
self.d['right_gain_front_inner'] = rospy.get_param('right_gain_front_inner')
stream = file(config_file, 'w')
yaml.dump(self.d, stream)
a = raw_input("Confirm restart for preload by pressing enter")
self.count = 1402
#self.restart_routine()
def start(left_skate_start, right_skate_start):
    """Initialise the ROS node and wire each skate_feedback topic to the
    matching skate object's values() state-machine callback."""
    rospy.init_node('bias', anonymous=True)
    rospy.Subscriber("left" , skate_feedback, left_skate_start.values)
    rospy.Subscriber("right", skate_feedback, right_skate_start.values)
    # spin() simply keeps python from exiting until this node is stopped
    rospy.spin()
if __name__ == '__main__':
    # One calibration state machine per skate, then block in rospy.spin().
    right_skate = skate('right')
    left_skate = skate('left')
    start(left_skate, right_skate)
|
[
"bfactor@andrew.cmu.edu"
] |
bfactor@andrew.cmu.edu
|
67c14f9b85a6fc763604cad2949acf2dbcbbab72
|
00c3f5934ba4e674c8f01ab1fc2d30f7eb9a803d
|
/Performance_Metric_Functions.py
|
61f8f2501d66a75e12708503079a7b8a7bc81b5e
|
[] |
no_license
|
alexanderbooth/Prattle_Interns
|
a67224ba82bafe661f95d7f8443fabbf009c3a03
|
a97c65596af89feee6e6db7be281ce08f0a5f459
|
refs/heads/master
| 2020-12-31T01:48:31.110739
| 2015-07-06T21:44:36
| 2015-07-06T21:44:36
| 37,205,087
| 0
| 0
| null | 2015-06-10T15:20:47
| 2015-06-10T15:20:46
|
Python
|
UTF-8
|
Python
| false
| false
| 5,587
|
py
|
# coding: utf-8
# In[2]:
import dateutil.parser
import json
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import pandas.io.data
import requests
import datetime
import math
import operator
import scipy
import requests
import calendar
import statsmodels.api as sm
# In[3]:
#NEEDS COMMENTS FROM ALEX
def isFirstDay(date, dic1):
    """Return True the first time a given (month, year) pair is seen.

    date: object with a .to_datetime() method (e.g. pandas Timestamp).
    dic1: mapping of month number -> last recorded year; updated in place.

    Equivalent to the original scan over days 1-31: a datetime's .day is
    always in that range, so the scan always terminated by either
    returning False (pair already recorded) or recording and returning
    True at i == d.day.
    """
    d = date.to_datetime()
    if dic1[d.month] == d.year:
        # This month/year pair was already recorded for an earlier row.
        return False
    dic1[d.month] = d.year
    return True
#Are you sure its not this?
#def isFirstDay(date):
#return date.to_datetime().day == 1
# In[4]:
#returns a dictionary with the first day of each year
#input dic must be an empty dictionary
def isFirstDayYear(date, dic):
    """Return True the first time a given year is seen; record it in dic.

    date: object with a .to_datetime() method (e.g. pandas Timestamp).
    dic: starts empty; gains {year: True} entries as years are seen.

    Equivalent to the original scan over days 1-31, since a datetime's
    .day always falls in that range and the dict never changes mid-scan.
    """
    d = date.to_datetime()
    if d.year in dic:
        return False
    dic[d.year] = True
    return True
# In[5]:
#Calculates the yearly returns from the price column in dataframe df. Dataframe df needs to have 'Date' column
#along with 'Price' column. Creates a Firstdayyear column of booleans in df.
#yr_returns is a list with floats
def yearlyReturns(df):
    """Compute year-over-year returns from df['Price'].

    df must have 'Date' and 'Price' columns. Side effect: a boolean
    'Firstdayyear' column is added to df. Returns a list of floats,
    one per consecutive pair of first-seen-day-of-year rows.

    NOTE(review): each return is (last - first) / last, i.e. normalised
    by the ENDING price; the conventional definition divides by the
    starting price -- confirm this is intended.
    """
    hasFirstDay = {}
    df['Firstdayyear'] = df.loc[:, 'Date'].apply(isFirstDayYear, dic=hasFirstDay)
    first_year_adj_close = []
    last_year_adj_close = []
    # Both frames start from the same first-day rows; dropping the final
    # row from one and the initial row from the other aligns consecutive
    # year boundaries pairwise.
    first_year = df[df['Firstdayyear']==True]
    last_year = df[df['Firstdayyear'] == True]
    first_year = first_year.drop(first_year.index[len(first_year)-1])
    last_year = last_year.drop(last_year.index[0])
    for i in first_year.index:
        first_year_adj_close.append(first_year['Price'][i])
    for i in last_year.index:
        last_year_adj_close.append(last_year['Price'][i])
    yr_returns = [(i - j)/i for i, j in zip(last_year_adj_close, first_year_adj_close)]
    return yr_returns
# In[6]:
#Calculates the monthly returns from the Price column in dataframe df
#Dataframe df must have Date column as well as Price column
#Adds Firstdaymonth column to df
#output is a list of floats
def monthlyReturns(df):
    """Compute month-over-month returns from df['Price'].

    df must have 'Date' and 'Price' columns. Side effect: a boolean
    'Firstdaymonth' column is added to df. Returns a list of floats,
    one per consecutive pair of first-seen-day-of-month rows.

    NOTE(review): as in yearlyReturns, each return is normalised by the
    ENDING price, (last - first) / last -- confirm this is intended.
    """
    hasFirstDayMonth = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0}
    df['Firstdaymonth'] = df.loc[:, 'Date'].apply(isFirstDay, dic1=hasFirstDayMonth)
    first_month_adj_close = []
    last_month_adj_close = []
    # Align consecutive month boundaries by dropping the first row from
    # one copy and the last row from the other.
    first_month = df[df['Firstdaymonth']==True]
    last_month = df[df['Firstdaymonth']==True]
    last_month = last_month.drop(last_month.index[0])
    first_month = first_month.drop(first_month.index[len(first_month) - 1])
    for i in first_month.index:
        first_month_adj_close.append(first_month['Price'][i])
    for i in last_month.index:
        last_month_adj_close.append(last_month['Price'][i])
    monthly_returns = [(i - j)/i for i, j in zip(last_month_adj_close, first_month_adj_close)]
    return monthly_returns
# In[7]:
#adjust std depending on annualized range
#Takes in monthly or yearly returns (list of floats)
#outputs the sharpe_Ratio as a float
def sharpe_Ratio(returns, risk_free=.02, periods_per_year=12):
    """Annualised Sharpe ratio of a return series.

    returns: pandas Series (or anything with .mean()/.std()) of periodic
    returns.
    risk_free: annual risk-free rate (default 2%, the original
    hard-coded value).
    periods_per_year: annualisation factor for the volatility; the
    default of 12 reproduces the original monthly-returns behaviour.
    Adjust for other frequencies (e.g. 252 for daily).
    """
    # Annualise the per-period standard deviation by sqrt(periods).
    std = returns.std() * (periods_per_year ** .5)
    return (returns.mean() - risk_free) / std
# In[8]:
#Takes in monthly or yearly returns (list of floats) and the DataFrame asset1Data
#asset1Data must have Date column along with Price column
#outputs the alpha as a float
def alpha(returns, asset1Data):
    """Estimate portfolio alpha against a benchmark asset.

    returns: list of periodic portfolio returns (floats).
    asset1Data: DataFrame with 'Date' and 'Price' columns for the
    benchmark; monthlyReturns() is applied to it (side effect: a
    'Firstdaymonth' column is added).

    NOTE(review): "total return" here is computed as last return minus
    first return, not a compounded total -- confirm intent. Beta is
    hard-coded to 1.0 and the risk-free rate to 2%.
    """
    portfolio_total_return = returns[len(returns)-1] - returns[0]
    asset1_monthly_return = monthlyReturns(asset1Data)
    asset1_total_return = asset1_monthly_return[len(asset1_monthly_return) - 1] - asset1_monthly_return[0]
    # CAPM-style alpha with beta fixed at 1.0.
    return portfolio_total_return - .02 - (1.0 * (asset1_total_return - .02))
# In[9]:
#returns list of maximum draw downs as floats
#input must be dataframe with Price column
#difficult to interpret. At first I didn't think the code would work, but trust it. It works
def maxDrawDown(df):
    """Return a list of draw-downs, one per detected running-maximum.

    df: DataFrame with a 'Price' column; assumes positional indices are
    usable as labels (e.g. a default RangeIndex) -- TODO confirm, since
    df['Price'][j] below is label-based lookup.

    Each entry is (local minimum - preceding maximum) / maximum, i.e. a
    non-positive fraction. Draw-downs after the final recorded maximum
    are not captured.
    """
    initial_max = 0
    initial_index = 0
    initial_index_integer = 0
    #initialize array of local maximums
    maximums = []
    initial_max = df["Price"][initial_index_integer]
    maximums.append(initial_max)
    #initialize array of the indexes of the local maximums
    indexes = []
    indexes.append(initial_index_integer)
    #initialize array of local minimums between maximums
    minimums = []
    #fill arrays with local maximums and their indexes
    # A new "maximum" is recorded whenever the price exceeds the price at
    # the most recently recorded maximum (i.e. a new running high).
    for i in range(len(df)):
        if df["Price"][i] - df["Price"][initial_index_integer] > 0:
            initial_max = df["Price"][i]
            initial_index = df.index[i]
            initial_index_integer = df.index.get_loc(initial_index)
            maximums.append(initial_max)
            indexes.append(initial_index_integer)
    #fill array with the minimus between each 2 local maximums
    # 1000000000 acts as a "larger than any price" sentinel -- assumes
    # prices never reach 1e9.
    minimum = 1000000000
    for i in range(len(indexes)-1):
        for j in range(indexes[i], indexes[i + 1]):
            if df['Price'][j] < minimum:
                minimum = df['Price'][j]
        minimums.append(minimum)
        minimum = 1000000000
    #initialize max_draw_downs array
    max_draw_downs = []
    max_draw_down = 0
    #fill array with the max draw downs
    for i in range(len(maximums)-1):
        max_draw_down = ((minimums[i] - maximums[i])/maximums[i])
        max_draw_downs.append(max_draw_down)
    return max_draw_downs
# In[10]:
#Takes in dataframe with Price and Date columns
#returns the annualized compound return from the dataframe
def annualizedCompoundReturn(df):
    """Annualized compound (geometric) growth rate of df['Price'].

    df must have 'Date' and 'Price' columns; the year count comes from
    yearlyReturns(df), which also adds a 'Firstdayyear' column to df.
    """
    n_years = float(len(yearlyReturns(df)))
    total_growth = df.Price[len(df.Price)-1] / df.Price[0]
    return total_growth**(1/n_years) - 1
|
[
"jacob.cavner@gmail.com"
] |
jacob.cavner@gmail.com
|
69977fa0a2c06e8ff76915a512f98a4297e62f81
|
bb607ff53fba9fe657291f6249c2c22abda2db4c
|
/settings.py
|
abb0840405d3700432a6d955f107a180498f66fb
|
[
"MIT"
] |
permissive
|
AdrianSosaUV/pacman
|
0c2442913f3b62fe42b19eb1e32e213ed8bd55e2
|
09865179cf0d92bab0f0276a3dfe49a6727c9bb4
|
refs/heads/main
| 2023-07-02T18:45:33.439762
| 2021-08-01T22:17:17
| 2021-08-01T22:17:17
| 381,440,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 736
|
py
|
import pygame
import random
import sys
from pygame.math import Vector2 as vec
##### SCREEN SETTINGS #####
# Total pixel margin reserved around the maze for HUD/score text.
TOP_BOTTOM_BUFFER = 50
# Window size in pixels; the maze occupies the window minus the buffer.
WIDTH, HEIGHT = 610, 670
MAZE_WIDTH = WIDTH - TOP_BOTTOM_BUFFER
MAZE_HEIGHT = HEIGHT - TOP_BOTTOM_BUFFER
FPS = 60
# Maze grid dimensions, in cells.
COLS = 28
ROWS = 30
##### COLOR SETTINGS #####
# RGB colour tuples used throughout the game.
BLACK = (0, 0, 0)
ORANGE = (170, 132, 58)
BLUE = (33, 137, 156)
WHITE = (255, 255, 255)
RED = (208, 22, 22)
GRAY = (107, 107, 107)
PURPLE = (112, 55, 163)
PLAYER_COLOR = (204, 204, 0)
GOLD = (255, 255, 204)
CHERRY = (220, 20, 60)
##### FONT SETTINGS #####
START_TEXT_SIZE = 17
START_FONT = 'arial_black'
##### NPC SETTINGS #####
# Ghost colours, keyed by the Japanese names of the four classic
# Pac-Man ghost behaviours (chaser, ambusher, whimsical, feigned).
OIKAKE = (255, 0, 0)
MACHIBUSE = (255, 192, 203)
KIMAGURE = (0, 255, 255)
OTOBOKE = (255, 165, 0)
|
[
"adrsosa@uv.mx"
] |
adrsosa@uv.mx
|
389998e0016dfb745d0865cb2c25a7640c42ec89
|
d4c1e34a07eebeaf1bce82ad860404373816caab
|
/loadXMLUI.py
|
b6a77755d6942c646d721508327777dd01384160
|
[] |
no_license
|
GiedreJursh/animation_importer-exporter_anim-blend
|
f245424d549b763400fe58bccb5146f4687d4422
|
8d0db13f054ad8257a98cf3dfa5da92449b12d46
|
refs/heads/master
| 2021-03-08T01:22:09.142790
| 2020-04-15T09:34:04
| 2020-04-15T09:34:04
| 246,308,669
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,444
|
py
|
#==============================================================================
# Namn: Giedre Jursenaite ================
# Datum: 2018-12-04 ================
#==============================================================================
# Imports:
from maya import OpenMayaUI as omui
import pymel.core as pm
import PySide2
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2 import QtWidgets
from PySide2.QtUiTools import *
from shiboken2 import wrapInstance
import sys
# Load Window:
def getMayaWin():
    """Return Maya's main window wrapped as a QtWidgets.QMainWindow.

    BUG FIX: the original computed the wrapped window but never returned
    it, so callers (loadUI's parentWidget argument) always received None
    and loaded UIs were left unparented.
    """
    mayaWinPtr = omui.MQtUtil.mainWindow( )
    mayaWin = wrapInstance( long( mayaWinPtr ), QtWidgets.QMainWindow )
    return mayaWin
# Loads UI through path:
def loadUI( path ):
    """Load a Qt Designer .ui file and return the instantiated widget.

    The file's XML is read into a QByteArray, its <pixmap> icon paths
    are rewritten to absolute paths by fixXML(), and the patched buffer
    is fed to QUiLoader via a QBuffer. The returned widget carries the
    source path in a custom .path attribute.
    """
    loader = QUiLoader()
    uiFile = QFile( path )
    # dirIconShapes is assigned but never read afterwards -- kept as-is.
    dirIconShapes = ""
    buff = None
    if uiFile.exists():
        dirIconShapes = path
        uiFile.open( QFile.ReadOnly )
        buff = QByteArray( uiFile.readAll() )
        uiFile.close()
    else:
        # NOTE(review): exit(-1) terminates the whole interpreter session;
        # inside Maya a raised exception would be friendlier.
        print "UI file missing! Exiting..."
        exit(-1)
    # Patch <pixmap> entries in place so relative icon paths resolve.
    fixXML( path, buff )
    qbuff = QBuffer()
    qbuff.open( QBuffer.ReadOnly | QBuffer.WriteOnly )
    qbuff.write( buff )
    qbuff.seek( 0 )
    ui = loader.load( qbuff, parentWidget = getMayaWin() )
    # Remember where the UI came from for later reference.
    ui.path = path
    return ui
# Cleans up path:
def fixXML( path, qbyteArray ):
    """Rewrite every <pixmap> tag in the .ui byte buffer so relative icon
    paths become absolute, backslash-separated paths rooted at *path*.

    qbyteArray is modified in place; nothing is returned.
    """
    # Normalise the directory: ensure a trailing separator, then switch
    # to Windows-style backslashes.
    if path[-1] != '/':
        path += '/'
    win_path = path.replace( "/", "\\" )
    # Replacement text: the tag itself followed by the absolute directory.
    tag = "<pixmap>"
    replacement = QByteArray( tag + win_path + "\\" )
    # Walk every occurrence of the tag and splice in the replacement.
    pos = qbyteArray.indexOf( tag, 0 )
    while pos != -1:
        qbyteArray.replace( pos, len( tag ), replacement )
        pos = qbyteArray.indexOf( tag, pos + 1 )
# Creates class for UI controllers:
# Creates class for UI controllers:
class UIController:
    """Thin wrapper that holds a loaded UI widget and keeps it on top."""
    def __init__(self, ui):
        self.ui = ui
        # Keep the tool window above the Maya main window at all times.
        ui.setWindowFlags(Qt.WindowStaysOnTopHint)
#==============================================================================
#//////////////////////////////////////////////////////////////////////////////
#==============================================================================
# Imports Import and Export Scripts:
# Make the BlendAnimations scripts importable, then (re)load the
# import/export helper modules so edits are picked up inside Maya.
sys.path.append("C:/Users/.../BlendAnimations/")
import BinaryExportForUI
reload(BinaryExportForUI)
import BinaryImportForUI
reload(BinaryImportForUI)
# Loads All UI for the Script:
ui = loadUI("C:/Users/.../BlendAnimations/UI/ChooseOneAlternative.ui")
importUI = loadUI("C:/Users/.../BlendAnimations/UI/ChooseAnimations.ui")
exportUI = loadUI("C:/Users/.../BlendAnimations/UI/ExportAnimations.ui")
jointUI = loadUI("C:/Users/.../BlendAnimations/UI/AjustJointsForLayers.ui")
bakeAnimUI = loadUI("C:/Users/.../BlendAnimations/UI/BakeAnimations.ui")
# Global lists
# Skleton
# Skeleton caches filled in by BinaryExportForUI.HirarchyListCreator();
# cleared in place (del lst[:]) so these module-level references stay valid.
hirarchy = []
orientations = []
roatations = []
parentOrentations = []
parentOrentationsInvert = []
parentRotations = []
parentRotationsInvert = []
perentMatrixList = []
perentMatrixListInvers = []
# Binary file
# Per-animation-slot caches (four slots): joint names, per-frame joint
# matrices, frame/joint counts, chosen file paths and animation layers.
jointNameListAnim1 = []
jointMatrixesListAnim1 = []
nrOFFramesAndJointsAnim1 = []
jointNameListAnim2 = []
jointMatrixesListAnim2 = []
nrOFFramesAndJointsAnim2 = []
jointNameListAnim3 = []
jointMatrixesListAnim3 = []
nrOFFramesAndJointsAnim3 = []
jointNameListAnim4 = []
jointMatrixesListAnim4 = []
nrOFFramesAndJointsAnim4 = []
pathsList1 = []
pathsList2 = []
pathsList3 = []
pathsList4 = []
animLayerList1 = []
animLayerList2 = []
animLayerList3 = []
animLayerList4 = []
# Creates class for Main Window.
uiCtrl1 = UIController(ui)
# Shows Main Window:
ui.show()
# Creates other classes:
# Choose animations to import Window:
uiCtrl2 = UIController(importUI)
# Export animation Window:
uiCtrl3 = UIController(exportUI)
# Ajust joints for import animations Window:
uiCtrl4 = UIController(jointUI)
# Ajust weights and bake animation Window:
uiCtrl5 = UIController(bakeAnimUI)
#==============================================================================
# The first Window on click events:
def OpenImport():
    """Switch from the main chooser window to the import-animations window."""
    ui.hide()
    importUI.show()
def OpenExport():
    """Switch to the export window, rebuilding the skeleton caches first."""
    # Drop data from any previous run so the scene is re-read from scratch.
    if len(hirarchy) > 0:
        for cache in (hirarchy, orientations, roatations,
                      parentOrentations, parentOrentationsInvert,
                      parentRotations, parentRotationsInvert,
                      perentMatrixList, perentMatrixListInvers):
            del cache[:]
    BinaryExportForUI.HirarchyListCreator(hirarchy, orientations, roatations, perentMatrixList, perentMatrixListInvers, parentOrentations, parentOrentationsInvert, parentRotations, parentRotationsInvert)
    ui.hide()
    exportUI.show()
    # Show the exportable joints in their current hierarchy order.
    for joint in hirarchy:
        exportUI.SourceList.addItem(str(joint))
#==============================================================================
def _chooseAnimFile(listWidget, paths):
    """Shared body of the four FindAnimPathN handlers.

    Opens a file dialog, shows the picked file in *listWidget* (replacing
    any previous entry) and stores the raw dialog result at paths[0].
    Does nothing when the dialog is cancelled.
    """
    path = BinaryImportForUI.OpenFiles()
    if path is None:
        return
    # OpenFiles() returns a sequence; str() + split on the quote characters
    # extracts the first element's text (the displayable file path).
    nameString = str(path).split("'")[1]
    if listWidget.count() > 0:
        listWidget.takeItem(0)
    listWidget.addItem(nameString)
    if len(paths) > 0:
        paths.pop(0)
    paths.append(path)

# Thin wrappers kept so the existing .clicked.connect(...) wiring still works.
def FindAnimPath1():
    _chooseAnimFile(importUI.AnimList1, pathsList1)

def FindAnimPath2():
    _chooseAnimFile(importUI.AnimList2, pathsList2)

def FindAnimPath3():
    _chooseAnimFile(importUI.AnimList3, pathsList3)

def FindAnimPath4():
    _chooseAnimFile(importUI.AnimList4, pathsList4)
def LoadAnimations():
    """Decode every chosen binary animation file, read the target skeleton
    from the scene, and move on to the joint-adjustment window."""
    # Read each slot that actually has a chosen file.
    for paths, names, matrixes, counts in (
            (pathsList1, jointNameListAnim1, jointMatrixesListAnim1, nrOFFramesAndJointsAnim1),
            (pathsList2, jointNameListAnim2, jointMatrixesListAnim2, nrOFFramesAndJointsAnim2),
            (pathsList3, jointNameListAnim3, jointMatrixesListAnim3, nrOFFramesAndJointsAnim3),
            (pathsList4, jointNameListAnim4, jointMatrixesListAnim4, nrOFFramesAndJointsAnim4)):
        if len(paths) > 0:
            BinaryImportForUI.ReadFromFiles(paths[0], names, matrixes, counts)
    # BUG FIX: the last argument used to be perentMatrixListInvers (passed
    # twice), so parentRotationsInvert was never populated; pass the
    # inverse-rotation list, matching the export-side call.
    BinaryImportForUI.HirarchyListCreator(hirarchy, orientations, roatations, perentMatrixList, perentMatrixListInvers, parentOrentations, parentOrentationsInvert, parentRotations, parentRotationsInvert)
    importUI.hide()
    jointUI.show()
    # Populate the target-skeleton list and each loaded animation's joints.
    for h in hirarchy:
        jointUI.TargetList.addItem(str(h))
    for listWidget, names in ((jointUI.AnimList1, jointNameListAnim1),
                              (jointUI.AnimList2, jointNameListAnim2),
                              (jointUI.AnimList3, jointNameListAnim3),
                              (jointUI.AnimList4, jointNameListAnim4)):
        for name in names:
            listWidget.addItem(name)
#==============================================================================
def TargetUp():
    """Move the selected target joint one row up, keeping every cached
    per-joint data list aligned with the list widget."""
    row = jointUI.TargetList.currentRow()
    for data in (hirarchy, parentOrentations, parentOrentationsInvert,
                 parentRotations, perentMatrixList, perentMatrixListInvers):
        data[row], data[row - 1] = data[row - 1], data[row]
    item = jointUI.TargetList.takeItem(row)
    jointUI.TargetList.insertItem(row - 1, item)
    jointUI.TargetList.setCurrentRow(row - 1)
def TargetDown():
    """Move the selected target joint one row down, keeping every cached
    per-joint data list aligned with the list widget."""
    row = jointUI.TargetList.currentRow()
    for data in (hirarchy, parentOrentations, parentOrentationsInvert,
                 parentRotations, perentMatrixList, perentMatrixListInvers):
        data[row], data[row + 1] = data[row + 1], data[row]
    item = jointUI.TargetList.takeItem(row)
    jointUI.TargetList.insertItem(row + 1, item)
    jointUI.TargetList.setCurrentRow(row + 1)
def TargetDelete():
    """Remove the selected target joint and its cached per-joint data."""
    row = jointUI.TargetList.currentRow()
    jointUI.TargetList.takeItem(row)
    for data in (hirarchy, parentOrentations, parentOrentationsInvert,
                 parentRotations, perentMatrixList, perentMatrixListInvers):
        data.pop(row)
def _swapAnimRows(listWidget, names, matrixes, step):
    """Swap the selected row of *listWidget* with its neighbour and keep the
    backing name/matrix lists in the same order.

    step: -1 moves the row up, +1 moves it down.  No bounds checking is
    performed, matching the original per-slot handlers.
    """
    row = listWidget.currentRow()
    other = row + step
    names[row], names[other] = names[other], names[row]
    matrixes[row], matrixes[other] = matrixes[other], matrixes[row]
    item = listWidget.takeItem(row)
    listWidget.insertItem(other, item)
    listWidget.setCurrentRow(other)

def _deleteAnimRow(listWidget, names, matrixes):
    """Remove the selected row of *listWidget* and the matching entries of
    the backing name/matrix lists."""
    row = listWidget.currentRow()
    listWidget.takeItem(row)
    names.pop(row)
    matrixes.pop(row)

# Thin wrappers kept so the existing .clicked.connect(...) wiring still works.
def Anim1Up():
    _swapAnimRows(jointUI.AnimList1, jointNameListAnim1, jointMatrixesListAnim1, -1)

def Anim2Up():
    _swapAnimRows(jointUI.AnimList2, jointNameListAnim2, jointMatrixesListAnim2, -1)

def Anim3Up():
    _swapAnimRows(jointUI.AnimList3, jointNameListAnim3, jointMatrixesListAnim3, -1)

def Anim4Up():
    _swapAnimRows(jointUI.AnimList4, jointNameListAnim4, jointMatrixesListAnim4, -1)

def Anim1Down():
    _swapAnimRows(jointUI.AnimList1, jointNameListAnim1, jointMatrixesListAnim1, 1)

def Anim2Down():
    _swapAnimRows(jointUI.AnimList2, jointNameListAnim2, jointMatrixesListAnim2, 1)

def Anim3Down():
    _swapAnimRows(jointUI.AnimList3, jointNameListAnim3, jointMatrixesListAnim3, 1)

def Anim4Down():
    _swapAnimRows(jointUI.AnimList4, jointNameListAnim4, jointMatrixesListAnim4, 1)

def Anim1Delete():
    _deleteAnimRow(jointUI.AnimList1, jointNameListAnim1, jointMatrixesListAnim1)

def Anim2Delete():
    _deleteAnimRow(jointUI.AnimList2, jointNameListAnim2, jointMatrixesListAnim2)

def Anim3Delete():
    _deleteAnimRow(jointUI.AnimList3, jointNameListAnim3, jointMatrixesListAnim3)

def Anim4Delete():
    _deleteAnimRow(jointUI.AnimList4, jointNameListAnim4, jointMatrixesListAnim4)
def CreateBakedLayers():
    """Create one Maya animation layer per loaded source animation.

    For every slot with decoded matrix data: checks that the source joint
    count matches the target skeleton, derives the layer name from the
    source file and lets BinaryImportForUI.CreateLayers build the layer.
    On any joint-count mismatch an error is reported and the UI stays on
    the joint-adjustment window.
    """
    allOk = True
    slots = (
        ('1', pathsList1, jointNameListAnim1, jointMatrixesListAnim1,
         nrOFFramesAndJointsAnim1, animLayerList1),
        ('2', pathsList2, jointNameListAnim2, jointMatrixesListAnim2,
         nrOFFramesAndJointsAnim2, animLayerList2),
        ('3', pathsList3, jointNameListAnim3, jointMatrixesListAnim3,
         nrOFFramesAndJointsAnim3, animLayerList3),
        ('4', pathsList4, jointNameListAnim4, jointMatrixesListAnim4,
         nrOFFramesAndJointsAnim4, animLayerList4),
    )
    for slot, paths, names, matrixes, counts, layers in slots:
        if len(matrixes) == 0:
            continue  # nothing loaded into this slot
        if len(hirarchy) == len(names):
            animName = BinaryImportForUI.FindAnimName(paths[0])
            # Remember the layer name at index 1; the weight sliders and
            # the name labels below read it back from there.
            paths.append(animName)
            BinaryImportForUI.CreateLayers(animName, hirarchy, counts[0], parentRotations, parentOrentations, parentOrentationsInvert, perentMatrixList, perentMatrixListInvers, matrixes, layers)
        else:
            # Same wording as before (including the 'skeletton' spelling).
            sys.stdout.write('Error: The number of selected joints for target skeletton and source animation ' + slot + ' must be the same.')
            allOk = False
    # BUG FIX: was 'nope1 is not 0 and ...' -- an identity comparison
    # against an int literal; replaced by a plain boolean flag.
    if allOk:
        jointUI.hide()
        bakeAnimUI.show()
        pm.play(f = True)
        for label, paths in ((bakeAnimUI.AnimNameRef1, pathsList1),
                             (bakeAnimUI.AnimNameRef2, pathsList2),
                             (bakeAnimUI.AnimNameRef3, pathsList3),
                             (bakeAnimUI.AnimNameRef4, pathsList4)):
            if len(paths) > 0:
                label.addItem(paths[1])
def _applyLayerWeight(slider, paths):
    """Push a weight slider's value (0-100) to its animation layer (0.0-1.0).

    paths[1] holds the layer name stored by CreateBakedLayers().
    """
    weight = float(slider.value()) / 100.0
    pm.animLayer(paths[1], edit = True, w = weight)

# Thin wrappers kept so the existing .sliderMoved.connect(...) wiring works.
def Slider1Moved():
    _applyLayerWeight(bakeAnimUI.WeightSlider1, pathsList1)

def Slider2Moved():
    _applyLayerWeight(bakeAnimUI.WeightSlider2, pathsList2)

def Slider3Moved():
    _applyLayerWeight(bakeAnimUI.WeightSlider3, pathsList3)

def Slider4Moved():
    _applyLayerWeight(bakeAnimUI.WeightSlider4, pathsList4)
#==============================================================================
def BakeAnimationsToBaseLayer():
    """Stop playback, bake all weighted layers down onto the base layer,
    delete the temporary layers and return to the main window."""
    pm.play(st = False)
    BinaryImportForUI.BakeAnimations(hirarchy)
    for layers in (animLayerList1, animLayerList2, animLayerList3, animLayerList4):
        if len(layers) > 0:
            pm.delete(layers[0])
    bakeAnimUI.hide()
    ui.show()
#==============================================================================
def SourceUp():
    """Move the selected export joint one row up, keeping every cached
    per-joint data list aligned with the list widget."""
    row = exportUI.SourceList.currentRow()
    for data in (hirarchy, parentOrentations, parentOrentationsInvert,
                 parentRotations, parentRotationsInvert,
                 perentMatrixList, perentMatrixListInvers):
        data[row], data[row - 1] = data[row - 1], data[row]
    item = exportUI.SourceList.takeItem(row)
    exportUI.SourceList.insertItem(row - 1, item)
    exportUI.SourceList.setCurrentRow(row - 1)
def SourceDown():
    """Move the selected export joint one row down, keeping every cached
    per-joint data list aligned with the list widget."""
    row = exportUI.SourceList.currentRow()
    for data in (hirarchy, parentOrentations, parentOrentationsInvert,
                 parentRotations, parentRotationsInvert,
                 perentMatrixList, perentMatrixListInvers):
        data[row], data[row + 1] = data[row + 1], data[row]
    item = exportUI.SourceList.takeItem(row)
    exportUI.SourceList.insertItem(row + 1, item)
    exportUI.SourceList.setCurrentRow(row + 1)
def SourceDelete():
    """Remove the selected export joint and its cached per-joint data."""
    row = exportUI.SourceList.currentRow()
    exportUI.SourceList.takeItem(row)
    for data in (hirarchy, parentOrentations, parentOrentationsInvert,
                 parentRotations, parentRotationsInvert,
                 perentMatrixList, perentMatrixListInvers):
        data.pop(row)
def ExportAnimations():
    """Write the selected frame range of the current skeleton to a binary
    file, then tear down every window of the tool.

    Does nothing when the save dialog is cancelled; reports an error when
    the chosen frame range is empty.
    """
    filePath = BinaryExportForUI.CreateFilePath()
    if filePath is None:
        return
    frameStart = exportUI.FramesFromSpin.value()
    frameEnd = exportUI.FramesToSpin.value()
    nrOfframes = frameEnd - frameStart
    # BUG FIX: was 'frameEnd is not frameStart' -- 'is not' compares object
    # identity and is unreliable for ints outside CPython's small-int
    # cache; use a value comparison.
    if frameEnd != frameStart:
        BinaryExportForUI.WriteToFile(filePath, hirarchy, parentRotationsInvert, parentOrentationsInvert, perentMatrixListInvers, perentMatrixList, parentOrentations, frameStart, frameEnd, nrOfframes)
        exportUI.hide()
        ui.destroy()
        importUI.destroy()
        jointUI.destroy()
        bakeAnimUI.destroy()
        exportUI.destroy()
        #sys.exit()
    else:
        sys.stdout.write('Error: The number of frames should be more than 0.')
#==============================================================================
# Main UI window buttons:
ui.ImportAnim.clicked.connect(OpenImport)
ui.ExportAnim.clicked.connect(OpenExport)
# Import window: per-slot file pickers and the loader.
importUI.ChoseAnim1.clicked.connect(FindAnimPath1)
importUI.ChoseAnim2.clicked.connect(FindAnimPath2)
importUI.ChoseAnim3.clicked.connect(FindAnimPath3)
importUI.ChoseAnim4.clicked.connect(FindAnimPath4)
importUI.LoadAnimButton.clicked.connect(LoadAnimations)
# Joint-adjustment window: reorder/delete rows of the target skeleton and
# of each loaded animation, then build the layers.
jointUI.CreateLayersButton.clicked.connect(CreateBakedLayers)
jointUI.TargetUp.clicked.connect(TargetUp)
jointUI.TargetDelete.clicked.connect(TargetDelete)
jointUI.TargetDown.clicked.connect(TargetDown)
jointUI.Anim1Up.clicked.connect(Anim1Up)
jointUI.Anim2Up.clicked.connect(Anim2Up)
jointUI.Anim3Up.clicked.connect(Anim3Up)
jointUI.Anim4Up.clicked.connect(Anim4Up)
jointUI.Anim1Delete.clicked.connect(Anim1Delete)
jointUI.Anim2Delete.clicked.connect(Anim2Delete)
jointUI.Anim3Delete.clicked.connect(Anim3Delete)
jointUI.Anim4Delete.clicked.connect(Anim4Delete)
jointUI.Anim1Down.clicked.connect(Anim1Down)
jointUI.Anim2Down.clicked.connect(Anim2Down)
jointUI.Anim3Down.clicked.connect(Anim3Down)
jointUI.Anim4Down.clicked.connect(Anim4Down)
# Bake window: per-layer weight sliders and the final bake.
bakeAnimUI.BakeAnimButton.clicked.connect(BakeAnimationsToBaseLayer)
bakeAnimUI.WeightSlider1.sliderMoved.connect(Slider1Moved)
bakeAnimUI.WeightSlider2.sliderMoved.connect(Slider2Moved)
bakeAnimUI.WeightSlider3.sliderMoved.connect(Slider3Moved)
bakeAnimUI.WeightSlider4.sliderMoved.connect(Slider4Moved)
# Export window: joint reordering and the final write-out.
exportUI.SourceUp.clicked.connect(SourceUp)
exportUI.SourceDelete.clicked.connect(SourceDelete)
exportUI.SourceDown.clicked.connect(SourceDown)
exportUI.ExportAnimationButton.clicked.connect(ExportAnimations)
|
[
"noreply@github.com"
] |
noreply@github.com
|
6e1aa88b726678e7f8b9712a1ce89865aa0879a7
|
98819f2bd523a159a5ea820f76f1705879048b32
|
/osb-proxy/editPOIexec
|
781f1de029acd8cff60eec463b1f255a790500dd
|
[] |
no_license
|
cboehme/mappa-mercia
|
97f9bcf11b4950772acf28c0f5fb0d14c2dee008
|
6a63007587d54c9771c71e707b734495b3041cdf
|
refs/heads/master
| 2023-07-19T04:43:12.884620
| 2021-09-09T11:55:08
| 2021-09-09T11:55:08
| 305,172,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 832
|
#!/usr/bin/env python
import MySQLdb
import cgi
import re
def main():
print "Content-type: text/html\n"
form = cgi.FieldStorage()
values = {}
values["id"] = form.getfirst("id", "")
values["text"] = form.getfirst("text", "")
# Check that parameters have a valid format:
if not re.match(r"^\d+$", values["id"]):
print "invalid or missing id"
return
if values["text"].strip() == "" or not re.match(r"^[^\x00-\x1f]*$", values["text"]):
print "invalid or missing text"
return
values["text"] = cgi.escape(values["text"], True)
conn = MySQLdb.connect("localhost", user="osb", passwd="osb12", db="osb")
curs = conn.cursor()
curs.execute("UPDATE bugs SET type = 0, text = CONCAT(text, '<hr />', %(text)s), last_changed = NOW() WHERE id = %(id)s and type = 0", values)
conn.commit()
print "comment added"
main()
|
[
"christoph@b3e.net"
] |
christoph@b3e.net
|
|
744c28d6995722779eb62143a829e3a0342ff3aa
|
600879a175c0d077ffdc7f06b630517eedc712c7
|
/parser.py
|
950b7c097dd191064002d04fe052056881e4a763
|
[] |
no_license
|
lebahoang/cooking-chatbot
|
7a41ae5275f81c1bc5f09d4a751abcbc66774be2
|
36c2dd74d29aa9f1e28a399651e36c65fbb4c06d
|
refs/heads/master
| 2021-01-20T15:09:47.185166
| 2017-05-15T04:19:33
| 2017-05-15T04:19:33
| 90,725,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,376
|
py
|
import os
import functools
import bson
import optparse
import pykka
import time
import Discusscooking
import Crutils
class DiscussActor(pykka.ThreadingActor):
    """Pykka actor that parses one discussion thread from disk and inserts
    the resulting posts into MongoDB."""

    def __init__(self):
        super(DiscussActor, self).__init__()
        self.posts = []
        self.site = Discusscooking.Discusscooking(runPreConfig=False)

    def parseDiscuss(self, pathToDiscussionStorage, discussId, mongoDB):
        """Parse every page file of one discussion and store its posts.

        Pages are processed shortest-name-first, then lexicographically,
        so numeric page names sort naturally ("2" before "10").
        """
        self.posts = []
        pages = sorted(os.listdir(pathToDiscussionStorage + '/'),
                       key=lambda name: (len(name), name))
        for page in pages:
            item = Crutils.Item(pathToDiscussionStorage, page, '', None)
            page_posts = self.site.parse(item)
            for post in page_posts:
                post['threadId'] = discussId
            # An empty page is suspicious -- log it for manual inspection.
            if not page_posts:
                print('Check', pathToDiscussionStorage + '/' + page)
            # If a page's first post has no explicit parent, chain it to
            # the last post of the previous page.
            if page_posts and 'replyTo' not in page_posts[0] and self.posts:
                page_posts[0]['replyTo'] = self.posts[-1]['postId']
            self.posts.extend(page_posts)
        for post in self.posts:
            post['_id'] = bson.objectid.ObjectId()
            mongoDB.db['posts'].insert_one(post)

    def on_receive(self, msg):
        """Actor entry point: returns (1, None) on success, (0, msg) on error."""
        if 'pathToDiscussionStorage' not in msg or 'discussId' not in msg or 'mongoDB' not in msg:
            raise Exception('Missing pathToDiscussionStorage or discussId or mongoDB')
        try:
            self.parseDiscuss(msg['pathToDiscussionStorage'], msg['discussId'], msg['mongoDB'])
            return 1, None
        except Exception as e:
            print(e)
            return 0, msg
# class Count(pykka.ThreadingActor):
# def __init__(self, name):
# super(Count, self).__init__()
# self.sum = 0
# self.name = name
# def on_receive(self, a):
# time.sleep(5)
# v = a['a']
# self.sum += v
# print('return sum', self.name)
# return self.sum
# if self.name == '1':
# raise Exception('TEST')
# return 'FINE'
if __name__ == "__main__":
    parser = optparse.OptionParser()
    parser.add_option('--path', dest='path', default='.' , help='Root folder to start the parser')
    parser.add_option('-p', '--pool-size', dest='poolSize', default='20' , help='Pool size of actor pool')
    options, args = parser.parse_args()
    options.poolSize = int(options.poolSize)
    poolSize = options.poolSize
    # One actor per pool slot; f[k] holds the outstanding future of slot k.
    pool = [DiscussActor.start() for _ in range(poolSize)]
    f = [None for _ in range(poolSize)]
    # Each sub-directory of --path is one discussion thread.
    discussions = os.listdir(options.path)
    print('length of discussion', len(discussions))
    print('length of pool', len(pool))
    mongoDB = Crutils.mongoDriver()
    i = 0  # index of the next discussion to submit
    j = 0  # next free pool slot
    while i < len(discussions):
        if j < poolSize:
            # Register the thread document first so posts can reference its id.
            discussId = mongoDB.db['threads'].insert_one({ '_id': bson.objectid.ObjectId(), 'thread': discussions[i]}).inserted_id
            # Non-blocking ask; results are collected once the pool fills up.
            f[j] = pool[j].ask({'pathToDiscussionStorage': options.path + '/' + discussions[i], 'discussId': discussId, 'mongoDB': mongoDB}, block=False)
            i += 1
            j += 1
        else:
            # Pool full: drain every future, then reuse all slots.  i is not
            # advanced here, so the current discussion is submitted on the
            # next loop iteration.
            j = 0
            for k in range(poolSize):
                v, msg = f[k].get()
                if v == 0:
                    print('Check', msg)
    # NOTE(review): futures submitted after the last full drain (slots
    # 0..j-1) are never get()-checked before shutdown -- confirm whether
    # their results/errors matter.  Also note this 'for i' reuses the outer
    # loop counter (harmless after the while loop, but confusing).
    for i in range(poolSize):
        pool[i].stop()
    print('OK!!!')
|
[
"hoang.le@zinio.com"
] |
hoang.le@zinio.com
|
ae5543276e6ec4f6dc0823885d5ba0a303c5e818
|
cfc3fa658f826d02308453e557d82758895399c2
|
/datasets/covid_qa_deepset/covid_qa_deepset.py
|
d43c1e5924c54b5a73b245220e1e6d2c37d225e1
|
[
"Apache-2.0"
] |
permissive
|
meehawk/datasets
|
cac530ec0e17514c01cdff30302521d6303ed93b
|
b70141e3c5149430951773aaa0155555c5fb3e76
|
refs/heads/master
| 2023-03-29T12:51:54.700891
| 2021-04-08T17:22:53
| 2021-04-08T17:22:53
| 355,996,122
| 9
| 0
|
Apache-2.0
| 2021-04-08T17:31:03
| 2021-04-08T17:31:02
| null |
UTF-8
|
Python
| false
| false
| 4,607
|
py
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""COVID-QA: A Question Answering Dataset for COVID-19."""
from __future__ import absolute_import, division, print_function
import json
import datasets
# Module-level logger and dataset metadata constants.
logger = datasets.logging.get_logger(__name__)

# BibTeX citation for the source paper.
# BUG FIX: in a non-raw string the sequence \" collapses to a plain quote,
# so the BibTeX umlaut M{\"o}ller was emitted as M{"o}ller; escape the
# backslash to preserve it.
_CITATION = """\
@inproceedings{moller2020covid,
    title={COVID-QA: A Question Answering Dataset for COVID-19},
    author={M{\\"o}ller, Timo and Reina, Anthony and Jayakumar, Raghavan and Pietsch, Malte},
    booktitle={Proceedings of the 1st Workshop on NLP for COVID-19 at ACL 2020},
    year={2020}
}
"""

# You can copy an official description
_DESCRIPTION = """\
COVID-QA is a Question Answering dataset consisting of 2,019 question/answer pairs annotated by volunteer biomedical \
experts on scientific articles related to COVID-19.
"""

_HOMEPAGE = "https://github.com/deepset-ai/COVID-QA"

_LICENSE = "Apache License 2.0"

# Raw-file location of the single JSON data file on GitHub.
_URL = "https://raw.githubusercontent.com/deepset-ai/COVID-QA/master/data/question-answering/"
_URLs = {"covid_qa_deepset": _URL + "COVID-QA.json"}
class CovidQADeepset(datasets.GeneratorBasedBuilder):
    """Builder for COVID-QA: SQuAD-style extractive QA pairs annotated on
    COVID-19 research articles; exposes a single train split downloaded
    from the deepset GitHub repository."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="covid_qa_deepset", version=VERSION, description="COVID-QA deepset"),
    ]

    def _info(self):
        """Declare the SQuAD-like feature schema (plus document_id and
        is_impossible)."""
        features = datasets.Features(
            {
                "document_id": datasets.Value("int32"),
                "context": datasets.Value("string"),
                "question": datasets.Value("string"),
                "is_impossible": datasets.Value("bool"),
                "id": datasets.Value("int32"),
                # Parallel sequences: one answer_start per answer text.
                "answers": datasets.features.Sequence(
                    {
                        "text": datasets.Value("string"),
                        "answer_start": datasets.Value("int32"),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the single JSON file; the dataset has no dev/test split."""
        url = _URLs[self.config.name]
        downloaded_filepath = dl_manager.download_and_extract(url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_filepath},
            ),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            covid_qa = json.load(f)
            for article in covid_qa["data"]:
                for paragraph in article["paragraphs"]:
                    context = paragraph["context"].strip()
                    document_id = paragraph["document_id"]
                    for qa in paragraph["qas"]:
                        question = qa["question"].strip()
                        is_impossible = qa["is_impossible"]
                        id_ = qa["id"]
                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                        answers = [answer["text"].strip() for answer in qa["answers"]]
                        # Features currently used are "context", "question", and "answers".
                        # Others are extracted here for the ease of future expansions.
                        yield id_, {
                            "document_id": document_id,
                            "context": context,
                            "question": question,
                            "is_impossible": is_impossible,
                            "id": id_,
                            "answers": {
                                "answer_start": answer_starts,
                                "text": answers,
                            },
                        }
|
[
"noreply@github.com"
] |
noreply@github.com
|
3250c742e18eed7ddfe5a22d65db4e60f3926cbe
|
4039fa476687096b4cc6e931da6e81bc2f5f136b
|
/guestbook/migrations/0001_initial.py
|
818ec5376f6f2966eb42ad91719a4eeaa23caa4e
|
[] |
no_license
|
Kuzzmich/broomtrade
|
469073625d0e880640641defd939759639399009
|
93f82de850e8b9148adfd89f2158fb575d6cb73f
|
refs/heads/master
| 2020-03-08T20:19:34.258706
| 2018-06-07T12:45:31
| 2018-06-07T12:45:31
| 128,379,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 973
|
py
|
# Generated by Django 2.0 on 2018-04-11 09:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Guestbook model.

    Once this migration has been applied anywhere, do not edit it by hand;
    Django replays this file verbatim to rebuild the schema, so changes
    belong in a new migration instead.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Guestbook',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.CharField(max_length=20, verbose_name='Пользователь')),
                ('posted', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Опубликовано')),
                ('content', models.TextField(verbose_name='Содержание')),
            ],
            options={
                'verbose_name': 'запись гостевой книги',
                # NOTE(review): verbose_name_plural is the same singular
                # Russian string as verbose_name -- likely unintended, but
                # fixing it requires a new migration, not an edit here.
                'verbose_name_plural': 'запись гостевой книги',
                'ordering': ['-posted'],
            },
        ),
    ]
|
[
"kramin.alexey@gmail.com"
] |
kramin.alexey@gmail.com
|
e08be3ba1cb5e7385ca2ee43f750efd00c3a8623
|
ecc3dd38ed577fe3d8eff65ab36e9123a3f9f48f
|
/Clustering/AngelCalvoTask1.py
|
1fc98490623b243d5c08b59c18963223cb5b1514
|
[] |
no_license
|
AngelCalvoGrande/MachineLearningTechniques
|
813299c394a805d5c422b9e1d426ffc0c4bdcfde
|
ac124af84581888fb73a64e263c865db3d42145a
|
refs/heads/master
| 2020-08-09T06:40:46.469766
| 2020-01-31T22:35:20
| 2020-01-31T22:35:20
| 214,023,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,639
|
py
|
# -*- coding: utf-8 -*-
"""
Spyder editor

Temporary analysis script: load one day of smartphone sensor readings,
project the mean-aggregated features with PCA and cluster them with k-means.
"""

import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
#0 . Load the data
# read the csv
df = pd.read_csv("T2.csv")
# list the columns
list(df)
# print number of rows and columns
print (df.shape)
# 1. Filtering
# 1.1 Filter rows
# convert string to datetime .... Be careful!!! Spelling errors!!!
# NOTE: the CSV column really is spelled 'TimeStemp'.
df['TimeStemp'] = pd.to_datetime(df['TimeStemp'])
# extract date from datetime
df['date'] = [d.date() for d in df['TimeStemp']]
# list the available days
df['date'].unique()
#filter data by date
# Keep only 2016-04-28.
df28 = df[(df['TimeStemp'] > '2016-04-28 00:00:00') & (df['TimeStemp'] <= '2016-04-28 23:59:59')]
print (df28.shape)
#1.2. Filter Features
# Keep the Orientation sensor's MEAN-aggregated columns; the commented-out
# alternatives select other sensors instead.
#df28f = df28[[c for c in df if c.startswith('AccelerometerStat')]]
#df28f = df28[[c for c in df if c.startswith('Gyroscope')]]
df28f = df28[[c for c in df if c.startswith('Orientation')]]
#df28f = df28[[c for c in df if c.startswith('RotationVector')]]
#df28f = df28[[c for c in df if c.startswith('LinearAcceleration')]]
df28f = df28[[c for c in df28f if c.endswith('MEAN')]]
print(df28f)
list(df28f)
# RotationVector_cosThetaOver2_MEAN is a feature with all values as NaN
exclude = ["RotationVector_cosThetaOver2_MEAN"]
df28f = df28f.loc[:, df28f.columns.difference(exclude)]
# 1.3 remove missing values
df28f.isnull().values.any()
# filter/remove rows with missing values (na) (Be careful!!!)
df28f = df28f.dropna()
df28f.isnull().values.any()
print (df28f.shape)
# 2. Principal Component Analysis
#2.1 Scalation
# Standardise each feature to zero mean / unit variance before PCA.
from sklearn import preprocessing
scaler = preprocessing.StandardScaler()
datanorm = scaler.fit_transform(df28f)
#2.2 Modelling (PCA)
from sklearn.decomposition import PCA
n_components = 3
estimator = PCA (n_components)
X_pca = estimator.fit_transform(datanorm)
print(X_pca)
# is it representative the 2D projection?
# Fraction of total variance captured by each principal component.
print (estimator.explained_variance_ratio_)
#2.3 Plot
import matplotlib.pyplot as plt
import numpy
if (n_components >= 2):
    x = X_pca[:,0]
    y = X_pca[:,1]
    plt.scatter(x,y)
    plt.show()
if (n_components >= 3):
    fig = plt.figure()
    ax = Axes3D(fig)
    x = X_pca[:,0]
    y = X_pca[:,1]
    z = X_pca[:,2]
    ax.scatter(x,y,z)
    plt.show()
# Clustering
from sklearn.cluster import KMeans
iterations = 10
max_iter = 300
tol = 1e-04
random_state = 0
k = 4
init = "random"
km = KMeans(k, init, n_init = iterations ,max_iter= max_iter, tol = tol,random_state = random_state)
labels = km.fit_predict(X_pca)
from sklearn import metrics
distortions = []
silhouettes = []
for i in range(2, 11):
km = KMeans(i, init, n_init = iterations ,max_iter= max_iter, tol = tol,random_state = random_state)
labels = km.fit_predict(X_pca)
distortions.append(km.inertia_)
silhouettes.append(metrics.silhouette_score(X_pca, labels))
plt.plot(range(2,11), distortions, marker='o')
plt.xlabel('K')
plt.ylabel('Distortion')
plt.show()
plt.plot(range(2,11), silhouettes , marker='o')
plt.xlabel('K')
plt.ylabel('Silhouette')
plt.show()
print (metrics.silhouette_score(X_pca, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X_pca, labels))
print('Distortion: %.2f' % km.inertia_)
print(labels)
x = X_pca[:,0]
y = X_pca[:,1]
plt.scatter(x,y, c = labels)
# plotting centroids
plt.scatter(km.cluster_centers_[:,0], km.cluster_centers_[:,1], c='red',s=50)
plt.grid()
plt.show()
fig = plt.figure()
ax = Axes3D(fig)
x = X_pca[:,0]
y = X_pca[:,1]
z = X_pca[:,2]
plt.scatter(km.cluster_centers_[:,0], km.cluster_centers_[:,1], km.cluster_centers_[:,2], c='red')
ax.scatter(x,y,z, c = labels)
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.