| blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 2-616 chars) | content_id (string, 40 chars) | detected_licenses (list, 0-69 items) | license_type (2 classes) | repo_name (string, 5-118 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 4-63 chars) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M chars) | authors (list, 1 item) | author_id (string, 0-212 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
86afa0e5e88a8970004df2cfeba52d5e44b8418e
|
d933cfa05e370c6f02bece2dd30b89745bf0fbed
|
/frozen_happines/urls.py
|
ac88c05a79715ebfae73f58f7efb14c0c34862db
|
[] |
no_license
|
pynef/frozen_happiness
|
828c98e428eccaf3fc74ccb57b4c7c62808e98b5
|
84829f44e5c133a4d7143285a5d32ddbc38d56bc
|
refs/heads/master
| 2020-12-24T21:36:52.701366
| 2016-05-16T12:07:43
| 2016-05-16T12:07:43
| 58,926,394
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^',include('frozen_happines.apps.web.urls')),
]
|
[
"nefinef@gmail.com"
] |
nefinef@gmail.com
|
755a9ec32509ad9fbac62ad3e09dbc46fc24ba6c
|
10af1202867e07ec4769670efbd3f32e6a297511
|
/captioning/neuraltalk2-master/coco-caption/pycocoevalcap/tokenizer/ptbtokenizer.py
|
990d342cddddc5c9a71fd2ed17c79d5e6509e241
|
[
"BSD-2-Clause-Views"
] |
permissive
|
amritasaha1812/bridge_seq_learning
|
75ab692fec6be3a650c6cd73f6a94d8c5181f49e
|
d8d383e35942584a13f18caf28a5f68a1eeb9642
|
refs/heads/master
| 2021-01-11T06:23:59.009266
| 2016-10-07T12:17:17
| 2016-10-07T12:17:17
| 69,964,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,866
|
py
|
#!/usr/bin/env python
#
# File Name : ptbtokenizer.py
#
# Description : Do the PTB Tokenization and remove punctuations.
#
# Creation Date : 29-12-2014
# Last Modified : Thu Mar 19 09:53:35 2015
# Authors : Hao Fang <hfang@uw.edu> and Tsung-Yi Lin <tl483@cornell.edu>
import os
import sys
import subprocess
import tempfile
import itertools
# path to the stanford corenlp jar
STANFORD_CORENLP_3_4_1_JAR = 'stanford-corenlp-3.4.1.jar'
# punctuations to be removed from the sentences
PUNCTUATIONS = ["''", "'", "``", "`", "-LRB-", "-RRB-", "-LCB-", "-RCB-", \
".", "?", "!", ",", ":", "-", "--", "...", ";"]
class PTBTokenizer:
"""Python wrapper of Stanford PTBTokenizer"""
def tokenize(self, captions_for_image):
cmd = ['java', '-cp', STANFORD_CORENLP_3_4_1_JAR, \
'edu.stanford.nlp.process.PTBTokenizer', \
'-preserveLines', '-lowerCase']
# ======================================================
# prepare data for PTB Tokenizer
# ======================================================
final_tokenized_captions_for_image = {}
image_id = [k for k, v in captions_for_image.items() for _ in range(len(v))]
#print captions_for_image.items()
sentences = '\n'.join([c['caption'].replace('\n', ' ') for k, v in captions_for_image.items() for c in v])
# ======================================================
# save sentences to temporary file
# ======================================================
path_to_jar_dirname=os.path.dirname(os.path.abspath(__file__))
tmp_file = tempfile.NamedTemporaryFile(delete=False, dir=path_to_jar_dirname)
tmp_file.write(sentences.encode('utf-8'))
tmp_file.close()
# ======================================================
# tokenize sentence
# ======================================================
cmd.append(os.path.basename(tmp_file.name))
p_tokenizer = subprocess.Popen(cmd, cwd=path_to_jar_dirname, \
stdout=subprocess.PIPE)
token_lines = p_tokenizer.communicate(input=sentences.rstrip())[0]
lines = token_lines.split('\n')
# remove temp file
os.remove(tmp_file.name)
# ======================================================
# create dictionary for tokenized captions
# ======================================================
for k, line in zip(image_id, lines):
if not k in final_tokenized_captions_for_image:
final_tokenized_captions_for_image[k] = []
tokenized_caption = ' '.join([w for w in line.rstrip().split(' ') \
if w not in PUNCTUATIONS])
final_tokenized_captions_for_image[k].append(tokenized_caption)
return final_tokenized_captions_for_image
|
[
"amrita.saha87@gmail.com"
] |
amrita.saha87@gmail.com
|
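A minimal usage sketch for the ptbtokenizer.py snippet above, assuming the module is saved as ptbtokenizer.py, stanford-corenlp-3.4.1.jar sits in the same directory, and java is on the PATH; the image ids and captions are invented:

```python
from ptbtokenizer import PTBTokenizer  # assumption: the snippet above is saved as ptbtokenizer.py

# tokenize() expects {image_id: [{'caption': ...}, ...]}
captions_for_image = {
    1: [{'caption': "A dog runs across the field."}],
    2: [{'caption': "Two people ride bikes, side by side!"}],
}
tokenized = PTBTokenizer().tokenize(captions_for_image)
# e.g. {1: ['a dog runs across the field'], 2: ['two people ride bikes side by side']}
print(tokenized)
```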
4fb1758cee4308cc464a65c8c616aa9e471991af
|
2c542b4fb1bb3b313911524ac3ea03528150ca1e
|
/saisie.py
|
579e348749fbca3bdf6b9c818a40415f777ef8d5
|
[] |
no_license
|
tnemelck/crypto
|
d5c2a4926da0d9fb2b5944fc199000e7de406a9f
|
5a2f797d2b137c14b792730bce9c9980cf7ceda1
|
refs/heads/master
| 2021-05-13T13:27:45.118081
| 2018-01-08T17:40:03
| 2018-01-08T17:40:03
| 116,707,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,990
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 3 17:17:51 2018
@author: tnemelk
"""
import generateurDeNombrePremier as gnp
import os
def saisie_int(inf, sup):
s = input("Choisissez bien. \n")
try:
r = int(s)
assert (r >= inf) and (r <= sup)
except (ValueError, AssertionError) as e:
print("La saisie est incorrecte, vous êtes négligeant, ce n'est pas grave, recommencez.")
r = saisie_int(inf, sup)
return r
def saisie_Tbloc():
s = input("Choisissez judicieusement. \n")
try:
r = int(s)
assert (r == 256) or (r==512) or (r==1024)
except (ValueError, AssertionError) as e:
print("Vous n'avez que 3 choix possibles, appliquez vous s'il vous plaît.")
r = saisie_Tbloc()
return r
def saisie_oui_non():
dic = {"oui":1, "non":0}
s = input("""Veuillez marquer "oui" ou "non" en toute lettre. \n""")
try:
r = dic[s]
except KeyError:
print("""Mon programmmeur ne m'a pas voulu flexible, ainsi, je suis navrée d'avoir à vous demander
de recommencer.""")
r = saisie_oui_non()
return r
def affiche_cle_3f(tf):
[co, t1, t2, tb] = tf.cle_secrete.split(":")
print("""La clé originale est """, co, "\n"
"""Les deux tweaks sont :""", t1, "et", t2, "\n"
"""Les blocs mesurent """, tb * 8, "bits")
def saisie_adr():
adr = input("Veuillez rentrer l'adresse de votre fichier. \n")
try:
assert os.path.isfile(adr)
except AssertionError:
"""L'adresse saisie ne renvoie pas à un fichier, et ... je ne sais pas quoi faire,
vous pourriez recommencer ... s'il vous-plaît ?"""
adr = saisie_adr()
return adr
def saisie_nBits(nbit):
lim = (2**nbit)-1
txt = "Entrez un nombre de" + nbit + ", soit inférieur à," + lim + "\n"
s = input(txt)
try:
r = int(s)
assert (r <= lim) and (r >= 0)
except (ValueError, AssertionError) as e:
print(""" Et non, dommage, recommence maintenant.""")
r = saisie_nBits(nbit)
print("\n")
return r
def saisie_cle_tf():
print("Choisissez la taille des blocs.")
t_bloc = saisie_Tbloc()
print("Entrez la clé originale")
cr = saisie_nBits(t_bloc)
print("Entrez le premier tweak")
t1 = saisie_nBits(64)
print("Entrez le second tweak")
t2 = saisie_nBits(64)
cle = [cr, t1, t2, t_bloc]
cle = map(str, cle)
print("\n")
return ":".join(cle)
def saisie_adr_cle_3f():
adr = saisie_adr()
f = open(adr,"r")
k = f.read()
f.close()
try:
Ks = k.split(":")
assert (len(Ks) == 4) and (all([k.isdigit() for k in Ks])) and \
(int(Ks[0]) < 2 ** (8 * int(Ks[3]))) and (int(Ks[1]) < 2**64) and (int(Ks[2]) < 2**64)
except AssertionError:
print("""La clé stockée dans le fichier est invalide, essaye encore.""")
adr = saisie_adr_cle_3f()
print("\n")
return adr
def affiche_cle_pblc_cs(cs):
[p, a1, a2, X, Y, W] = cs.cle_public.split(":")
print("Le grand nombre premier p vaut", p, "\n"
"Le premier nombre générateur a1 est", a1, "\n"
"Le second nombre générateur a2 est", a2, "\n"
"L'entier X vaut", X, "\n"
"L'entier Y vaut", Y, "\n"
"L'entier W vaut", W, "\n")
def affiche_cle_prive_cs(cs):
[p, a1, a2, x1, x2, y1, y2, w] = cs.cle_prive.split(":")
print("Le grand nombre premier p vaut", p, "\n"
"Le premier nombre générateur a1 est", a1, "\n"
"Le second nombre générateur a2 est", a2, "\n"
"L'entier x1 vaut", x1, "\n"
"L'entier x2 vaut", x2, "\n"
"L'entier y1 vaut", y1, "\n"
"L'entier y2 vaut", y2, "\n"
"L'entier w vaut", w, "\n")
def saisie_nb_prm_sur():
snp = input("Veuillez entrer votre nombre premier sûr.\n")
try:
np = int(snp)
assert gnp.test_premier_sur(np)
except (ValueError, AssertionError) as e:
print("Mais ... ce n'est pas un nombre premier sûr ! Hop hop hop, on recommence !")
np = saisie_nb_prm_sur()
print("\n")
return np
def saisie_nb_gen(p):
txt = "Veuillez entrer un nombre générateur a1 de" + str(p) + "\n"
sg = input(txt)
try:
g = int(sg)
assert gnp.test_gen_prem_sur(g, p)
except (ValueError, AssertionError) as e:
print("Pff, c'est pas un nombre générateur ça ! Ça, c'est juste nul.")
g = saisie_nb_gen(p)
return g
def saisie_entier(e):
txt = "Entrez votre nombre entier positif " + str(e) + "\n"
s = input(txt)
try:
n = int(s)
assert n >= 0
except (ValueError, AssertionError) as e:
print("Ce n'est pas un nombre entier positif, ah ça non !")
n = saisie_entier(e)
return n
def saisie_entier_pos(e):
txt = "Entrez votre nombre entier positif " + str(e) + "\n"
s = input(txt)
try:
n = int(s)
assert n > 0
except (ValueError, AssertionError) as e:
print("Ce n'est pas un nombre entier positif, ah ça non !")
        n = saisie_entier_pos(e)
return n
def saisie_cle_pblc_cs():
p = saisie_nb_prm_sur()
    a1 = saisie_nb_gen(p) % p
    a2 = saisie_nb_gen(p) % p
X = saisie_entier("X") % p
Y = saisie_entier("Y") % p
W = saisie_entier("W") % p
result = [p, a1, a2, X, Y, W]
result = map(str, result)
result = ":".join(result)
return result
def saisie_cle_prive_cs():
p = saisie_nb_prm_sur()
    a1 = saisie_nb_gen(p) % p
    a2 = saisie_nb_gen(p) % p
x1 = saisie_entier("x1") % p
y1 = saisie_entier("y1") % p
x2 = saisie_entier("x2") % p
y2 = saisie_entier("y2") % p
w = saisie_entier("w") % p
result = [p, a1, a2, x1, x2, y1, y2, w]
result = map(str, result)
result = ":".join(result)
return result
def saisie_adr_cle_pblc_cs():
adr = saisie_adr()
f = open(adr,"r")
k = f.read()
f.close()
try:
Ks = k.split(":")
assert (len(Ks) == 6) and (all([k.isdigit() for k in Ks])) and \
            (gnp.test_premier_sur(int(Ks[0]))) and \
            gnp.test_gen_prem_sur(int(Ks[1]), int(Ks[0])) and gnp.test_gen_prem_sur(int(Ks[2]), int(Ks[0]))
except AssertionError:
print("""La clé stockée dans le fichier est invalide, essaye encore.""")
adr = saisie_adr_cle_pblc_cs()
print("\n")
return adr
def saisie_adr_cle_prive_cs():
adr = saisie_adr()
f = open(adr,"r")
k = f.read()
f.close()
try:
Ks = k.split(":")
assert (len(Ks) == 8) and (all([k.isdigit() for k in Ks])) and \
(gnp.test_premier_sur(int(Ks[0]))) and \
gnp.test_gen_prem_sur(int(Ks[1]), int(Ks[0])) and gnp.test_gen_prem_sur(int(Ks[2]), int(Ks[0]))
except AssertionError:
print("""La clé stockée dans le fichier est invalide, essaye encore.""")
        adr = saisie_adr_cle_prive_cs()
print("\n")
return adr
|
[
"tnemelkb@gmail.com"
] |
tnemelkb@gmail.com
|
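The saisie.py helpers above all retry invalid input by calling themselves recursively, which works but leans on Python's recursion limit if a user keeps typing garbage. As a design note only, the same contract can be written with a loop; the function name below is mine, not from the file:

```python
def saisie_int_iteratif(inf, sup):
    """Equivalent of saisie_int(), but iterative instead of recursive."""
    while True:
        s = input("Choisissez bien. \n")
        try:
            r = int(s)
            assert inf <= r <= sup
            return r
        except (ValueError, AssertionError):
            print("La saisie est incorrecte, vous êtes négligeant, ce n'est pas grave, recommencez.")
```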
3502fcc6b3e92025f05f524cabb5ac4cba7720bc
|
7d6f4321de756fc2b4daec3b05551c5b1e311ec4
|
/Web_Spider/spiders/Scanner.py
|
3d89674ba2f08682724fa05006441cf5834420cc
|
[] |
no_license
|
serdaraltin/Scrapy-Web-Spider
|
c84be73a80e1fba0c656ca865dd13ba2384c56d4
|
0af0331c5982094dde2ea56a66e3bba6aefed95b
|
refs/heads/master
| 2021-02-08T04:00:37.800524
| 2020-03-01T07:40:44
| 2020-03-01T07:40:44
| 244,106,633
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
import scrapy
class QuotesSpider(scrapy.Spider):
name = "lamerhaber"
start_urls = [
'https://lamerhaber.com/',
'https://lamerhaber.com/category/hack-haber/',
'https://lamerhaber.com/category/ozel-haberler/',
'https://lamerhaber.com/category/hack-gruplari/',
'https://lamerhaber.com/category/haberler/',
'https://lamerhaber.com/category/teknoloji/',
'https://lamerhaber.com/category/duyurular/'
]
def parse(self, response):
for icerik in response.css('header.post-header'):
yield {
'kategori': icerik.css('p.post-categories a::text').get(),
'baslik': icerik.css('h2 a::text').get(),
'tarih': icerik.css('p.post-meta a::text').get()
}
|
[
"noreply@github.com"
] |
serdaraltin.noreply@github.com
|
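The Scanner.py spider above is self-contained, so it can be run without a full Scrapy project, e.g. `scrapy runspider Scanner.py -o articles.json`. A programmatic equivalent is sketched below (the output file name is illustrative, and the FEEDS setting assumes Scrapy 2.1+):

```python
# Run QuotesSpider from Python instead of the scrapy CLI.
from scrapy.crawler import CrawlerProcess
from Scanner import QuotesSpider  # assumption: the snippet above is importable as Scanner

process = CrawlerProcess(settings={"FEEDS": {"articles.json": {"format": "json"}}})
process.crawl(QuotesSpider)
process.start()  # blocks until the crawl finishes
```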
25e73d0f46e4a57b1c947f110db94b2853e7fc10
|
79aa4b99a48bb16a907916ad63c902443420541a
|
/0056.py
|
680d3134344eab5388e4c0b5d48fd531d58e8b3f
|
[] |
no_license
|
mach8686devops/leetcode-100
|
62dec66c719d7cfa120ca9505701df49d8d5b982
|
f90526c9b073165b86b933cdf7d1dc496e68f2c6
|
refs/heads/main
| 2023-04-11T06:28:15.059587
| 2021-04-13T12:11:54
| 2021-04-13T12:11:54
| 329,346,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
# Merge intervals (LeetCode 56)
# Sorting-based approach
class Solution:
def merge(self, intervals):
if len(intervals) < 2:
return intervals
result = []
intervals.sort(key=lambda x: x[0])
for interval in intervals:
if len(result) == 0 or interval[0] > result[-1][-1]:
result.append(interval)
else:
result[-1][-1] = max(result[-1][-1], interval[1])
return result
|
[
"zhangjohn202@gmail.com"
] |
zhangjohn202@gmail.com
|
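A quick sanity check of the interval-merging Solution above, with a made-up input:

```python
# Overlapping intervals are folded together after sorting by start value.
print(Solution().merge([[1, 3], [2, 6], [8, 10], [15, 18]]))
# -> [[1, 6], [8, 10], [15, 18]]
```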
0b1e26ac589857328e504db76cfa69134a2cb7b6
|
181cf26a68637707a1b2aae0be250b606a92ef07
|
/venv/Scripts/pip-script.py
|
6f5372c0fac373569bf15cf02192a2bcb7d2bc6e
|
[] |
no_license
|
sunnypig2/SocialNetworkCode
|
bbc1b7fe731d19be88ef53008950785632b4b679
|
5c7b7d1b3caf5c1ae97c4309e30b701bbe224ea3
|
refs/heads/master
| 2020-05-16T16:56:59.397774
| 2019-06-03T09:37:40
| 2019-06-03T09:37:40
| 183,174,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
#!C:\Users\lenovo\Desktop\socialNetworkCode\venv\Scripts\python3.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.3','console_scripts','pip'
__requires__ = 'pip==9.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.3', 'console_scripts', 'pip')()
)
|
[
"email@example.com"
] |
email@example.com
|
f3260fb9bc4046cb9560d2d3f9ace38b07a48d39
|
ec47104831406b4fbba42ae09de842c41cdd7bad
|
/final_project.py
|
8bd75285e033b50d7b972082aac52ac6775229aa
|
[] |
no_license
|
kairzhan8/ml-project
|
c162cf25dcccfbcd3264576d76f88420ccf8aba8
|
ef27c2228d218b69173b0637f8f734db0c20b6d5
|
refs/heads/master
| 2021-08-30T22:45:02.723707
| 2017-12-19T17:40:19
| 2017-12-19T17:40:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,496
|
py
|
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score, ShuffleSplit
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
from show_confusion_matrix import show_confusion_matrix
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
import pandas as pd
from sklearn.naive_bayes import GaussianNB
from pandas.tools.plotting import scatter_matrix
import random
import statsmodels.api as sm
import time
from sklearn.preprocessing import normalize
from sklearn.svm import SVC
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import pylab as pl
from sklearn.neural_network import MLPClassifier
from threading import Thread
from sklearn.feature_selection import RFE
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
import itertools
filename = '/Users/Kairzhan/Desktop/ml_final_project/KidCreative.csv'
features=['Buy','Income','Is_Female','Is_Married','Has_College','Is_Professional','Is_Retired','Unemployed','Residence_Length','Dual_Income','Minors','Own','House','White','English','Prev_Child_Mag','Parent']
csv=pd.read_csv(filename,sep=',')
datasets=csv.as_matrix()
dataset=[]
target=[]
data=[]
for i in range(0,len(datasets)):
data.append([])
dataset.append([])
for j in range (len(datasets[i])):
if j==0:
continue
else:
dataset[i].append(datasets[i][j])
if j==1:
target.append(datasets[i][j])
else:
data[i].append(datasets[i][j])
dataset=np.asarray(dataset)
X=np.asarray(data)
Y=np.asarray(target)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=3)
X_train,X_val, y_train, y_val = train_test_split(X_train,y_train, test_size=0.25,random_state=3)
frame =pd.DataFrame(dataset)
frame.columns=features
cols_to_norm = ['Income','Residence_Length']
frame[cols_to_norm] = frame[cols_to_norm].apply(lambda x: (x - x.mean()) / (x.max() - x.min()))
columns=[(frame.Buy),(frame.Income),(frame.Is_Female),(frame.Is_Married),(frame.Has_College),(frame.Is_Professional),(frame.Is_Retired),(frame.Unemployed),(frame.Residence_Length),(frame.Dual_Income),(frame.Minors),(frame.Own),(frame.House),(frame.White),(frame.English),(frame.Prev_Child_Mag),(frame.Parent)]
forplot=[]
column=[]
row=[]
k=0
for i in range(0,len(columns)):
for j in range(i+1,len(columns)-1):
if columns[i].corr(columns[j])>0.6 or columns[i].corr(columns[j])<-0.6:
forplot.append(columns[i].corr(columns[j]))
column.append(features[i])
row.append(features[j])
k+=1
def get_correlations():
for i in range(0,len(columns)):
for j in range(i+1,len(columns)-1):
print ('corr btw',features[i],'and',features[j],columns[i].corr(columns[j]))
def draw_high_cor():
fig = plt.figure(figsize=(45, 15))
plots = len(forplot)
ax=[]
s=0
f=0
for i in range(0,plots):
ax.append(plt.subplot2grid((5,4), (s,f)))
f+=1
ax[i].scatter(frame[row[i]],frame[column[i]], s=10, c=[random.random(),random.random(),random.random()], marker="o")
ax[i].set_ylabel(column[i])
ax[i].set_xlabel(row[i])
if (i+1)%4==0:
s+=1
f=0
plt.show()
plt.close(fig)
def correlation_fig():
correlations = frame.corr()
sm.graphics.plot_corr(correlations, xnames=features,ynames=features)
plt.show()
def scatter_matrix_fig():
scatter_matrix(frame,alpha=0.5, figsize=(20, 20), diagonal='kde')
plt.show()
def hist_fig():
frame.hist()
plt.show()
#Bayes
nb=GaussianNB()
nb.fit(X_train,y_train)
nbpred=[]
#KNN
knn=KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train,y_train)
knnpred=[]
#DT
model = DecisionTreeClassifier(min_samples_split=5)
model.fit(X_train, y_train)
dtpred=[]
#LR
logit = LogisticRegression()
logit.fit(X_train,y_train)
logitpred=[]
#SVM
svc = SVC(kernel='rbf')
svc.fit(X_train,y_train)
svcpred=[]
#ANN
ann = MLPClassifier()
ann.fit(X_train,y_train)
annpred=[]
data_arr=list(X_val)
for i in range(0,len(data_arr)):
knnpred.append(knn.predict([data_arr[i]]))
dtpred.append(model.predict([data_arr[i]]))
nbpred.append(nb.predict([data_arr[i]]))
logitpred.append(logit.predict([data_arr[i]]))
svcpred.append(svc.predict([data_arr[i]]))
annpred.append(ann.predict([data_arr[i]]))
def general_accuracy():
print ("accuracy KNN Algorithm:",accuracy_score(y_val, knnpred))
print ("accuracy Data Tree:",accuracy_score(y_val, dtpred))
print ("accuracy Gaussian Normal:",accuracy_score(y_val,nbpred))
print ("accuracy Logistic Regression:",accuracy_score(y_val, logitpred))
print ("accuracy SVM :",accuracy_score(y_val, svcpred))
print ("accuracy ANN :",accuracy_score(y_val, annpred))
def get_conf(predicted):
tn, fp, fn, tp = confusion_matrix(y_val, predicted).ravel()
print ('True positives:',tp,'\nTrue negatives:',tn,'\nFalse negatives:',fn,'\nFalse positives',fp)
print(classification_report(np.asarray(y_val), np.asarray(predicted)))
print ('********************')
def model_implementation():
k_range=range(1,41)
k_scores=[]
p_name=['Value of K for KNN','Value of C in Logit','Value of Max iterations for Logit','Value of Max_depth for Decition Tree','Value of alpha for ANN','Value of C for SVM']
Max_range=pl.frange(0,200,5)
C_range=pl.frange(0.1,1,0.1)
n_folds=10
C_scores=[]
Max_scores=[]
scores_stds=[]
scores_std=[]
p_i=[]
p_j=[]
for k in k_range:
knn2 = KNeighborsClassifier(n_neighbors=k)
scores = cross_val_score(knn2, X_train, y_train, cv=10)
k_scores.append(scores.mean())
scores_std.append(scores.std()*2)
scores_stds.append(scores_std)
k_scores, scores_std = np.array(k_scores), np.array(scores_std)
p_i.append(k_scores)
p_j.append(k_range)
scores_std=[]
for c in C_range:
log = LogisticRegression(C=c)
scores = cross_val_score(log, X_train, y_train, cv=10)
C_scores.append(scores.mean())
scores_std.append(scores.std()*2)
scores_stds.append(scores_std)
C_scores, scores_std = np.array(C_scores), np.array(scores_std)
p_i.append(C_scores)
p_j.append(C_range)
scores_std=[]
for M in Max_range:
log = LogisticRegression(max_iter=M)
scores = cross_val_score(log, X_train, y_train, cv=10)
Max_scores.append(scores.mean())
scores_std.append(scores.std()*2)
scores_stds.append(scores_std)
Max_scores, scores_std = np.array(Max_scores), np.array(scores_std)
p_i.append(Max_scores)
p_j.append(Max_range)
#Tree
tree_scores=[]
tree_range=range(3,10)
scores_std=[]
for M in tree_range:
dt = DecisionTreeClassifier(max_depth=M)
scores = cross_val_score(dt, X_train, y_train, cv=10)
tree_scores.append(scores.mean())
scores_std.append(scores.std()*2)
scores_stds.append(scores_std)
tree_scores, scores_std = np.array(tree_scores), np.array(scores_std)
p_i.append(tree_scores)
p_j.append(tree_range)
#ANN
ann_scores=[]
ann_range=pl.frange(0.0001,1,0.01)
scores_std=[]
for M in ann_range:
Ann = MLPClassifier(alpha=M)
scores = cross_val_score(Ann, X_train, y_train, cv=10)
ann_scores.append(scores.mean())
scores_std.append(scores.std()*2)
scores_stds.append(scores_std)
ann_scores, scores_std = np.array(ann_scores), np.array(scores_std)
p_i.append(ann_scores)
p_j.append(ann_range)
#CVM
cvm_scores=[]
cvm_range=pl.frange(0.1,10,0.1)
scores_std=[]
for M in cvm_range:
Cvm = SVC(C=M)
scores = cross_val_score(Cvm, X_train, y_train, cv=10)
cvm_scores.append(scores.mean())
scores_std.append(scores.std()*2)
scores_stds.append(scores_std)
cvm_scores, scores_std = np.array(cvm_scores), np.array(scores_std)
p_i.append(cvm_scores)
p_j.append(cvm_range)
plt.figure(figsize=(45, 20))
ax=[]
s=0
f=0
for i in range(0,len(p_i)):
ax.append(plt.subplot2grid((5,4), (s,f)))
f+=1
ax[i].semilogx(p_j[i], p_i[i],color='red')
std_error = scores_stds[i] / np.sqrt(n_folds)
ax[i].semilogx(p_j[i], p_i[i] + std_error, 'b--')
ax[i].semilogx(p_j[i], p_i[i] - std_error, 'b--')
ax[i].set_ylabel("Cross-validated accuracy")
ax[i].set_xlabel(p_name[i])
ax[i].fill_between(p_j[i], p_i[i] + std_error, p_i[i] - std_error)
ax[i].axhline(np.max(p_i[i]), linestyle='--', alpha=0.2)
ax[i].set_xlim([p_j[i][0], p_j[i][-1]])
if (i+1)%4==0:
s+=1
f=0
plt.show()
def new_models():
global logit2
print ("**********************************************")
print ("Neighbors = 27 is for best model KNeighborsClassifier")
knn2= KNeighborsClassifier(n_neighbors=27)
knn2.fit(X_train,y_train)
knnpred2=[]
print ("C=0.2 is best model for Logistic Regression for ")
logit2 = LogisticRegression(C=0.2)
logit2.fit(X_train,y_train)
logitpred2=[]
#DT
print ("max_depth=4 is best model for DT ")
d_tree1 = DecisionTreeClassifier(max_depth=4)
d_tree1.fit(X_train,y_train)
dtreepred=[]
#SVM
print ("Best Feature Selection - SVM 1.5")
s_v_m1 = SVC(C=1.5)
s_v_m1.fit(X_train,y_train)
s_v_pred=[]
#ANN
print ("Best Feature Selection - ANN 0.071")
a_n_n1 = MLPClassifier(alpha=0.071)
a_n_n1.fit(X_train,y_train)
a_n_npred=[]
for i in range(0,len(X_val)):
knnpred2.append(knn2.predict([X_val[i]]))
logitpred2.append(logit2.predict([X_val[i]]))
dtreepred.append(d_tree1.predict([X_val[i]]))
s_v_pred.append(s_v_m1.predict([X_val[i]]))
a_n_npred.append(a_n_n1.predict([X_val[i]]))
print ("accuracy Of New KNN:",accuracy_score(y_val, knnpred2))
print ("accuracy Of New LogisticRegression:",accuracy_score(y_val, logitpred2))
print ("accuracy Of New Decision Tree:",accuracy_score(y_val, dtreepred))
print ("accuracy Of New SVM:",accuracy_score(y_val, s_v_pred))
print ("accuracy Of New ANN:",accuracy_score(y_val, a_n_npred))
print ("\n********************LOGISTIC*********************")
print ("New Model VS OLD Model For Logit")
print('Logit Variance OLD: %.2f' % logit.score(X_val, y_val))
print('Logit Variance NEW: %.2f' % logit2.score(X_val, y_val))
y_pred=logit.predict(X_val)
get_mse_rmse_model(y_pred,'OLD','LOGIT')
y_pred=logit2.predict(X_val)
get_mse_rmse_model(y_pred,'NEW','LOGIT')
print ("\n***************************KNN***********************")
print ("New Model VS OLD Model For Knn")
print('KNN Variance OLD: %.2f' % knn.score(X_val, y_val))
print('KNN Variance NEW: %.2f' % knn2.score(X_val, y_val))
y_pred=knn.predict(X_val)
get_mse_rmse_model(y_pred,'OLD','KNN')
y_pred=knn2.predict(X_val)
get_mse_rmse_model(y_pred,'NEW','KNN')
print ("*******************************************************")
print ("New Model VS OLD Model For DT")
print('DT Variance OLD: %.2f' % model.score(X_val, y_val))
print('DT Variance NEW: %.2f' % d_tree1.score(X_val, y_val))
y_pred=model.predict(X_val)
get_mse_rmse_model(y_pred,'OLD','DT')
    y_pred=d_tree1.predict(X_val)
get_mse_rmse_model(y_pred,'NEW','DT')
print ("*******************************************************")
print ("New Model VS OLD Model For SVM")
print('SVM Variance OLD: %.2f' % svc.score(X_val, y_val))
print('SVM Variance NEW: %.2f' % s_v_m1.score(X_val, y_val))
    y_pred=svc.predict(X_val)
    get_mse_rmse_model(y_pred,'OLD','SVM')
    y_pred=s_v_m1.predict(X_val)
    get_mse_rmse_model(y_pred,'NEW','SVM')
print ("*******************************************************")
print ("New Model VS OLD Model For ANN")
print('ANN Variance OLD: %.2f' % ann.score(X_val, y_val))
print('ANN Variance NEW: %.2f' % a_n_n1.score(X_val, y_val))
    y_pred=ann.predict(X_val)
    get_mse_rmse_model(y_pred,'OLD','ANN')
    y_pred=a_n_n1.predict(X_val)
    get_mse_rmse_model(y_pred,'NEW','ANN')
#TEST
print ("********************TEST best parameters**************************")
knnpred_test=[]
logitpred_test=[]
svm_test=[]
ann_test=[]
dt_test=[]
for i in range(0,len(X_test)):
knnpred_test.append(knn2.predict([X_test[i]]))
logitpred_test.append(logit2.predict([X_test[i]]))
svm_test.append(s_v_m1.predict([X_test[i]]))
ann_test.append(a_n_n1.predict([X_test[i]]))
dt_test.append(d_tree1.predict([X_test[i]]))
print ("accuracy knn TEST:",accuracy_score(y_test, knnpred_test))
print ("accuracy logistic TEST:",accuracy_score(y_test, logitpred_test))
print ("accuracy SVM TEST:",accuracy_score(y_test, svm_test))
print ("accuracy DT TEST:",accuracy_score(y_test, dt_test))
print ("accuracy ANN TEST:",accuracy_score(y_test, ann_test))
#Checking Accuracy and ERRORs
def Tree_class():
model_Tree = ExtraTreesClassifier()
model_Tree.fit(X_train,y_train)
print (model_Tree.feature_importances_)
def get_mse_rmse(y_val_new,y_pred):
print("MSE3: %.2f" % (metrics.mean_squared_error(y_val_new,y_pred)))
print("MAE3: %.2f" % (metrics.mean_absolute_error(y_val_new,y_pred)))
print("RMSE3: %.2f" % (np.sqrt(metrics.mean_squared_error(y_val_new,y_pred))))
def accuracy_metrics_for_selected_features():
global logit2
xx=frame[['Income','Residence_Length']]
yy=frame['Buy']
xx= list(np.array(xx))
yy=list(np.array(yy))
X_train_new, X_test_new, y_train_new, y_test_new = train_test_split(xx, yy, test_size=0.2, random_state=3)#20% Test, 80%Train
    X_train_new, X_val_new, y_train_new, y_val_new = train_test_split(X_train_new, y_train_new, test_size=0.25, random_state=3)  # 20% validation, 60% train of the selected-feature data
print ("Best Feature Selection - Logistic Regression")
logit3 = LogisticRegression(C=0.2)
logit3.fit(X_train_new,y_train_new)
logitpred3=[]
for i in range(0,len(X_val_new)):
logitpred3.append(logit3.predict([X_val_new[i]]))
print ("accuracy Of New LogisticRegression:",accuracy_score(y_val_new, logitpred3))
y_pred=logit3.predict(X_val_new)
get_mse_rmse(y_val_new,y_pred)
print ("\nBest Feature Selection - Decision Tree")
d_tree = DecisionTreeClassifier(max_depth=4)
d_tree.fit(X_train_new,y_train_new)
dtreepred=[]
for i in range(0,len(X_val_new)):
dtreepred.append(d_tree.predict([X_val_new[i]]))
print ("accuracy Of New Decision Tree:",accuracy_score(y_val_new, dtreepred))
y_pred=d_tree.predict(X_val_new)
get_mse_rmse(y_val_new,y_pred)
print ("\nBest Feature Selection - KNN ")
k_nn = KNeighborsClassifier(n_neighbors=27)
k_nn.fit(X_train_new,y_train_new)
k_nnpred=[]
for i in range(0,len(X_val_new)):
k_nnpred.append(k_nn.predict([X_val_new[i]]))
print ("accuracy Of New KNN:",accuracy_score(y_val_new, k_nnpred))
y_pred=k_nn.predict(X_val_new)
get_mse_rmse(y_val_new,y_pred)
print ("Best Feature Selection - SVM")
s_v_m = SVC(C=1.5)
s_v_m.fit(X_train_new,y_train_new)
s_v_pred=[]
for i in range(0,len(X_val_new)):
s_v_pred.append(s_v_m.predict([X_val_new[i]]))
print ("accuracy Of New SVM:",accuracy_score(y_val_new, s_v_pred))
y_pred=s_v_m.predict(X_val_new)
get_mse_rmse(y_val_new,y_pred)
print ("Best Feature Selection - ANN")
a_n_n = MLPClassifier(alpha=0.071)
a_n_n.fit(X_train_new,y_train_new)
a_n_npred=[]
for i in range(0,len(X_val_new)):
a_n_npred.append(a_n_n.predict([X_val_new[i]]))
print ("accuracy Of New ANN:",accuracy_score(y_val_new, a_n_npred))
y_pred=a_n_n.predict(X_val_new)
get_mse_rmse(y_val_new,y_pred)
def feature_importance_random_forest():
names = features[1:]
rf = RandomForestRegressor(n_estimators=20, max_depth=4)
scores = []
for i in range(X.shape[1]):
score = cross_val_score(rf, X[:, i:i+1], Y, scoring="r2",
cv=ShuffleSplit(len(X), 3, .3))
scores.append((round(np.mean(score), 3), names[i]))
print (sorted(scores, reverse=True))
inp=''
while inp!='x':
print ("1 - Correlations")
print ("2 - Visualize correlation figure ")
print ('3 - Visualize scatter_matrix figure')
print ('4 - Visualize only highly correlated features')
print ('5 - Visualize histogram figure')
print ('6 - Print General accuracy for all appropriate algorithms')
print ("7 - Show newly generated Model's performation and accuracy")
print ("8 - Get feature Importance using ExtraTreeClassifier")
print ("9 - New_Model from Selecting important features, and their accuracy,errors,etc")
print ("10 - Get feature Importance using RandomForestClassifier")
print ('x - To exit')
inp=input('Enter The command: ')
if inp=='1':
Thread(target=get_correlations).start()
elif inp=='2':
correlation_fig()
elif inp=='3':
scatter_matrix_fig()
elif inp=='4':
draw_high_cor()
elif inp=='5':
hist_fig()
elif inp=='6':
Thread(target=general_accuracy).start()
elif inp=='7':
Thread(target=new_models).start()
elif inp=='8':
Tree_class()
elif inp=='9':
accuracy_metrics_for_selected_features()
elif inp=='10':
        print ('You have to wait until it finishes... about 3-5 minutes...')
feature_importance_random_forest()
elif inp=='x':
print ('Exiting...')
else:
print ('No such command')
time.sleep(2)
|
[
"noreply@github.com"
] |
kairzhan8.noreply@github.com
|
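For reference, the two chained train_test_split calls near the top of final_project.py give a 60/20/20 train/validation/test split: 20% is held out for test, then 25% of the remaining 80% (another 20% of the full data) becomes validation. A tiny sketch of that arithmetic with a made-up row count:

```python
n = 1000                            # hypothetical number of rows
n_test = int(n * 0.2)               # 200 -> test
n_val = int((n - n_test) * 0.25)    # 200 -> validation
n_train = n - n_test - n_val        # 600 -> training
print(n_train, n_val, n_test)       # 600 200 200
```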
85c31fdea8edee5bcfbb326f2c65874eca7eb679
|
910786e6fcc1021a523b71071225256f07444c8a
|
/env/lib/python3.8/tokenize.py
|
009b6cf237995eb246a8696f83f80473e7b1cef2
|
[] |
no_license
|
Hugo-cruz/birdie-ps-webcrawler
|
c71c115b440252b53a9280b5b97c0205acb20bcc
|
a64399f0aa00e9391ab386dac44fb69beef235c3
|
refs/heads/main
| 2023-01-02T23:59:00.370237
| 2020-10-21T01:31:30
| 2020-10-21T01:31:30
| 304,638,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 48
|
py
|
/home/olodum/anaconda3/lib/python3.8/tokenize.py
|
[
"cruz@raccoon.ag"
] |
cruz@raccoon.ag
|
4425d2dc2406f7ea3aab5326e6b47153da11058a
|
2356ff9946a3122838b8c505c52eb922a614154e
|
/expenses_tracker/expenses_tracker/profiles/views.py
|
1d7e97ee4f9886cf0212fdb3d2a6ed97c7dcab05
|
[
"MIT"
] |
permissive
|
BoyanPeychinov/python_web_basics
|
8587a10c9e36fd0ebedd7bfefc636a73949410d4
|
2f892ac119f7fe3a5c03fc5e7b35670dc609a70f
|
refs/heads/main
| 2023-07-03T05:09:21.037914
| 2021-08-06T12:44:01
| 2021-08-06T12:44:01
| 374,112,261
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,439
|
py
|
from django.shortcuts import render, redirect
from expenses_tracker.core.profile_utills import get_profile
from expenses_tracker.expenses.models import Expense
from expenses_tracker.profiles.forms import CreateProfileForm, EditProfileForm
def profile_details(request):
profile = get_profile()
context = {
'profile': profile,
}
return render(request, 'profile.html', context)
def create_profile(request):
if request.method == "POST":
form = CreateProfileForm(request.POST)
if form.is_valid():
form.save()
return redirect('home')
else:
form = CreateProfileForm()
context = {
'form': form,
}
return render(request, 'home-no-profile.html', context)
def edit_profile(request):
profile = get_profile()
if request.method == "POST":
form = EditProfileForm(request.POST, instance=profile)
if form.is_valid():
form.save()
return redirect('home')
else:
form = EditProfileForm(instance=profile)
context = {
'form': form,
}
return render(request, 'profile-edit.html', context)
def delete_profile(request):
profile = get_profile()
if request.method == "POST":
profile.delete()
        Expense.objects.all().delete()
return redirect('home')
else:
context = {
}
return render(request, 'profile-delete.html', context)
|
[
"BoyanPeychinov@gmail.com"
] |
BoyanPeychinov@gmail.com
|
85bb2ea2d537ece6edc6ae1a6168ca2cbddc5380
|
d05b3cd50e1b0732eb487fba451bab6aaf713a02
|
/beginner/69c.py
|
95dc80b1a7f5b1bb9811e7feb7afaba2e19855a9
|
[] |
no_license
|
shiba24/atcoder-solutions
|
bf85319bee7ad742c58bc22e5ea3a1d5b7a2a733
|
1b30c017d6c8ac874a724039909cfec61f0bdc3b
|
refs/heads/master
| 2020-12-30T15:30:46.732647
| 2018-04-08T03:49:24
| 2018-04-08T03:49:24
| 91,154,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
import numpy as np
N = int(raw_input())
l = np.array(map(int, raw_input().split()))
mod_4 = len(np.where((l % 4 == 0))[0])
mod_2 = len(np.where(l % 4 == 2)[0])
if N <= 2 and mod_4 == 0 and mod_2 <= 1:
print 'No'
elif N <= mod_4 * 2 + 1:
print 'Yes'
elif N <= mod_4 * 2 + mod_2 and mod_2 >= 2:
print 'Yes'
elif N <= mod_4 * 2 + mod_2:
print 'Yes'
else:
print 'No'
|
[
"shiba.shintaro@gmail.com"
] |
shiba.shintaro@gmail.com
|
28b2e3ab1b084406129198cd83bff23f6079048a
|
9d3b3be57f15d5b3b45f81c5788922d80ed02477
|
/mr_base.py
|
3e4b4ef464342a64e1c01bdd480c9db710a67d20
|
[] |
no_license
|
wang-ye/python_mr
|
b465578537f0f9ef11a37226d52270eaeea6d7d9
|
9673cb462d1cd8aa174aa1e9fd478a08eacce746
|
refs/heads/master
| 2018-12-27T23:20:00.154300
| 2013-08-21T15:07:01
| 2013-08-21T15:07:01
| 12,272,821
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,419
|
py
|
#!/usr/bin/python
'''author: Ye Wang
The base file for MR.'''
import config
import os
import os.path
import sys
class MapReduceBase(object):
'''The Basic mapreduce framework. User supplies map_func and reduce_func.
'''
def __init__(self, num_processes, num_input_files, input_file_dir, output_file_dir):
self.num_processes = num_processes
self.num_input_files = num_input_files
self.input_file_dir = input_file_dir
self.output_file_dir = output_file_dir
# Constants definition.
self._HOME_DIR = config._BASE_DIR
self._TEMP_FILE_DIR = config._TMP_DIR + '/temp_dir/'
self._MAP_SUFFIX = '.map'
self._MERGE_SUFFIX = '.merge'
self._REDUCE_SUFFIX = '.red'
self._SEPERATOR = '#:-:#'
# Create the directories if they do not exist.
os.system('mkdir -p ' + self._TEMP_FILE_DIR)
os.system('mkdir -p ' + self.output_file_dir)
def get_input_path(self, input_dir, input_name):
input_base_name = int(input_name)
p1 = os.path.join(input_dir, str(input_base_name))
p2 = os.path.join(input_dir, '%05d' % input_base_name)
if p1 == p2:
return p2
elif not os.path.exists(p1) and os.path.exists(p2):
return p2
elif os.path.exists(p1) and not os.path.exists(p2):
return p1
else:
assert False, 'p1 = %s, p2 = %s.' % (p1, p2)
def pre_mr(self):
"""This will be run only once before any MR job starts."""
pass
def _start(self, partition_id):
pass
def map_on_file(self, partition_id):
'''Operate on file with the name partition_id.
If the partition_id = "001" and _TEMP_FILE_DIR = '~/home'
and there are 8 processes, then the intermediate output files
could be ~/home/1.0.map, ~/home/1.1.map,
..., ~/home/1.7.map
Two formats are accepted for the partition_id. It could be integers
or integer padded by 0 at the beginning. So both 3, and 00003 can be
accepted.
All the partition_id s is padded by 0 at the beginning to form
5 digits, i.e., 00001, 00007, 00128, 00013. We remove the left 0s in
the mapper.
'''
self._start(partition_id)
self.input_partition_id = partition_id
print 'In map, processing ', partition_id
sys.stdout.flush()
out_fp_list = []
for i in range(self.num_processes):
id_after_removal = str(partition_id).lstrip('0')
if not id_after_removal: # 000 case
id_after_removal = '0'
tmp_name = id_after_removal +'.'+ str(i) + self._MAP_SUFFIX
f = open(self._TEMP_FILE_DIR + os.sep + tmp_name, 'w')
out_fp_list.append(f)
input_file = open(self.input_file_dir + os.sep + partition_id)
# User code starts here.
for line in input_file:
line = line.strip() # Remove the '\n' in the line.
kv_list = self.map_func(line)
for key, val in kv_list:
output_str = '%s%s%s\n' % (key, self._SEPERATOR, val)
pkey = self.hash_func(key)
out_fp_list[pkey].write(output_str)
# User code ends here.
input_file.close()
for fp in out_fp_list:
fp.close()
        print 'Finshed mapping '.replace('Finshed', 'Finished'), partition_id if False else 'Finished mapping ', partition_id
sys.stdout.flush()
def hash_func(self, key):
        '''Return an integer smaller than self.num_processes.'''
return hash(key)%self.num_processes
def sort_merge_file(self, merge_file):
# Sort the data in the merge_file. Maybe out-of-core.
os.system('LC_ALL=C sort ' + ' --output=' + merge_file + '.tmp' + ' ' + merge_file)
os.system('mv ' + merge_file + '.tmp ' + merge_file)
def shuffle_on_file(self, thread_id):
'''Merge the intermediate files having the same id, and sort the data.'''
print 'In merge, merging ', thread_id
merge_file = self._TEMP_FILE_DIR + os.sep + str(thread_id) + self._MERGE_SUFFIX
if os.path.exists(merge_file): # Remove the existing files.
os.remove(merge_file)
os.system('touch ' + merge_file)
        # Concatenate all files ending with .{thread_id}.map.
for i in range(self.num_input_files):
map_file_name = str(i) + '.' + str(thread_id) + self._MAP_SUFFIX
map_file_path = self._TEMP_FILE_DIR + os.sep + map_file_name
os.system('cat ' + map_file_path + ' >>' + merge_file)
# Sometimes, we do not need to sort. This decision is left to the actual
# implementation.
self.sort_merge_file(merge_file)
        print 'Finished merging ', thread_id
sys.stdout.flush()
def reduce_on_file(self, thread_id):
'''Reduce func.'''
print 'In Reduce ', thread_id
sys.stdout.flush()
reduce_file = self.output_file_dir + os.sep + str(thread_id) + self._REDUCE_SUFFIX
out_f = open(reduce_file, 'w')
merge_file = self._TEMP_FILE_DIR + os.sep + str(thread_id) + self._MERGE_SUFFIX
input_f = open(merge_file)
# User code. The file contains reduce tuples from multiple keys.
# The key/value pairs are sorted.
self.reduce_func(input_f, out_f)
# User codes end here.
print 'Finished Reduce ' + str(thread_id)
sys.stdout.flush()
input_f.close()
out_f.close()
def map_func(self, line):
'''Process the line and return a list of (key, val) pairs.
It must be implemented.'''
assert False
def reduce_func(self, input_f, output_f):
'''Read the input, and write the output_f or somewhere else.
It must be implemented.'''
assert False
if __name__ == '__main__':
assert (sys.argv[1] == 'True' or sys.argv[1] == 'False')
run_mr_mode = sys.argv[1]
pass
|
[
"wangye880191@gmail.com"
] |
wangye880191@gmail.com
|
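A minimal word-count subclass sketch against the MapReduceBase class above, assuming mr_base.py and its config module are importable; the class name and output format are mine:

```python
from mr_base import MapReduceBase

class WordCountMR(MapReduceBase):
    def map_func(self, line):
        # Emit (word, 1) for every whitespace-separated token.
        return [(word, 1) for word in line.split()]

    def reduce_func(self, input_f, output_f):
        # input_f holds sorted "key#:-:#value" lines, so equal keys are adjacent.
        counts = {}
        for row in input_f:
            key, _, val = row.rstrip('\n').partition(self._SEPERATOR)
            counts[key] = counts.get(key, 0) + int(val)
        for key in sorted(counts):
            output_f.write('%s\t%d\n' % (key, counts[key]))
```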
58871d6a211a6c7a2638e33078d578c29251118d
|
e389a8b4d4d7a21b3049f191e9600666e69d51ec
|
/_Archived/DT_GUI/NeuroportDBS-master/NeuroportDBS-master/PlotDBSTrack/brpylib.py
|
2400bbd5c6df419a2eddbe0c60da5df1daaa669a
|
[] |
no_license
|
Doug1983/MRI_GUI
|
01f8a593ab135b79e39f0d9ac142a7137c7b5fa8
|
35d9409ecf6409caa33c0ff4a6a6a37eb1ad7f73
|
refs/heads/master
| 2020-04-07T09:56:48.612452
| 2019-02-27T18:47:16
| 2019-02-27T18:47:16
| 158,270,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69,243
|
py
|
# -*- coding: utf-8 -*-
"""
Collection of classes used for reading headers and data from Blackrock files
current version: 1.3.2 --- 08/12/2016
@author: Mitch Frankel - Blackrock Microsystems
Version History:
v1.0.0 - 07/05/2016 - initial release - requires brMiscFxns v1.0.0
v1.1.0 - 07/08/2016 - inclusion of NsxFile.savesubsetnsx() for saving subset of Nsx data to disk
v1.1.1 - 07/09/2016 - update to NsxFile.savesubsetnsx() for option (not)overwriting subset files if already exist
bug fixes in NsxFile class as reported from beta user
v1.2.0 - 07/12/2016 - bug fixes in NsxFile.savesubsetnsx()
added version control and checking for brMiscFxns
requires brMiscFxns v1.1.0
v1.3.0 - 07/22/2016 - added 'samp_per_s' to NsxFile.getdata() output
added close() method to NsxFile and NevFile objects
NsxFile.getdata() now pre-allocates output['data'] as zeros - speed and safety
v1.3.1 - 08/02/2016 - bug fixes to NsxFile.getdata() for usability with Python 2.7 as reported from beta user
patch for use with multiple NSP sync (overwriting of initial null data from initial data packet)
__future__ import for use with Python 2.7 (division)
minor modifications to allow use of Python 2.6+
v1.3.2 - 08/12/2016 - bug fixes to NsxFile.getdata()
"""
from __future__ import division # for those using Python 2.6+
import numpy as np
from collections import namedtuple
from datetime import datetime
from math import ceil
from os import path as ospath
from struct import calcsize, pack, unpack, unpack_from
from brMiscFxns import openfilecheck, brmiscfxns_ver
# Version control set/check
brpylib_ver = "1.3.2"
brmiscfxns_ver_req = "1.2.0"
if brmiscfxns_ver.split('.') < brmiscfxns_ver_req.split('.'):
raise Exception("brpylib requires brMiscFxns " + brmiscfxns_ver_req + " or higher, please use latest version")
# Patch for use with Python 2.6+
try: input = raw_input
except NameError: pass
# Define global variables to remove magic numbers
# <editor-fold desc="Globals">
WARNING_SLEEP_TIME = 5
DATA_PAGING_SIZE = 1024**3
DATA_FILE_SIZE_MIN = 1024**2 * 10
STRING_TERMINUS = '\x00'
UNDEFINED = 0
ELEC_ID_DEF = 'all'
START_TIME_DEF = 0
DATA_TIME_DEF = 'all'
DOWNSAMPLE_DEF = 1
START_OFFSET_MIN = 0
STOP_OFFSET_MIN = 0
UV_PER_BIT_21 = 0.25
WAVEFORM_SAMPLES_21 = 48
NSX_BASIC_HEADER_BYTES_22 = 314
NSX_EXT_HEADER_BYTES_22 = 66
DATA_BYTE_SIZE = 2
TIMESTAMP_NULL_21 = 0
NO_FILTER = 0
BUTTER_FILTER = 1
SERIAL_MODE = 0
RB2D_MARKER = 1
RB2D_BLOB = 2
RB3D_MARKER = 3
BOUNDARY_2D = 4
MARKER_SIZE = 5
DIGITAL_PACKET_ID = 0
NEURAL_PACKET_ID_MIN = 1
NEURAL_PACKET_ID_MAX = 2048
COMMENT_PACKET_ID = 65535
VIDEO_SYNC_PACKET_ID = 65534
TRACKING_PACKET_ID = 65533
BUTTON_PACKET_ID = 65532
CONFIGURATION_PACKET_ID = 65531
PARALLEL_REASON = 1
PERIODIC_REASON = 64
SERIAL_REASON = 129
LOWER_BYTE_MASK = 255
FIRST_BIT_MASK = 1
SECOND_BIT_MASK = 2
CLASSIFIER_MIN = 1
CLASSIFIER_MAX = 16
CLASSIFIER_NOISE = 255
CHARSET_ANSI = 0
CHARSET_UTF = 1
CHARSET_ROI = 255
COMM_RGBA = 0
COMM_TIME = 1
BUTTON_PRESS = 1
BUTTON_RESET = 2
CHG_NORMAL = 0
CHG_CRITICAL = 1
ENTER_EVENT = 1
EXIT_EVENT = 2
# </editor-fold>
# Define a named tuple that has information about header/packet fields
FieldDef = namedtuple('FieldDef', ['name', 'formatStr', 'formatFnc'])
# <editor-fold desc="Header processing functions">
def processheaders(curr_file, packet_fields):
"""
:param curr_file: {file} the current BR datafile to be processed
:param packet_fields : {named tuple} the specific binary fields for the given header
:return: a fully unpacked and formatted tuple set of header information
Read a packet from a binary data file and return a list of fields
The amount and format of data read will be specified by the
packet_fields container
"""
# This is a lot in one line. First I pull out all the format strings from
# the basic_header_fields named tuple, then concatenate them into a string
# with '<' at the front (for little endian format)
packet_format_str = '<' + ''.join([fmt for name, fmt, fun in packet_fields])
# Calculate how many bytes to read based on the format strings of the header fields
bytes_in_packet = calcsize(packet_format_str)
packet_binary = curr_file.read(bytes_in_packet)
# unpack the binary data from the header based on the format strings of each field.
# This returns a list of data, but it's not always correctly formatted (eg, FileSpec
# is read as ints 2 and 3 but I want it as '2.3'
packet_unpacked = unpack(packet_format_str, packet_binary)
# Create a iterator from the data list. This allows a formatting function
# to use more than one item from the list if needed, and the next formatting
# function can pick up on the correct item in the list
data_iter = iter(packet_unpacked)
# create an empty dictionary from the name field of the packet_fields.
# The loop below will fill in the values with formatted data by calling
# each field's formatting function
packet_formatted = dict.fromkeys([name for name, fmt, fun in packet_fields])
for name, fmt, fun in packet_fields:
packet_formatted[name] = fun(data_iter)
return packet_formatted
def format_filespec(header_list):
return str(next(header_list)) + '.' + str(next(header_list)) # eg 2.3
def format_timeorigin(header_list):
year = next(header_list)
month = next(header_list)
_ = next(header_list)
day = next(header_list)
hour = next(header_list)
minute = next(header_list)
second = next(header_list)
millisecond = next(header_list)
return datetime(year, month, day, hour, minute, second, millisecond * 1000)
def format_stripstring(header_list):
string = bytes.decode(next(header_list), 'latin-1')
return string.split(STRING_TERMINUS, 1)[0]
def format_none(header_list):
return next(header_list)
def format_freq(header_list):
return str(float(next(header_list)) / 1000) + ' Hz'
def format_filter(header_list):
filter_type = next(header_list)
if filter_type == NO_FILTER: return "none"
elif filter_type == BUTTER_FILTER: return "butterworth"
def format_charstring(header_list):
return int(next(header_list))
def format_digconfig(header_list):
config = next(header_list) & FIRST_BIT_MASK
if config: return 'active'
else: return 'ignored'
def format_anaconfig(header_list):
config = next(header_list)
if config & FIRST_BIT_MASK: return 'low_to_high'
if config & SECOND_BIT_MASK: return 'high_to_low'
else: return 'none'
def format_digmode(header_list):
dig_mode = next(header_list)
if dig_mode == SERIAL_MODE: return 'serial'
else: return 'parallel'
def format_trackobjtype(header_list):
trackobj_type = next(header_list)
if trackobj_type == UNDEFINED: return 'undefined'
elif trackobj_type == RB2D_MARKER: return '2D RB markers'
elif trackobj_type == RB2D_BLOB: return '2D RB blob'
elif trackobj_type == RB3D_MARKER: return '3D RB markers'
elif trackobj_type == BOUNDARY_2D: return '2D boundary'
elif trackobj_type == MARKER_SIZE: return 'marker size'
else: return 'error'
def getdigfactor(ext_headers, idx):
max_analog = ext_headers[idx]['MaxAnalogValue']
min_analog = ext_headers[idx]['MinAnalogValue']
max_digital = ext_headers[idx]['MaxDigitalValue']
min_digital = ext_headers[idx]['MinDigitalValue']
return float(max_analog - min_analog) / float(max_digital - min_digital)
# </editor-fold>
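# Illustrative only (not part of the original brpylib): processheaders() walks a FieldDef
# list, reads calcsize() bytes from the file, unpacks them, and formats each field, e.g.
#   demo_fields = [FieldDef('Magic', '4s', format_stripstring),
#                  FieldDef('Version', 'H', format_none)]
#   processheaders(io.BytesIO(pack('<4sH', b'DEMO', 7)), demo_fields)
#   -> {'Magic': 'DEMO', 'Version': 7}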
# <editor-fold desc="Header dictionaries">
nev_header_dict = {
'basic': [FieldDef('FileTypeID', '8s', format_stripstring), # 8 bytes - 8 char array
FieldDef('FileSpec', '2B', format_filespec), # 2 bytes - 2 unsigned char
FieldDef('AddFlags', 'H', format_none), # 2 bytes - uint16
FieldDef('BytesInHeader', 'I', format_none), # 4 bytes - uint32
FieldDef('BytesInDataPackets', 'I', format_none), # 4 bytes - uint32
FieldDef('TimeStampResolution', 'I', format_none), # 4 bytes - uint32
FieldDef('SampleTimeResolution', 'I', format_none), # 4 bytes - uint32
FieldDef('TimeOrigin', '8H', format_timeorigin), # 16 bytes - 8 x uint16
FieldDef('CreatingApplication', '32s', format_stripstring), # 32 bytes - 32 char array
FieldDef('Comment', '256s', format_stripstring), # 256 bytes - 256 char array
FieldDef('NumExtendedHeaders', 'I', format_none)], # 4 bytes - uint32
'ARRAYNME': FieldDef('ArrayName', '24s', format_stripstring), # 24 bytes - 24 char array
'ECOMMENT': FieldDef('ExtraComment', '24s', format_stripstring), # 24 bytes - 24 char array
'CCOMMENT': FieldDef('ContComment', '24s', format_stripstring), # 24 bytes - 24 char array
'MAPFILE': FieldDef('MapFile', '24s', format_stripstring), # 24 bytes - 24 char array
'NEUEVWAV': [FieldDef('ElectrodeID', 'H', format_none), # 2 bytes - uint16
FieldDef('PhysicalConnector', 'B', format_charstring), # 1 byte - 1 unsigned char
FieldDef('ConnectorPin', 'B', format_charstring), # 1 byte - 1 unsigned char
FieldDef('DigitizationFactor', 'H', format_none), # 2 bytes - uint16
FieldDef('EnergyThreshold', 'H', format_none), # 2 bytes - uint16
FieldDef('HighThreshold', 'h', format_none), # 2 bytes - int16
FieldDef('LowThreshold', 'h', format_none), # 2 bytes - int16
FieldDef('NumSortedUnits', 'B', format_charstring), # 1 byte - 1 unsigned char
FieldDef('BytesPerWaveform', 'B', format_charstring), # 1 byte - 1 unsigned char
FieldDef('SpikeWidthSamples', 'H', format_none), # 2 bytes - uint16
FieldDef('EmptyBytes', '8s', format_none)], # 8 bytes - empty
'NEUEVLBL': [FieldDef('ElectrodeID', 'H', format_none), # 2 bytes - uint16
FieldDef('Label', '16s', format_stripstring), # 16 bytes - 16 char array
FieldDef('EmptyBytes', '6s', format_none)], # 6 bytes - empty
'NEUEVFLT': [FieldDef('ElectrodeID', 'H', format_none), # 2 bytes - uint16
FieldDef('HighFreqCorner', 'I', format_freq), # 4 bytes - uint32
FieldDef('HighFreqOrder', 'I', format_none), # 4 bytes - uint32
FieldDef('HighFreqType', 'H', format_filter), # 2 bytes - uint16
FieldDef('LowFreqCorner', 'I', format_freq), # 4 bytes - uint32
FieldDef('LowFreqOrder', 'I', format_none), # 4 bytes - uint32
FieldDef('LowFreqType', 'H', format_filter), # 2 bytes - uint16
FieldDef('EmptyBytes', '2s', format_none)], # 2 bytes - empty
'DIGLABEL': [FieldDef('Label', '16s', format_stripstring), # 16 bytes - 16 char array
FieldDef('Mode', '?', format_digmode), # 1 byte - boolean
FieldDef('EmptyBytes', '7s', format_none)], # 7 bytes - empty
'NSASEXEV': [FieldDef('Frequency', 'H', format_none), # 2 bytes - uint16
FieldDef('DigitalInputConfig', 'B', format_digconfig), # 1 byte - 1 unsigned char
FieldDef('AnalogCh1Config', 'B', format_anaconfig), # 1 byte - 1 unsigned char
FieldDef('AnalogCh1DetectVal', 'h', format_none), # 2 bytes - int16
FieldDef('AnalogCh2Config', 'B', format_anaconfig), # 1 byte - 1 unsigned char
FieldDef('AnalogCh2DetectVal', 'h', format_none), # 2 bytes - int16
FieldDef('AnalogCh3Config', 'B', format_anaconfig), # 1 byte - 1 unsigned char
FieldDef('AnalogCh3DetectVal', 'h', format_none), # 2 bytes - int16
FieldDef('AnalogCh4Config', 'B', format_anaconfig), # 1 byte - 1 unsigned char
FieldDef('AnalogCh4DetectVal', 'h', format_none), # 2 bytes - int16
FieldDef('AnalogCh5Config', 'B', format_anaconfig), # 1 byte - 1 unsigned char
FieldDef('AnalogCh5DetectVal', 'h', format_none), # 2 bytes - int16
FieldDef('EmptyBytes', '6s', format_none)], # 2 bytes - empty
'VIDEOSYN': [FieldDef('VideoSourceID', 'H', format_none), # 2 bytes - uint16
FieldDef('VideoSource', '16s', format_stripstring), # 16 bytes - 16 char array
FieldDef('FrameRate', 'f', format_none), # 4 bytes - single float
FieldDef('EmptyBytes', '2s', format_none)], # 2 bytes - empty
'TRACKOBJ': [FieldDef('TrackableType', 'H', format_trackobjtype), # 2 bytes - uint16
FieldDef('TrackableID', 'H', format_none), # 2 bytes - uint16
FieldDef('PointCount', 'H', format_none), # 2 bytes - uint16
FieldDef('VideoSource', '16s', format_stripstring), # 16 bytes - 16 char array
FieldDef('EmptyBytes', '2s', format_none)] # 2 bytes - empty
}
nsx_header_dict = {
'basic_21': [FieldDef('Label', '16s', format_stripstring), # 16 bytes - 16 char array
FieldDef('Period', 'I', format_none), # 4 bytes - uint32
FieldDef('ChannelCount', 'I', format_none)], # 4 bytes - uint32
'basic': [FieldDef('FileSpec', '2B', format_filespec), # 2 bytes - 2 unsigned char
FieldDef('BytesInHeader', 'I', format_none), # 4 bytes - uint32
FieldDef('Label', '16s', format_stripstring), # 16 bytes - 16 char array
FieldDef('Comment', '256s', format_stripstring), # 256 bytes - 256 char array
FieldDef('Period', 'I', format_none), # 4 bytes - uint32
FieldDef('TimeStampResolution', 'I', format_none), # 4 bytes - uint32
FieldDef('TimeOrigin', '8H', format_timeorigin), # 16 bytes - 8 uint16
FieldDef('ChannelCount', 'I', format_none)], # 4 bytes - uint32
'extended': [FieldDef('Type', '2s', format_stripstring), # 2 bytes - 2 char array
FieldDef('ElectrodeID', 'H', format_none), # 2 bytes - uint16
FieldDef('ElectrodeLabel', '16s', format_stripstring), # 16 bytes - 16 char array
FieldDef('PhysicalConnector', 'B', format_none), # 1 byte - uint8
FieldDef('ConnectorPin', 'B', format_none), # 1 byte - uint8
FieldDef('MinDigitalValue', 'h', format_none), # 2 bytes - int16
FieldDef('MaxDigitalValue', 'h', format_none), # 2 bytes - int16
FieldDef('MinAnalogValue', 'h', format_none), # 2 bytes - int16
FieldDef('MaxAnalogValue', 'h', format_none), # 2 bytes - int16
FieldDef('Units', '16s', format_stripstring), # 16 bytes - 16 char array
FieldDef('HighFreqCorner', 'I', format_freq), # 4 bytes - uint32
FieldDef('HighFreqOrder', 'I', format_none), # 4 bytes - uint32
FieldDef('HighFreqType', 'H', format_filter), # 2 bytes - uint16
FieldDef('LowFreqCorner', 'I', format_freq), # 4 bytes - uint32
FieldDef('LowFreqOrder', 'I', format_none), # 4 bytes - uint32
FieldDef('LowFreqType', 'H', format_filter)], # 2 bytes - uint16
'data': [FieldDef('Header', 'B', format_none), # 1 byte - uint8
FieldDef('Timestamp', 'I', format_none), # 4 bytes - uint32
FieldDef('NumDataPoints', 'I', format_none)] # 4 bytes - uint32]
}
# </editor-fold>
# <editor-fold desc="Safety check functions">
def check_elecid(elec_ids):
if type(elec_ids) is str and elec_ids != ELEC_ID_DEF:
print("\n*** WARNING: Electrode IDs must be 'all', a single integer, or a list of integers.")
print(" Setting elec_ids to 'all'")
elec_ids = ELEC_ID_DEF
if elec_ids != ELEC_ID_DEF and type(elec_ids) is not list:
if type(elec_ids) == range: elec_ids = list(elec_ids)
elif type(elec_ids) == int: elec_ids = [elec_ids]
return elec_ids
def check_starttime(start_time_s):
if not isinstance(start_time_s, (int, float)) or \
(isinstance(start_time_s, (int, float)) and start_time_s < START_TIME_DEF):
print("\n*** WARNING: Start time is not valid, setting start_time_s to 0")
start_time_s = START_TIME_DEF
return start_time_s
def check_datatime(data_time_s):
if (type(data_time_s) is str and data_time_s != DATA_TIME_DEF) or \
(isinstance(data_time_s, (int, float)) and data_time_s < 0):
print("\n*** WARNING: Data time is not valid, setting data_time_s to 'all'")
data_time_s = DATA_TIME_DEF
return data_time_s
def check_downsample(downsample):
if not isinstance(downsample, int) or downsample < DOWNSAMPLE_DEF:
print("\n*** WARNING: Downsample must be an integer value greater than 0. "
" Setting downsample to 1 (no downsampling)")
downsample = DOWNSAMPLE_DEF
return downsample
def check_dataelecid(elec_ids, all_elec_ids):
unique_elec_ids = set(elec_ids)
all_elec_ids = set(all_elec_ids)
# if some electrodes asked for don't exist, reset list with those that do, or throw error and return
if not unique_elec_ids.issubset(all_elec_ids):
if not unique_elec_ids & all_elec_ids:
print('\nNone of the elec_ids passed exist in the data, returning None')
return None
else:
print("\n*** WARNING: Channels " + str(sorted(list(unique_elec_ids - all_elec_ids))) +
" do not exist in the data")
unique_elec_ids = unique_elec_ids & all_elec_ids
return sorted(list(unique_elec_ids))
def check_filesize(file_size):
if file_size < DATA_FILE_SIZE_MIN:
print('\n file_size must be larger than 10 Mb, setting file_size=10 Mb')
return DATA_FILE_SIZE_MIN
else:
return int(file_size)
# </editor-fold>
class NevFile:
"""
attributes and methods for all BR event data files. Initialization opens the file and extracts the
basic header information.
"""
def __init__(self, datafile=''):
self.datafile = datafile
self.basic_header = {}
self.extended_headers = []
# Run openfilecheck and open the file passed or allow user to browse to one
self.datafile = openfilecheck('rb', file_name=self.datafile, file_ext='.nev', file_type='Blackrock NEV Files')
# extract basic header information
self.basic_header = processheaders(self.datafile, nev_header_dict['basic'])
# Extract extended headers
for i in range(self.basic_header['NumExtendedHeaders']):
self.extended_headers.append({})
header_string = bytes.decode(unpack('<8s', self.datafile.read(8))[0], 'latin-1')
self.extended_headers[i]['PacketID'] = header_string.split(STRING_TERMINUS, 1)[0]
self.extended_headers[i].update(
processheaders(self.datafile, nev_header_dict[self.extended_headers[i]['PacketID']]))
# Must set this for file spec 2.1 and 2.2
if header_string == 'NEUEVWAV' and float(self.basic_header['FileSpec']) < 2.3:
self.extended_headers[i]['SpikeWidthSamples'] = WAVEFORM_SAMPLES_21
def getdata(self, elec_ids='all', get_waveforms=True):
"""
        This function is used to return a set of data from the NEV datafile (a usage sketch follows this class).
        :param elec_ids:      [optional] {list} User selection of elec_ids to extract specific spike waveforms (e.g., [13])
        :param get_waveforms: [optional] {bool} If False, skip reading the spike waveform samples and return only
                                                timestamps and classifications
:return: output: {Dictionary} with one or more of the following dictionaries (all include TimeStamps)
dig_events: Reason, Data, [for file spec 2.2 and below, AnalogData and AnalogDataUnits]
spike_events: Units='nV', ChannelID, NEUEVWAV_HeaderIndices, Classification, Waveforms
comments: CharSet, Flag, Data, Comment
video_sync_events: VideoFileNum, VideoFrameNum, VideoElapsedTime_ms, VideoSourceID
tracking_events: ParentID, NodeID, NodeCount, PointCount, TrackingPoints
button_trigger_events: TriggerType
configuration_events: ConfigChangeType, ConfigChanged
Note: For digital and neural data - TimeStamps, Classification, and Data can be lists of lists when more
than one digital type or spike event exists for a channel
"""
# Initialize output dictionary and reset position in file (if read before, may not be here anymore)
output = dict()
self.datafile.seek(self.basic_header['BytesInHeader'], 0)
# Safety checks
elec_ids = check_elecid(elec_ids)
# Must go through each data packet and process separately until end of file
filesize = ospath.getsize(self.datafile.name)
while self.datafile.tell() != filesize:
time_stamp = unpack('<I', self.datafile.read(4))[0]
packet_id = unpack('<H', self.datafile.read(2))[0]
# skip unwanted neural data packets if only asking for certain channels
if not (elec_ids == 'all' or ( (packet_id in elec_ids) and
NEURAL_PACKET_ID_MIN <= packet_id <= NEURAL_PACKET_ID_MAX )):
self.datafile.seek(self.basic_header['BytesInDataPackets'] - 6, 1)
continue
# For digital event data, read reason, skip one byte (reserved), read digital value,
# and skip X bytes (reserved)
if packet_id == DIGITAL_PACKET_ID:
# See if the dictionary exists in output
if 'dig_events' not in output:
output['dig_events'] = {'Reason': [], 'TimeStamps': [], 'Data': []}
reason = unpack('B', self.datafile.read(1))[0]
if reason == PARALLEL_REASON: reason = 'parallel'
elif reason == PERIODIC_REASON: reason = 'periodic'
elif reason == SERIAL_REASON: reason = 'serial'
else: reason = 'unknown'
self.datafile.seek(1, 1)
# Check if this type of data already exists, if not, create an empty list, and then append data
if reason in output['dig_events']['Reason']:
idx = output['dig_events']['Reason'].index(reason)
else:
idx = -1
output['dig_events']['Reason'].append(reason)
output['dig_events']['TimeStamps'].append([])
output['dig_events']['Data'].append([])
output['dig_events']['TimeStamps'][idx].append(time_stamp)
output['dig_events']['Data'][idx].append(unpack('<H', self.datafile.read(2))[0])
# For serial data, strip off upper byte
if reason == 'serial':
output['dig_events']['Data'][idx][-1] &= LOWER_BYTE_MASK
# For File Spec < 2.3, also capture analog Data, otherwise skip remaining packet bytes
if float(self.basic_header['FileSpec']) < 2.3:
                    if 'AnalogData' not in output['dig_events']:
                        output['dig_events']['AnalogDataUnits'] = 'mv'
                        output['dig_events']['AnalogData'] = []
                    # append one 5-value analog reading list per digital packet
                    output['dig_events']['AnalogData'].append([])
                    for j in range(5):
                        output['dig_events']['AnalogData'][-1].append(unpack('<h', self.datafile.read(2))[0])
else:
self.datafile.seek(self.basic_header['BytesInDataPackets'] - 10, 1)
# For neural waveforms, read classifier, skip one byte (reserved), and read waveform data
elif NEURAL_PACKET_ID_MIN <= packet_id <= NEURAL_PACKET_ID_MAX:
# See if the dictionary exists in output, if not, create it
if 'spike_events' not in output:
output['spike_events'] = {'Units': 'nV', 'ChannelID': [], 'TimeStamps': [],
'NEUEVWAV_HeaderIndices': [], 'Classification': [], 'Waveforms': []}
classifier = unpack('B', self.datafile.read(1))[0]
if classifier == UNDEFINED: classifier = 'none'
elif CLASSIFIER_MIN <= classifier <= CLASSIFIER_MAX: classifier = classifier
elif classifier == CLASSIFIER_NOISE: classifier = 'noise'
else: classifier = 'error'
self.datafile.seek(1, 1)
# Check if data for this electrode exists and update parameters accordingly
if packet_id in output['spike_events']['ChannelID']:
idx = output['spike_events']['ChannelID'].index(packet_id)
else:
idx = -1
output['spike_events']['ChannelID'].append(packet_id)
output['spike_events']['TimeStamps'].append([])
output['spike_events']['Classification'].append([])
# Find neuevwav extended header for this electrode for use in calculating data info
output['spike_events']['NEUEVWAV_HeaderIndices'].append(
next(item for (item, d) in enumerate(self.extended_headers)
if d["ElectrodeID"] == packet_id and d["PacketID"] == 'NEUEVWAV'))
output['spike_events']['TimeStamps'][idx].append(time_stamp)
output['spike_events']['Classification'][idx].append(classifier)
# Use extended header idx to get specific data information
ext_header_idx = output['spike_events']['NEUEVWAV_HeaderIndices'][idx]
samples = self.extended_headers[ext_header_idx]['SpikeWidthSamples']
dig_factor = self.extended_headers[ext_header_idx]['DigitizationFactor']
num_bytes = self.extended_headers[ext_header_idx]['BytesPerWaveform']
if num_bytes <= 1: data_type = np.int8
elif num_bytes == 2: data_type = np.int16
# Extract and scale the data
if get_waveforms:
if idx == -1:
output['spike_events']['Waveforms'].append(
[np.fromfile(file=self.datafile, dtype=data_type, count=samples).astype(np.int32) * dig_factor])
else:
output['spike_events']['Waveforms'][idx] = \
np.append(output['spike_events']['Waveforms'][idx],
[np.fromfile(file=self.datafile, dtype=data_type, count=samples).astype(np.int32) *
dig_factor], axis=0)
else:
self.datafile.seek(self.basic_header['BytesInDataPackets'] - 8, 1)
# For comment events
elif packet_id == COMMENT_PACKET_ID:
# See if the dictionary exists in output, if not, create it
if 'comments' not in output:
output['comments'] = {'TimeStamps': [], 'CharSet': [], 'Flag': [], 'Data': [], 'Comment': []}
output['comments']['TimeStamps'].append(time_stamp)
char_set = unpack('B', self.datafile.read(1))[0]
if char_set == CHARSET_ANSI: output['comments']['CharSet'].append('ANSI')
elif char_set == CHARSET_UTF: output['comments']['CharSet'].append('UTF-16')
elif char_set == CHARSET_ROI: output['comments']['CharSet'].append('NeuroMotive ROI')
else: output['comments']['CharSet'].append('error')
comm_flag = unpack('B', self.datafile.read(1))[0]
if comm_flag == COMM_RGBA: output['comments']['Flag'].append('RGBA color code')
elif comm_flag == COMM_TIME: output['comments']['Flag'].append('timestamp')
else: output['comments']['Flag'].append('error')
output['comments']['Data'].append(unpack('<I', self.datafile.read(4))[0])
samples = self.basic_header['BytesInDataPackets'] - 12
comm_string = bytes.decode(self.datafile.read(samples), 'latin-1')
output['comments']['Comment'].append(comm_string.split(STRING_TERMINUS, 1)[0])
# For video sync event
elif packet_id == VIDEO_SYNC_PACKET_ID:
# See if the dictionary exists in output, if not, create it
if 'video_sync_events' not in output:
output['video_sync_events'] = {'TimeStamps': [], 'VideoFileNum': [], 'VideoFrameNum': [],
'VideoElapsedTime_ms': [], 'VideoSourceID': []}
output['video_sync_events']['TimeStamps'].append( time_stamp)
output['video_sync_events']['VideoFileNum'].append( unpack('<H', self.datafile.read(2))[0])
output['video_sync_events']['VideoFrameNum'].append( unpack('<I', self.datafile.read(4))[0])
output['video_sync_events']['VideoElapsedTime_ms'].append( unpack('<I', self.datafile.read(4))[0])
output['video_sync_events']['VideoSourceID'].append( unpack('<I', self.datafile.read(4))[0])
self.datafile.seek((self.basic_header['BytesInDataPackets'] - 20), 1)
# For tracking event
elif packet_id == TRACKING_PACKET_ID:
# See if the dictionary exists in output, if not, create it
if 'tracking_events' not in output:
output['tracking_events'] = {'TimeStamps': [], 'ParentID': [], 'NodeID': [], 'NodeCount': [],
'PointCount': [], 'TrackingPoints': []}
output['tracking_events']['TimeStamps'].append( time_stamp)
output['tracking_events']['ParentID'].append( unpack('<H', self.datafile.read(2))[0])
output['tracking_events']['NodeID'].append( unpack('<H', self.datafile.read(2))[0])
output['tracking_events']['NodeCount'].append( unpack('<H', self.datafile.read(2))[0])
output['tracking_events']['PointCount'].append( unpack('<H', self.datafile.read(2))[0])
samples = (self.basic_header['BytesInDataPackets'] - 14) // 2
output['tracking_events']['TrackingPoints'].append(
np.fromfile(file=self.datafile, dtype=np.uint16, count=samples))
# For button trigger event
elif packet_id == BUTTON_PACKET_ID:
# See if the dictionary exists in output, if not, create it
if 'button_trigger_events' not in output:
output['button_trigger_events'] = {'TimeStamps': [], 'TriggerType': []}
output['button_trigger_events']['TimeStamps'].append(time_stamp)
trigger_type = unpack('<H', self.datafile.read(2))[0]
if trigger_type == UNDEFINED: output['button_trigger_events']['TriggerType'].append('undefined')
elif trigger_type == BUTTON_PRESS: output['button_trigger_events']['TriggerType'].append('button press')
elif trigger_type == BUTTON_RESET: output['button_trigger_events']['TriggerType'].append('event reset')
else: output['button_trigger_events']['TriggerType'].append('error')
self.datafile.seek((self.basic_header['BytesInDataPackets'] - 8), 1)
# For configuration log event
elif packet_id == CONFIGURATION_PACKET_ID:
# See if the dictionary exists in output, if not, create it
if 'configuration_events' not in output:
output['configuration_events'] = {'TimeStamps': [], 'ConfigChangeType': [], 'ConfigChanged': []}
output['configuration_events']['TimeStamps'].append(time_stamp)
change_type = unpack('<H', self.datafile.read(2))[0]
if change_type == CHG_NORMAL: output['configuration_events']['ConfigChangeType'].append('normal')
elif change_type == CHG_CRITICAL: output['configuration_events']['ConfigChangeType'].append('critical')
else: output['configuration_events']['ConfigChangeType'].append('error')
samples = self.basic_header['BytesInDataPackets'] - 8
output['configuration_events']['ConfigChanged'].append(unpack(('<' + str(samples) + 's'),
self.datafile.read(samples))[0])
# Otherwise, packet unknown, skip to next packet
else: self.datafile.seek((self.basic_header['BytesInDataPackets'] - 6), 1)
return output
def processroicomments(self, comments):
"""
        Used to process the comment data packets associated with NeuroMotive region-of-interest enter/exit events.
        Requires that getdata() has already been run and that its 'comments' dictionary is passed in.
:return: roi_events: a dictionary of regions, enter timestamps, and exit timestamps for each region
"""
roi_events = {'Regions': [], 'EnterTimeStamps': [], 'ExitTimeStamps': []}
for i in range(len(comments['TimeStamps'])):
if comments['CharSet'][i] == 'NeuroMotive ROI':
temp_data = pack('<I', comments['Data'][i])
roi = unpack_from('<B', temp_data)[0]
event = unpack_from('<B', temp_data, 1)[0]
# Determine the label of the region source
source_label = next(d['VideoSource'] for d in self.extended_headers if d["TrackableID"] == roi)
# update the timestamps for events
if source_label in roi_events['Regions']:
idx = roi_events['Regions'].index(source_label)
else:
idx = -1
roi_events['Regions'].append(source_label)
roi_events['EnterTimeStamps'].append([])
roi_events['ExitTimeStamps'].append([])
                if event == ENTER_EVENT: roi_events['EnterTimeStamps'][idx].append(comments['TimeStamps'][i])
                elif event == EXIT_EVENT: roi_events['ExitTimeStamps'][idx].append(comments['TimeStamps'][i])
return roi_events
def close(self):
name = self.datafile.name
self.datafile.close()
print('\n' + name.split('/')[-1] + ' closed')
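# Minimal usage sketch for NevFile (defined here only, never executed on import).
# The file name 'example.nev' is a placeholder assumption; the calls follow the
# getdata() and processroicomments() interfaces documented above.
def _nevfile_usage_sketch():
    nev = NevFile('example.nev')                                   # opens the file and reads all headers
    events = nev.getdata(elec_ids=[1, 2, 3], get_waveforms=False)  # timestamps/classifications only
    roi = None
    if 'comments' in events:                                       # NeuroMotive ROI data arrives as comment packets
        roi = nev.processroicomments(events['comments'])
    nev.close()
    return events, roi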
class NsxFile:
"""
attributes and methods for all BR continuous data files. Initialization opens the file and extracts the
basic header information.
"""
def __init__(self, datafile=''):
self.datafile = datafile
self.basic_header = {}
self.extended_headers = []
# Run openfilecheck and open the file passed or allow user to browse to one
self.datafile = openfilecheck('rb', file_name=self.datafile, file_ext='.ns*', file_type='Blackrock NSx Files')
# Determine File ID to determine if File Spec 2.1
self.basic_header['FileTypeID'] = bytes.decode(self.datafile.read(8), 'latin-1')
# Extract basic and extended header information based on File Spec
if self.basic_header['FileTypeID'] == 'NEURALSG':
self.basic_header.update(processheaders(self.datafile, nsx_header_dict['basic_21']))
self.basic_header['FileSpec'] = '2.1'
self.basic_header['TimeStampResolution'] = 30000
self.basic_header['BytesInHeader'] = 32 + 4 * self.basic_header['ChannelCount']
shape = (1, self.basic_header['ChannelCount'])
self.basic_header['ChannelID'] = \
list(np.fromfile(file=self.datafile, dtype=np.uint32,
count=self.basic_header['ChannelCount']).reshape(shape)[0])
else:
self.basic_header.update(processheaders(self.datafile, nsx_header_dict['basic']))
for i in range(self.basic_header['ChannelCount']):
self.extended_headers.append(processheaders(self.datafile, nsx_header_dict['extended']))
def getdata(self, elec_ids='all', start_time_s=0, data_time_s='all', downsample=1):
"""
This function is used to return a set of data from the NSx datafile.
:param elec_ids: [optional] {list} List of elec_ids to extract (e.g., [13])
:param start_time_s: [optional] {float} Starting time for data extraction (e.g., 1.0)
:param data_time_s: [optional] {float} Length of time of data to return (e.g., 30.0)
:param downsample: [optional] {int} Downsampling factor (e.g., 2)
:return: output: {Dictionary} of: data_headers: {list} dictionaries of all data headers
elec_ids: {list} elec_ids that were extracted (sorted)
start_time_s: {float} starting time for data extraction
data_time_s: {float} length of time of data returned
downsample: {int} data downsampling factor
samp_per_s: {float} output data samples per second
data: {numpy array} continuous data in a 2D numpy array
Parameters: elec_ids, start_time_s, data_time_s, and downsample are not mandatory. Defaults will assume all
electrodes and all data points starting at time(0) are to be read. Data is returned as a numpy 2d array
with each row being the data set for each electrode (e.g. output['data'][0] for output['elec_ids'][0]).
"""
# Safety checks
start_time_s = check_starttime(start_time_s)
data_time_s = check_datatime(data_time_s)
downsample = check_downsample(downsample)
elec_ids = check_elecid(elec_ids)
# initialize parameters
output = dict()
output['elec_ids'] = elec_ids
output['start_time_s'] = float(start_time_s)
output['data_time_s'] = data_time_s
output['downsample'] = downsample
output['data'] = []
output['data_headers'] = []
output['ExtendedHeaderIndices'] = []
datafile_samp_per_sec = self.basic_header['TimeStampResolution'] / self.basic_header['Period']
data_pt_size = self.basic_header['ChannelCount'] * DATA_BYTE_SIZE
elec_id_indices = []
front_end_idxs = []
analog_input_idxs = []
front_end_idx_cont = True
analog_input_idx_cont = True
hit_start = False
hit_stop = False
d_ptr = 0
# Move file position to start of datafile (if read before, may not be here anymore)
self.datafile.seek(self.basic_header['BytesInHeader'], 0)
# Based on FileSpec set other parameters
if self.basic_header['FileSpec'] == '2.1':
output['elec_ids'] = self.basic_header['ChannelID']
output['data_headers'].append({})
output['data_headers'][0]['Timestamp'] = TIMESTAMP_NULL_21
output['data_headers'][0]['NumDataPoints'] = (ospath.getsize(self.datafile.name) - self.datafile.tell()) \
// (DATA_BYTE_SIZE * self.basic_header['ChannelCount'])
else:
output['elec_ids'] = [d['ElectrodeID'] for d in self.extended_headers]
# Determine start and stop index for data
if start_time_s == START_TIME_DEF: start_idx = START_OFFSET_MIN
else: start_idx = int(round(start_time_s * datafile_samp_per_sec))
if data_time_s == DATA_TIME_DEF: stop_idx = STOP_OFFSET_MIN
else: stop_idx = int(round((start_time_s + data_time_s) * datafile_samp_per_sec))
# If a subset of electrodes is requested, error check, determine elec indices, and reduce headers
if elec_ids != ELEC_ID_DEF:
elec_ids = check_dataelecid(elec_ids, output['elec_ids'])
if not elec_ids: return output
else:
elec_id_indices = [output['elec_ids'].index(e) for e in elec_ids]
output['elec_ids'] = elec_ids
num_elecs = len(output['elec_ids'])
# Determine extended header indices and idx for Front End vs. Analog Input channels
if self.basic_header['FileSpec'] != '2.1':
for i in range(num_elecs):
idx = next(item for (item, d) in enumerate(self.extended_headers)
if d["ElectrodeID"] == output['elec_ids'][i])
output['ExtendedHeaderIndices'].append(idx)
if self.extended_headers[idx]['PhysicalConnector'] < 5: front_end_idxs.append(i)
else: analog_input_idxs.append(i)
# Determine if front_end_idxs and analog_idxs are contiguous (default = False)
if any(np.diff(np.array(front_end_idxs)) != 1): front_end_idx_cont = False
if any(np.diff(np.array(analog_input_idxs)) != 1): analog_input_idx_cont = False
# Pre-allocate output data based on data packet info (timestamp + num pts) and/or data_time_s
# 1) Determine number of samples in all data packets to set possible number of output pts
# 1a) For file spec > 2.1, get to last data packet quickly to determine total possible output length
# 2) If possible output length is bigger than requested, set output based on requested
if self.basic_header['FileSpec'] == '2.1':
timestamp = TIMESTAMP_NULL_21
num_data_pts = output['data_headers'][0]['NumDataPoints']
else:
while self.datafile.tell() != ospath.getsize(self.datafile.name):
self.datafile.seek(1, 1) # skip header byte value
timestamp = unpack('<I', self.datafile.read(4))[0]
num_data_pts = unpack('<I', self.datafile.read(4))[0]
self.datafile.seek(num_data_pts * self.basic_header['ChannelCount'] * DATA_BYTE_SIZE, 1)
stop_idx_output = ceil(timestamp / self.basic_header['Period']) + num_data_pts
if data_time_s != DATA_TIME_DEF and stop_idx < stop_idx_output: stop_idx_output = stop_idx
total_samps = int(ceil((stop_idx_output - start_idx) / downsample))
if (total_samps * self.basic_header['ChannelCount'] * DATA_BYTE_SIZE) > DATA_PAGING_SIZE:
print("\nOutput data requested is larger than 1 GB, attempting to preallocate output now")
        # If the requested output is larger than available memory, tell the user to request a subset of electrodes,
        # a subset of the data, or to use savesubsetnsx() to create smaller files; otherwise, pre-allocate the output
try: output['data'] = np.zeros((total_samps, num_elecs), dtype=np.float32)
except MemoryError as err:
err.args += (" Output data size requested is larger than available memory. Use the parameters\n"
" for getdata(), e.g., 'elec_ids', to request a subset of the data or use\n"
" NsxFile.savesubsetnsx() to create subsets of the main nsx file\n", )
raise
# Reset file position to start of data header #1, loop through all data packets, process header, and add data
self.datafile.seek(self.basic_header['BytesInHeader'], 0)
while not hit_stop:
# Read header, check to make sure the header is valid (ie Header field != 0). There is currently a
# bug with the NSP where pausing creates a 0 sample packet before the next real data packet, these need to
# be skipped, including any tiny packets that have less samples than downsample
if self.basic_header['FileSpec'] != '2.1':
output['data_headers'].append(processheaders(self.datafile, nsx_header_dict['data']))
if output['data_headers'][-1]['Header'] == 0: print('Invalid Header. File may be corrupt')
if output['data_headers'][-1]['NumDataPoints'] < downsample:
self.datafile.seek(self.basic_header['ChannelCount'] * output['data_headers'][-1]['NumDataPoints']
* DATA_BYTE_SIZE, 1)
continue
# Determine sample value for current packet timestamp
timestamp_sample = int(round(output['data_headers'][-1]['Timestamp'] / self.basic_header['Period']))
# For now, we need a patch for file sync which syncs 2 NSP clocks, starting a new data packet which
# may be backwards in time wrt the end of data packet 1. Thus, when this happens, we need to treat
# data packet 2 as if it was 1, and start this process over.
if timestamp_sample < d_ptr:
d_ptr = 0
hit_start = False
output['data_headers'] = []
self.datafile.seek(-9, 1)
continue
# Check to see if stop index is before the first data packet
if len(output['data_headers']) == 1 and (STOP_OFFSET_MIN < stop_idx < timestamp_sample):
print("\nData requested is before any data was saved, which starts at t = {0:.6f} s".format(
output['data_headers'][0]['Timestamp'] / self.basic_header['TimeStampResolution']))
return
# For the first data packet to be read
if not hit_start:
# Check for starting point of data request
start_offset = start_idx - timestamp_sample
# If start_offset is outside of this packet, skip the current packet
# if we've reached the end of file, break, otherwise continue to next packet
if start_offset > output['data_headers'][-1]['NumDataPoints']:
self.datafile.seek(output['data_headers'][-1]['NumDataPoints'] * data_pt_size, 1)
if self.datafile.tell() == ospath.getsize(self.datafile.name): break
else: continue
else:
# If the start_offset is before the current packet, check to ensure that stop_index
# is not also in the paused area, then create padded data for during pause time
if start_offset < 0:
if STOP_OFFSET_MIN < stop_idx < timestamp_sample:
print("\nBecause of pausing, data section requested is during pause period")
return
else:
print("\nFirst data packet requested begins at t = {0:.6f} s, "
"initial section padded with zeros".format(
output['data_headers'][-1]['Timestamp'] / self.basic_header['TimeStampResolution']))
start_offset = START_OFFSET_MIN
d_ptr = (timestamp_sample - start_idx) // downsample
hit_start = True
# for all other packets
else:
# check to see if padded data is needed, including hitting the stop index
if STOP_OFFSET_MIN < stop_idx < timestamp_sample:
print("\nSection padded with zeros due to file pausing")
hit_stop = True; break
elif (timestamp_sample - start_idx) > d_ptr:
print("\nSection padded with zeros due to file pausing")
start_offset = START_OFFSET_MIN
d_ptr = (timestamp_sample - start_idx) // downsample
# Set number of samples to be read based on if start/stop sample is during data packet
if STOP_OFFSET_MIN < stop_idx <= (timestamp_sample + output['data_headers'][-1]['NumDataPoints']):
total_pts = stop_idx - timestamp_sample - start_offset
hit_stop = True
else:
total_pts = output['data_headers'][-1]['NumDataPoints'] - start_offset
# Need current file position because memory map will reset file position
curr_file_pos = self.datafile.tell()
# Determine starting position to read from memory map
file_offset = int(curr_file_pos + start_offset * data_pt_size)
# Extract data no more than 1 GB at a time (or based on DATA_PAGING_SIZE)
# Determine shape of data to map based on file sizing and position, then map it
downsample_data_size = data_pt_size * downsample
max_length = (DATA_PAGING_SIZE // downsample_data_size) * downsample_data_size
num_loops = int(ceil(total_pts * data_pt_size / max_length))
for loop in range(num_loops):
if loop == 0:
if num_loops == 1: num_pts = total_pts
else: num_pts = max_length // data_pt_size
else:
file_offset += max_length
if loop == (num_loops - 1): num_pts = ((total_pts * data_pt_size) % max_length) // data_pt_size
else: num_pts = max_length // data_pt_size
if num_loops != 1: print('Data extraction requires paging: {0} of {1}'.format(loop + 1, num_loops))
num_pts = int(num_pts)
shape = (num_pts, self.basic_header['ChannelCount'])
mm = np.memmap(self.datafile, dtype=np.int16, mode='r', offset=file_offset, shape=shape)
# append data based on downsample slice and elec_ids indexing, then clear memory map
if downsample != 1: mm = mm[::downsample]
if elec_id_indices:
output['data'][d_ptr:d_ptr + mm.shape[0]] = np.array(mm[:, elec_id_indices]).astype(np.float32)
else:
output['data'][d_ptr:d_ptr + mm.shape[0]] = np.array(mm).astype(np.float32)
d_ptr += mm.shape[0]
del mm
# Reset current file position for file position checking and possibly next header
curr_file_pos += self.basic_header['ChannelCount'] * output['data_headers'][-1]['NumDataPoints'] \
* DATA_BYTE_SIZE
self.datafile.seek(curr_file_pos, 0)
if curr_file_pos == ospath.getsize(self.datafile.name): hit_stop = True
# Safety checks for start and stop times
if not hit_stop and start_idx > START_OFFSET_MIN:
raise Exception('Error: End of file found before start_time_s')
elif not hit_stop and stop_idx:
print("\n*** WARNING: End of file found before stop_time_s, returning all data in file")
# Transpose the data so that it has entries based on each electrode, not each sample time
output['data'] = output['data'].transpose()
# All data must be scaled based on scaling factors from extended header
if self.basic_header['FileSpec'] == '2.1': output['data'] *= UV_PER_BIT_21
else:
if front_end_idxs:
if front_end_idx_cont:
output['data'][front_end_idxs[0]:front_end_idxs[-1] + 1] *= \
getdigfactor(self.extended_headers, output['ExtendedHeaderIndices'][front_end_idxs[0]])
else:
for i in front_end_idxs:
output['data'][i] *= getdigfactor(self.extended_headers, output['ExtendedHeaderIndices'][i])
if analog_input_idxs:
if analog_input_idx_cont:
output['data'][analog_input_idxs[0]:analog_input_idxs[-1] + 1] *= \
getdigfactor(self.extended_headers, output['ExtendedHeaderIndices'][analog_input_idxs[0]])
else:
for i in analog_input_idxs:
output['data'][i] *= getdigfactor(self.extended_headers, output['ExtendedHeaderIndices'][i])
# Update parameters based on data extracted
output['samp_per_s'] = float(datafile_samp_per_sec / downsample)
output['data_time_s'] = len(output['data'][0]) / output['samp_per_s']
return output
def savesubsetnsx(self, elec_ids='all', file_size=None, file_time_s=None, file_suffix=''):
"""
This function is used to save a subset of data based on electrode IDs, file sizing, or file data time. If
both file_time_s and file_size are passed, it will default to file_time_s and determine sizing accordingly.
:param elec_ids: [optional] {list} List of elec_ids to extract (e.g., [13])
:param file_size: [optional] {int} Byte size of each subset file to save (e.g., 1024**3 = 1 Gb). If nothing
is passed, file_size will be all data points.
:param file_time_s: [optional] {float} Time length of data for each subset file, in seconds (e.g. 60.0). If
nothing is passed, file_size will be used as default.
:param file_suffix: [optional] {str} Suffix to append to NSx datafile name for subset files. If nothing is
passed, default will be "_subset".
:return: None - None of the electrodes requested exist in the data
SUCCESS - All file subsets extracted and saved
"""
# Initializations
elec_id_indices = []
file_num = 1
pausing = False
datafile_datapt_size = self.basic_header['ChannelCount'] * DATA_BYTE_SIZE
self.datafile.seek(0, 0)
# Run electrode id checks and set num_elecs
elec_ids = check_elecid(elec_ids)
if self.basic_header['FileSpec'] == '2.1': all_elec_ids = self.basic_header['ChannelID']
else: all_elec_ids = [x['ElectrodeID'] for x in self.extended_headers]
if elec_ids == ELEC_ID_DEF:
elec_ids = all_elec_ids
else:
elec_ids = check_dataelecid(elec_ids, all_elec_ids)
if not elec_ids: return None
else: elec_id_indices = [all_elec_ids.index(x) for x in elec_ids]
num_elecs = len(elec_ids)
# If file_size or file_time_s passed, check it and set file_sizing accordingly
if file_time_s:
if file_time_s and file_size:
print("\nWARNING: Only one of file_size or file_time_s can be passed, defaulting to file_time_s.")
file_size = int(num_elecs * DATA_BYTE_SIZE * file_time_s *
self.basic_header['TimeStampResolution'] / self.basic_header['Period'])
if self.basic_header['FileSpec'] == '2.1':
file_size += 32 + 4 * num_elecs
else:
file_size += NSX_BASIC_HEADER_BYTES_22 + NSX_EXT_HEADER_BYTES_22 * num_elecs + 5
print("\nBased on timing request, file size will be {0:d} Mb".format(int(file_size / 1024**2)))
elif file_size:
file_size = check_filesize(file_size)
# Create and open subset file as writable binary, if it already exists ask user for overwrite permission
file_name, file_ext = ospath.splitext(self.datafile.name)
if file_suffix: file_name += '_' + file_suffix
else: file_name += '_subset'
if ospath.isfile(file_name + "_000" + file_ext):
if 'y' != input("\nFile '" + file_name.split('/')[-1] + "_xxx" + file_ext +
"' already exists, overwrite [y/n]: "):
print("\nExiting, no overwrite, returning None"); return None
else:
print("\n*** Overwriting existing subset files ***")
subset_file = open(file_name + "_000" + file_ext, 'wb')
print("\nWriting subset file: " + ospath.split(subset_file.name)[1])
# For file spec 2.1:
# 1) copy the first 28 bytes from the datafile (these are unchanged)
# 2) write subset channel count and channel ID to file
# 3) skip ahead in datafile the number of bytes in datafile ChannelCount(4) plus ChannelID (4*ChannelCount)
if self.basic_header['FileSpec'] == '2.1':
subset_file.write(self.datafile.read(28))
subset_file.write(np.array(num_elecs).astype(np.uint32).tobytes())
subset_file.write(np.array(elec_ids).astype(np.uint32).tobytes())
self.datafile.seek(4 + 4 * self.basic_header['ChannelCount'], 1)
# For file spec 2.2 and above
# 1) copy the first 10 bytes from the datafile (unchanged)
# 2) write subset bytes-in-headers and skip 4 bytes in datafile, noting position of this for update later
# 3) copy the next 296 bytes from datafile (unchanged)
# 4) write subset channel-count value and skip 4 bytes in datafile
# 5) append extended headers based on the channel ID. Must read the first 4 bytes, determine if correct
# Channel ID, repack first 4 bytes, write to disk, then copy remaining 62 (66-4) bytes
else:
subset_file.write(self.datafile.read(10))
bytes_in_headers = NSX_BASIC_HEADER_BYTES_22 + NSX_EXT_HEADER_BYTES_22 * num_elecs
num_pts_header_pos = bytes_in_headers + 5
subset_file.write(np.array(bytes_in_headers).astype(np.uint32).tobytes())
self.datafile.seek(4, 1)
subset_file.write(self.datafile.read(296))
subset_file.write(np.array(num_elecs).astype(np.uint32).tobytes())
self.datafile.seek(4, 1)
for i in range(len(self.extended_headers)):
h_type = self.datafile.read(2)
chan_id = self.datafile.read(2)
if unpack('<H', chan_id)[0] in elec_ids:
subset_file.write(h_type)
subset_file.write(chan_id)
subset_file.write(self.datafile.read(62))
else:
self.datafile.seek(62, 1)
# For all file types, loop through all data packets, extracting data based on page sizing
while self.datafile.tell() != ospath.getsize(self.datafile.name):
# pull and set data packet header info
if self.basic_header['FileSpec'] == '2.1':
                packet_pts = (ospath.getsize(self.datafile.name) - self.datafile.tell()) \
                             // (DATA_BYTE_SIZE * self.basic_header['ChannelCount'])  # integer division keeps seek offsets integral
else:
header_binary = self.datafile.read(1)
timestamp_binary = self.datafile.read(4)
packet_pts_binary = self.datafile.read(4)
packet_pts = unpack('<I', packet_pts_binary)[0]
if packet_pts == 0: continue
subset_file.write(header_binary)
subset_file.write(timestamp_binary)
subset_file.write(packet_pts_binary)
# get current file position and set loop parameters
datafile_pos = self.datafile.tell()
file_offset = datafile_pos
mm_length = (DATA_PAGING_SIZE // datafile_datapt_size) * datafile_datapt_size
num_loops = int(ceil(packet_pts * datafile_datapt_size / mm_length))
packet_read_pts = 0
subset_file_pkt_pts = 0
# Determine shape of data to map based on file sizing and position, map it, then append to file
for loop in range(num_loops):
if loop == 0:
if num_loops == 1: num_pts = packet_pts
else: num_pts = mm_length // datafile_datapt_size
else:
file_offset += mm_length
if loop == (num_loops - 1):
num_pts = ((packet_pts * datafile_datapt_size) % mm_length) // datafile_datapt_size
else:
num_pts = mm_length // datafile_datapt_size
shape = (int(num_pts), self.basic_header['ChannelCount'])
mm = np.memmap(self.datafile, dtype=np.int16, mode='r', offset=file_offset, shape=shape)
if elec_id_indices: mm = mm[:, elec_id_indices]
start_idx = 0
# Determine if we need to start an additional file
if file_size and (file_size - subset_file.tell()) < DATA_PAGING_SIZE:
# number of points we can possibly write to current subset file
pts_can_add = int((file_size - subset_file.tell()) // (num_elecs * DATA_BYTE_SIZE)) + 1
stop_idx = start_idx + pts_can_add
# If the pts remaining are less than exist in the data, we'll need an additional subset file
while pts_can_add < num_pts:
# Write pts to disk, set old file name, update pts in packet, and close last subset file
if elec_id_indices: subset_file.write(np.array(mm[start_idx:stop_idx]).tobytes())
else: subset_file.write(mm[start_idx:stop_idx])
prior_file_name = subset_file.name
prior_file_pkt_pts = subset_file_pkt_pts + pts_can_add
subset_file.close()
# We need to copy header information from last subset file and adjust some headers.
# For file spec 2.1, this is just the basic header.
# For file spec 2.2 and above:
# 1) copy basic and extended headers
# 2) create data packet header with new timestamp and num data points (dummy numpts value)
# 3) overwrite the number of data points in the old file last header packet with true value
prior_file = open(prior_file_name, 'rb+')
if file_num < 10: numstr = "_00" + str(file_num)
elif 10 <= file_num < 100: numstr = "_0" + str(file_num)
else: numstr = "_" + str(file_num)
subset_file = open(file_name + numstr + file_ext, 'wb')
print("Writing subset file: " + ospath.split(subset_file.name)[1])
if self.basic_header['FileSpec'] == '2.1':
subset_file.write(prior_file.read(32 + 4 * num_elecs))
else:
subset_file.write(prior_file.read(bytes_in_headers))
subset_file.write(header_binary)
timestamp_new = unpack('<I', timestamp_binary)[0] \
+ (packet_read_pts + pts_can_add) * self.basic_header['Period']
subset_file.write(np.array(timestamp_new).astype(np.uint32).tobytes())
subset_file.write(np.array(num_pts - pts_can_add).astype(np.uint32).tobytes())
prior_file.seek(num_pts_header_pos, 0)
prior_file.write(np.array(prior_file_pkt_pts).astype(np.uint32).tobytes())
num_pts_header_pos = bytes_in_headers + 5
# Close old file and update parameters
prior_file.close()
packet_read_pts += pts_can_add
start_idx += pts_can_add
num_pts -= pts_can_add
file_num += 1
subset_file_pkt_pts = 0
pausing = False
pts_can_add = int((file_size - subset_file.tell()) // (num_elecs * DATA_BYTE_SIZE)) + 1
stop_idx = start_idx + pts_can_add
# If no additional file needed, write remaining data to disk, update parameters, and clear memory map
if elec_id_indices: subset_file.write(np.array(mm[start_idx:]).tobytes())
else: subset_file.write(mm[start_idx:])
packet_read_pts += num_pts
subset_file_pkt_pts += num_pts
del mm
# Update num_pts header position for each packet, while saving last packet num_pts_header_pos for later
if self.basic_header['FileSpec'] != '2.1':
curr_hdr_num_pts_pos = num_pts_header_pos
num_pts_header_pos += 4 + subset_file_pkt_pts * num_elecs * DATA_BYTE_SIZE + 5
# Because memory map resets the file position, reset position in datafile
datafile_pos += self.basic_header['ChannelCount'] * packet_pts * DATA_BYTE_SIZE
self.datafile.seek(datafile_pos, 0)
# If using file_timing and there is pausing in data (multiple packets), let user know
if file_time_s and not pausing and (self.datafile.tell() != ospath.getsize(self.datafile.name)):
pausing = True
print("\n*** Because of pausing in original datafile, this file may be slightly time shorter\n"
" than others, and will contain multiple data packets offset in time\n")
# Update last data header packet num data points accordingly (spec != 2.1)
if self.basic_header['FileSpec'] != '2.1':
subset_file_pos = subset_file.tell()
subset_file.seek(curr_hdr_num_pts_pos, 0)
subset_file.write(np.array(subset_file_pkt_pts).astype(np.uint32).tobytes())
subset_file.seek(subset_file_pos, 0)
# Close subset file and return success
subset_file.close()
print("\n *** All subset files written to disk and closed ***")
return "SUCCESS"
def close(self):
name = self.datafile.name
self.datafile.close()
print('\n' + name.split('/')[-1] + ' closed')
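# Minimal usage sketch for NsxFile (defined here only, never executed on import).
# 'example.ns5' is a placeholder assumption; the arguments mirror the getdata()
# and savesubsetnsx() signatures above.
def _nsxfile_usage_sketch():
    nsx = NsxFile('example.ns5')
    cont = nsx.getdata(elec_ids=[1, 2], start_time_s=0, data_time_s=10.0, downsample=2)
    # cont['data'][i] is the scaled trace for electrode cont['elec_ids'][i],
    # sampled at cont['samp_per_s'] samples per second.
    nsx.savesubsetnsx(elec_ids=[1, 2], file_time_s=60.0)           # writes example_subset_000.ns5, _001, ...
    nsx.close()
    return cont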
|
[
"guillaume.doucet2@mail.mcgill.ca"
] |
guillaume.doucet2@mail.mcgill.ca
|
7f63c9542d741b1f878c60b4bf2129b95b9de527
|
2084aacbcc299561ff7b76fff02a0710786c6052
|
/B4162/Kozlov/Homework 2018-12-01.py
|
097c778eefb31dff47e3e0c1d267478d8b0df8cc
|
[] |
no_license
|
kuperchal/python-course-2018
|
8d5c2c5005b859a31417e22066077bc3aadb0809
|
eec0af6892190abe2244424fc05fa679154938bc
|
refs/heads/master
| 2020-03-29T10:20:02.175730
| 2019-01-27T11:53:07
| 2019-01-27T11:53:07
| 149,799,897
| 0
| 3
| null | 2019-01-27T11:53:25
| 2018-09-21T18:03:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,955
|
py
|
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
x = np.arange(-10, 10, 0.1) # Define the ranges of x values used by the different plots
x1 = np.arange(-1.56, 1.57, 0.05)
x2 = np.arange(0.05, 3.15, 0.1) # start just above 0 so that 1/tan(x) stays finite for the ctg plot
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10,7), dpi=85, facecolor='white',
                         frameon=True, edgecolor='lightblue', linewidth=4) # Create the figure and the 2x2 grid of subplots
fig.subplots_adjust(wspace=0.4, hspace=0.5, left=0.1, right=0.95, top=0.95, bottom=0.1) # Set the spacing between subplots and the outer margins
#First graph
axes[0,0].plot(x, np.sin(x), color='red')
axes[0,0].grid(True, c='lightblue', alpha=0.5)
axes[0, 0].set_title('sin(x)', fontsize=10)
axes[0,0].set_xlabel('x', fontsize=8)
axes[0,0].set_ylabel('y=sin(x)', fontsize=8)
axes[0,0].annotate('local max', xy=(1.57, 1), xytext=(3.5, 0.5), arrowprops=dict(facecolor='black', shrink=0.01))
axes[0,0].annotate('local min', xy=(-1.57, -1), xytext=(1, -0.5), arrowprops=dict(facecolor='black', shrink=0.01))
#Second graph
axes[0,1].plot(x, np.cos(x), color='green')
axes[0,1].grid(True, c='lightblue', alpha=0.5)
axes[0,1].set_title('cos(x)', fontsize=10)
axes[0,1].set_xlabel('x', fontsize=8)
axes[0,1].set_ylabel('y=cos(x)', fontsize=8)
axes[0,1].annotate('local max', xy=(0, 1), xytext=(2, 0.5), arrowprops=dict(facecolor='black', shrink=0.01))
axes[0,1].annotate('local min', xy=(-3.14, -1), xytext=(-1.5, -0.5), arrowprops=dict(facecolor='black', shrink=0.01))
#Third graph
axes[1,0].plot(x1, np.tan(x1), color='green')
axes[1,0].set_title('tg(x)', fontsize=10)
axes[1,0].set_xlabel('x', fontsize=8)
axes[1,0].set_ylabel('y=tg(x)', fontsize=8)
#Fourth graph
axes[1,1].plot(x2, 1/np.tan(x2), color='yellow')
axes[1,1].set_title('ctg(x)', fontsize=10)
axes[1,1].set_xlabel('x', fontsize=8)
axes[1,1].set_ylabel('y=ctg(x)', fontsize=8)
plt.show()
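# A more compact way to build the same four-panel figure, driven by a list of
# (range, function, title) tuples instead of addressing each cell by hand.
# This is just a sketch: it reuses the x, x1, x2 arrays defined above and is
# defined here without being called.
def plot_trig_grid():
    panels = [(x, np.sin, 'sin(x)'), (x, np.cos, 'cos(x)'),
              (x1, np.tan, 'tg(x)'), (x2, lambda v: 1 / np.tan(v), 'ctg(x)')]
    fig2, axes2 = plt.subplots(nrows=2, ncols=2, figsize=(10, 7))
    for ax, (xs, fn, title) in zip(axes2.flat, panels):
        ax.plot(xs, fn(xs))
        ax.grid(True, c='lightblue', alpha=0.5)
        ax.set_title(title, fontsize=10)
        ax.set_xlabel('x', fontsize=8)
        ax.set_ylabel('y=' + title, fontsize=8)
    plt.show()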
|
[
"noreply@github.com"
] |
kuperchal.noreply@github.com
|
56b60992b52817b496456c67420676c2dbb9af53
|
980852f3fe17b3f40ec70d45e1d4f64bb0270230
|
/gginfo/spiders/daqing.py
|
d327d0b1270864c5de7cdea55ff29c35b56150c6
|
[] |
no_license
|
logonmy/Data-Crawler
|
332bb1aa6f1abecd8249c48585f22a6cc44a4f17
|
6d862f0617356accde5260766ff0251336157529
|
refs/heads/master
| 2022-09-01T00:33:29.500311
| 2020-05-27T16:03:54
| 2020-05-27T16:03:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,934
|
py
|
# -*- coding: utf-8 -*-
import scrapy
import bs4
import re
from gginfo import items
class DaqingSpider(scrapy.Spider):
name = 'daqing'
# allowed_domains = ['http://www.dqsbwg.com/view.asp?id=']
# start_urls = ['http://http://www.dqsbwg.com/view.asp?id=/']
def start_requests(self):
start_page = 50
end_page = 110
base_url = "http://www.dqsbwg.com/view.asp?id="
count = 0
for i in range(start_page, end_page):
url = base_url + str(i)
count += 1
yield scrapy.Request(url=url, callback=self.parse, meta={'id': count})
def parse(self, response):
        if response.status == 404:  # response.status is an integer status code
return
Items = items.GginfoItem()
Items['id'] = 37
name = response.css('body > table:nth-child(3) > tr > td > table > tr > td:nth-child(2) > table > tr:nth-child(2) > td > div > table > tr > td > table > tr > td > table:nth-child(1) > tr:nth-child(2) > td > font > b::text').extract()
if (len(name) == 0):
return
Items['name'] = str(name[0]).strip()
pic = response.css('body > table:nth-child(3) > tr > td > table > tr > td:nth-child(2) > table > tr:nth-child(2) > td > div > table > tr > td > table > tr > td > table:nth-child(1) > tr:nth-child(3) > td > div > p > img::attr(src)').extract()
if (len(pic) == 0):
return
base_url = "https://www.wmhg.com.cn"
url = base_url + str(pic[0]).strip()
Items['pic'] = url
# text = response.css('body > div.x-container > div > div.section1 > div > div.slick-cont > div *::text').extract()
# if (len(text) == 0):
Items['text'] = ""
# else:
# s = ""
# for item in text:
# s += str(str(item).strip()).replace('\xa0', '')
# Items['text'] = s
# if Items['text'] == "":
# return
yield Items
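# A minimal way to run this spider from a plain Python process instead of the
# `scrapy crawl daqing` CLI (a sketch: CrawlerProcess is standard Scrapy API,
# and the LOG_LEVEL setting shown here is just an example).
if __name__ == '__main__':
    from scrapy.crawler import CrawlerProcess
    process = CrawlerProcess(settings={'LOG_LEVEL': 'INFO'})
    process.crawl(DaqingSpider)   # schedule the spider defined above
    process.start()               # block until crawling finishes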
|
[
"1012668100@qq.comgit commit -m Collectionexitgit config --global user.name x-coder-Lgit config --global user.email 1012668100@qq.com"
] |
1012668100@qq.comgit commit -m Collectionexitgit config --global user.name x-coder-Lgit config --global user.email 1012668100@qq.com
|
bd006143c55c546974bbb237ad7d8a80d54be85f
|
850370ad34b40bc3332e8954a79798568331a4a4
|
/Number Guess.py
|
182cf760d7a1f763174ba413eb962d26ea6041de
|
[] |
no_license
|
romil-rc/Game-of-Guess
|
d91e323002a77ca342f5a755d06662679c4d2a3d
|
d3faa7166e918ad21a801f8e8b641dcbcccb8767
|
refs/heads/master
| 2023-03-05T10:51:08.910733
| 2021-02-16T13:12:00
| 2021-02-16T13:12:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 887
|
py
|
# Set a key number.
# Set number of guesses.
# Take a number input from user.
# Print, is the number smaller or greater than the key.
# Print, number of guesses left.
# If number of guesses is over print "game over".
key = 63
chance = 0
guess = 10
print("Game of Guess")
while(chance < guess):
num = int(input("Enter a number : "))
if (num>key):
chance = chance + 1
if(chance < guess):
print("Enter smaller number.")
print("Chances left : ", guess - chance)
else:
print("Game over")
continue
elif (num<key):
chance = chance + 1
if(chance < guess):
print("Enter greater number.")
print("Chances left : ", guess - chance)
else:
print("Game over.")
continue
else:
print("You won the game.")
break
chance = chance + 1
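# A function-based variant of the loop above (a sketch, defined but not called):
# the key and the number of guesses become parameters, and the duplicated
# greater/smaller branches collapse into one hint message.
def play_guess(key=63, guesses=10):
    print("Game of Guess")
    for chance in range(guesses):
        num = int(input("Enter a number : "))
        if num == key:
            print("You won the game.")
            return True
        hint = "smaller" if num > key else "greater"
        print("Enter " + hint + " number.")
        print("Chances left : ", guesses - chance - 1)
    print("Game over.")
    return False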
|
[
"itssaahil208@gmail.com"
] |
itssaahil208@gmail.com
|
22d62da0dd7376888e5e477d9481a578a6638fb8
|
eaeb5e71b0b5b03e4cb8d60ca206160a7bdc3488
|
/Inventory/store_app/urls.py
|
6a5d5548d308aa7dff06c734890b2b3e8195a728
|
[] |
no_license
|
aonchicken/Dev
|
a15d79c8399fb63eb376b57c4bb2f49a73a250d6
|
6fe3510d1fe1b877376afa21d082f1230c025b35
|
refs/heads/master
| 2022-10-22T02:03:06.641148
| 2018-07-19T10:02:49
| 2018-07-19T10:02:49
| 141,110,191
| 0
| 1
| null | 2022-10-10T16:12:27
| 2018-07-16T08:32:44
|
Python
|
UTF-8
|
Python
| false
| false
| 3,246
|
py
|
#!python
# log/urls.py
from django.conf.urls import url
from . import views
#2pdf
#from store_app.views import GeneratePdf
#from django_pdfkit import PDFView
#log in
from django.contrib.auth.decorators import login_required
#from easy_pdf.views import PDFTemplateView
#from .views import PDF_View
from django.template import Context, loader
#from .views import PDFTemplateView
#from .models import Product
#from wkhtmltopdf.views import PDFTemplateView
#from django_pdfkit.views import PDFView
#product = Product.objects.get(id=1)
'''context = {
'amount': 39.99,
'customer_name': 'Cooper Mann',
'order_id': 'เอกสารใบส่งมอบสินค้า',
'name': 'TTTTT',#product.device_name,
}'''
#context = Context({'amount': 39.99,})
context = {
'date' : '24 พฤษภาคม 2561',
'customer_tel' : '0859078578',
'customer_address' : 'Bankok',
'no': 1,
'contract_no': 'สอ.2/2561',
'customer_name': 'สมมติ ขึ้นมา',
'detail_pd': 'Router 892w ',
'staff_name': 'รัฐกานต์ บันที',
'count_pd' : 1,
'key_pd': '-',
'serial_pd' : 84003446789236,
'ref' : '-',
'note' : '1กล่อง',
'range': range(1,15+1),
}
# We are adding a URL called /home
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^product/(?P<id>[0-9]+)/', views.detail, name='detail'),#localhost:8000/product/10/
url(r'^addnew/', views.addnew, name="addnew"),
url(r'^edit/(?P<id>[0-9]+)/', views.edit, name="edit"),
url(r'^document/', views.document, name="document"),
url(r'^doc_pdf/', views.doc_pdf, name="doc_pdf"),
url(r'^my-pdf/', views.doc_pdf, name="my-pdf"),
url(r'^display/', views.display, name="display"),
#url(r'^pdf/', PDFTemplateView.as_view(filename='doc_pdf.pdf',template_name='doc_pdf.html'),name='pdf'),4
url(r'^pdf/$', views.PDFTemplateView.as_view( cmd_options = {
'page-size': 'A4',
'margin-top': 80,
'margin-bottom': 90,
#'footer-right': 'Page [page] of [toPage]',
#'margin-right' => 5,
#'margin-left' => 5,
#'orientation' => 'Landscape',
#'footer-center' => 'Page [page] of [toPage]',
#'footer-font-size' => 8,
#'footer-left' => 'Confidential'
},template_name='content.html',header_template='header.html',footer_template='footer.html',show_content_in_browser = True), name='pdf'),
url(r'^mail/', views.mail,name="mail"),
#url(r'^pdf/$', PDFTemplateView.as_view(template_name='content.html',show_content_in_browser = True), name='pdf'),
url(r'^pdf-inline/', views.PDFView.as_view(inline=True, template_name='doc_pdf.html'), name='pdf-inline'),
#url(r'^pdf-filename/', PDFView.as_view(filename='foo.pdf', template_name='doc_pdf.html'), name='pdf-filename'),
url(r'^upload2csv/', views.upload, name='uplink'),
url(r'^import/', views.import_data, name="import"),
url(r'^handson_view/', views.handson_table, name="handson_view"),
]
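# The named routes above can be resolved programmatically with reverse(); a small
# sketch (defined only, never called here, since it needs a fully configured Django
# project, and the id value 10 is just an example).
def _example_reverse_urls():
    from django.urls import reverse
    detail_url = reverse('detail', args=[10])   # e.g. '/product/10/' when included at the project root
    pdf_url = reverse('pdf')                    # e.g. '/pdf/'
    return detail_url, pdf_url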
|
[
"aonchicken@gmail.com"
] |
aonchicken@gmail.com
|
da88233a14db93263b4ee9e6ad8f520f0e6afa9c
|
b52e27c68a2a2b03c39c0698fde0acbd988f52a5
|
/test4.py
|
9e70c57fa08eabd6f98a82ee81bf496991f2f25c
|
[] |
no_license
|
nmcnwr/python_test
|
fae0b3e6ee981abce5a43ae26fe590f4083aa6e9
|
52ac4522c7d6d022254aeb348fdf82c4ea302dc9
|
refs/heads/master
| 2021-07-23T17:09:17.116262
| 2020-05-28T08:17:04
| 2020-05-28T08:17:04
| 178,802,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
filename ="FILE1.txt"
print("Filename: ",filename)
file = open(filename, "r")
for line in file:
line = line.replace("\n", "")
print(line)
file.close()
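# An equivalent version using a context manager (a sketch with the same hard-coded
# file name), so the file is closed even if reading raises an exception.
def print_file(name="FILE1.txt"):
    print("Filename: ", name)
    with open(name, "r") as f:
        for row in f:
            print(row.rstrip("\n"))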
|
[
"pavel.b.kuznetsov@gmail.com"
] |
pavel.b.kuznetsov@gmail.com
|
0609c654e9cd175e77f91139ee849e861fc9e1e0
|
2f831410a345fe44b385eb4453a3777150db3eea
|
/retriever/dense_retriever.py
|
84bf96056114fb8411e26e5e5b79b5173ef087e4
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
michaelmccracken90/Quin
|
89a2b5e7796440cfe72fb0af01b4963674b469c2
|
461af08123757840fcc6cbbd08e7c2862a28ebbd
|
refs/heads/master
| 2023-02-17T12:45:55.230655
| 2021-01-10T11:43:40
| 2021-01-10T11:43:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,614
|
py
|
import logging
import pickle
import torch
from .vector_index import VectorIndex
class DenseRetriever:
def __init__(self, model, batch_size=16):
self.model = model
self.vector_index = VectorIndex(768)
self.batch_size = batch_size
self.use_gpu = torch.cuda.is_available()
def create_index_from_documents(self, documents):
logging.info('Building index...')
self.vector_index.vectors = self.model.encode(documents, batch_size=self.batch_size)
self.vector_index.build(self.use_gpu)
logging.info('Built index')
def create_index_from_vectors(self, vectors_path):
logging.info('Building index...')
self.vector_index.vectors = pickle.load(open(vectors_path, 'rb'))
self.vector_index.build(self.use_gpu)
logging.info('Built index')
def search(self, queries, limit=1000, probes=512, min_similarity=0):
query_vectors = self.model.encode(queries, batch_size=self.batch_size)
ids, similarities = self.vector_index.search(query_vectors, k=limit, probes=probes)
results = []
for j in range(len(ids)):
results.append([
(ids[j][i], similarities[j][i]) for i in range(len(ids[j])) if similarities[j][i] > min_similarity
])
return results
def load_index(self, path):
self.vector_index.load(path)
def save_index(self, index_path='', vectors_path=''):
if vectors_path != '':
self.vector_index.save_vectors(vectors_path)
if index_path != '':
self.vector_index.save(index_path)
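# Minimal usage sketch (defined only, never executed on import). Any encoder object
# with an encode(texts, batch_size=...) method returning 768-dimensional vectors fits
# the constructor; the sentence-transformers model named below is an assumption for
# illustration, not a requirement of this class.
def _dense_retriever_example():
    from sentence_transformers import SentenceTransformer
    model = SentenceTransformer('bert-base-nli-mean-tokens')       # 768-dim sentence embeddings
    retriever = DenseRetriever(model, batch_size=16)
    retriever.create_index_from_documents(['first document', 'second document'])
    results = retriever.search(['example query'], limit=10, min_similarity=0.3)
    return results   # one list of (doc_id, similarity) pairs per query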
|
[
"algoprog@users.noreply.github.com"
] |
algoprog@users.noreply.github.com
|
3bce62061ed8d284331b3570797e87de8c2eefc3
|
503e97b5c0bb77e923fe135ff14a8b5ca5e6ba07
|
/mxshop/mxshop/urls.py
|
ff2b7608058cef6c3f850164773fe310905e259e
|
[] |
no_license
|
pshyms/dianshang-1
|
72345de3ce769efeb2b17c975b586590524dcdbe
|
788b7950f52cb7979a8b73e5d9193243f2e69cad
|
refs/heads/master
| 2021-05-21T10:11:38.248170
| 2020-04-03T06:25:13
| 2020-04-03T06:25:13
| 252,649,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
"""mxshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
[
"1816635208@qq.com"
] |
1816635208@qq.com
|
04d569ac26879a082158c2acffd50077389bf00c
|
ae323b6e8407576249c4f1300c2261f63ee57610
|
/bootstrap.py
|
57adc1466694c5178b4fdc1782e6600609583191
|
[
"MIT"
] |
permissive
|
silx-kit/dynamix
|
b3e22ccaebeef35c97604a2188e8790e55241ce2
|
445a85b331278097a0c997dfecd73c39dc8f1afd
|
refs/heads/master
| 2023-08-08T03:23:01.963283
| 2021-09-13T15:14:37
| 2021-09-13T15:14:37
| 191,756,780
| 3
| 7
|
MIT
| 2023-06-29T13:53:58
| 2019-06-13T12:15:12
|
Python
|
UTF-8
|
Python
| false
| false
| 9,058
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bootstrap helps you to test scripts without installing them
by patching your PYTHONPATH on the fly
example: ./bootstrap.py ipython
"""
__authors__ = ["Frédéric-Emmanuel Picca", "Jérôme Kieffer"]
__contact__ = "jerome.kieffer@esrf.eu"
__license__ = "MIT"
__date__ = "05/09/2019"
import sys
import os
import distutils.util
import subprocess
import logging
import collections
from argparse import ArgumentParser
logging.basicConfig()
logger = logging.getLogger("bootstrap")
def is_debug_python():
"""Returns true if the Python interpreter is in debug mode."""
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
if sysconfig.get_config_var("Py_DEBUG"):
return True
return hasattr(sys, "gettotalrefcount")
def _distutils_dir_name(dname="lib"):
"""
Returns the name of a distutils build directory
"""
platform = distutils.util.get_platform()
architecture = "%s.%s-%i.%i" % (dname, platform,
sys.version_info[0], sys.version_info[1])
if is_debug_python():
architecture += "-pydebug"
return architecture
def _distutils_scripts_name():
"""Return the name of the distrutils scripts sirectory"""
f = "scripts-{version[0]}.{version[1]}"
return f.format(version=sys.version_info)
def _get_available_scripts(path):
res = []
try:
res = " ".join([s.rstrip('.py') for s in os.listdir(path)])
except OSError:
res = ["no script available, did you ran "
"'python setup.py build' before bootstrapping ?"]
return res
if sys.version_info[0] >= 3: # Python3
def execfile(fullpath, globals=None, locals=None):
"Python3 implementation for execfile"
with open(fullpath) as f:
try:
data = f.read()
except UnicodeDecodeError:
raise SyntaxError("Not a Python script")
code = compile(data, fullpath, 'exec')
exec(code, globals, locals)
def run_file(filename, argv):
"""
Execute a script trying first to use execfile, then a subprocess
:param str filename: Script to execute
:param list[str] argv: Arguments passed to the filename
"""
full_args = [filename]
full_args.extend(argv)
try:
logger.info("Execute target using exec")
# execfile is considered as a local call.
# Providing globals() as locals will force to feed the file into
# globals() (for examples imports).
# Without this any function call from the executed file loses imports
try:
old_argv = sys.argv
sys.argv = full_args
logger.info("Patch the sys.argv: %s", sys.argv)
logger.info("Executing %s.main()", filename)
print("########### EXECFILE ###########")
module_globals = globals().copy()
module_globals['__file__'] = filename
execfile(filename, module_globals, module_globals)
finally:
sys.argv = old_argv
except SyntaxError as error:
logger.error(error)
logger.info("Execute target using subprocess")
env = os.environ.copy()
env.update({"PYTHONPATH": LIBPATH + os.pathsep + os.environ.get("PYTHONPATH", ""),
"PATH": os.environ.get("PATH", "")})
print("########### SUBPROCESS ###########")
run = subprocess.Popen(full_args, shell=False, env=env)
run.wait()
def run_entry_point(entry_point, argv):
"""
Execute an entry_point using the current python context
(http://setuptools.readthedocs.io/en/latest/setuptools.html#automatic-script-creation)
:param str entry_point: A string identifying a function from a module
(NAME = PACKAGE.MODULE:FUNCTION [EXTRA])
"""
import importlib
elements = entry_point.split("=")
target_name = elements[0].strip()
elements = elements[1].split(":")
module_name = elements[0].strip()
# Take care of entry_point optional "extra" requirements declaration
function_name = elements[1].split()[0].strip()
logger.info("Execute target %s (function %s from module %s) using importlib", target_name, function_name, module_name)
full_args = [target_name]
full_args.extend(argv)
try:
old_argv = sys.argv
sys.argv = full_args
print("########### IMPORTLIB ###########")
module = importlib.import_module(module_name)
if hasattr(module, function_name):
func = getattr(module, function_name)
func()
else:
logger.info("Function %s not found", function_name)
finally:
sys.argv = old_argv
def find_executable(target):
"""Find a filename from a script name.
- Check the script name as file path,
- Then checks if the name is a target of the setup.py
- Then search the script from the PATH environment variable.
:param str target: Name of the script
:returns: Returns a tuple: kind, name.
"""
if os.path.isfile(target):
return ("path", os.path.abspath(target))
# search the file from setup.py
import setup
config = setup.get_project_configuration(dry_run=True)
# scripts from project configuration
if "scripts" in config:
for script_name in config["scripts"]:
if os.path.basename(script_name) == target:
return ("path", os.path.abspath(script_name))
# entry-points from project configuration
if "entry_points" in config:
for kind in config["entry_points"]:
for entry_point in config["entry_points"][kind]:
elements = entry_point.split("=")
name = elements[0].strip()
if name == target:
return ("entry_point", entry_point)
# search the file from env PATH
for dirname in os.environ.get("PATH", "").split(os.pathsep):
path = os.path.join(dirname, target)
if os.path.isfile(path):
return ("path", path)
return None, None
def main(argv):
parser = ArgumentParser(prog="bootstrap", usage="./bootstrap.py <script>",
description=__doc__)
parser.add_argument("script", nargs="*")
parser.add_argument("-m", help="run library module as a script (terminates option list)")
Options = collections.namedtuple("Options", ["script", "module"])
if len(argv) == 1:
options = Options(script=None, module=None)
else:
if argv[1] in ["-h", "--help"]:
parser.print_help()
return
if argv[1] == "-m":
if len(argv) < 3:
parser.parse_args(argv[1:])
return
options = Options(script=None, module=argv[2:])
else:
options = Options(script=argv[1:], module=None)
if options.script is not None:
logger.info("Executing %s from source checkout", options.script)
script = options.script[0]
argv = options.script[1:]
kind, target = find_executable(script)
if kind == "path":
run_file(target, argv)
elif kind == "entry_point":
run_entry_point(target, argv)
else:
logger.error("Script %s not found", options.script)
elif options.module is not None:
logging.info("Running module %s", options.module)
import runpy
module = options.module[0]
try:
old = sys.argv
sys.argv = [None] + options.module[1:]
runpy.run_module(module, run_name="__main__", alter_sys=True)
finally:
sys.argv = old
else:
logging.info("Running IPython by default")
logger.info("Patch the sys.argv: %s", sys.argv)
sys.path.insert(2, "")
try:
from IPython import start_ipython
except Exception as err:
logger.error("Unable to execute iPython, using normal Python")
logger.error(err)
import code
code.interact()
else:
start_ipython(argv=[])
if __name__ == "__main__":
home = os.path.dirname(os.path.abspath(__file__))
LIBPATH = os.path.join(home, 'build', _distutils_dir_name('lib'))
cwd = os.getcwd()
os.chdir(home)
build = subprocess.Popen([sys.executable, "setup.py", "build"],
shell=False, cwd=os.path.dirname(os.path.abspath(__file__)))
build_rc = build.wait()
if not os.path.exists(LIBPATH):
logger.warning("`lib` directory does not exist, trying common Python3 lib")
LIBPATH = os.path.join(os.path.split(LIBPATH)[0], "lib")
os.chdir(cwd)
if build_rc == 0:
logger.info("Build process ended.")
else:
logger.error("Build process ended with rc=%s", build_rc)
sys.exit(-1)
sys.path.insert(0, LIBPATH)
logger.info("Patched sys.path with %s", LIBPATH)
main(sys.argv)
|
[
"jerome.kieffer@esrf.fr"
] |
jerome.kieffer@esrf.fr
|
01acfe330f914e60e16edd354e09a95e5f455717
|
c887464a73c249f3a6bc4e344c001724a46d2bd2
|
/web_app/views/misc.py
|
cd26cb069637e94c4b08058e2edc18da001b3a39
|
[
"Beerware"
] |
permissive
|
erikdeluca/beer-analytics
|
004017229fa32574bbf58d703023c455d9a57e69
|
630cfb1dcd409a1b449a54a99aa9b3f73da0f756
|
refs/heads/main
| 2023-04-26T20:18:22.794454
| 2021-03-06T18:45:17
| 2021-03-06T18:45:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,834
|
py
|
from django.http import HttpResponse, HttpRequest
from django.shortcuts import render
from django.urls import reverse
from recipe_db.models import Recipe, Style, Hop, Fermentable, Yeast
from web_app.charts.fermentable import FermentableChartFactory
from web_app.charts.hop import HopChartFactory
from web_app.charts.style import StyleChartFactory
from web_app.charts.yeast import YeastChartFactory
from web_app.meta import PageMeta, HomeMeta
def home(request: HttpRequest) -> HttpResponse:
recipes = Recipe.objects.count()
meta = HomeMeta().get_meta()
return render(request, 'index.html', {'recipes': recipes, 'meta': meta})
def legal(request: HttpRequest) -> HttpResponse:
meta = PageMeta.create('Legal', 'Legal information about Beer Analytics', url=reverse('legal'))
meta.extra_props = {'robots': 'noindex'}
return render(request, 'legal.html', {'meta': meta})
def about(request: HttpRequest) -> HttpResponse:
recipes = Recipe.objects.count()
meta = PageMeta.create('About', url=reverse('about'))
return render(request, 'about.html', {'recipes': recipes, 'meta': meta})
def sitemap(request: HttpRequest) -> HttpResponse:
styles = Style.objects.filter(recipes_count__gt=0)
hops = Hop.objects.filter(recipes_count__gt=0)
fermentables = Fermentable.objects.filter(recipes_count__gt=0)
yeasts = Yeast.objects.filter(recipes_count__gt=0)
return render(request, 'sitemap.xml', {
'styles': styles,
'hops': hops,
'fermentables': fermentables,
'yeasts': yeasts,
'style_chart_types': StyleChartFactory.get_types(),
'hop_chart_types': HopChartFactory.get_types(),
'fermentable_chart_types': FermentableChartFactory.get_types(),
'yeast_chart_types': YeastChartFactory.get_types(),
}, content_type='text/xml')
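# A hedged sketch of how these views might be wired into a urls.py; the 'legal'
# and 'about' route names match the reverse() calls above, while the module
# path, the remaining names and the URL patterns are illustrative assumptions:
#
#     from django.urls import path
#     from web_app.views import misc
#
#     urlpatterns = [
#         path('', misc.home, name='home'),
#         path('legal/', misc.legal, name='legal'),
#         path('about/', misc.about, name='about'),
#         path('sitemap.xml', misc.sitemap, name='sitemap'),
#     ]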
|
[
"privat@stylemotion.de"
] |
privat@stylemotion.de
|
3c2c8682ab6ffc62a2a322782250bbe1881dc6b3
|
5fc9cad39efe2eb4020d604540d3adc38a89114a
|
/Restanta/NLP/Lab4/venv/Scripts/easy_install-3.6-script.py
|
b3ec84eafe341f92b593dab5a84d306ca45ff789
|
[] |
no_license
|
daneel95/Master_Homework
|
bf16db69366fe09140c5cdf71c5e98c875611d79
|
7987341c78a6572342b6823ff610d9222d8c3b62
|
refs/heads/master
| 2022-12-27T04:38:18.358573
| 2020-10-08T12:23:20
| 2020-10-08T12:23:20
| 156,966,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
#!C:\Users\Daniel\Desktop\Master_Homework\Restanta\NLP\Lab4\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.6')()
)
|
[
"holteiu.daniel@gmail.com"
] |
holteiu.daniel@gmail.com
|
858ac55278eb1e921054886e184330cd3adb1dcc
|
a454671bb5df86f99496a20ad0dd0648617cd2cb
|
/orders/migrations/0001_initial.py
|
cb04c20c429c6aacaed6b91bf73ea6e9e01433e4
|
[] |
no_license
|
ppcs50/project3
|
7a86d6d34682fc29a887b94dae975feb0f1a91ae
|
27909fa6b48325c026e7913b604e1333469bdc5e
|
refs/heads/master
| 2020-03-25T03:40:38.637545
| 2018-08-02T23:20:04
| 2018-08-02T23:20:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,436
|
py
|
# Generated by Django 2.0.7 on 2018-07-29 21:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(auto_now_add=True, verbose_name='Date')),
('time', models.TimeField(auto_now_add=True, verbose_name='Time')),
],
),
migrations.CreateModel(
name='Dinplat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
],
),
migrations.CreateModel(
name='Pizza',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
],
),
migrations.CreateModel(
name='SaladPasta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
],
),
migrations.CreateModel(
name='Size',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=2)),
],
),
migrations.CreateModel(
name='Style',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
],
),
migrations.CreateModel(
name='Sub',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('size', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sub_size', to='orders.Size')),
],
),
migrations.CreateModel(
name='Topping',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
],
),
migrations.CreateModel(
name='Type',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=2)),
],
),
migrations.AddField(
model_name='sub',
name='topping',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sub_topping', to='orders.Topping'),
),
migrations.AddField(
model_name='pizza',
name='size',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pizza_size', to='orders.Size'),
),
migrations.AddField(
model_name='pizza',
name='style',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pizza_style', to='orders.Style'),
),
migrations.AddField(
model_name='pizza',
name='topping',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pizza_topping', to='orders.Topping'),
),
migrations.AddField(
model_name='pizza',
name='type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pizza_type', to='orders.Type'),
),
migrations.AddField(
model_name='dinplat',
name='size',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dinnerplatter_size', to='orders.Size'),
),
migrations.AddField(
model_name='cart',
name='dinplat',
field=models.ManyToManyField(blank=True, related_name='dinnerplatter_order', to='orders.Dinplat'),
),
migrations.AddField(
model_name='cart',
name='pizza',
field=models.ManyToManyField(blank=True, related_name='pizza_order', to='orders.Pizza'),
),
migrations.AddField(
model_name='cart',
name='saladpast',
field=models.ManyToManyField(blank=True, related_name='saladpast_order', to='orders.SaladPasta'),
),
migrations.AddField(
model_name='cart',
name='sub',
field=models.ManyToManyField(blank=True, related_name='sub_order', to='orders.Sub'),
),
]
|
[
"eg60125@meiji.ac.jp"
] |
eg60125@meiji.ac.jp
|
37e328f83c46f0f167e1ed7dec7853f184aed7db
|
376120dc62be6868331e77e6fcd9b3f44c0f247b
|
/main_app/migrations/0001_initial.py
|
fe895754b3dc3ae26ab7bfea39c75bcd366ade53
|
[] |
no_license
|
kevinka58/cardcollector
|
279a57ad9f7731aef9b28545fa9629dba9da62c7
|
d9395dc53c17ac88abf183ec3371a58e6dd40502
|
refs/heads/main
| 2023-07-05T21:33:12.695174
| 2021-08-18T23:31:11
| 2021-08-18T23:31:11
| 395,808,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
# Generated by Django 3.2.6 on 2021-08-16 18:06
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Card',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
]
|
[
"kevinayala6969@gmail.com"
] |
kevinayala6969@gmail.com
|
1f250ec8288fb8e6ae009a8ed472731dd54824ca
|
466e5e56d2f350bcea90683af67e160138af836c
|
/Onsite/Week-4/Monday/Pancake V.2.py
|
5366d063ab28ea1aa0f95f78a73ddb562d2f9689
|
[] |
no_license
|
max180643/Pre-Programming-61
|
bafbb7ed3069cda5c2e64cf1de590dfb4a542273
|
e68d4a69ffeedd4269fffc64b9b81e845a10da4d
|
refs/heads/master
| 2021-06-17T14:10:44.889814
| 2019-08-01T09:35:42
| 2019-08-01T09:35:42
| 135,553,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 932
|
py
|
"""Pancake V.2"""
def main():
"""Main Function"""
    promo_buy = int(input())  # buy this many to qualify for the promotion
    promo_give = int(input())  # number of free pieces given
    want = int(input())  # number of pieces wanted
    price = int(input())  # price per piece
    pack = promo_buy + promo_give  # pieces per promotional pack
    buypack = want // pack  # number of full packs to buy
    other = want - (buypack * pack)  # leftover pieces after buying full packs
    if other >= promo_buy:  # if the leftover reaches the promotion threshold
buypack += 1
other = 0
get = (pack * buypack) + other
pay = ((promo_buy * buypack) + other) * price
# Output
print("Pay: %i" % (pay))
print("Get: %i" % (get))
main()
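# Worked example (hypothetical input): with a "buy 3 get 1 free" promotion,
# 10 pieces wanted at a price of 5 per piece, the script buys 2 full packs
# (8 pieces) plus 2 extra pieces, so it prints "Pay: 40" and "Get: 10".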
|
[
"noreply@github.com"
] |
max180643.noreply@github.com
|
621bedf5d75156e92bf5e1ec10226d82735cf03c
|
801268d9ff5b2e74986646c5469017ba98e2368f
|
/examples/information_extraction/DuIE/data_loader.py
|
71554cfc47c9cbef94a5766477c0049f511bcd52
|
[
"Apache-2.0"
] |
permissive
|
Amy-l-iu/PaddleNLP
|
1e3f699b7b2ecc1d7600afad19c721652a5eb18f
|
cbcb958eb561550f38224b5b51cf027dd891f4cc
|
refs/heads/develop
| 2023-05-13T21:34:18.886738
| 2021-06-10T08:49:53
| 2021-06-10T08:49:53
| 375,710,949
| 1
| 0
|
Apache-2.0
| 2021-06-10T13:40:02
| 2021-06-10T13:40:02
| null |
UTF-8
|
Python
| false
| false
| 13,221
|
py
|
# Copyright (c) 2021 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import os
from typing import Optional, List, Union, Dict
from dataclasses import dataclass
import numpy as np
import paddle
from tqdm import tqdm
from paddlenlp.transformers import ErnieTokenizer
from paddlenlp.utils.log import logger
from extract_chinese_and_punct import ChineseAndPunctuationExtractor
InputFeature = collections.namedtuple("InputFeature", [
"input_ids", "seq_len", "tok_to_orig_start_index", "tok_to_orig_end_index",
"labels"
])
def parse_label(spo_list, label_map, tokens, tokenizer):
# 2 tags for each predicate + I tag + O tag
num_labels = 2 * (len(label_map.keys()) - 2) + 2
seq_len = len(tokens)
# initialize tag
labels = [[0] * num_labels for i in range(seq_len)]
# find all entities and tag them with corresponding "B"/"I" labels
for spo in spo_list:
for spo_object in spo['object'].keys():
# assign relation label
if spo['predicate'] in label_map.keys():
# simple relation
label_subject = label_map[spo['predicate']]
label_object = label_subject + 55
subject_tokens = tokenizer._tokenize(spo['subject'])
object_tokens = tokenizer._tokenize(spo['object']['@value'])
else:
# complex relation
label_subject = label_map[spo['predicate'] + '_' + spo_object]
label_object = label_subject + 55
subject_tokens = tokenizer._tokenize(spo['subject'])
object_tokens = tokenizer._tokenize(spo['object'][spo_object])
subject_tokens_len = len(subject_tokens)
object_tokens_len = len(object_tokens)
# assign token label
# there are situations where s entity and o entity might overlap, e.g. xyz established xyz corporation
# to prevent single token from being labeled into two different entity
# we tag the longer entity first, then match the shorter entity within the rest text
forbidden_index = None
if subject_tokens_len > object_tokens_len:
for index in range(seq_len - subject_tokens_len + 1):
if tokens[index:index +
subject_tokens_len] == subject_tokens:
labels[index][label_subject] = 1
for i in range(subject_tokens_len - 1):
labels[index + i + 1][1] = 1
forbidden_index = index
break
for index in range(seq_len - object_tokens_len + 1):
if tokens[index:index + object_tokens_len] == object_tokens:
if forbidden_index is None:
labels[index][label_object] = 1
for i in range(object_tokens_len - 1):
labels[index + i + 1][1] = 1
break
# check if labeled already
elif index < forbidden_index or index >= forbidden_index + len(
subject_tokens):
labels[index][label_object] = 1
for i in range(object_tokens_len - 1):
labels[index + i + 1][1] = 1
break
else:
for index in range(seq_len - object_tokens_len + 1):
if tokens[index:index + object_tokens_len] == object_tokens:
labels[index][label_object] = 1
for i in range(object_tokens_len - 1):
labels[index + i + 1][1] = 1
forbidden_index = index
break
for index in range(seq_len - subject_tokens_len + 1):
if tokens[index:index +
subject_tokens_len] == subject_tokens:
if forbidden_index is None:
labels[index][label_subject] = 1
for i in range(subject_tokens_len - 1):
labels[index + i + 1][1] = 1
break
elif index < forbidden_index or index >= forbidden_index + len(
object_tokens):
labels[index][label_subject] = 1
for i in range(subject_tokens_len - 1):
labels[index + i + 1][1] = 1
break
# if token wasn't assigned as any "B"/"I" tag, give it an "O" tag for outside
for i in range(seq_len):
if labels[i] == [0] * num_labels:
labels[i][0] = 1
return labels
def convert_example_to_feature(
example,
tokenizer: ErnieTokenizer,
chineseandpunctuationextractor: ChineseAndPunctuationExtractor,
label_map,
max_length: Optional[int]=512,
pad_to_max_length: Optional[bool]=None):
spo_list = example['spo_list'] if "spo_list" in example.keys() else None
text_raw = example['text']
sub_text = []
buff = ""
for char in text_raw:
if chineseandpunctuationextractor.is_chinese_or_punct(char):
if buff != "":
sub_text.append(buff)
buff = ""
sub_text.append(char)
else:
buff += char
if buff != "":
sub_text.append(buff)
tok_to_orig_start_index = []
tok_to_orig_end_index = []
orig_to_tok_index = []
tokens = []
text_tmp = ''
for (i, token) in enumerate(sub_text):
orig_to_tok_index.append(len(tokens))
sub_tokens = tokenizer._tokenize(token)
text_tmp += token
for sub_token in sub_tokens:
tok_to_orig_start_index.append(len(text_tmp) - len(token))
tok_to_orig_end_index.append(len(text_tmp) - 1)
tokens.append(sub_token)
if len(tokens) >= max_length - 2:
break
else:
continue
break
seq_len = len(tokens)
# 2 tags for each predicate + I tag + O tag
num_labels = 2 * (len(label_map.keys()) - 2) + 2
# initialize tag
labels = [[0] * num_labels for i in range(seq_len)]
if spo_list is not None:
labels = parse_label(spo_list, label_map, tokens, tokenizer)
# add [CLS] and [SEP] token, they are tagged into "O" for outside
if seq_len > max_length - 2:
tokens = tokens[0:(max_length - 2)]
labels = labels[0:(max_length - 2)]
tok_to_orig_start_index = tok_to_orig_start_index[0:(max_length - 2)]
tok_to_orig_end_index = tok_to_orig_end_index[0:(max_length - 2)]
tokens = ["[CLS]"] + tokens + ["[SEP]"]
# "O" tag for [PAD], [CLS], [SEP] token
outside_label = [[1] + [0] * (num_labels - 1)]
labels = outside_label + labels + outside_label
tok_to_orig_start_index = [-1] + tok_to_orig_start_index + [-1]
tok_to_orig_end_index = [-1] + tok_to_orig_end_index + [-1]
if seq_len < max_length:
tokens = tokens + ["[PAD]"] * (max_length - seq_len - 2)
labels = labels + outside_label * (max_length - len(labels))
tok_to_orig_start_index = tok_to_orig_start_index + [-1] * (
max_length - len(tok_to_orig_start_index))
tok_to_orig_end_index = tok_to_orig_end_index + [-1] * (
max_length - len(tok_to_orig_end_index))
token_ids = tokenizer.convert_tokens_to_ids(tokens)
return InputFeature(
input_ids=np.array(token_ids),
seq_len=np.array(seq_len),
tok_to_orig_start_index=np.array(tok_to_orig_start_index),
tok_to_orig_end_index=np.array(tok_to_orig_end_index),
labels=np.array(labels), )
class DuIEDataset(paddle.io.Dataset):
"""
Dataset of DuIE.
"""
def __init__(
self,
input_ids: List[Union[List[int], np.ndarray]],
seq_lens: List[Union[List[int], np.ndarray]],
tok_to_orig_start_index: List[Union[List[int], np.ndarray]],
tok_to_orig_end_index: List[Union[List[int], np.ndarray]],
labels: List[Union[List[int], np.ndarray, List[str], List[Dict]]]):
super(DuIEDataset, self).__init__()
self.input_ids = input_ids
self.seq_lens = seq_lens
self.tok_to_orig_start_index = tok_to_orig_start_index
self.tok_to_orig_end_index = tok_to_orig_end_index
self.labels = labels
def __len__(self):
if isinstance(self.input_ids, np.ndarray):
return self.input_ids.shape[0]
else:
return len(self.input_ids)
def __getitem__(self, item):
return {
"input_ids": np.array(self.input_ids[item]),
"seq_lens": np.array(self.seq_lens[item]),
"tok_to_orig_start_index":
np.array(self.tok_to_orig_start_index[item]),
"tok_to_orig_end_index": np.array(self.tok_to_orig_end_index[item]),
# If model inputs is generated in `collate_fn`, delete the data type casting.
"labels": np.array(
self.labels[item], dtype=np.float32),
}
@classmethod
def from_file(cls,
file_path: Union[str, os.PathLike],
tokenizer: ErnieTokenizer,
max_length: Optional[int]=512,
pad_to_max_length: Optional[bool]=None):
assert os.path.exists(file_path) and os.path.isfile(
file_path), f"{file_path} dose not exists or is not a file."
label_map_path = os.path.join(
os.path.dirname(file_path), "predicate2id.json")
assert os.path.exists(label_map_path) and os.path.isfile(
label_map_path
), f"{label_map_path} dose not exists or is not a file."
with open(label_map_path, 'r', encoding='utf8') as fp:
label_map = json.load(fp)
chineseandpunctuationextractor = ChineseAndPunctuationExtractor()
input_ids, seq_lens, tok_to_orig_start_index, tok_to_orig_end_index, labels = (
[] for _ in range(5))
dataset_scale = sum(1 for line in open(file_path, 'r'))
logger.info("Preprocessing data, loaded from %s" % file_path)
with open(file_path, "r", encoding="utf-8") as fp:
lines = fp.readlines()
for line in tqdm(lines):
example = json.loads(line)
input_feature = convert_example_to_feature(
example, tokenizer, chineseandpunctuationextractor,
label_map, max_length, pad_to_max_length)
input_ids.append(input_feature.input_ids)
seq_lens.append(input_feature.seq_len)
tok_to_orig_start_index.append(
input_feature.tok_to_orig_start_index)
tok_to_orig_end_index.append(
input_feature.tok_to_orig_end_index)
labels.append(input_feature.labels)
return cls(input_ids, seq_lens, tok_to_orig_start_index,
tok_to_orig_end_index, labels)
@dataclass
class DataCollator:
"""
Collator for DuIE.
"""
def __call__(self, examples: List[Dict[str, Union[list, np.ndarray]]]):
batched_input_ids = np.stack([x['input_ids'] for x in examples])
seq_lens = np.stack([x['seq_lens'] for x in examples])
tok_to_orig_start_index = np.stack(
[x['tok_to_orig_start_index'] for x in examples])
tok_to_orig_end_index = np.stack(
[x['tok_to_orig_end_index'] for x in examples])
labels = np.stack([x['labels'] for x in examples])
return (batched_input_ids, seq_lens, tok_to_orig_start_index,
tok_to_orig_end_index, labels)
if __name__ == "__main__":
tokenizer = ErnieTokenizer.from_pretrained("ernie-1.0")
d = DuIEDataset.from_file("./data/train_data.json", tokenizer)
sampler = paddle.io.RandomSampler(data_source=d)
batch_sampler = paddle.io.BatchSampler(sampler=sampler, batch_size=2)
collator = DataCollator()
loader = paddle.io.DataLoader(
dataset=d,
batch_sampler=batch_sampler,
collate_fn=collator,
return_list=True)
for dd in loader():
model_input = {
"input_ids": dd[0],
"seq_len": dd[1],
"tok_to_orig_start_index": dd[2],
"tok_to_orig_end_index": dd[3],
"labels": dd[4]
}
print(model_input)
|
[
"noreply@github.com"
] |
Amy-l-iu.noreply@github.com
|
d9196cc3a173854616ff24c78864a891732ea5fb
|
dc3755709936a2ad4ac7e8f4dba87ee545ae1e12
|
/detector.py
|
b20d1ea1a4efaae513717be042c0963a24d7cb3d
|
[] |
no_license
|
umeshkh-Official/Python-Project-Facerecognition-system-
|
f8174a49c2832889bc84236e7f3f9b40da534d19
|
4b46cf1970fac1d42c18ccfef969cf065b205c2e
|
refs/heads/master
| 2022-03-26T03:16:10.234549
| 2019-12-14T06:28:17
| 2019-12-14T06:28:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,258
|
py
|
import cv2
from tkinter import messagebox
import mysql.connector
import numpy as np
from datetime import datetime
now=datetime.now()
formatted_date= now.strftime('%Y-%m-%d %H:%M:%S')
def getProfile(id):
conn = mysql.connector.connect(host="127.0.0.1", user="root", password="tiger",database="employee")
cmd="SELECT *FROM emp WHERE ID=" +str(id)
mycur = conn.cursor()
mycur.execute(cmd)
q=mycur
profile= None
for row in q:
profile=row
conn.close()
return profile
def facedetecting():
faceDetect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml');
cam = cv2.VideoCapture(0)
rec = cv2.face.LBPHFaceRecognizer_create();
rec.read("trainner//training.yml")
id=0
font = cv2.FONT_HERSHEY_SIMPLEX
while True:
ret, img = cam.read();
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = faceDetect.detectMultiScale(gray,1.3,5);
for(x,y,w,h) in faces:
cv2.rectangle(img, (x + 2, y + 2), (x + w + 2, y + h + 2), (255, 0, 0), 2)
id,conf = rec.predict(gray[y:y+h, x:x+w])
profile=getProfile(id)
#NAME = NameFind.ID2Name(id)
cv2.putText(img, str(id), (x + 5, y + h +30), font, 1, (255, 255, 255), 1)
#cv2.putText(img, str(NAME), (x + 5, y + h + 60), font, 1, (255, 255, 255), 1)
cv2.putText(img, str(profile[1]), (x + 5, y + h + 60), font, 1, (255, 255, 255), 1)
cv2.putText(img, str(profile[2]), (x + 5, y + h + 90), font, 1, (255, 255, 255), 1)
cv2.imshow('camera', img)
conn = mysql.connector.connect(host="127.0.0.1", user="root", password="tiger", database="employee")
try:
cmd2 = "INSERT INTO attend(id,name,datatime) VALUES("+str(id)+",'"+profile[1]+"','"+formatted_date+"')"
mycur = conn.cursor()
mycur.execute(cmd2)
conn.commit()
conn.close()
except Exception:
print("duplicate entry")
        if cv2.waitKey(10) & 0xFF == ord('q'):  # Press 'q' to exit the video
break
cam.release()
cv2.destroyAllWindows()
facedetecting()
|
[
"noreply@github.com"
] |
umeshkh-Official.noreply@github.com
|
a0e49acc8c730929c378924c601b41a8d7cf4485
|
a0602756643d613fb35bccc6f364a729beffd1d6
|
/pipeline/edges/EdgeDetectionTemplateMatching.py
|
dd97b93886cd1245218eb34aa5783236736f50dc
|
[
"MIT"
] |
permissive
|
sunsided/CarND-Advanced-Lane-Lines
|
02ec7f73ae9813050b098a91c2509eb0ee26d48c
|
9692cf242f6d531fe37dca9ec462c632f1bcf832
|
refs/heads/master
| 2020-03-23T00:35:03.777640
| 2018-09-03T07:57:12
| 2018-09-03T07:57:12
| 140,871,683
| 1
| 0
| null | 2018-07-13T16:50:21
| 2018-07-13T16:50:21
| null |
UTF-8
|
Python
| false
| false
| 2,342
|
py
|
import glob
import os
from concurrent.futures import ThreadPoolExecutor
import cv2
import numpy as np
from typing import Optional
from pipeline.edges.EdgeDetectionBase import EdgeDetectionBase
class EdgeDetectionTemplateMatching(EdgeDetectionBase):
"""
    Obtains edges for further processing.
"""
def __init__(self, path: str, workers: int = 8, mask: Optional[np.ndarray] = None, detect_lines: bool = False):
"""
Initializes a new instance of the EdgeDetection class.
"""
super().__init__(detect_lines, detect_lines=detect_lines)
self._negatives = [np.float32(cv2.imread(path, cv2.IMREAD_GRAYSCALE)) / 255
for path in glob.glob(os.path.join(path, '**', 'negative-*.png'), recursive=True)]
self._positives = [np.float32(cv2.imread(path, cv2.IMREAD_GRAYSCALE)) / 255
for path in glob.glob(os.path.join(path, '**', 'positive-*.png'), recursive=True)]
self._roi_mask = mask
self._pe = ThreadPoolExecutor(max_workers=workers)
self._mode = cv2.TM_CCOEFF
def filter(self, img: np.ndarray) -> np.ndarray:
"""
Filters the specified image.
:param img: The image to obtain masks from.
:return: The pre-filtered image.
"""
gray = img
gray = cv2.GaussianBlur(gray, (3, 3), 0)
mode = self._mode
def process(template):
m = cv2.matchTemplate(gray, template, mode)
m[m < 0] = 0
return m
pos_matched = self._pe.map(process, self._positives)
neg_matched = self._pe.map(process, self._negatives)
pos_sum = np.zeros_like(gray)
for result in pos_matched:
pos_sum[8:745 + 8, 8:285 + 8] += result
pos_sum /= len(self._positives)
neg_sum = np.zeros_like(gray)
for result in neg_matched:
neg_sum[8:745 + 8, 8:285 + 8] += result
neg_sum /= len(self._negatives)
mask = (1 - neg_sum) * pos_sum
mask[mask < 0] = 0
mask = cv2.normalize(mask, 1, cv2.NORM_MINMAX)
mask = cv2.GaussianBlur(mask, (5, 5), 0)
mask[mask < 0.05] = 0
mask = cv2.normalize(mask, 1, cv2.NORM_MINMAX)
if self._roi_mask is not None:
mask *= self._roi_mask
return mask
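# A minimal usage sketch (illustrative only: the template directory and the
# input frame path are hypothetical, and the frame is converted to the same
# float32 grayscale format as the loaded templates):
#
#     detector = EdgeDetectionTemplateMatching(path="templates", workers=4)
#     frame = np.float32(cv2.imread("frame.png", cv2.IMREAD_GRAYSCALE)) / 255
#     edge_mask = detector.filter(frame)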
|
[
"widemeadows@gmail.com"
] |
widemeadows@gmail.com
|
682abb270f0c3d71384d9f1422f488a142fd684f
|
3e5d9f0cd81a6d60002819a1fda56007f407a614
|
/ProjetosimulacaoDiscreta/listaFinal/questao01a.py
|
439b6f4aebe27e00a712cbe4e1763c0478f25dfd
|
[] |
no_license
|
anderson89marques/simulacaoDiscreta
|
b305258766bbbe5f66e65a333a84528a7e2e9a64
|
849b042a89271447ec38dd881effa9b2e27e0083
|
refs/heads/master
| 2021-01-15T18:03:49.983979
| 2014-12-02T02:28:39
| 2014-12-02T02:28:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
__author__ = 'andersonmarques'
from random import randint
experimentos = 10000
cont = 0
for x in range(experimentos):
dado1 = randint(1, 6)
dado2 = randint(1, 6)
dado3 = randint(1, 6)
if 9 <= dado1 + dado2 + dado3 <= 15:
cont += 1
print("Probabilidade: %.4f" % (cont/experimentos))
|
[
"Andersonoanjo18@gmail.com"
] |
Andersonoanjo18@gmail.com
|
6209d6c126116ec3ac5e03684a649997b76d1a92
|
81a29997e5ad3ff6194b961aaa60ad262b590458
|
/tests/test_url_generating.py
|
0ba2c0fcbf217bab1202f39b12bbcdff9585ee95
|
[
"MIT"
] |
permissive
|
maxzhenzhera/python-freeDictionaryAPI
|
71143679aeba49b770d51a2f802c69bf22763773
|
556415527835ef6e559756f6ed9fa5da387ceb3c
|
refs/heads/main
| 2023-05-06T08:41:56.001247
| 2021-05-24T02:38:52
| 2021-05-24T02:38:52
| 365,132,988
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,713
|
py
|
"""
Contains tests for API url generation.
.. class:: TestApiUrlGeneration
"""
import typing
import pytest
from freedictionaryapi.languages import LanguageCodes
from freedictionaryapi.urls import ApiUrl
class TestApiUrlGeneration:
"""
Contains tests for
* API url generator (``ApiUrl``).
Checking that API url generator
correctly init instance and generate url.
"""
# fixtures ---------------------------------------------------------------------------------------------------------
@pytest.fixture(name='data_list', scope='class')
def fixture_data_list(self) -> typing.List[dict]:
""" Get ``list`` of ``dict`` that contains arguments for ``ApiUrl`` instances """
data_list = [
{
'word': 'hello',
'language_code': LanguageCodes.ENGLISH_US
},
{
'word': 'Olá',
'language_code': LanguageCodes.BRAZILIAN_PORTUGUESE
}
]
return data_list
# tests ------------------------------------------------------------------------------------------------------------
def test_error_raising_on_empty_word(self):
empty_word = ' '
with pytest.raises(ValueError) as raised_error:
_ = ApiUrl(empty_word)
def test_generated_url_with_some_data(self, data_list: typing.List[dict]):
for data in data_list:
word = data['word']
language = data['language_code']
fact_url = f'https://api.dictionaryapi.dev/api/v2/entries/{language.value}/{word.strip()}'
expected_url = ApiUrl(**data).get_url()
assert expected_url == fact_url
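# A minimal non-test usage sketch (mirrors the fixture data above; the word is
# illustrative):
#
#     url = ApiUrl(word='hello', language_code=LanguageCodes.ENGLISH_US).get_url()
#     # -> 'https://api.dictionaryapi.dev/api/v2/entries/<language code>/hello'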
|
[
"megashurik@urk.net"
] |
megashurik@urk.net
|
ddd3e8c757d55624ec472097294af1b6b986fb9f
|
9c91119fe567df359d83c1453148967ce9830304
|
/deprecated/scripts/vaders-mc2xml/vaders-mc2xml.py
|
c9465eb14b16fb53bbc46dbe69f7552f08cb1ea9
|
[
"MIT"
] |
permissive
|
oottppxx/enigma2
|
f2425646b756272f6515bafcfac583b0f7613dcd
|
2ceb21a787f6a656985e2aa1cd5ea537e08bff30
|
refs/heads/master
| 2022-11-08T09:45:21.037355
| 2022-11-01T00:16:35
| 2022-11-01T00:16:35
| 130,276,423
| 24
| 33
|
MIT
| 2022-04-13T18:42:33
| 2018-04-19T21:52:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,762
|
py
|
#!/usr/bin/python
import json
import re
import sys
import time
import urllib2
import zlib
VAPI_MC_SCHED='http://vapi.vaders.tv/mc/schedule?username=%(USER)s&password=%(PWD)s'
TIME_FMT='%Y-%m-%d %H:%M:%S'
def getJsonURL(url):
request = urllib2.Request(url)
request.add_header('User-Agent', 'MC2XML script @oottppxx')
request.add_header('Accept-Encoding', 'gzip')
response = urllib2.urlopen(request)
gzipped = response.info().get('Content-Encoding') == 'gzip'
data = ''
dec_obj = zlib.decompressobj(16+zlib.MAX_WBITS)
while True:
res_data = response.read()
if not res_data:
break
if gzipped:
data += dec_obj.decompress(res_data)
else:
data += res_data
return json.loads(data)
if len(sys.argv) < 4:
print "Usage: %s <user> <pass> <offset>" % sys.argv[0]
sys.exit(-1)
mc = getJsonURL(VAPI_MC_SCHED % {'USER': sys.argv[1], 'PWD': sys.argv[2]})
offset = sys.argv[3]
channels = {}
programmes = []
if mc:
for program in mc:
cat = str(program['category']['name'])
logo = str(program['category']['logo'])
title = program['title'].encode('ascii', 'replace')
start = re.sub('[^0-9]', '', str(program['startTime']).split('+')[0])
stop = re.sub('[^0-9]', '', str(program['endTime']).split('+')[0])
for stream in program['streams']:
sid = str(stream['id']) + '.vaders.tv'
sname = str(stream['name'])
ptitle = title
if "1080" in sname:
ptitle += ' [1080]'
channels[sid] = sname
programmes.append({'START': start, 'STOP': stop, 'CHANNEL': sid,
'TITLE': ptitle, 'CAT': cat, 'LOGO': logo})
else:
print "No info!"
sys.exit(0)
HEADER="""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE tv SYSTEM "xmltv.dtd">
<tv source-info-url="http://vaders.tv" source-info-name="VS" generator-info-name="VS" generator-info-url="http://www.xmltv.org/">"""
TRAILER=""" </tv>
"""
CHANNEL_TMPL=""" <channel id="%(ID)s">
<display-name>%(SDN)s</display-name>
<icon src="none" />
</channel>"""
PROGRAMME_TMPL=""" <programme start="%(START)s%(OFFSET)s" stop="%(STOP)s%(OFFSET)s" channel="%(ID)s">
<title lang="en">%(TITLE)s</title>
<category lang="en">%(CAT)s</category>
<category lang="en">Sports</category>
<icon src="%(LOGO)s"/>
</programme>"""
print HEADER
for channel_id, channel_name in channels.iteritems():
print CHANNEL_TMPL % {'ID': channel_id, 'SDN': channel_name}
for p in programmes:
print PROGRAMME_TMPL % {'START': p['START'], 'STOP': p['STOP'], 'ID': p['CHANNEL'],
'TITLE': p['TITLE'], 'CAT': p['CAT'], 'LOGO': p['LOGO'],
'OFFSET': offset}
print TRAILER
|
[
"@oottppxx"
] |
@oottppxx
|
2d64c3d2dcb4340084edf32824852cf49b1bb2d1
|
11c30d4e8418c0ecae1131af2868d9ddaa58e7e6
|
/Exercício 114.py
|
e7732565a9a8fa21ba6e85e255cc02fcafb4f885
|
[] |
no_license
|
FabianoJanisch/CursoEmVideo-Python
|
6a99453cdf891086450fe05416318347ef8f4da2
|
b3e238256af11cb48013bd57eb1815698f41142c
|
refs/heads/main
| 2023-02-19T07:31:45.048125
| 2021-01-18T19:34:12
| 2021-01-18T19:34:12
| 330,767,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
import urllib
import urllib.request
try:
    online = urllib.request.urlopen('http://www.pudim.com.br')
except urllib.error.URLError:
    print('The site is not online')
else:
    print('The site is online')
|
[
"fabianoaugustojanisch@hotmail.com"
] |
fabianoaugustojanisch@hotmail.com
|
752ccdcaaecb2bf67fcdec7cb60e92757e101071
|
bc20be4024b159c47c12780f8840c47ed991b449
|
/setup.py
|
4742049596cbe85ecd278a05db7cdd66eb49acbd
|
[] |
no_license
|
jhrdinka/FCChhAnalyses
|
704c827798571a24d65c1ef3a164f39b652ee189
|
b47f355910633267d926e571e3cac4a3345238c3
|
refs/heads/master
| 2020-05-06T13:43:42.557528
| 2019-03-26T15:39:04
| 2019-03-26T15:39:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
#!/usr/bin/env python2
from setuptools import setup, find_packages
import glob
setup(name='FCChhAnalyses',
version='0.1.0',
description='Produce flat ROOT trees using FCCSW EDM in heppy',
author='Clement Helsens',
author_email='clement.helsens@cern.ch',
url='https://github.com/HEP-FCC/FCChhAnalyses',
requires=['heppy', 'ROOT'], # heppy is called heppyfwk if installed with pip
packages=find_packages(),
package_dir={"FCChhAnalyses": "../FCChhAnalyses"},
scripts=glob.glob('scripts/*')
)
|
[
"javier.cervantes.villanueva@cern.ch"
] |
javier.cervantes.villanueva@cern.ch
|
3bf025ca6b569587910ee9310692ef856bc9759e
|
c6431cdf572dd10f0f4d45839e6081124b246f90
|
/code/lc179.py
|
3aaa4820cd2d067a5c93a9bd7ec62299708b2b99
|
[] |
no_license
|
bendanwwww/myleetcode
|
1ec0285ea19a213bc629e0e12fb8748146e26d3d
|
427846d2ad1578135ef92fd6549235f104f68998
|
refs/heads/master
| 2021-09-27T19:36:40.111456
| 2021-09-24T03:11:32
| 2021-09-24T03:11:32
| 232,493,899
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
"""
Given a list of non-negative integers, rearrange them so that they form the largest possible number.
Example 1:
Input: [10,2]
Output: 210
Example 2:
Input: [3,30,34,5,9]
Output: 9534330
Note: the result may be very large, so return a string instead of an integer.
"""
class Solution(object):
def largestNumber(self, nums):
res = ''
for i in range(len(nums)):
for x in range(i + 1, len(nums)):
if self.compare(str(nums[i]), str(nums[x])):
tmp = nums[i]
nums[i] = nums[x]
nums[x] = tmp
if nums[len(nums) - 1] == 0:
return "0"
for i in range(len(nums) - 1, -1, -1):
res+= str(nums[i])
return res
def compare(self, a, b):
if int(a + b) > int(b + a):
return True
else:
return False
s = Solution()
res = s.largestNumber([12, 121])
print(res)
|
[
"461806307@qq.com"
] |
461806307@qq.com
|
5dc9e6d9ca09f4d108701e06c92e18d3768a1d0a
|
251e7002f2ecc218447c0602cb5e02920adfb548
|
/python/sitemonitor/lib/splunked.py
|
102b5f1d83f0715dd0d6f60eb4a54be6725a378c
|
[] |
no_license
|
useEvil/site-monitor
|
d0018bfccdaa38acb42d61384410114f94d613b2
|
392412797f61ebddd3855030777e1f935e780147
|
refs/heads/master
| 2020-05-29T17:20:32.759687
| 2012-10-29T17:32:47
| 2012-10-29T17:32:47
| 4,162,859
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,404
|
py
|
"""The Splunk Controller API
Provides the Splunk class for subclassing.
"""
import time
import splunk
from splunk import auth, search
from pylons import config
HOST = config.get('splunk.host')
splunk.mergeHostPath(HOST, True)
class Splunk:
# first get the session key
# (the method will automatically cache during the interactive session)
auth.getSessionKey('admin','changeme')
def searchSplunk(self):
# /////////////////////////////////////////////////////////////////////////////
# Scenario 1: do a simple search for all web server logs
# /////////////////////////////////////////////////////////////////////////////
# start search
job = search.dispatch('search index="coherence" host="*hou" source="coherence_gc_log" sourcetype="garbagecollection" | timechart max(gctime) by host')
# at this point, Splunk is running the search in the background; how long it
# takes depends on how much data is indexed, and the scope of the search
#
# from this point, we explore some of the things you can do:
#
#
# Option A: return all of the matched events
# this will stream events back until the last event is reached
# for event in job:
# print event
# Option B: just return the host field all of the matched events
# for event in job:
# print event['host']
# Option C: return specific events
        # wait until the job has completed before trying to access arbitrary indices
while not job.isDone:
time.sleep(1)
# print the total number of matched events
print len(job)
print job.count
# print the second event (remember that python is 0-indexed)
print job[1]
# print the first 10
for event in job[0:10]:
print event
# print the last 5
for event in job[:-5]:
print event
# clean up
job.cancel()
def searchSplunkSummarize(self):
# /////////////////////////////////////////////////////////////////////////////
# Scenario 2: do a search for all web server logs and summarize
# /////////////////////////////////////////////////////////////////////////////
# start search
job = search.dispatch('search sourcetype="access_combined" | timechart count')
# the 'job' object has 2 distinct result containers: 'events' and 'results'
# 'events' contains the data in a non-transformed manner
# 'results' contains the data that is post-transformed, i.e. after being
# processed by the 'timechart' operator
# wait for search to complete, and make the results available
while not job.isDone:
time.sleep(1)
# print out the results
for result in job.results:
print result
# because we used the 'timechart' operator, the previous loop will output a
# compacted string; to get at a native dictionary of fields:
for result in job.results:
print result.fields # prints a standard python str() of a dict object
# or, if we just want the raw events
# for event in job.events:
# print event
# print event.time # returns a datetime.datetime object
# clean up
job.cancel()
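# A minimal usage sketch (assumes a reachable Splunk instance at the pylons
# 'splunk.host' setting and the credentials hard-coded in the class body):
#
#     s = Splunk()
#     s.searchSplunk()
#     s.searchSplunkSummarize()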
|
[
"useEvil@gmail.com"
] |
useEvil@gmail.com
|
e7db557128138182a4636941585056d2d674a53e
|
b55aa5b0642a19a62acb996d3c5a1e87cd1b9d63
|
/model/generate_ai_customer_service.py
|
35e1c7cdeb41e2692779fc86b99971952dbd3542
|
[] |
no_license
|
Innerface/qarobot
|
b01913c034770dca9d75d81a853b30e09edd8a4c
|
ed51d09621d567e16d3d15c79696028d3059d7ee
|
refs/heads/master
| 2023-02-24T03:32:58.905942
| 2018-05-11T07:52:08
| 2018-05-11T07:52:08
| 133,007,690
| 1
| 0
| null | 2023-02-15T17:49:39
| 2018-05-11T07:49:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,427
|
py
|
# Author: YuYuE (1019303381@qq.com) 2018.03.16
import re
from itertools import groupby
from extend.data_optimization import keywords_sorted as doks
from model import generate_word_vector as vectormodel
from model import nlp_pinyin_hanzi_transfer as phtransfer
def remove_special_tags(str_):
"""
    Strip special characters and punctuation; optionally configurable.
:param str_:
:return:
"""
r = '[’!"#$%&\'()*+,-./:;<=>??。!¥……【】、,:;‘’”“@[\\]^_`{|}~]+'
result = re.sub(r, '', str_)
return result
def remove_modal_particle(str_):
"""
    Remove Chinese modal particles; optionally configurable.
:param str_:
:return:
"""
modal_particle = ['阿','啊','呃','欸','哇','呀','哦','耶','哟','欤','呕','噢','呦','吧','罢','呗','啵','嘞','哩','咧','咯','啰','喽','吗','嘛','呢','呐','噻','嘢']
for particle in modal_particle:
if str_.find(particle) != -1:
str_ = str_.replace(particle,'')
return str_
def remove_partial_and_special(sent):
sent = remove_special_tags(sent)
sent = remove_modal_particle(sent)
return sent
def replace_synonyms_words(main_sent_set,words,synonyms_words=False):
"""
    Replace synonyms in the word list with their canonical forms.
:param words:
:return:
"""
if words and main_sent_set and synonyms_words:
words_str = ' '.join(words)
main_sent_set_str = ' '.join(main_sent_set)
synonyms = doks.default_synonyms()
if synonyms:
for key in synonyms.keys():
if main_sent_set_str.find(key) != -1 and words_str.find(synonyms[key]) != -1:
words_str = words_str.replace(synonyms[key],key)
words = words_str.split()
return words
def siphon_synonyms_words(main_sent_set):
"""
    Check whether the main sentence set may contain synonyms; more efficient than the previous method.
:param main_sent_set:
:return:
"""
synonyms_words= False
if main_sent_set:
main_sent_set_str = ' '.join(main_sent_set)
synonyms = doks.default_synonyms()
if synonyms:
for key in synonyms.keys():
if main_sent_set_str.find(key) != -1:
synonyms_words = True
return synonyms_words
def groupby_subscript(lst):
"""
    Group consecutive indices together.
:param lst:
:return:
"""
groups = []
fun = lambda x: x[1] - x[0]
for k, g in groupby(enumerate(lst), fun):
groups.append([v for i, v in g])
return groups
def remove_useless_and_correction(inp):
"""
    Remove clutter that is irrelevant to the meaning and correct the Chinese input:
    1. remove redundant punctuation
    2. detect pinyin
    3. convert pinyin to Chinese characters
    4. remove modal particles
:param inp:
:return:
"""
step_one_str = remove_special_tags(inp)
is_with_alphabet = False
inner_alphabet = ''
pos_alphabet = []
i = 0
for vchar in step_one_str:
if phtransfer.is_alphabet(vchar):
is_with_alphabet =True
inner_alphabet += vchar
pos_alphabet.append(i)
i += 1
if is_with_alphabet:
groups = groupby_subscript(pos_alphabet)
if len(groups) > 1:
increase_or_decrease = 0
for group in groups:
item = ''
for index in group:
item += step_one_str[index-increase_or_decrease]
item_to_hanzi = phtransfer.transfer_continue_pinyin_to_hanzi(item)
item_to_hanzi_ = ''.join(item_to_hanzi)
eval_item = vectormodel.words_evaluation(item,item_to_hanzi_)
if eval_item != None and eval_item != item:
step_one_str = step_one_str.replace(item,item_to_hanzi_)
increase_or_decrease = len(item) - len(''.join(item_to_hanzi))
else:
alphabet_to_hanzi = phtransfer.transfer_continue_pinyin_to_hanzi(inner_alphabet)
alphabet_to_hanzi_ = ''.join(alphabet_to_hanzi)
eval_item = vectormodel.words_evaluation(inner_alphabet, alphabet_to_hanzi_)
if eval_item != None and inner_alphabet != eval_item:
step_one_str = step_one_str.replace(inner_alphabet,eval_item)
step_two_str = remove_modal_particle(step_one_str)
return step_two_str
def split_sentence_to_words(inp, method='method', mode='HMM'):
return doks.siphon_keywords_and_sort(inp)
def siphon_keywords_by_tfidf(inp):
return inp
def siphon_ners_by_nlp(inp, keywords, method='method', mode='HMM'):
return inp
def siphon_relations_by_nlp(ners, sent, method='method', mode='HMM'):
return ners
def text_classification(inp, keywords=''):
return inp
def generate_response(inp, keywords='', ners=None, relations=None, type=None):
"""
    Answer extraction module; the basic flow is:
    1. FAQ matching
    2. knowledge graph
    3. documents
    4. internet resources
:param words_split:
:param keywords:
:param ners:
:param relations:
:param type:
:return:
"""
response = faq_search(keywords,inp)
return response
def faq_search(keywords,inp):
return inp
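# A rough, hedged sketch of how these pieces might be chained for one query;
# the question text is illustrative and several downstream steps above are
# still stubs that simply pass their input through:
#
#     question = remove_useless_and_correction("zenme chongzhi mima?")
#     words = split_sentence_to_words(question)
#     keywords = siphon_keywords_by_tfidf(words)
#     answer = generate_response(question, keywords=keywords)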
|
[
"yuy@workway.com.cn"
] |
yuy@workway.com.cn
|
6daf8b4f3c24f4b8b3d3dc70540f2ea7d90a499d
|
773e55fe106852600adff8ff886c4890414e4540
|
/sample.py
|
e01cd32223797f9c1e57ccfb44d68bb075f9e380
|
[] |
no_license
|
gloveboxes/Python-DMX-Client
|
a7702d416b42b9cfa2cef031dbfa79c29f1a1c5b
|
503626826d4e34b9b0cb08ae3b57b8965c646cd1
|
refs/heads/master
| 2020-05-31T21:16:25.863307
| 2019-06-05T12:15:24
| 2019-06-05T12:15:24
| 190,494,080
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
import dmxclient
import time
import random
dmx = dmxclient.DmxClient(dmxServerAddress='dmxserver.local', fixtureIds=[1, 2, 3])
def cycleColours():
for cycles in range(10):
for colour in range(100, 255):
dmx.colours(red=colour)
dmx.publish([1, 3])
dmx.colours(blue=colour)
dmx.publish([2])
time.sleep(0.1)
def cyclePalette():
palette = [[255, 0, 0], [0, 255, 0], [0, 0, 255]]
for cycles in range(100):
for colour in palette:
# dmx.colours(red=colour[0], green=colour[1], blue=colour[2])
# dmx.colours(colour[0], colour[1], colour[2])
# alternatively set colour by property
dmx.red = colour[0]
dmx.green = colour[1]
dmx.blue = colour[2]
dmx.white = 0
dmx.publish()
time.sleep(0.5)
def simple():
dmx.colours(255, 0, 255) # magenta
dmx.publish()
time.sleep(3)
dmx.clear()
dmx.red = 255
dmx.publish(fixtureIds=[2])
time.sleep(3)
def lightsOff():
dmx.clear() # will default to black
dmx.publish() # defaults to all fixtures
simple()
cycleColours()
cyclePalette()
lightsOff()
|
[
"dglover@microsoft.com"
] |
dglover@microsoft.com
|
7c3cd07ebf63dfc48ef91adefa1a240b5a5f9ea2
|
06cff881a57a161763cd9ca532118dd545d5d7e7
|
/src/profiling_command.py
|
6b5b79bdd119944db8ece8ca89c7c7084b7b3974
|
[
"MIT"
] |
permissive
|
chilin0525/modelstat
|
d72d78c66659397dc90a3a13034cbb1d3899ba5f
|
a6db805b48b2a2a064eb41cb1a50ddda0f5ed79d
|
refs/heads/main
| 2023-08-01T14:09:43.136206
| 2021-09-14T08:08:34
| 2021-09-14T08:08:34
| 402,698,086
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
py
|
def generate_command(model_type, gpu_idx, command):
dlprof = "dlprof --reports all" + \
" --force=true" + \
" --mode=" + model_type + \
" --formats=json" + \
" --output_path=log " + command
return dlprof
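# A minimal usage sketch; the model type, GPU index and wrapped training command
# are hypothetical (note that gpu_idx is accepted but not used above).
if __name__ == "__main__":
    cmd = generate_command("pytorch", 0, "python train.py --epochs 1")
    # Expected output:
    # dlprof --reports all --force=true --mode=pytorch --formats=json --output_path=log python train.py --epochs 1
    print(cmd)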
|
[
"sky707070.cv07@nctu.edu.tw"
] |
sky707070.cv07@nctu.edu.tw
|
1a633e6391da83a80c5b9071eb7f440b90a5e856
|
0523c56528465acfb4a4107d3960f85dc23e395e
|
/mod_cycle/r_diagram.py
|
c81636d245eb0c497fd3f5243957b49caaecf0f6
|
[] |
no_license
|
Nathanzhn/GA4
|
46e0eccc1050740c45da70c603e67c60f0b3be85
|
6843fd139ff2355b98eac0ac9cf09aee6fede7cd
|
refs/heads/master
| 2020-05-19T13:43:57.655145
| 2019-06-07T09:30:24
| 2019-06-07T09:30:24
| 185,046,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
import CoolProp.CoolProp as CP
import numpy as NP
def r_diagram():
#ofile = open ('sat.txt','w')
#fluid = input ('Enter fluid name: ')
fluid = 'R134a'
pc = CP.PropsSI(fluid, 'pcrit')
pt = CP.PropsSI(fluid, 'ptriple')
# Create lists of properties on saturation line with GP variation in p
pr = (0.9999 * pc / pt)
pr = NP.power(pr, 0.001)
plt_TT = []
plt_sf = []
plt_sg = []
plt_pp = []
plt_hf = []
plt_hg = []
p = pt
#ofile.write ('\t P (Pa) \t T (deg C) \t sf (kJ/kg.K) \t sg (kJ/kg.K) \t hf (kJ/kg.K) \t hg (kJ/kg.K)\n')
for i in range(1000):
TT = CP.PropsSI('T', 'P', p, 'Q', 0.0, fluid) - 273.15
sf = CP.PropsSI('S', 'P', p, 'Q', 0.0, fluid) / 1000.0
sg = CP.PropsSI('S', 'P', p, 'Q', 1.0, fluid) / 1000.0
hf = CP.PropsSI('H', 'P', p, 'Q', 0.0, fluid) / 1000.0
hg = CP.PropsSI('H', 'P', p, 'Q', 1.0, fluid) / 1000.0
#ofile.write ('\t%10.2f \t%10.2f \t%10.2f \t%10.2f \t%10.2f \t%10.2f \n' % (p,TT,sf,sg,hf,hg))
p = p * pr
plt_TT.append(TT)
plt_sf.append(sf)
plt_sg.append(sg)
plt_pp.append(p)
plt_hf.append(hf)
plt_hg.append(hg)
# ofile.close()
plt_TT = plt_TT[600:]
plt_pp = plt_pp[600:]
plt_sf = plt_sf[600:]
plt_sg = plt_sg[600:]
plt_hf = plt_hf[600:]
plt_hg = plt_hg[600:]
plt_TT += plt_TT[::-1]
plt_pp += plt_pp[::-1]
plt_sfg = plt_sf + plt_sg[::-1]
plt_hfg = plt_hf + plt_hg[::-1]
return plt_pp, plt_hfg, plt_TT, plt_sfg
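# A small, hedged demo of how the returned arrays might be plotted; matplotlib
# is an assumption here (it is not imported by the module above).
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    pp, hfg, TT, sfg = r_diagram()
    fig, (ax_ph, ax_ts) = plt.subplots(1, 2, figsize=(10, 4))
    ax_ph.plot(hfg, pp)             # saturation dome in p-h coordinates
    ax_ph.set_xlabel("h (kJ/kg)")
    ax_ph.set_ylabel("p (Pa)")
    ax_ts.plot(sfg, TT)             # saturation dome in T-s coordinates
    ax_ts.set_xlabel("s (kJ/kg.K)")
    ax_ts.set_ylabel("T (deg C)")
    plt.show()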
|
[
"hz325@cam.ac.uk"
] |
hz325@cam.ac.uk
|
09242a62a09ba5ba8535db7d793e49b3b2eb14a9
|
d2a030f7a050a641fddd657e895651ee0310ae41
|
/givers/migrations/0010_auto_20210909_1153.py
|
d07c21117cc4a4aa04ffcaf388c9d91eb2b085fe
|
[] |
no_license
|
Shawen17/Giveawaynow
|
f052a1055a96f2d0a392aaf748adcafbec2a5135
|
92f3bc0b359a712776661348e239b492894b81a1
|
refs/heads/master
| 2023-09-05T00:28:59.237486
| 2021-10-24T21:12:37
| 2021-10-24T21:12:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
# Generated by Django 3.1.7 on 2021-09-09 10:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('givers', '0009_give_gift_recipient'),
]
operations = [
migrations.RenameField(
model_name='give',
old_name='phone_number',
new_name='giver_number',
),
migrations.RenameField(
model_name='vendor',
old_name='phone_number',
new_name='giver_number',
),
]
|
[
"shawen022@yahoo.com"
] |
shawen022@yahoo.com
|
834ec6a6d8f6eb46399cf5c9441e992f04caf2a3
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_138/1236.py
|
914087508eb86863c5f11d9d67e00191230d743f
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,167
|
py
|
import copy
T = int(raw_input())
for t in range(0,T):
N = int(raw_input())
nb = [float(x) for x in raw_input().split()]
kb = [float(x) for x in raw_input().split()]
nb.sort()
kb.sort()
dkb = copy.deepcopy(kb)
#print nb
#print kb
# deceitful war
dw_p = 0
ln = len(nb)
index = ln - 1;
lost = 0;
while(index >= lost):
for i in range(len(dkb)-1, -1, -1):
if nb[index] > dkb[i]:
dkb.remove(dkb[i])
break;
else:
lost = lost + 1
dkb.remove(dkb[i])
index = index - 1
dw_p = ln - lost
# optimal war
ow_p = 0
index = 0
ln = len(nb)
while(index < ln):
state = False
for i in range(0, len(kb)):
if(nb[index] < kb[i]):
kb.remove(kb[i])
state = True
break;
else:
continue
if not state:
kb.pop(0)
ow_p = ow_p + 1
index = index + 1
print "Case #%d: %d %d" %(t+1, dw_p, ow_p)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
2f4d9a096f13e77f9c74695299c6b3647424473a
|
56b5e0ca548c805973494ed0e2fd06ac2b583827
|
/beast/tools/star_type_probability.py
|
c88ea0d2ad93b9269f7dde4b761b9a5276ac886b
|
[
"BSD-3-Clause"
] |
permissive
|
dthilker/beast
|
1fa4124a5bfdafc0c2225a9b118058adb8324d9f
|
892940813f4b22d545b501cc596c72967d9a45bc
|
refs/heads/master
| 2022-07-01T06:53:14.129362
| 2020-05-04T19:34:37
| 2020-05-04T19:34:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,674
|
py
|
import numpy as np
from collections import defaultdict
from astropy.table import Table
from astropy.io import fits
def star_type_probability(
pdf1d_files,
pdf2d_files,
output_filebase=None,
ext_O_star_params=None,
dusty_agb_params=None,
):
"""
Calculate the probabilities of a set of star types by integrating either the
1D or 2D PDF across the relevant range of parameter values.
Currently does probabilities for these types. See the docstrings of their
respective functions for more details. If the required parameters are not
    present in the PDFs, the corresponding probabilities are returned as NaN.
* extinguished O star (M_ini, Av)
* dusty AGB star (Av, logT)
Note that if more functions for different stellar types are added (please
add more!), `params_to_save` needs to be updated. This variable ensures
that only the necessary parameters are stored in memory.
Parameters
----------
pdf1d_files : string or list of strings
Name of the file(s) with the 1D PDFs. If a list, it's each part of the
subgrid.
pdf2d_files : string or list of strings
Name of the file(s) with the 2D PDFs. If a list, it's in the same
order as the subgrids above.
output_filebase : string (default=None)
Prefix for saving the file of probabilities ("_startype.fits" will be
appended). If None, will return the table rather than saving it.
ext_O_star_params : dict or None
Set to a dictionary to override the default cuts for extinguished early
type stars. Allowed keywords are 'min_M_ini' (float), 'min_Av' (float),
and 'max_Av' (float).
dusty_agb_params : dict or None
Set to a dictionary to override the default cuts for dusty AGB stars
(the high Av failure mode). Allowed keywords are 'min_Av' (float),
'min_logT' (float), and 'max_logT' (float).
Returns
-------
star_prob : dict
if output_filebase is None, a dictionary of probabilities is returned.
"""
# read in the data
# - set up dictionaries to hold PDFs and bins
pdf1d_data = defaultdict(list)
pdf1d_bins = defaultdict(list)
pdf2d_data = defaultdict(list)
pdf2d_bins = defaultdict(list)
# - parameters to save
params_to_save = ['Av', 'M_ini', 'logT']
# - go through each pair of files
for (pdf1d_file, pdf2d_file) in zip(np.atleast_1d(pdf1d_files), np.atleast_1d(pdf2d_files)):
# 1D PDF data
with fits.open(str(pdf1d_file)) as hdu:
for ext in hdu:
# only save the data if the parameter is in params_to_save
if ext.name in params_to_save:
pdf1d_data[ext.name].append(ext.data[:-1,:])
pdf1d_bins[ext.name].append(ext.data[-1,:])
# 2D PDF data
with fits.open(str(pdf2d_file)) as hdu:
for ext in hdu:
# skip extensions without '+'
if '+' not in ext.name:
continue
# break up the name into the two parameters
p1, p2 = ext.name.split('+')
# only save the data if both parameters are in params_to_save
if (p1 in params_to_save) and (p2 in params_to_save):
pdf2d_data[ext.name].append(ext.data[:-2,:,:])
pdf2d_bins[ext.name].append(ext.data[-2:,:,:])
# combine arrays from each file
for key in pdf1d_data:
# check that the bins are the same for all
bin_list = pdf1d_bins[key]
bin_check = [
not np.array_equal(bin_list[i], bin_list[i+1])
for i in range(len(bin_list)-1)
]
if np.sum(bin_check) > 0:
raise ValueError('1D PDF bins not the same for each input file')
# if so, just save the first one
pdf1d_bins[key] = pdf1d_bins[key][0]
# concatenate the PDFs
pdf1d_data[key] = np.concatenate(pdf1d_data[key])
for key in pdf2d_data:
# check that the bins are the same for all
bin_list = pdf2d_bins[key]
bin_check = [
not np.array_equal(bin_list[i], bin_list[i+1])
for i in range(len(bin_list)-1)
]
if np.sum(bin_check) > 0:
raise ValueError('2D PDF bins not the same for each input file')
# if so, just save the first one
pdf2d_bins[key] = pdf2d_bins[key][0]
# concatenate the PDFs
pdf2d_data[key] = np.concatenate(pdf2d_data[key])
# evaluate probabilities of things
star_prob = {}
# - extinguished O star
if ext_O_star_params is None:
star_prob['ext_O_star'] = ext_O_star(pdf2d_data, pdf2d_bins)
else:
star_prob['ext_O_star'] = ext_O_star(pdf2d_data, pdf2d_bins, **ext_O_star_params)
# - dusty AGB star (high Av failure mode)
if dusty_agb_params is None:
star_prob['dusty_agb'] = dusty_agb(pdf2d_data, pdf2d_bins)
else:
star_prob['dusty_agb'] = dusty_agb(pdf2d_data, pdf2d_bins, **dusty_agb_params)
# - other things
# write out the table
if output_filebase is not None:
Table(star_prob).write(output_filebase+"_startype.fits", overwrite=True)
else:
return star_prob
def ext_O_star(pdf2d_data, pdf2d_bins, min_M_ini=10, min_Av=0.5, max_Av=99):
"""
Calculate the probability that each star is an extinguished O star:
* initial mass >= 10 Msun
* A_V >= 0.5 mag
There's a max A_V option to avoid possible high-Av artifacts.
Some useful references for O/B stars
https://ui.adsabs.harvard.edu/abs/2019A%26A...625A.104R/abstract
https://ui.adsabs.harvard.edu/abs/2018A%26A...615A..40R/abstract
https://ui.adsabs.harvard.edu/abs/2018A%26A...609A...7R/abstract
Parameters
----------
pdf2d_data : dict
2D PDF data, each key has an array with shape (n_stars, nbin1, nbin2)
pdf2d_bins : dict
dictionary with corresponding bin values
min_M_ini : float (default=10)
minimum mass (in solar masses)
min_Av : float (default=0.5)
minimum Av (magnitudes)
max_Av : float (default=99)
maximum Av (magnitudes)
Returns
-------
star_prob : array
probability for each star
"""
if 'Av+M_ini' in pdf2d_data.keys():
prob_data = pdf2d_data['Av+M_ini']
av_bins = pdf2d_bins['Av+M_ini'][0,:,:]
mass_bins = pdf2d_bins['Av+M_ini'][1,:,:]
elif 'M_ini+Av' in pdf2d_data.keys():
prob_data = pdf2d_data['M_ini+Av']
av_bins = pdf2d_bins['M_ini+Av'][1,:,:]
mass_bins = pdf2d_bins['M_ini+Av'][0,:,:]
else:
print("2D PDFs don't contain M_ini and Av data")
tot_stars = pdf2d_data[list(pdf2d_data)[0]].shape[0]
return [np.nan] * tot_stars
# reshape the arrays
prob_data = prob_data.reshape(prob_data.shape[0], -1)
av_bins = av_bins.reshape(-1)
mass_bins = mass_bins.reshape(-1)
keep = np.where(
(mass_bins >= min_M_ini) & (av_bins >= min_Av) & (av_bins <= max_Av)
)[0]
return np.sum(prob_data[:,keep], axis=1)
def dusty_agb(pdf2d_data, pdf2d_bins, min_Av=7, min_logT=3.7, max_logT=4.2):
"""
Calculate the probability that each star is a dusty AGB star, using the high
Av failure mode:
* A_V >= 7 mag
* Log T_eff from 3.7 to 4.2
Parameters
----------
pdf2d_data : dict
2D PDF data, each key has an array with shape (n_stars, nbin1, nbin2)
pdf2d_bins : dict
dictionary with corresponding bin values
    min_Av : float (default=7)
minimum Av (magnitudes)
min_logT, max_logT : float (default=3.7, 4.2)
minimum and maximum logT
Returns
-------
star_prob : array
probability for each star
"""
if 'Av+logT' in pdf2d_data.keys():
prob_data = pdf2d_data['Av+logT']
av_bins = pdf2d_bins['Av+logT'][0,:,:]
logT_bins = pdf2d_bins['Av+logT'][1,:,:]
elif 'logT+Av' in pdf2d_data.keys():
prob_data = pdf2d_data['logT+Av']
av_bins = pdf2d_bins['logT+Av'][1,:,:]
logT_bins = pdf2d_bins['logT+Av'][0,:,:]
else:
print("2D PDFs don't contain Av and logT (T_eff) data")
tot_stars = pdf2d_data[list(pdf2d_data)[0]].shape[0]
return [np.nan] * tot_stars
# reshape the arrays
prob_data = prob_data.reshape(prob_data.shape[0], -1)
av_bins = av_bins.reshape(-1)
logT_bins = logT_bins.reshape(-1)
keep = np.where(
(av_bins >= min_Av) &
(logT_bins >= min_logT) &
(logT_bins <= max_logT)
)[0]
return np.sum(prob_data[:,keep], axis=1)
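# A minimal usage sketch (the PDF file names are hypothetical; they should point
# at the 1D and 2D PDF outputs of a BEAST run):
#
#     probs = star_type_probability(
#         pdf1d_files="beast_run_pdf1d.fits",
#         pdf2d_files="beast_run_pdf2d.fits",
#         ext_O_star_params={"min_M_ini": 10, "min_Av": 0.5},
#     )
#     print(probs["ext_O_star"][:10], probs["dusty_agb"][:10])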
|
[
"lhagen@stsci.edu"
] |
lhagen@stsci.edu
|
a694c0f308be0902485e4db6142cc6874788303a
|
d9eafe91bd585f46cc896c5c6704863b00e1a6d7
|
/experiments/2015_02_20_mnist/1/experiment.py
|
3b7a53cda6faa1157774e0ce1d15e3fc1f2e5965
|
[] |
no_license
|
davidbonet/maxwells-daemon
|
b2145412c65a8645261d60509f350736683515a0
|
c947afac67b992676a44c9615edba46fa40531c0
|
refs/heads/master
| 2023-06-15T14:32:14.778557
| 2021-07-13T15:20:58
| 2021-07-13T15:20:58
| 379,681,838
| 0
| 0
| null | 2021-06-23T17:31:15
| 2021-06-23T17:31:14
| null |
UTF-8
|
Python
| false
| false
| 5,765
|
py
|
"""First real experiment - how well do we do on MNIST?"""
import numpy as np
from numpy.linalg import norm
import pickle
from collections import defaultdict
from funkyyak import grad
from maxwell_d.util import RandomState
from maxwell_d.optimizers import entropic_descent2
from maxwell_d.nn_utils import make_nn_funs
from maxwell_d.data import load_data_subset
# ------ Problem parameters -------
layer_sizes = [784, 200, 10]
batch_size = 200
N_train = 10**3
N_tests = 10**3
# ------ Variational parameters -------
seed = 0
init_scale = 1.0
epsilon = 0.1
gamma = 0.1
N_iter = 1000
alpha = 0.1
annealing_schedule = np.linspace(0, 1, N_iter)
# ------ Plot parameters -------
N_samples = 3
N_checkpoints = 10
thin = np.ceil(N_iter/N_checkpoints)
def run():
(train_images, train_labels),\
(tests_images, tests_labels) = load_data_subset(N_train, N_tests)
parser, pred_fun, nllfun, frac_err = make_nn_funs(layer_sizes)
print "Running experiment..."
results = defaultdict(list)
for i in xrange(N_samples):
x_init_scale = np.full(len(parser.vect), init_scale)
def indexed_loss_fun(w, i_iter):
rs = RandomState((seed, i, i_iter))
idxs = rs.randint(N_train, size=batch_size)
return nllfun(w, train_images[idxs], train_labels[idxs])
gradfun = grad(indexed_loss_fun)
def callback(x, t, v, entropy):
results[("entropy", i)].append(entropy)
results[("v_norm", i)].append(norm(v))
results[("minibatch_likelihood", i)].append(-indexed_loss_fun(x, t))
if t % thin == 0 or t == N_iter or t == 0:
results[('iterations', i)].append(t)
results[("train_likelihood", i)].append(-nllfun(x, train_images, train_labels))
results[("tests_likelihood", i)].append(-nllfun(x, tests_images, tests_labels))
results[("tests_error", i)].append(frac_err(x, tests_images, tests_labels))
print "Iteration {0:5i} Train likelihood {1:2.4f} Test likelihood {2:2.4f}" \
" Test Err {3:2.4f}".format(t, results[("train_likelihood", i)][-1],
results[("tests_likelihood", i)][-1],
results[("tests_error", i)][-1])
rs = RandomState((seed, i))
entropic_descent2(gradfun, callback=callback, x_scale=x_init_scale,
epsilon=epsilon, gamma=gamma, alpha=alpha,
annealing_schedule=annealing_schedule, rs=rs)
return results
def estimate_marginal_likelihood(likelihood, entropy):
return likelihood + entropy
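# Hedged note (not in the original): this reads like a variational-inference
# style estimate; the tracked entropy of the sampled weights plus the data
# log-likelihood approximates a lower bound on the log marginal likelihood,
# which is why the two tracked quantities are simply added here.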
def plot():
print "Plotting results..."
import matplotlib.pyplot as plt
with open('results.pkl') as f:
results = pickle.load(f)
fig = plt.figure(0); fig.clf()
ax = fig.add_subplot(211)
for i in xrange(N_samples):
plt.plot(results[("entropy", i)])
ax = fig.add_subplot(212)
plt.plot([np.mean([results[("entropy", i)][t] for i in xrange(N_samples)])
for t in xrange(N_iter)])
plt.savefig("entropy.png")
fig = plt.figure(0); fig.clf()
ax = fig.add_subplot(211)
for i in xrange(N_samples):
plt.plot(results[("v_norm", i)])
ax = fig.add_subplot(212)
plt.plot([np.mean([results[("v_norm", i)][t] for i in xrange(N_samples)])
for t in xrange(N_iter)])
plt.savefig("v_norms.png")
fig = plt.figure(0); fig.clf()
ax = fig.add_subplot(211)
for i in xrange(N_samples):
plt.plot(results[("minibatch_likelihood", i)])
ax = fig.add_subplot(212)
plt.plot([np.mean([results[("minibatch_likelihood", i)][t] for i in xrange(N_samples)]) for t in xrange(N_iter)])
plt.savefig("minibatch_likelihoods.png")
fig = plt.figure(0); fig.clf()
ax = fig.add_subplot(211)
for i in xrange(N_samples):
plt.plot(results[('iterations', i)],
[estimate_marginal_likelihood(results[("train_likelihood", i)][t_ix],
results[("entropy", i)][t])
for t_ix, t in enumerate(results[('iterations', i)])])
ax = fig.add_subplot(212)
plt.plot(results[('iterations', i)],
[np.mean([estimate_marginal_likelihood(results[("train_likelihood", i)][t_ix],
results[("entropy", i)][t])
for i in xrange(N_samples)])
for t_ix, t in enumerate(results[('iterations', 0)])])
plt.savefig("marginal_likelihoods.png")
fig = plt.figure(0); fig.clf()
ax = fig.add_subplot(211)
for i in xrange(N_samples):
plt.plot(results[('iterations', i)],
[results[("tests_likelihood", i)][t] for t in xrange(len(results[('iterations', i)]))],)
ax = fig.add_subplot(212)
plt.plot(results[('iterations', i)],
[np.mean([results[("tests_likelihood", i)][t] for i in xrange(N_samples)])
for t in xrange(len(results[('iterations', 0)]))])
plt.savefig("test_likelihoods.png")
fig = plt.figure(0); fig.clf()
ax = fig.add_subplot(211)
for i in xrange(N_samples):
plt.plot(results[('iterations', i)],
[results[("tests_error", i)][t] for t in xrange(len(results[('iterations', i)]))])
ax = fig.add_subplot(212)
plt.plot(results[('iterations', 0)],
[np.mean([results[("tests_error", i)][t] for i in xrange(N_samples)])
for t in xrange(len(results[('iterations', 0)]))])
plt.savefig("test_errors.png")
if __name__ == '__main__':
results = run()
with open('results.pkl', 'w') as f:
pickle.dump(results, f, 1)
plot()
|
[
"dduvenaud@seas.harvard.edu"
] |
dduvenaud@seas.harvard.edu
|
50e83cdef85a998a42841277e6aeb8552d6c9e4b
|
c7979f4f6435fe8d0d07fff7a430da55e3592aed
|
/ABC035/C.py
|
3896d58a52d5cbdff09b32a7a1e9ace983db5d2b
|
[] |
no_license
|
banboooo044/AtCoder
|
cee87d40bb98abafde19017f4f4e2f984544b9f8
|
7541d521cf0da848ecb5eb10ffea7d75a44cbbb6
|
refs/heads/master
| 2020-04-14T11:35:24.977457
| 2019-09-17T03:20:27
| 2019-09-17T03:20:27
| 163,818,272
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
N,Q = map(int,input().split(" "))
s = 0
for i in range(Q):
    l,r = map(int,input().split(" "))
    # mask whose set bits cover positions l..r (counted 1-based from the left,
    # position p corresponding to bit N-p); XOR toggles those positions
    bit = (1 << (N-l+1)) - (1 << (N-r))
    s ^= bit
ans = ""
# read off the flip parity of each position, left to right
for i in range(N-1,-1,-1):
    if (s >> i) & 1:
        ans += '1'
    else:
        ans += '0'
print(ans)
|
[
"touhoucrisis7@gmail.com"
] |
touhoucrisis7@gmail.com
|
af581fa5b0a483c2f126c59fbcaebea7e27c9491
|
102ad7bc61d20d9ea92ed3c4b872c5748adb81c5
|
/AromaAnnounce/models.py
|
ab639f7d3d4ca889ac5140b76e4c989631054207
|
[] |
no_license
|
neofyte/aroma
|
ae3e53943418d202b32f7af814bb10bc6163848c
|
0cea963fc58848fbf36251770959d4177faf11a7
|
refs/heads/master
| 2021-01-22T10:17:50.180961
| 2013-09-10T14:52:27
| 2013-09-10T14:52:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
from django.db import models
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.dispatch import Signal, receiver
from AromaFriend.models import Relationship
from AromaFriend.signals import relationship_created
class AromaEvent(models.Model):
content = models.CharField(max_length=150)
announcer = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='announcer_to_event', db_index=True)
created_date = models.DateTimeField('时间', auto_now_add=True)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
event = generic.GenericForeignKey()
def __str__(self):
return '{0} @ {1}'.format(self.announcer, self.created_date)
@property
def description(self):
return self.event.description
@classmethod
    def AromaEvent_post_save(cls, sender, instance, *args, **kwargs):
event = AromaEvent(
content = instance.description,
announcer = instance.announcer,
event = instance,
)
event.save()
#relationship_created.connect(AromaEvent_post_save, sender=Relationship)
|
[
"nikegu@gmail.com"
] |
nikegu@gmail.com
|
bd449ec4e27a83d7f61f5a3a0271fb13f1f0645f
|
dd9b64651b7761401e4f8bce1f56a231892a4946
|
/inplace_swap.py
|
e0b0527aa58e84815702c781f2e55a9d1983aa4c
|
[] |
no_license
|
richnakasato/ctci-py
|
793bf33c3e2c5d5f9f597e6574aa32185145a801
|
81e79ef75d8f8f8770b2e13bcd05f2ea3011895b
|
refs/heads/master
| 2020-03-27T08:14:15.918723
| 2019-01-10T02:28:16
| 2019-01-10T02:28:16
| 146,235,144
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 596
|
py
|
'''
Perform swap inplace using only XOR
Takeaways:
- This works because XOR of a^b^a == b (think about finding missing number
from [1, 2, ..., n+1] in arr of size n)
- So... a^b=c, c^a==b, c^b==a (because c==a^b, c^ a or b cancels out a or b)
'''
import random
def inplace_swap(a, b):
print(a, b)
a = a^b # c (aka a^b)
b = a^b # (a^b)^b, b == a
a = a^b # (a^b)^a, from above, (a == b)
print(a, b)
def main():
lo = 1
hi = 100
a = random.randint(lo, hi)
b = random.randint(lo, hi)
inplace_swap(a, b)
if __name__ == "__main__":
main()
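# Note (illustrative addition): in everyday Python the same effect is simply
# `a, b = b, a`; the XOR form only applies to integers and is shown here for
# the bit-twiddling takeaway documented above.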
|
[
"richnakasato@hotmail.com"
] |
richnakasato@hotmail.com
|
71ae0fad47ff3c1ef04664f1dff4da8471053cdd
|
36f35ecfc54ca57ed9a5444242286c3b5905b9de
|
/Python/TriangleQuest.py
|
4cb887412f3fe43efe308f080335bdd7a83d4fb8
|
[
"MIT"
] |
permissive
|
anujitm2007/Hackerrank-Codes
|
7e447e8ba0e76c3ef0fb9da0fc73b3feefc11d3e
|
3afe9d1ec1c3563916a32815e5133f4cbb5234dd
|
refs/heads/master
| 2023-01-04T06:18:10.605095
| 2020-10-28T14:37:01
| 2020-10-28T14:37:01
| 302,840,781
| 0
| 0
|
MIT
| 2020-10-28T14:37:03
| 2020-10-10T07:19:44
| null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
'''
You are given a positive integer N. Print a numerical triangle of height N-1 like the one below:
1
22
333
4444
55555
......
Can you do it using only arithmetic operations, a single for loop and print statement?
Use no more than two lines. The first line (the for statement) is already written for you. You have to complete the print statement.
Note: Using anything related to strings will give a score of 0.
Input Format
A single line containing integer, N.
Constraints
Output Format
Print N-1 lines as explained above.
Sample Input
5
Sample Output
1
22
333
4444
'''
for i in range(1,int(input())):
print((10**(i)//9)*i)
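# Why the formula works (illustrative note): 10**i // 9 is the repunit made of
# i ones (e.g. 10**3 // 9 == 111), so multiplying it by i prints the digit i
# repeated i times for 1 <= i <= 9, without any string operations.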
|
[
"noreply@github.com"
] |
anujitm2007.noreply@github.com
|
495f547d5bada6337f2891daadb8866a8e134d79
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/KQ5H9aFBZDKEJuP6C_21.py
|
fd6ff8f60e7ca560d02d17a9ef74b3d81df4ccad
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 45
|
py
|
import re
pattern = "(?<!good )cookie"
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
e525b56f6061196df2ad2a70f0aee00b6682f319
|
e4288c575d10ad31bb2b7b8217bec2f7fda5a0fd
|
/simbert/models/bert/BertForRanking.py
|
571f6642c093d94f97c9dd203f8d977ef42f9241
|
[] |
no_license
|
serafima-ai/SimBert
|
3f2beb92ced1f6a71933d2435b3ecc7b07668624
|
fada20ceee9ca9559a5f8cee59d631038d197157
|
refs/heads/master
| 2021-03-24T21:44:49.741371
| 2020-05-07T21:57:29
| 2020-05-07T21:57:29
| 247,567,129
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,944
|
py
|
import pytorch_lightning as pl
from dotmap import DotMap
from torch import nn
from torch.utils.data import DataLoader
from simbert.models.lightning import SimbertLightningModule
from simbert.models.model import Model
from transformers import *
import torch
from simbert.datasets.processor import DataProcessor
from simbert.optimizers.optimizer import Optimizer
class BertForRanking(SimbertLightningModule, Model):
def __init__(self, configs: DotMap = DotMap(), *args, **kwargs):
pl.LightningModule.__init__(self, *args, **kwargs)
Model.__init__(self, configs)
self.bert = None
self.num_classes = 0
self.classifier = None
self.DataProcessor = None
self.apply_configs(self.configs)
self.sigmoid = nn.Sigmoid()
if configs is not None:
self.DataProcessor = self.data_processor()
def __bert_model(self):
if self.bert is not None and self.configs.get('bert_model') is None:
return self.bert
return BertModel.from_pretrained(self.configs.get('bert_model', 'bert-base-multilingual-cased'))
def __calculate_classes(self):
if self.num_classes != 0 and self.configs.dataset.processor.features.get('labels') is None:
return self.num_classes
return len(self.configs.dataset.processor.features.labels)
def __classifier(self, num_classes=2):
        num_classes = self.configs.get('num_classes', num_classes)  # look up by key, fall back to the passed default
return nn.Linear(self.bert.config.hidden_size, num_classes)
def data_processor(self):
if self.DataProcessor is not None and self.configs.dataset.get(
'processor') is None or self.configs.dataset.processor.get('data_processor_name') is None:
return self.DataProcessor
return DataProcessor().get(self.configs.dataset.processor.data_processor_name)(
self.configs.dataset.processor)
def new_tokenizer(self):
if self.tokenizer is not None and self.configs.get('tokenizer') is None:
return self.tokenizer
return BertTokenizer.from_pretrained(
self.configs.get('tokenizer', 'bert-base-multilingual-cased'))
def apply_configs(self, configs: DotMap):
Model.apply_configs(self, configs)
self.bert = self.__bert_model()
self.num_classes = self.__calculate_classes()
self.classifier = self.__classifier()
def predict(self, inputs):
examples = []
results = []
for sample in inputs:
query, paragraph = sample
examples.append(InputExample(text_a=query, text_b=paragraph, label=0, guid='prediction'))
features = self.DataProcessor.FeaturesProcessor.convert_examples_to_features(examples,
tokenizer=self.tokenizer)
tokenized = self.DataProcessor.create_tensor_dataset(features)
bert_test_dataloader = DataLoader(tokenized)
for batch in bert_test_dataloader:
input_ids, attention_mask, token_type_ids, label = batch
results.append(
self.forward(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)[0][
0].tolist())
return results
def forward(self, input_ids, attention_mask, token_type_ids):
outputs = self.bert(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids)
pooler_output, attn = outputs[1], outputs[-1]
logits = self.classifier(pooler_output)
sigmoids = self.sigmoid(logits)
return sigmoids, attn
def training_step(self, batch, batch_nb):
# batch
input_ids, attention_mask, token_type_ids, label = batch
# fwd
y_hat, attn = self.forward(input_ids, attention_mask, token_type_ids)
y = torch.zeros(label.shape[0], 2, device='cuda')
y[range(y.shape[0]), label] = 1
# loss
# loss = F.binary_cross_entropy_with_logits(y_hat, y)
loss = self.loss_func(y_hat, label)
# logs
tensorboard_logs = {'train_loss': loss}
return {'loss': loss, 'log': tensorboard_logs}
def validation_step(self, batch, batch_nb):
# batch
input_ids, attention_mask, token_type_ids, label = batch
# fwd
y_hat, attn = self.forward(input_ids, attention_mask, token_type_ids)
y = torch.zeros(label.shape[0], 2, device='cuda')
y[range(y.shape[0]), label] = 1
# print(y_hat,'label',label,'new',y)
# loss
# loss = F.binary_cross_entropy_with_logits(y_hat, y)
# print(loss)
loss = self.loss_func(y_hat, label)
# acc
a, y_hat = torch.max(y_hat, dim=1)
return {**{'val_loss': loss}, **self.calculate_metrics(label.cpu(), y_hat.cpu(), stage='val',
apply=lambda x: torch.tensor(x, dtype=torch.float64))}
def validation_end(self, outputs):
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
avg_metrics = {}
for _, metric in self.metrics.items():
key_name = 'val_' + metric.get_metric_name()
avg_metrics.update({'avg_' + key_name: torch.stack([x[key_name] for x in outputs]).mean()})
tensorboard_logs = {**{'val_loss': avg_loss}, **avg_metrics}
return {'avg_val_loss': avg_loss, 'progress_bar': tensorboard_logs}
def test_step(self, batch, batch_nb):
input_ids, attention_mask, token_type_ids, label = batch
y_hat, attn = self.forward(input_ids, attention_mask, token_type_ids)
a, y_hat = torch.max(y_hat, dim=1)
return self.calculate_metrics(label.cpu(), y_hat.cpu(), stage='test',
apply=lambda x: torch.tensor(x, dtype=torch.float64))
def test_end(self, outputs):
avg_metrics = {}
for _, metric in self.metrics.items():
key_name = 'test_' + metric.get_metric_name()
avg_metrics.update({'avg_' + key_name: torch.stack([x[key_name] for x in outputs]).mean()})
tensorboard_logs = avg_metrics
self.test_results = avg_metrics
return {**avg_metrics, **{'log': tensorboard_logs, 'progress_bar': tensorboard_logs}}
def configure_optimizers(self):
return Optimizer().get(self.configs.optimizer.optimizer_name)(self.configs.optimizer).optimizer(
[p for p in self.parameters() if p.requires_grad])
@pl.data_loader
def train_dataloader(self):
return self.train_dataset
@pl.data_loader
def val_dataloader(self):
return self.val_dataset
@pl.data_loader
def test_dataloader(self):
return self.test_dataset
def train_model(self):
pass
def evaluate_model(self):
pass
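# Hedged sketch (not part of the original file): an illustrative `configs`
# DotMap shaped after the accessors used in the class above; the concrete keys
# and values depend on the simbert config schema and are assumptions here.
#
#     configs = DotMap({
#         'bert_model': 'bert-base-multilingual-cased',
#         'tokenizer': 'bert-base-multilingual-cased',
#         'dataset': {'processor': {'data_processor_name': '<processor name>',
#                                   'features': {'labels': [0, 1]}}},
#         'optimizer': {'optimizer_name': '<optimizer name>'},
#     })
#     model = BertForRanking(configs)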
|
[
"nik@serafima.ai"
] |
nik@serafima.ai
|
281977c6331e0b79883b8bad6d90ca9f317bcded
|
b91a14f051de65ab315d43f4f2c3baadb72de0bb
|
/venv/Desktop/python2021Projects/bin/flask
|
f75d81f5f46ba528503205bd4f916080d99c6b5b
|
[] |
no_license
|
tnak1126/day70-blog
|
807e298c41c934091e49ec8d840355c48d284b5b
|
0f5fc3bb12a8bb96039ad119226fd806503e886d
|
refs/heads/master
| 2023-07-04T00:00:29.916463
| 2021-08-16T19:33:54
| 2021-08-16T19:33:54
| 396,068,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
#!/Volumes/10T_082020/Python100_May2021_projects/day64_movielist/venv/Desktop/python2021Projects/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"terrynakamura@gmail.com"
] |
terrynakamura@gmail.com
|
|
05a3ebcb2a64f2590133f058fc593b972c008a1e
|
1e035ce830ea1b1671efab5c890b899e58f1cace
|
/addbook.py
|
d6205cbdc02f9a71d8b5efd2f2b9b7e4b92d76b5
|
[] |
no_license
|
Shantanu03-ux/Library-Management-System
|
39bb74d8b8371f9c4b957291af1847678852917f
|
21058c53f25a7e895d64aa12ccaa7ad79f666565
|
refs/heads/main
| 2023-08-30T04:14:32.983785
| 2021-10-07T10:23:03
| 2021-10-07T10:23:03
| 414,553,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,912
|
py
|
from tkinter import *
from tkinter import messagebox
import sqlite3
con = sqlite3.connect('library.db')
cur = con.cursor()
class AddBook(Toplevel):
def __init__(self):
Toplevel.__init__(self)
self.geometry("1300x1300")
self.title("Add Book")
self.resizable(False,False)
#Frames
#Top Frame
self.topFrame=Frame(self,height=150,bg='yellow')
self.topFrame.pack(fill=X)
#Bottom Frame
self.bottomFrame=Frame(self,height=600,bg='#fcc324')
self.bottomFrame.pack(fill=X)
#name
self.lbl_name=Label(self.bottomFrame,text=' Name : ',font='arial 15 bold',fg='white',bg='#fcc324')
self.lbl_name.place(x=40,y=40)
self.ent_name=Entry(self.bottomFrame,width=30,bd=4)
self.ent_name.insert(0,'Please enter a book name')
self.ent_name.place(x=150,y=45)
#author
self.lbl_author = Label(self.bottomFrame, text=' Author : ', font='arial 15 bold', fg='white', bg='#fcc324')
self.lbl_author.place(x=40, y=80)
self.ent_author = Entry(self.bottomFrame, width=30, bd=4)
self.ent_author.insert(0, 'Please enter author name')
self.ent_author.place(x=150, y=85)
#page
self.lbl_page = Label(self.bottomFrame, text=' Pages : ', font='arial 15 bold', fg='white', bg='#fcc324')
self.lbl_page.place(x=40, y=120)
self.ent_page = Entry(self.bottomFrame, width=30, bd=4)
self.ent_page.insert(0, 'Please enter page size')
self.ent_page.place(x=150, y=125)
#language
self.lbl_language = Label(self.bottomFrame, text='Language :', font='arial 15 bold', fg='white', bg='#fcc324')
self.lbl_language.place(x=40, y=160)
self.ent_language = Entry(self.bottomFrame, width=30, bd=4)
self.ent_language.insert(0, 'Please enter a language')
self.ent_language.place(x=150, y=165)
#Button
button=Button(self.bottomFrame,text='Add Book',command=self.addBook)
button.place(x=270,y=200)
def addBook(self):
name = self.ent_name.get()
author = self.ent_author.get()
page = self.ent_page.get()
language = self.ent_language.get()
        if name and author and page and language:  # all fields must be non-empty
try:
query = "INSERT INTO 'books' (book_name,book_author,book_page,book_language) VALUES(?,?,?,?)"
cur.execute(query,(name,author,page,language))
con.commit()
messagebox.showinfo("Success","Successfully added to database",icon='info')
            except Exception:
messagebox.showerror("Error","Cant add to database",icon='warning')
else:
messagebox.showerror("Error","Fields cant be empty",icon='warning')
def clCON(self,b):
con.close()
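# Hedged setup sketch (not in the original): the INSERT in addBook() assumes a
# "books" table with these columns; one possible schema, for illustration:
#
#     cur.execute("""CREATE TABLE IF NOT EXISTS books (
#                        book_name TEXT,
#                        book_author TEXT,
#                        book_page TEXT,
#                        book_language TEXT)""")
#     con.commit()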
|
[
"noreply@github.com"
] |
Shantanu03-ux.noreply@github.com
|
4bab841b23056527667cd539e0600260a63727ec
|
a71c0634fda6161571fcff26855eb324983939ee
|
/account/migrations/0014_auto_20191024_1636.py
|
89d2c44019e502a7594b6ff379a07dbea2942700
|
[] |
no_license
|
Jincykk1996/mydjang
|
19b7acab68f50aee83d6cf604739d087f766d92c
|
877bc84a66a835bc15a49f8dd0fa51f4b52bdb64
|
refs/heads/master
| 2020-08-30T19:41:31.662945
| 2019-10-30T08:18:23
| 2019-10-30T08:18:23
| 218,471,570
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 624
|
py
|
# Generated by Django 2.2.4 on 2019-10-24 11:06
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0013_auto_20191024_1632'),
]
operations = [
migrations.AlterField(
model_name='cart',
name='order_date',
field=models.DateField(default=datetime.datetime(2019, 10, 24, 16, 36, 2, 416952)),
),
migrations.AlterField(
model_name='coupon',
name='codes',
field=models.CharField(blank=True, max_length=10, null=True),
),
]
|
[
"jincykalarikunumal@gmail.com"
] |
jincykalarikunumal@gmail.com
|
1338b206aae85f69c1fb457f6bfa740573d2ff60
|
a353a4b535edce2f12b125205d7ffc1964d26cda
|
/app.py
|
a901f23e78a634331e655a2c9ecb6e881e3bfe81
|
[] |
no_license
|
asu1610/lineBot2
|
bee992114dfc5f113e8f25657d700bd39442eb37
|
cfdb8898b049370ef05fd284c85a0a8f047cb711
|
refs/heads/master
| 2020-06-18T01:08:52.620122
| 2019-10-10T04:45:12
| 2019-10-10T04:45:12
| 196,117,839
| 0
| 0
| null | 2019-07-10T02:41:26
| 2019-07-10T02:41:26
| null |
UTF-8
|
Python
| false
| false
| 1,537
|
py
|
from flask import Flask, jsonify, request
import os
import json
import requests
app = Flask(__name__)
@app.route('/')
def index():
a=os.environ['Authorization']
return "นางสาวคณิศร วงษ์สุวรรณ์ เลขที่ 7 ชั้น ม.4/1"
@app.route("/webhook", methods=['POST'])
def webhook():
if request.method == 'POST':
return "OK"
@app.route('/callback', methods=['POST'])
def callback():
json_line = request.get_json()
json_line = json.dumps(json_line)
decoded = json.loads(json_line)
user = decoded['originalDetectIntentRequest']['payload']['data']['replyToken']
userText = decoded['queryResult']['intent']['displayName']
#sendText(user,userText)
    if (userText == 'สวัสดี') :  # Thai: "hello"
        sendText(user,'ดีจ้ะ')  # Thai: "hi there"
    elif (userText == 'ไปแหละ') :  # Thai: "I'm off"
        sendText(user,'เอ่อ! ไปเหอะ')  # Thai: "uh, just go then"
    else :
        sendText(user,'หมายควายว่าไง')  # Thai: "what on earth do you mean?"
return '',200
def sendText(user, text):
LINE_API = 'https://api.line.me/v2/bot/message/reply'
headers = {
'Content-Type': 'application/json; charset=UTF-8',
        'Authorization': os.environ['Authorization'] # set this Config var in Heroku with the channel access token
}
data = json.dumps({
"replyToken":user,
"messages":[{"type":"text","text":text}]
})
    r = requests.post(LINE_API, headers=headers, data=data) # send the reply
if __name__ == '__main__':
app.run()
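# Hedged sketch (not in the original): minimal shape of the Dialogflow webhook
# payload that callback() expects; only the fields actually read above are
# shown, everything else is illustrative.
#
#     {
#       "originalDetectIntentRequest": {
#         "payload": {"data": {"replyToken": "<LINE reply token>"}}
#       },
#       "queryResult": {"intent": {"displayName": "<intent display name>"}}
#     }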
|
[
"noreply@github.com"
] |
asu1610.noreply@github.com
|
792683deda5f938d090c31bf671fec709f1b6178
|
04ca6a06be3861bfb8d4bc0e839e8b78e7d76eb7
|
/bot/handlers/__init__.py
|
7103b81d1ce26b26c87090619ddd37ea645a256b
|
[
"MIT"
] |
permissive
|
rashidovich2/Russian-Qiwi-Bot
|
70cb9f0f4796abafd96ca7a56666b011aad5539d
|
d5b0f23516343205ca7bad15b2d2fae7b675f584
|
refs/heads/main
| 2023-08-25T02:58:17.796592
| 2021-10-20T19:12:27
| 2021-10-20T19:12:27
| 576,804,780
| 0
| 0
|
MIT
| 2022-12-11T03:01:25
| 2022-12-11T03:01:24
| null |
UTF-8
|
Python
| false
| false
| 21
|
py
|
from . import users
|
[
"noreply@github.com"
] |
rashidovich2.noreply@github.com
|
8983bc7b01719b00420a52a9a90148f67588ba26
|
a17eeaedb059b11dbaaf7dd2dedf31bc5251008b
|
/Python/function/sorted.py
|
379eb7a72f83a7314fd57a0842144a24f2229f48
|
[
"MIT"
] |
permissive
|
KrisCheng/HackerPractice
|
b94c575bf136c06b4ea563147ac94cae74947d22
|
778948db4836e85c9af90267dc9b03d50f29d8e9
|
refs/heads/master
| 2021-01-19T20:24:35.466720
| 2017-10-09T05:47:11
| 2017-10-09T05:47:11
| 88,504,300
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
# Sorting examples
a = sorted([1,3,23,-5,-344,23])
print(a)
L = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]
def by_name(t):
return t[0]
L2 = sorted(L, key=by_name)
print(L2)
def by_score(t):
return t[1]
L3 = sorted(L, key=by_score)
print(L3)
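# The same sorts can also be written with operator.itemgetter
# (illustrative addition):
from operator import itemgetter
print(sorted(L, key=itemgetter(0)))  # same as L2: sort by name
print(sorted(L, key=itemgetter(1)))  # same as L3: sort by score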
|
[
"743628145@qq.com"
] |
743628145@qq.com
|
431300dc169839d8a8781e7751dddbc9007a6f2d
|
be1c3e70f12124f6ce402aed54cdbd9f41e052ca
|
/Day 2/Q3.py
|
655174032af027b752bf2a0a71632f35a9d6b66d
|
[] |
no_license
|
vapoorva/LetsUpgradeDataScience
|
de0858854950673021ffeeca0f70df0bdc430110
|
b2962f69c68f9dd00a46531f62302c125870739a
|
refs/heads/master
| 2023-01-13T14:28:55.998656
| 2020-11-20T07:21:11
| 2020-11-20T07:21:11
| 312,668,976
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 68
|
py
|
n= int(input())
d={}
for i in range(1,n+1):
d[i] = i*i
print(d)
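# Equivalent dict comprehension (illustrative): d = {i: i * i for i in range(1, n + 1)}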
|
[
"apoorva200299@gmail.com"
] |
apoorva200299@gmail.com
|
c697c259d8e52b844afbe7f1737ba3b929bdf1ab
|
a7943c40d294e6088408967303b4af4867fc8bae
|
/analyse/migrations/0001_initial.py
|
8fb4e0cb931113f000243727df25beb9b7481723
|
[] |
no_license
|
mudassir2700/Sentometer-Aspect-based-Sentiment-Analysis-
|
ce2a0d22eca9aefda862b2be352d8ade3aa359fc
|
ade4d70b33a342683eb26a14504b9e4fa385cb86
|
refs/heads/master
| 2021-02-23T21:37:29.627121
| 2020-03-06T12:52:03
| 2020-03-06T12:52:03
| 245,411,981
| 0
| 0
| null | 2020-03-06T12:22:25
| 2020-03-06T12:18:02
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,553
|
py
|
# Generated by Django 2.1.7 on 2019-04-13 20:10
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Features',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('camera_pos', models.CharField(blank=True, max_length=100)),
('camera_neg', models.CharField(blank=True, max_length=100)),
('battery_pos', models.CharField(blank=True, max_length=100)),
('battery_neg', models.CharField(blank=True, max_length=100)),
('performance_pos', models.CharField(blank=True, max_length=100)),
('performance_neg', models.CharField(blank=True, max_length=100)),
('storage_pos', models.CharField(blank=True, max_length=100)),
('storage_neg', models.CharField(blank=True, max_length=100)),
('budget_pos', models.CharField(blank=True, max_length=100)),
('url', models.TextField(blank=True)),
('name', models.CharField(blank=True, max_length=500)),
('price', models.CharField(blank=True, max_length=100)),
('img_url', models.CharField(blank=True, max_length=1000)),
('rating', models.CharField(blank=True, max_length=100)),
('details', models.TextField(blank=True)),
],
),
]
|
[
"mudassirali2700@gmail.com"
] |
mudassirali2700@gmail.com
|
9759212dbdc123e208cb9b58fcb04caf51495317
|
295f068e817882e14a8fdcd765a6582c1b35e506
|
/tests/mock_google/case.py
|
f15a4119efdb3051132c823feaf77e7659711467
|
[
"Apache-2.0"
] |
permissive
|
shafaypro/mrjob
|
57a40dd072f60c4a5cda6e14d72ac74ec0025a83
|
2a7d1c3c7917efed0118ebffd52865c4a50298f4
|
refs/heads/master
| 2020-05-15T13:56:27.752415
| 2019-04-16T01:05:55
| 2019-04-16T01:05:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,243
|
py
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Limited mock of google-cloud-sdk for tests
"""
from io import BytesIO
from google.cloud.logging.entries import StructEntry
from google.cloud.logging.resource import Resource
from google.oauth2.credentials import Credentials
from mrjob.fs.gcs import parse_gcs_uri
from .dataproc import MockGoogleDataprocClusterClient
from .dataproc import MockGoogleDataprocJobClient
from .logging import MockGoogleLoggingClient
from .storage import MockGoogleStorageClient
from tests.mr_two_step_job import MRTwoStepJob
from tests.py2 import Mock
from tests.py2 import patch
from tests.sandbox import SandboxedTestCase
_TEST_PROJECT = 'test-mrjob:test-project'
class MockGoogleTestCase(SandboxedTestCase):
def setUp(self):
super(MockGoogleTestCase, self).setUp()
# maps (project_id, region, cluster_name) to a
# google.cloud.dataproc_v1beta2.types.Cluster
self.mock_clusters = {}
# maps (project_id, region, job_name) to a
# google.cloud.dataproc_v1beta2.types.Job
self.mock_jobs = {}
# set this to False to make jobs ERROR
self.mock_jobs_succeed = True
# a list of StructEntry objects for mock logging client to return
self.mock_log_entries = []
# mock OAuth token, returned by mock google.auth.default()
self.mock_token = 'mock_token'
# mock project ID, returned by mock google.auth.default()
self.mock_project_id = 'mock-project-12345'
# Maps bucket name to a dictionary with the keys
# *blobs* and *location*. *blobs* maps object name to
# a dictionary with the key *data*, which is
# a bytestring.
self.mock_gcs_fs = {}
self.start(patch('google.api_core.grpc_helpers.create_channel',
self.create_channel))
self.start(patch('google.auth.default', self.auth_default))
self.start(patch(
'google.cloud.dataproc_v1beta2.ClusterControllerClient',
self.cluster_client))
self.start(patch('google.cloud.dataproc_v1beta2.JobControllerClient',
self.job_client))
self.start(patch('google.cloud.logging.Client',
self.logging_client))
self.start(patch('google.cloud.storage.client.Client',
self.storage_client))
self.start(patch('time.sleep'))
def auth_default(self, scopes=None):
credentials = Credentials(self.mock_token, scopes=scopes)
return (credentials, self.mock_project_id)
def create_channel(self, target, credentials=None):
channel = Mock()
channel._channel = Mock()
channel._channel.target = Mock(return_value=target)
return channel
def cluster_client(self, channel=None, credentials=None):
return MockGoogleDataprocClusterClient(
channel=channel,
credentials=credentials,
mock_clusters=self.mock_clusters,
mock_gcs_fs=self.mock_gcs_fs,
mock_jobs=self.mock_jobs,
mock_jobs_succeed=self.mock_jobs_succeed,
)
def job_client(self, channel=None, credentials=None):
return MockGoogleDataprocJobClient(
channel=channel,
credentials=credentials,
mock_clusters=self.mock_clusters,
mock_gcs_fs=self.mock_gcs_fs,
mock_jobs=self.mock_jobs,
mock_jobs_succeed=self.mock_jobs_succeed,
)
def logging_client(self, project=None, credentials=None):
return MockGoogleLoggingClient(
credentials=credentials,
mock_log_entries=self.mock_log_entries,
project=project,
)
def storage_client(self, project=None, credentials=None):
return MockGoogleStorageClient(mock_gcs_fs=self.mock_gcs_fs)
def add_mock_log_entry(
self, payload, logger, insert_id=None, timestamp=None,
labels=None, severity=None, http_request=None, resource=None):
if isinstance(resource, dict):
resource = Resource(**resource)
entry = StructEntry(
http_request=http_request,
insert_id=insert_id,
labels=labels,
logger=logger,
payload=payload,
resource=resource,
severity=severity,
timestamp=timestamp,
)
self.mock_log_entries.append(entry)
def make_runner(self, *args):
"""create a dummy job, and call make_runner() on it.
Use this in a with block:
with self.make_runner() as runner:
...
"""
stdin = BytesIO(b'foo\nbar\n')
mr_job = MRTwoStepJob(['-r', 'dataproc'] + list(args))
mr_job.sandbox(stdin=stdin)
return mr_job.make_runner()
def put_gcs_multi(self, gcs_uri_to_data_map):
client = self.storage_client()
for uri, data in gcs_uri_to_data_map.items():
bucket_name, blob_name = parse_gcs_uri(uri)
bucket = client.bucket(bucket_name)
if not bucket.exists():
bucket.create()
blob = bucket.blob(blob_name)
blob.upload_from_string(data)
def put_job_output_parts(self, dataproc_runner, raw_parts):
"""Generate fake output on GCS for the given Dataproc runner."""
assert type(raw_parts) is list
base_uri = dataproc_runner.get_output_dir()
gcs_multi_dict = dict()
for part_num, part_data in enumerate(raw_parts):
gcs_uri = base_uri + 'part-%05d' % part_num
gcs_multi_dict[gcs_uri] = part_data
self.put_gcs_multi(gcs_multi_dict)
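# Hedged example (not part of the original file): how a test built on this
# case class might combine make_runner() with the GCS helpers; the test name
# and fake output below are illustrative only.
#
#     class ExampleDataprocTestCase(MockGoogleTestCase):
#
#         def test_fake_output(self):
#             with self.make_runner() as runner:
#                 self.put_job_output_parts(runner, [b'1\t"bar"\n'])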
|
[
"dave@davemarin.com"
] |
dave@davemarin.com
|
f8adb41d307eafa204c095eb025eed155b7ac3e0
|
0bf6356518fc0b7ae503a5be8b8852ada6395353
|
/video_8/p1.py
|
9a71f0aad0a2652e4a0545e309c813abaeae6fb3
|
[] |
no_license
|
thatfellarobin/w2021-ta-manim
|
13fdc511aa86582f499a6da343f61540e7f8bc12
|
93f57222b5381886d60daf8c3227ef47f611ad5d
|
refs/heads/main
| 2023-04-07T18:06:43.026901
| 2021-04-12T15:16:00
| 2021-04-12T15:16:00
| 331,493,770
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,566
|
py
|
from manim import *
import numpy as np
GOLD_DARK = '#5c4326'
EVERGREEN = '#077319'
GREEN_DARK = '#2b4022'
BLUE_DARK = '#26545e'
BROWN = '#8f4a04'
MED_DARK_GREY = '#666666'
BLUE_E_DARK = '#0c343d'
DIM_A=1.25
class T8P1(Scene):
def construct(self):
attribution = Tex('Robin Liu, 2021', color=MED_DARK_GREY).scale(0.4).to_corner(DOWN+RIGHT, buff=0.2)
self.add(attribution)
#region Diagram objects
disk = Circle(
radius=DIM_A,
stroke_color=BLUE_E,
fill_color=BLUE_E_DARK,
stroke_width=10,
fill_opacity=1
)
disk_center = Dot(
point=disk.get_center(),
radius=0.1,
color=BLUE_E
)
disk_group = Group(disk, disk_center)
pin = Dot(
point=disk.get_edge_center(LEFT),
color=LIGHT_GRAY
)
rod = Line(
start=disk.get_edge_center(LEFT),
end=disk.get_edge_center(LEFT)+(DIM_A*2)*np.array([-np.cos(PI/6), -np.sin(PI/6), 0]),
color=RED_E,
stroke_width=15
)
ground = Rectangle(
width=7,
height=0.3,
color=GREY,
stroke_opacity=0,
fill_opacity=1
).next_to(DIM_A*(LEFT+DOWN), DOWN, buff=0)
#endregion
diagram = Group(disk_group, rod, pin, ground).move_to(ORIGIN)
self.add(diagram)
self.wait()
#region annotate region
radius_arrow = DoubleArrow(
start=disk.get_center(),
end=disk.get_center()+DIM_A*np.array([np.cos(PI/4), np.sin(PI/4), 0]),
color=YELLOW,
buff=0.0,
stroke_width=5,
tip_length=0.15,
max_stroke_width_to_length_ratio=999,
max_tip_length_to_length_ratio=1
)
radius_annot = MathTex('a', color=YELLOW).scale(0.7).next_to(radius_arrow, UP+RIGHT, buff=0.15)
rodlength_arrow = DoubleArrow(
start=rod.get_start(),
end=rod.get_end(),
color=YELLOW,
buff=0.0,
stroke_width=5,
tip_length=0.15,
max_stroke_width_to_length_ratio=999,
max_tip_length_to_length_ratio=1
).shift(0.25*rod.copy().rotate(-PI/2).get_unit_vector())
rodlength_annot = MathTex(
'2a',
color=YELLOW
).scale(0.7).next_to(rodlength_arrow.get_center(), rodlength_arrow.copy().rotate(-PI/2).get_unit_vector(), buff=0.15)
self.play(
Write(radius_arrow),
Write(radius_annot),
Write(rodlength_arrow),
Write(rodlength_annot)
)
self.wait()
#endregion
diagram = Group(
diagram,
radius_arrow,
radius_annot,
rodlength_arrow,
rodlength_annot
)
#region Cleanup and show coordinate system
diagram_newpos = diagram.copy().scale(0.6).to_corner(DOWN+RIGHT, buff=0.5)
rod_copy = rod.copy().set_opacity(0)
rod_newpos = rod.copy().scale(0.75).to_corner(UP+RIGHT, buff=1.25)
disk_copy = disk_group.copy()
for item in disk_copy:
item.set_opacity(0)
disk_newpos = disk_group.copy().scale(0.75).to_edge(RIGHT, buff=0.5)
self.play(
Transform(rod_copy, rod_newpos),
Transform(disk_copy, disk_newpos),
Transform(diagram, diagram_newpos)
)
self.wait()
# Create coordinate system
i_arrow = Arrow(
start=ORIGIN,
end=RIGHT,
color=YELLOW,
buff=0.0,
stroke_width=5,
tip_length=0.2,
max_stroke_width_to_length_ratio=999,
max_tip_length_to_length_ratio=1
)
i_label = MathTex('\\hat{i}', color=YELLOW).scale(0.7).next_to(i_arrow, RIGHT, buff=0.15)
j_arrow = Arrow(
start=ORIGIN,
end=UP,
color=YELLOW,
buff=0.0,
stroke_width=5,
tip_length=0.2,
max_stroke_width_to_length_ratio=999,
max_tip_length_to_length_ratio=1
)
j_label = MathTex('\\hat{j}', color=YELLOW).scale(0.7).next_to(j_arrow, UP, buff=0.15)
k_dot = Dot(
point=i_arrow.get_start(),
color=YELLOW
)
k_circle = Circle(
arc_center=k_dot.get_center(),
radius=0.15,
color=YELLOW
)
k_label = MathTex('\\hat{k}', color=YELLOW).scale(0.7).next_to(k_circle, LEFT, buff=0.15)
coordsys = Group(i_arrow, j_arrow, i_label, j_label, k_dot, k_circle, k_label).scale(0.75).next_to(diagram, LEFT, buff=0.5, aligned_edge=DOWN)
self.play(
Write(i_arrow),
Write(j_arrow),
Write(i_label),
Write(j_label),
Write(k_dot),
Write(k_circle),
Write(k_label)
)
self.wait()
#endregion
#region Explain pure rolling
fixed_point = Dot(
point=disk_copy.get_edge_center(DOWN),
color=YELLOW
)
fixed_point_annot = MathTex('R', color=YELLOW).scale(0.6).next_to(fixed_point, DOWN, buff=0.15)
self.play(FadeIn(fixed_point))
for _ in range(2):
self.play(Flash(fixed_point))
self.play(Write(fixed_point_annot))
self.wait()
#endregion
#region Velocity
# Velocity of A
r_AR_arrow = Arrow(
start=fixed_point.get_center(),
end=disk_copy.get_edge_center(LEFT),
color=GREEN,
buff=0.0,
stroke_width=5,
tip_length=0.15,
max_stroke_width_to_length_ratio=999,
max_tip_length_to_length_ratio=1
)
r_AR_label = MathTex('r_{A/R}', color=GREEN).scale(0.6).next_to(r_AR_arrow.get_center(), UP+RIGHT, buff=0.075)
self.play(
FadeIn(r_AR_arrow),
Write(r_AR_label)
)
self.wait()
eq_a = MathTex(
'\\vec{v}_A = \\vec{v}_R + \\vec{v}_{A/R}'
).scale(0.55).to_corner(UP+LEFT, buff=0.5)
eq_a_sub = MathTex(
'\\vec{v}_A',
'=',
'0 + \\vec{\\omega}_{disk} \\times \\vec{r}_{A/R}',
'=',
'\\omega\\hat{k} \\times (-a\\hat{i} + a\\hat{j})',
'\\Rightarrow',
'\\vec{v}_A = -a\\omega\\hat{i} - a\\omega\\hat{j}',
).scale(0.55).next_to(eq_a, DOWN, buff=0.2, aligned_edge=LEFT)
eq_a_sub[3:].next_to(eq_a_sub[1:3], DOWN, aligned_edge=LEFT, buff=0.15)
eq_a_sub[5:].next_to(eq_a_sub[3:5], DOWN, aligned_edge=LEFT, buff=0.15)
self.play(Write(eq_a))
self.wait()
self.play(Write(eq_a_sub[:3]))
self.wait(0.5)
self.play(Write(eq_a_sub[3:5]))
self.wait(0.5)
self.play(Write(eq_a_sub[5:]))
self.wait()
self.play(
FadeOut(eq_a),
FadeOut(eq_a_sub[:-1]),
Transform(eq_a_sub[-1], eq_a_sub[-1].copy().to_corner(UP+LEFT, buff=0.5))
)
self.wait()
# velocity of B
# Label assumptions
assume_text = Tex('Purple:', ' assumed direction').scale(0.6).to_corner(UP+RIGHT)
assume_text[0].set_color(PURPLE)
v_b_arrow = Arrow(
start=rod_copy.get_end(),
end=rod_copy.get_end()+RIGHT,
color=PURPLE,
buff=0.0,
stroke_width=5,
tip_length=0.15,
max_stroke_width_to_length_ratio=999,
max_tip_length_to_length_ratio=1
)
v_b_label = MathTex('\\vec{v}_B', color=PURPLE).scale(0.6).next_to(v_b_arrow, RIGHT, buff=0.15)
omega_ab_arrow = Arc(
arc_center=rod_copy.get_center(),
radius=0.2,
start_angle=PI,
angle=1.5*PI,
color=PURPLE
).add_tip(tip_length=0.15)
omega_ab_arrow.move_arc_center_to(rod_copy.get_center())
omega_ab_annot = MathTex('\\omega_{AB}', color=PURPLE).scale(0.6).next_to(omega_ab_arrow, UP, buff=0.15)
self.play(
Write(assume_text),
Write(v_b_arrow),
Write(v_b_label),
Write(omega_ab_arrow),
Write(omega_ab_annot)
)
self.wait()
eq_b = MathTex(
'\\vec{v}_B = \\vec{v}_A + \\vec{v}_{B/A}'
).scale(0.55).next_to(eq_a_sub[-1], DOWN, aligned_edge=LEFT)
eq_b_sub = MathTex(
'|\\vec{v}_B|\\hat{i}',
'=',
'-a\\omega\\hat{i}-a\\omega\\hat{j} + \\vec{\\omega}_{AB}\\times\\vec{r}_{B/A}',
'=',
'-a\\omega\\hat{i}-a\\omega\\hat{j} + |\\vec{\\omega}_{AB}|\\hat{k} \\times (-2a\\cos(30^\\circ)\\hat{i} -a\\hat{j})',
'=',
'-a\\omega\\hat{i}-a\\omega\\hat{j} + a|\\vec{\\omega}_{AB}|\\hat{i}-2a\\cos(30^\\circ)|\\vec{\\omega}_{AB}|\\hat{j}',
).scale(0.55).next_to(eq_b, DOWN, buff=0.2, aligned_edge=LEFT)
eq_b_sub[3:].next_to(eq_b_sub[1:3], DOWN, aligned_edge=LEFT, buff=0.15)
eq_b_sub[5:].next_to(eq_b_sub[3:5], DOWN, aligned_edge=LEFT, buff=0.15)
eq_b_dir_i = MathTex(
'\\hat{i}:',
'|\\vec{v}_B|',
'=',
'-a\\omega+a|\\vec{\\omega}_{AB}|'
).scale(0.55).next_to(eq_b_sub, DOWN, aligned_edge=LEFT, buff=0.2).shift(0.5*RIGHT)
eq_b_dir_i[0].set_color(YELLOW)
eq_b_dir_j = MathTex(
'\\hat{j}:',
'0',
'=',
'-a\\omega-2a\\cos(30^\\circ)|\\vec{\\omega}_{AB}|',
).scale(0.55).next_to(eq_b_dir_i, DOWN, aligned_edge=LEFT, buff=0.2)
eq_b_dir_j[0].set_color(YELLOW)
omega_ab_ans = MathTex(
'|\\vec{\\omega}_{AB}| = \\frac{-\\omega}{2\\cos(30^\\circ)}',
'\\Rightarrow',
'\\vec{\\omega}_{AB} = -0.577\\omega\\hat{k}'
).scale(0.55).next_to(eq_b_dir_j, DOWN, aligned_edge=LEFT)
v_b_ans = MathTex(
'|\\vec{v}_B| = -a\\omega \\left(1 + \\frac{1}{2\\cos(30^\\circ)}\\right)',
'\\Rightarrow',
'\\vec{v}_B = -1.58a\\omega\\hat{i}'
).scale(0.55).next_to(omega_ab_ans, DOWN, aligned_edge=LEFT)
ansbox1 = SurroundingRectangle(v_b_ans[2])
ansgroup1 = Group(v_b_ans[2], ansbox1)
omega_ab_ans_newpos = omega_ab_ans[-1].copy().next_to(eq_a_sub[-1], DOWN, aligned_edge=LEFT)
ansgroup1_newpos = ansgroup1.copy().next_to(omega_ab_ans_newpos, DOWN, aligned_edge=LEFT)
self.play(Write(eq_b))
self.wait()
self.play(Write(eq_b_sub[:3]))
self.wait(0.5)
self.play(Write(eq_b_sub[3:5]))
self.wait(0.5)
self.play(Write(eq_b_sub[5:]))
self.wait(0.5)
self.play(
Write(eq_b_dir_i),
Write(eq_b_dir_j)
)
self.wait(0.5)
self.play(
Write(v_b_ans[0]),
Write(omega_ab_ans[0])
)
self.wait(0.5)
self.play(
Write(v_b_ans[1:]),
Write(omega_ab_ans[1:])
)
self.play(ShowCreation(ansbox1))
self.wait()
self.play(
FadeOut(eq_b),
FadeOut(eq_b_sub),
FadeOut(eq_b_dir_i),
FadeOut(eq_b_dir_j),
FadeOut(omega_ab_ans[:-1]),
FadeOut(v_b_ans[:-1]),
Transform(omega_ab_ans[-1], omega_ab_ans_newpos),
Transform(ansgroup1, ansgroup1_newpos)
)
self.wait()
#endregion
#region Acceleration
#region Explain pure rolling acceleration
point_O = Dot(
point=disk_copy.get_center(),
color=YELLOW
)
point_O_annot = MathTex('O', color=YELLOW).scale(0.6).next_to(point_O, UP+RIGHT, buff=0)
self.play(
FadeIn(point_O),
Write(point_O_annot)
)
for _ in range(2):
self.play(Flash(point_O))
a_O_arrow = Arrow(
start=point_O.get_center(),
end=point_O.get_center()+0.75*LEFT,
color=TEAL_D,
buff=0.0,
stroke_width=5,
tip_length=0.15,
max_stroke_width_to_length_ratio=999,
max_tip_length_to_length_ratio=1
)
a_O_label = MathTex('a_O', color=TEAL_D).scale(0.6).next_to(a_O_arrow.get_center(), UP, buff=0.15)
self.play(
Write(a_O_arrow),
Write(a_O_label)
)
self.wait()
a_O_eq = MathTex(
'\\vec{a}_O = \\vec{r}_{R/O}\\times\\vec{\\alpha}',
'\\Rightarrow',
'\\vec{a}_O = -a\\alpha\\hat{i}'
).scale(0.55).next_to(ansgroup1, DOWN, aligned_edge=LEFT)
self.play(Write(a_O_eq[0]))
self.wait()
self.play(Write(a_O_eq[1:]))
self.wait()
#endregion
# Acceleration of A
eq_a_accel = MathTex(
'\\vec{a}_A = \\vec{a}_O + \\vec{\\alpha}_{OA}\\times\\vec{r}_{A/O} - |\\vec{\\omega}_{OA}|^2\\vec{r}_{A/O}'
).scale(0.55).next_to(a_O_eq, DOWN, aligned_edge=LEFT)
eq_a_accel_sub = MathTex(
'\\vec{a}_A',
'=',
'-a\\alpha\\hat{i} + (\\alpha\\hat{k}\\times (-a\\hat{i})) - \\omega^2 (-a\\hat{i})',
'=',
'-a\\alpha\\hat{i} - a\\alpha\\hat{j} + a\\omega^2\\hat{i}',
'\\Rightarrow',
'\\vec{a}_A = a(\\omega^2-\\alpha)\\hat{i} - a\\alpha\\hat{j}',
).scale(0.55).next_to(eq_a_accel, DOWN, buff=0.2, aligned_edge=LEFT)
eq_a_accel_sub[3:].next_to(eq_a_accel_sub[1:3], DOWN, aligned_edge=LEFT, buff=0.15)
eq_a_accel_sub[5:].next_to(eq_a_accel_sub[3:5], DOWN, aligned_edge=LEFT, buff=0.15)
self.play(Write(eq_a_accel))
self.wait()
self.play(Write(eq_a_accel_sub[:3]))
self.wait(0.5)
self.play(Write(eq_a_accel_sub[3:5]))
self.wait(0.5)
self.play(Write(eq_a_accel_sub[5:]))
self.wait()
self.play(
FadeOut(eq_a_accel),
FadeOut(eq_a_accel_sub[:-1]),
Transform(eq_a_accel_sub[-1], eq_a_accel_sub[-1].copy().next_to(a_O_eq, DOWN, aligned_edge=LEFT))
)
self.wait()
# Acceleration of B
# Label assumptions
a_b_label = MathTex('\\vec{v}_B,\\,\\vec{a}_B', color=PURPLE).scale(0.6).next_to(v_b_arrow, RIGHT, buff=0.15)
alpha_ab_annot = MathTex('\\omega_{AB},\\,\\alpha_{AB}', color=PURPLE).scale(0.6).next_to(omega_ab_arrow, UP, buff=0.15)
self.play(
ReplacementTransform(v_b_label, a_b_label),
ReplacementTransform(omega_ab_annot, alpha_ab_annot)
)
self.wait()
eq_b_accel = MathTex(
'\\vec{a}_B = \\vec{a}_A + \\vec{\\alpha}_{AB}\\times\\vec{r}_{B/A} - |\\vec{\\omega}_{AB}|^2\\vec{r}_{B/A}'
).scale(0.55).next_to(eq_a_accel_sub[-1], DOWN, aligned_edge=LEFT)
eq_b_accel_sub = MathTex(
'|\\vec{a}_B|\\hat{i}',
'=',
'(a(\\omega^2-\\alpha)\\hat{i} - a\\alpha\\hat{j}) + (|\\vec{\\alpha}_{AB}|\\hat{k}\\times (-2a\\cos(30^\\circ)\\hat{i} - a\\hat{j})) - (0.577\\omega)^2 (-2a\\cos(30^\\circ)\\hat{i} - a\\hat{j})',
'=',
'(a(\\omega^2-\\alpha)\\hat{i} - a\\alpha\\hat{j}) - |\\vec{\\alpha}_{AB}|(-a\\hat{i}+2a\\cos(30^\\circ)\\hat{j}) + 0.333a\\omega^2(2\\cos(30^\\circ)\\hat{i} + \\hat{j})'
).scale(0.55).next_to(eq_b_accel, DOWN, buff=0.2, aligned_edge=LEFT)
eq_b_accel_sub[3:].next_to(eq_b_accel_sub[1:3], DOWN, aligned_edge=LEFT, buff=0.15)
eq_b_accel_dir_i = MathTex(
'\\hat{i}:',
'|\\vec{a}_B|',
'=',
'a(\\omega^2-\\alpha) + a|\\vec{\\alpha}_{AB}| + 0.577a\\omega^2'
).scale(0.55).next_to(eq_b_accel_sub, DOWN, aligned_edge=LEFT, buff=0.2).shift(0.5*RIGHT)
eq_b_accel_dir_i[0].set_color(YELLOW)
eq_b_accel_dir_j = MathTex(
'\\hat{j}:',
'0',
'=',
'-a\\alpha - 2a\\cos(30^\\circ)|\\vec{\\alpha}_{AB}| + 0.333a\\omega^2',
).scale(0.55).next_to(eq_b_accel_dir_i, DOWN, aligned_edge=LEFT, buff=0.2)
eq_b_accel_dir_j[0].set_color(YELLOW)
alpha_ab_ans = MathTex(
'|\\vec{\\alpha}_{AB}| = 0.192\\omega^2-0.577\\alpha',
'\\Rightarrow',
'\\vec{\\alpha}_{AB} = (0.192\\omega^2-0.577\\alpha)\\hat{k}'
).scale(0.55).next_to(eq_b_accel_dir_j, DOWN, aligned_edge=LEFT).shift(0.5*LEFT)
a_b_ans = MathTex(
'|\\vec{a}_B| = 1.769a\\omega^2 - 1.577a\\alpha',
'\\Rightarrow',
'\\vec{a}_B = (1.769a\\omega^2 - 1.577a\\alpha)\\hat{i}'
).scale(0.55).next_to(alpha_ab_ans, DOWN, aligned_edge=LEFT)
ansbox2 = SurroundingRectangle(a_b_ans[2])
self.play(Write(eq_b_accel))
self.wait()
self.play(Write(eq_b_accel_sub[:3]))
self.wait(0.5)
self.play(Write(eq_b_accel_sub[3:5]))
self.wait(0.5)
self.play(
Write(eq_b_accel_dir_i),
Write(eq_b_accel_dir_j)
)
self.wait(0.5)
self.play(
Write(a_b_ans[0]),
Write(alpha_ab_ans[0])
)
self.wait(0.5)
self.play(
Write(a_b_ans[1:]),
Write(alpha_ab_ans[1:])
)
self.play(ShowCreation(ansbox2))
self.wait()
#endregion
|
[
"Robin.Liu.831@gmail.com"
] |
Robin.Liu.831@gmail.com
|
86f0df1cdc3565a1f7596669eaecdfb925a1eb5e
|
179b85c1170939c31f26b4fab3459c6d0c636cef
|
/ftp_send.py
|
670d42f4ab369885a6b33a68c0f5b92d81a77424
|
[] |
no_license
|
rezabehjani/python_Example
|
872c239352523e01bd2ae3e8e9551aaedba74ec3
|
665a925242ec4226c04c823d907fe351f2a796e4
|
refs/heads/master
| 2021-01-03T22:04:59.054087
| 2020-04-21T07:44:06
| 2020-04-21T07:44:06
| 240,253,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
from ftplib import FTP
ftp = FTP('136.243.87.101')
ftp.login('reza', '19972910')
# show the directory listing on the FTP server
ftp.dir()
# change into the target directory on the FTP server
ftp.cwd("/files")
ftp.dir()
fp = open("D:\\music Reza\\video\\Baran - Tazahor [1080].mp4", 'rb')
# upload file
ftp.storbinary("STOR Tazahor.mp4", fp, 1024)
fp.close()
print("ok send")
#file = open('kitten.jpg','rb')
#ftp.storbinary('STOR kitten.jpg', file)
|
[
"rezabehjani13@gmail.com"
] |
rezabehjani13@gmail.com
|
88a16982ff77ebb5084720c3eb36c664ee5dbf4e
|
7e16c5cb801dae422fc408c422f97c2a32c5e33f
|
/internal2cartesian.py
|
f4dd29a6a0996d17bd91dfb2195ec6e567ee9be3
|
[] |
no_license
|
Jussmith01/Internal-to-Cartesian
|
18e230275fa731e62a5f0612c5a6f4994456ea7f
|
1a7f6b72d1e9343e4359705248145331ee44af3e
|
refs/heads/master
| 2021-01-10T15:56:24.048690
| 2015-10-07T19:48:40
| 2015-10-07T19:48:40
| 43,840,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 858
|
py
|
import re
from vectors import *
from functions import readZmat, getdist, createCSVstring
import sys
# Inputs
a = 0.5
b = 4.0
inc = 0.01
type = 0  # 0 = bonds; 1 = angles; 2 = dihedrals
tidx = 0
icfile = 'inputexample.txt'
fnout = 'distances.dat'
# -------------------CODE----------------------
a1idx = [];idx = [];bl = [];ba = [];bdi = []
readZmat(bl, ba, bdi, idx, a1idx, icfile)
# Variables
N = int((b-a)/inc)
fout = open(fnout, "w")
for i in range(0, N):
it = i * inc + a
if type == 0:
bl[tidx] = it
elif type == 1:
ba[tidx] = it
elif type == 2:
bdi[tidx] = it
else:
print "Run failed. Enter acceptable value for 'type'"
sys.exit(0)
dists = getdist(bl, a1idx, ba, idx, bdi)
output = createCSVstring(dists)
fout.write(output)
fout.close()
|
[
"jussmith48@gmail.com"
] |
jussmith48@gmail.com
|
30ae1b5c5ef14f56cba710bc9a40d1d1cd0d84dc
|
bc00bdc08d76c8be38c51b1f1caeced2a4668592
|
/abjad_demo/env/lib/python3.6/site-packages/uqbar/_version.py
|
a135f3b2259762ca39cbc026a3990ae925bbc05a
|
[] |
no_license
|
gsy/gmajor
|
769afd6e87f6712e4059f3f779f41932cbca962d
|
7f5f20a19494256615fbaaa840b2a0bbbf6e311f
|
refs/heads/master
| 2023-02-08T07:00:44.479895
| 2019-05-20T13:58:03
| 2019-05-20T13:58:03
| 161,866,236
| 0
| 0
| null | 2023-02-02T06:26:34
| 2018-12-15T03:32:48
|
Scheme
|
UTF-8
|
Python
| false
| false
| 87
|
py
|
__version_info__ = (0, 2, 16)
__version__ = ".".join(str(x) for x in __version_info__)
|
[
"chenxuanguang@chuangxin.com"
] |
chenxuanguang@chuangxin.com
|
17d2c400a107de5d20d5eafa4ed9af255198d7f4
|
ba1f77bea85efa1bf9de89c17b4940728ae3db52
|
/plot_loss.py
|
9b1a8bb536c4e77b74ef780191004a639fa59319
|
[] |
no_license
|
andrewbo29/feedforward_network
|
3b93a43f7efeb21b52fddc56149f7e774d495d7f
|
267cd97f35149855818240e1e3f3164fa71eca1a
|
refs/heads/master
| 2021-01-18T21:11:10.840803
| 2016-06-03T11:06:23
| 2016-06-03T11:06:23
| 52,079,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
def plot_loss(log_fname):
loss = []
with open(log_fname) as f:
for line in f:
words = line.strip()
loss.append(float(words))
sns.set(style='whitegrid')
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_ylim([-0.5, 1])
ax.plot(loss, linewidth=3)
ax.set_title('Train loss')
ax.set_xlabel('iterations')
plt.show()
if __name__ == '__main__':
log_file = '/home/boyarov/Projects/cpp/feedforward_network/log_loss.txt'
# log_file = '/media/datac/andrew_workspace/darknet_1/log.txt'
plot_loss(log_file)
|
[
"andrewbo29@yandex.ru"
] |
andrewbo29@yandex.ru
|
0279409e67cf584ba24a2a24308967d9c568e754
|
20d461f0414f16c4c2b906e8871a96d85d46d734
|
/src/item.py
|
3ead9670c78135b4736e35ab3841527089bc39f0
|
[] |
no_license
|
beccacauthorn/Intro-Python-II
|
e3ec8ef4bb8e8d6f6f07ab02ef07f15d84257583
|
82b3605bc0283b1d7d01bc9803797cf1fbac3c49
|
refs/heads/master
| 2022-11-17T05:52:50.194511
| 2020-07-17T03:48:19
| 2020-07-17T03:48:19
| 279,744,375
| 0
| 0
| null | 2020-07-15T02:42:26
| 2020-07-15T02:42:25
| null |
UTF-8
|
Python
| false
| false
| 546
|
py
|
#Create a file called `item.py` and add an `Item` class in there.
# The item should have `name` and `description` attributes.
# Hint: the name should be one word for ease in parsing later.
class Item:
def __init__(self, name, item_description):
self.name = name
self.item_description = item_description
def __str__(self):
return f'{self.name}: {self.item_description}'
def on_take(self):
print(f"You have picked up {self.name}")
def on_drop(self):
print(f"You have dropped {self.name}")
|
[
"beccacauthorn@gmail.com"
] |
beccacauthorn@gmail.com
|
8ec20d3340c9e4975c2289bcb569bbb74f0b4721
|
e20bbe07bba9f9c86dfe40e394079b7c527b0ba2
|
/4_Iterators and Generators/4.4_Implementing the Iterator Protocol.py
|
2ae96c3785cc6ff4802c70f032d558fe4bf0441e
|
[] |
no_license
|
gavinloverqq/Python_Cookbook_Notes
|
f819b0a30efb399277bd81c3bc85e6419b7e8845
|
422c3740940554eaaca3fd8f873bd3b36f91dca3
|
refs/heads/master
| 2020-03-19T13:26:08.002941
| 2018-06-17T13:38:39
| 2018-06-17T13:38:39
| 136,579,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,108
|
py
|
class Node:
def __init__(self, value):
self._value = value
self._children = []
def __repr__(self):
return 'Node({!r})'.format(self._value)
def add_child(self, node):
self._children.append(node)
def __iter__(self):
return iter(self._children)
def depth_first(self):
yield self
for c in self:
yield from c.depth_first()
class Node2:
def __init__(self, value):
self._value = value
self._children = []
def __repr__(self):
return 'Node({!r})'.format(self._value)
def add_child(self, node):
self._children.append(node)
def __iter__(self):
return iter(self._children)
def depth_first(self):
return DepthFirstIterator(self)
class DepthFirstIterator(object):
'''
Depth-first traversal
'''
def __init__(self, start_node):
self._node = start_node
self._children_iter = None
self._child_iter = None
def __iter__(self):
return self
    # TODO: I do not fully understand this code yet ...
def __next__(self):
# Return myself if just started; create an iterator for children
if self._children_iter is None:
self._children_iter = iter(self._node)
return self._node
# If processing a child, return its next item
elif self._child_iter:
try:
nextchild = next(self._child_iter)
return nextchild
except StopIteration:
self._child_iter = None
return next(self)
# Advance to the next child and start its iteration
else:
print("advance ...")
self._child_iter = next(self._children_iter).depth_first()
return next(self)
# Example
if __name__ == '__main__':
root = Node2(0)
child1 = Node2(1)
child2 = Node2(2)
root.add_child(child1)
root.add_child(child2)
child1.add_child(Node2(3))
child1.add_child(Node2(4))
child2.add_child(Node2(5))
for ch in root.depth_first():
print(ch)
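    # Illustrative note (not in the original): both Node.depth_first() and the
    # iterator-based Node2 version visit this tree in the order
    # Node(0), Node(1), Node(3), Node(4), Node(2), Node(5); the Node2 variant
    # also prints its "advance ..." debug lines along the way.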
|
[
"779483309@qq.com"
] |
779483309@qq.com
|
07d9d74ffddb92feb9918379f74c35e641baa1f9
|
977abdcd089b5f19fefdb1ab8d2c284dde9ea7c9
|
/hikyuu/indicator/indicator_doc.py
|
221dbb2e52f27891e5627f4852905dafe2c5e4ba
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
fei090620/hikyuu
|
51f8ece1ab4ab366cb09e7c8c78d34155e167d26
|
161b9317f20e411468f2c1e4b0985d7a45fc141b
|
refs/heads/master
| 2020-05-18T21:25:40.171535
| 2019-05-02T12:02:41
| 2019-05-02T12:02:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,026
|
py
|
#!/usr/bin/python
# -*- coding: utf8 -*-
# cp936
#
# The MIT License (MIT)
#
# Copyright (c) 2017 fasiondog
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .indicator import *
ABS.__doc__ = """
ABS([data])
Absolute value
:param Indicator data: input data
:rtype: Indicator
"""
ACOS.__doc__ = """
ACOS([data])
Arc cosine
:param Indicator data: input data
:rtype: Indicator
"""
AMA.__doc__ = """
AMA([data, n=10, fast_n=2, slow_n=30])
Perry J. Kaufman's Adaptive Moving Average [BOOK1]_
:param Indicator data: input data
:param int n: period window for the average, must be an integer greater than 2
:param int fast_n: N value of the fast period
:param int slow_n: N value of the slow EMA line
:return: Indicator
* result(0): AMA
* result(1): ER
"""
IAMO.__doc__ = """
IAMO([data])
Get the transaction amount, wrapping the KData amount as an Indicator
:param data: input data (KData or Indicator)
:return: Indicator
"""
ASIN.__doc__ = """
ASIN([data])
Arc sine
:param Indicator data: input data
:rtype: Indicator
"""
ATAN.__doc__ = """
ATAN([data])
Arc tangent
:param Indicator data: input data
:rtype: Indicator
"""
ICLOSE.__doc__ = """
ICLOSE([data])
Get the closing price, wrapping the KData close as an Indicator
:param data: input data (KData or Indicator)
:return: Indicator
"""
COS.__doc__ = """
COS([data])
Cosine
:param Indicator data: input data
:rtype: Indicator
"""
COUNT.__doc__ = """
COUNT([data, n=20])
Count the periods that satisfy a condition.
Usage: COUNT(X,N) counts how many of the last N periods satisfy condition X; if N=0 the count starts from the first valid value.
Example: COUNT(CLOSE>OPEN,20) counts the up (bullish) periods within 20 periods
:param Indicator data: condition
:param int n: period window
:rtype: Indicator
"""
CROSS.__doc__ = """
CROSS(x, y)
Crossover function
:param x: variable or constant, the first line of the crossover test
:param y: variable or constant, the second line of the crossover test
:rtype: Indicator
"""
CVAL.__doc__ = """
CVAL([data, value=0.0, discard=0])
data is an Indicator instance; creates a constant indicator with the same length as data, whose value is value and whose discard length equals data's
:param Indicator data: Indicator instance
:param float value: constant value
:param int len: length
:param int discard: number of discarded values
:return: Indicator
"""
DIFF.__doc__ = """
DIFF([data])
Difference indicator, i.e. data[i] - data[i-1]
:param Indicator data: input data
:return: Indicator
"""
DOWNNDAY.__doc__ = """
DOWNNDAY(data[, n=3])
Consecutive down periods; DOWNNDAY(CLOSE,M) means falling for M consecutive periods
:param Indicator data: input data
:param int n: time window
:rtype: Indicator
"""
EMA.__doc__ = """
EMA([data, n=22])
Exponential Moving Average
:param Indicator data: input data
:param int n: period window for the average, must be an integer greater than 0
:return: Indicator
"""
EVERY.__doc__ = """
EVERY([data, n=20])
Holds throughout.
Usage: EVERY(X,N) means condition X holds in every one of the last N periods
Example: EVERY(CLOSE>OPEN,10) means the last 10 days were all up (bullish) candles
:param data: input data
:param int n: period window, must be an integer greater than 0
:rtype: Indicator
"""
EXIST.__doc__ = """
Exists: EXIST(X,N) means condition X holds at least once within N periods
:param data: input data
:param int n: period window, must be an integer greater than 0
:rtype: Indicator
"""
EXP.__doc__ = """
EXP([data])
EXP(X) is e raised to the power X
:param Indicator data: input data
:rtype: Indicator
"""
HHV.__doc__ = """
HHV([data, n=20])
Highest value within N periods; if N=0, starts from the first valid value.
:param Indicator data: input data
:param int n: N-period time window
:return: Indicator
"""
HHVBARS.__doc__ = """
HHVBARS([data, n=20])
Bars since the previous high: the number of periods from the last high to the current period.
Usage: HHVBARS(X,N) returns the number of periods from the highest X within N periods to the current period; N=0 means counting starts from the first valid value
Example: HHVBARS(HIGH,0) gives the number of periods since the all-time high
:param Indicator data: input data
:param int n: N-period time window
:rtype: Indicator
"""
IHIGH.__doc__ = """
IHIGH([data])
Get the highest price, wrapping the KData high as an Indicator
:param data: input data (KData or Indicator)
:return: Indicator
"""
HSL.__doc__ = """
HSL(kdata)
Get the turnover rate, equal to VOL(k) / CAPITAL(k)
:param KData kdata: K-line data
:rtype: Indicator
"""
IF.__doc__ = """
IF(x, a, b)
Conditional function: returns different values depending on a condition.
Usage: IF(X,A,B) returns A if X is non-zero, otherwise B
Example: IF(CLOSE>OPEN,HIGH,LOW) returns the high if the period closed up, otherwise the low
:param Indicator x: condition indicator
:param Indicator a: candidate indicator a
:param Indicator b: candidate indicator b
:rtype: Indicator
"""
INTPART.__doc__ = """
INTPART([data])
Truncate toward zero (i.e. take the integer part of the data)
:param data: input data
:rtype: Indicator
"""
IKDATA.__doc__ = """
IKDATA([data])
Wrap KData as an Indicator for use in other indicator calculations
:param data: KData, or an Indicator with 6 result sets (such as one produced by KDATA)
:return: Indicator
"""
KDATA_PART.__doc__ = """
KDATA_PART([data, kpart])
Select and return the indicator KDATA/OPEN/HIGH/LOW/CLOSE/AMO/VOL by string, e.g. KDATA_PART("CLOSE") is equivalent to CLOSE()
:param data: input data (KData or Indicator)
:param string kpart: KDATA|OPEN|HIGH|LOW|CLOSE|AMO|VOL
:return: Indicator
"""
CAPITAL = LIUTONGPAN
LIUTONGPAN.__doc__ = """
LIUTONGPAN(kdata)
Get the tradable (floating) share capital
:param KData kdata: K-line data
:rtype: Indicator
"""
LAST.__doc__ = """
LAST([data, m=10, n=5])
Holds over an interval.
Usage: LAST(X,M,N) means condition X holds from M periods ago through N periods ago.
Example: LAST(CLOSE>OPEN,10,5) means up (bullish) candles throughout the span from 10 days ago to 5 days ago.
:param data: input data
:param int m: M periods
:param int n: N periods
:rtype: Indicator
"""
LLV.__doc__ = """
LLV([data, n=20])
Lowest value within N periods; if N=0, starts from the first valid value.
:param data: input data
:param int n: N-period time window
:return: Indicator
"""
LN.__doc__ = """
LN([data])
Natural logarithm: LN(X) is the base-e logarithm of X
:param data: input data
:rtype: Indicator
"""
LOG.__doc__ = """
LOG([data])
Base-10 logarithm
:param data: input data
:rtype: Indicator
"""
LONGCROSS.__doc__ = """
LONGCROSS(a, b[, n=3])
Two lines cross after holding apart for a number of periods
Usage: LONGCROSS(A,B,N) returns 1 when A has stayed below B for N periods and crosses above B from below in the current period, otherwise 0
Example: LONGCROSS(MA(CLOSE,5),MA(CLOSE,10),5) means the 5-day MA makes a golden cross with the 10-day MA after holding below it for 5 periods
:param Indicator a:
:param Indicator b:
:param int n:
:rtype: Indicator
"""
ILOW.__doc__ = """
ILOW([data])
Get the lowest price, wrapping the KData low as an Indicator
:param data: input data (KData or Indicator)
:return: Indicator
"""
MA.__doc__ = """
MA([data, n=22, type="SMA"])
Moving average wrapper, defaults to the simple moving average
:param Indicator data: input data
:param int n: time window
:param string type: "EMA"|"SMA"|"AMA"
:return: Indicator
"""
MACD.__doc__ = """
MACD([data, n1=12, n2=26, n3=9])
Moving Average Convergence/Divergence
:param Indicator data: input data
:param int n1: short-term EMA time window
:param int n2: long-term EMA time window
:param int n3: EMA smoothing time window for (short-term EMA - long-term EMA)
:return: Indicator with three result sets
* result(0): MACD_BAR: MACD histogram, i.e. MACD fast line - MACD slow line
* result(1): DIFF: fast line, i.e. (short-term EMA - long-term EMA)
* result(2): DEA: slow line, i.e. the n3-period EMA smoothing of the fast line
"""
MAX.__doc__ = """
MAX(ind1, ind2)
Maximum: MAX(A,B) returns the larger of A and B.
:param Indicator ind1: A
:param Indicator ind2: B
:rtype: Indicator
"""
MIN.__doc__ = """
MIN(ind1, ind2)
Minimum: MIN(A,B) returns the smaller of A and B.
:param Indicator ind1: A
:param Indicator ind2: B
:rtype: Indicator
"""
MOD.__doc__ = """
MOD(ind1, ind2)
Modulo after truncation. This function exists only for TongDaXin compatibility; in practice the % operator can be used directly on indicators
Usage: MOD(A,B) returns A modulo B
Example: MOD(26,10) returns 6
:param Indicator ind1:
:param Indicator ind2:
:rtype: Indicator
"""
NDAY.__doc__ = """
NDAY(x, y[, n=3])
Consecutively greater: NDAY(X,Y,N) means condition X>Y has held for N consecutive periods
:param Indicator x:
:param Indicator y:
:param int n: 时间窗口
:rtype: Indicator
"""
NOT.__doc__ = """
NOT([data])
求逻辑非。NOT(X)返回非X,即当X=0时返回1,否则返回0。
:param Indicator data: 输入数据
:rtype: Indicator
"""
IOPEN.__doc__ = """
IOPEN([data])
获取开盘价,包装KData的开盘价成Indicator
:param data: 输入数据(KData 或 Indicator)
:return: Indicator
"""
POW.__doc__ = """
POW(data, n)
乘幂
用法:POW(A,B)返回A的B次幂
例如:POW(CLOSE,3)求得收盘价的3次方
:param data: 输入数据
:param int n: 幂
:rtype: Indicator
"""
REF.__doc__ = """
REF([data, n])
向前引用 (即右移),引用若干周期前的数据。
用法:REF(X,A) 引用A周期前的X值。
:param Indicator data: 输入数据
:param int n: 引用n周期前的值,即右移n位
:return: Indicator
"""
REVERSE.__doc__ = """
REVERSE([data])
求相反数,REVERSE(X)返回-X
:param Indicator data: 输入数据
:rtype: Indicator
"""
ROUND.__doc__ = """
ROUND([data, ndigits=2])
四舍五入
:param data: 输入数据
:param int ndigits: 保留的小数点后位数
:rtype: Indicator
"""
ROUNDDOWN.__doc__ = """
ROUNDDOWN([data, ndigits=2])
向下截取,如10.1截取后为10
:param data: 输入数据
:param int ndigits: 保留的小数点后位数
:rtype: Indicator
"""
ROUNDUP.__doc__ = """
ROUNDUP([data, ndigits=2])
向上截取,如10.1截取后为11
:param data: 输入数据
:param int ndigits: 保留的小数点后位数
:rtype: Indicator
"""
SAFTYLOSS.__doc__ = """
SAFTYLOSS([data, n1=10, n2=3, p=2.0])
亚历山大 艾尔德安全地带止损线,参见 [BOOK2]_
计算说明:在回溯周期内(一般为10到20天),将所有向下穿越的长度相加除以向下穿越的次数,得到噪音均值(即回溯期内所有最低价低于前一日最低价的长度除以次数),并用今日最低价减去(前日噪音均值乘以一个倍数)得到该止损线。为了抵消波动并且保证止损线的上移,在上述结果的基础上再取起N日(一般为3天)内的最高值
:param Indicator data: 输入数据
:param int n1: 计算平均噪音的回溯时间窗口
:param int n2: 对初步止损线去n2日内的最高值
:param float p: 噪音系数
:return: Indicator
"""
SIN.__doc__ = """
SIN([data])
正弦值
:param Indicator data: 输入数据
:rtype: Indicator
"""
SGN.__doc__ = """
SGN([data])
求符号值, SGN(X),当 X>0, X=0, X<0分别返回 1, 0, -1。
:param Indicator data: 输入数据
:rtype: Indicator
"""
SMA.__doc__ = """
SMA([data, n=22])
简单移动平均线
:param Indicator data: 输入数据
:param int n: 时间窗口
:return: Indicator
"""
SQRT.__doc__ = """
SQRT([data])
开平方
用法:SQRT(X)为X的平方根
例如:SQRT(CLOSE)收盘价的平方根
:param data: 输入数据
:rtype: Indicator
"""
STD = STDEV
STDEV.__doc__ = """
STDEV([data, n=10])
计算N周期内样本标准差
:param Indicator data: 输入数据
:param int n: 时间窗口
:return: Indicator
"""
STDP.__doc__ = """
STDP([data, n=10])
总体标准差,STDP(X,N)为X的N日总体标准差
:param data: 输入数据
:param int n: 时间窗口
:rtype: Indicator
"""
SUM.__doc__ = """
SUM([data, n=20])
求总和。SUM(X,N),统计N周期中X的总和,N=0则从第一个有效值开始。
:param Indicator data: 输入数据
:param int n: 时间窗口
:rtype: Indicator
"""
TAN.__doc__ = """
TAN([data])
正切值
:param Indicator data: 输入数据
:rtype: Indicator
"""
UPNDAY.__doc__ = """
UPNDAY(data[, n=3])
连涨周期数, UPNDAY(CLOSE,M)表示连涨M个周期
:param Indicator data: 输入数据
:param int n: 时间窗口
:rtype: Indicator
"""
VAR.__doc__ = """
VAR([data, n=2])
估算样本方差, VAR(X,N)为X的N日估算样本方差
:param Indicator data: 输入数据
:param int n: 时间窗口
:rtype: Indicator
"""
VARP.__doc__ = """
VARP([data, n=2])
总体样本方差, VARP(X,N)为X的N日总体样本方差
:param Indicator data: 输入数据
:param int n: 时间窗口
:rtype: Indicator
"""
VIGOR.__doc__ = """
VIGOR([kdata, n=2])
亚历山大.艾尔德力度指数 [BOOK2]_
计算公式:(收盘价今-收盘价昨)*成交量今
:param KData data: 输入数据
:param int n: EMA平滑窗口
:return: Indicator
"""
IVOL.__doc__ = """
IVOL([data])
获取成交量,包装KData的成交量成Indicator
:param data: 输入数据(KData 或 Indicator)
:return: Indicator
"""
WEAVE.__doc__ = """
WEAVE(ind1, ind2)
将ind1和ind2的结果组合在一起放在一个Indicator中。如ind = WEAVE(ind1, ind2), 则此时ind包含多个结果,按ind1、ind2的顺序存放。
:param Indicator ind1: 指标1
:param Indicator ind2: 指标2
:rtype: Indicator
"""
|
[
"fasiondog@163.com"
] |
fasiondog@163.com
|
640b13b1987767b0a3d0b86bd3125925a4a237cb
|
49982c52a56e86bc24605bf6bdb3e239036b2e32
|
/api_server.py
|
654c51eb6a237dd312a07462eaed96fb6e321057
|
[
"Apache-2.0"
] |
permissive
|
forkpool/GeeProxy
|
d75bdaa88018a7d49c5245268c749e88de5cc3bf
|
6f2f57ef1e1e8ea9a295cf987577dab5f1cadfe5
|
refs/heads/master
| 2023-05-12T17:05:36.562844
| 2020-08-17T06:36:09
| 2020-08-17T06:36:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
'''
@Author: qinzhonghe96@163.com
@Date: 2020-03-10 01:23:39
@LastEditors: qinzhonghe96@163.com
@LastEditTime: 2020-03-10 01:23:58
@Description:
'''
from GeeProxy.api.api import run_app
run_app()
|
[
"qinzhonghe96@163.com"
] |
qinzhonghe96@163.com
|
a6fa541aae3131eab1654b88551c903a078be740
|
6a1117f9a5671780c78c423c7bd54c3f7a469c81
|
/learn_temp/basic_app/templatetags/my_extras.py
|
a6cbad23344f161ea907215c79f57ca33c55011f
|
[] |
no_license
|
mahmoudgobba/django-deployment-example
|
533e14d00d7d50653195869dc961a550ab948ea0
|
9a83736cfe609fe59f3ed0403d31e88358475d32
|
refs/heads/master
| 2022-11-14T03:14:43.242344
| 2020-07-03T00:57:00
| 2020-07-03T00:57:00
| 276,688,639
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
from django import template
register = template.Library()
@register.filter(name='cut')
def Cut(value,arg):
"""
This cuts out all value of "arg" from the string
"""
return value.replace(arg,'')
# register.filter('cut', Cut)
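# Illustrative usage sketch (added, not part of the original file): once this
# templatetag module is loaded, the filter can be applied in a template. The
# template snippet below is hypothetical and only shows standard Django syntax.
#
#   {% load my_extras %}
#   {{ "Hello World"|cut:" " }}   {# renders "HelloWorld" #}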
|
[
"mahmoud.aid55@yahoo.com"
] |
mahmoud.aid55@yahoo.com
|
eb9d465c0fdcbf5ae8e749b5c00705a8bacaabd5
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/EightTeV/VH_TTH/TTH_HToGG_M_125_TuneZ2star_8TeV_pythia6_cff.py
|
0e74691e62089d8fbb3ef01c45eec1f2489f987a
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,033
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
from GeneratorInterface.ExternalDecays.TauolaSettings_cff import *
generator = cms.EDFilter("Pythia6GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(1),
# put here the efficiency of your filter (1. if no filter)
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
# put here the cross section of your process (in pb)
crossSection = cms.untracked.double(1.0),
maxEventsToPrint = cms.untracked.int32(1),
comEnergy = cms.double(8000.0),
ExternalDecays = cms.PSet(
Tauola = cms.untracked.PSet(
TauolaPolar,
TauolaDefaultInputCards
),
parameterSets = cms.vstring('Tauola')
),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring('PMAS(25,1)=125.0 !mass of Higgs',
'MSEL=0 ! user selection for process',
'MSUB(102)=0 !ggH',
'MSUB(123)=0 !ZZ fusion to H',
'MSUB(124)=0 !WW fusion to H',
'MSUB(24)=0 !ZH production',
'MSUB(26)=0 !WH production',
'MSUB(121)=1 !gg to ttH',
'MSUB(122)=1 !qq to ttH',
'MDME(210,1)=0 !Higgs decay into dd',
'MDME(211,1)=0 !Higgs decay into uu',
'MDME(212,1)=0 !Higgs decay into ss',
'MDME(213,1)=0 !Higgs decay into cc',
'MDME(214,1)=0 !Higgs decay into bb',
'MDME(215,1)=0 !Higgs decay into tt',
'MDME(216,1)=0 !Higgs decay into',
'MDME(217,1)=0 !Higgs decay into Higgs decay',
'MDME(218,1)=0 !Higgs decay into e nu e',
'MDME(219,1)=0 !Higgs decay into mu nu mu',
'MDME(220,1)=0 !Higgs decay into tau nu tau',
'MDME(221,1)=0 !Higgs decay into Higgs decay',
'MDME(222,1)=0 !Higgs decay into g g',
'MDME(223,1)=1 !Higgs decay into gam gam',
'MDME(224,1)=0 !Higgs decay into gam Z',
'MDME(225,1)=0 !Higgs decay into Z Z',
'MDME(226,1)=0 !Higgs decay into W W'),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
)
)
configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.1 $'),
name = cms.untracked.string('$Source: /cvs_server/repositories/CMSSW/CMSSW/Configuration/GenProduction/python/Attic/TTH_HToGG_M_125_TuneZ2star_8TeV_pythia6_cff.py,v $'),
annotation = cms.untracked.string('PYTHIA6 ttH, H->2gamma mH=125GeV with TAUOLA at 8TeV')
)
|
[
"sha1-c8b28d70dd1f4235246c4a027e80dcdcf397db6f@cern.ch"
] |
sha1-c8b28d70dd1f4235246c4a027e80dcdcf397db6f@cern.ch
|
4575d91d96b7789dc79ceaf32dede3a23ddf0dd9
|
b17f799e05ced53a70bc088fe49cd0ab072f88db
|
/str2.py
|
d8b34c431a27f1d2994db3d82ed9ec006f20065d
|
[] |
no_license
|
sharabao13/mypython_t
|
99cfb11bf6932d1e3a64b3214a72bcc01520a4a3
|
5783dfa274902254928ad5d53db813e4b20a8d65
|
refs/heads/master
| 2020-12-03T03:15:57.440612
| 2020-01-08T14:14:15
| 2020-01-08T14:14:15
| 231,194,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 92
|
py
|
# String splitting
# The partition method returns a tuple (head, sep, tail)
#
# startswith
# endswith
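# Illustrative examples (added sketch, not part of the original note):
s = "key=value"
head, sep, tail = s.partition("=")                 # -> ("key", "=", "value")
print(s.startswith("key"), s.endswith("value"))    # -> True True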
|
[
"sharahong13@gmail.com"
] |
sharahong13@gmail.com
|
8894d154163de31d129e692a1299c8f470930ed8
|
71966cb76360f818c09dd4464c7d999ee2cd5ff8
|
/import.py
|
36a28219089b223e1a8425bd24ec058e34e44ebf
|
[
"MIT"
] |
permissive
|
gatesata/my-beancount-scripts
|
2f27b5998f35ee5c6305ccc0c13bfd2b63e8c5d0
|
c13a95985e847814f13c2d18ea56422ec956d482
|
refs/heads/master
| 2020-07-26T06:03:13.062182
| 2019-09-14T11:05:30
| 2019-09-14T11:05:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,406
|
py
|
from datetime import date
from beancount.core import data
from beancount.parser import parser, printer
from beancount import loader
from modules.imports.alipay import Alipay
from modules.imports.wechat import WeChat
from modules.imports.citic_credit import CITICCredit
import re
import argparse
parser = argparse.ArgumentParser("import")
parser.add_argument("path", help = "CSV Path")
parser.add_argument("--entry", help = "Entry bean path (default = main.bean)", default = 'main.bean')
parser.add_argument("--out", help = "Output bean path", default = 'out.bean')
args = parser.parse_args()
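# Illustrative invocation (added comment; the CSV file name below is a placeholder,
# not part of the original project):
#
#   python import.py statement.csv --entry main.bean --out out.bean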
entries, errors, option_map = loader.load_file(args.entry)
importers = [Alipay, WeChat, CITICCredit]
instance = None
for importer in importers:
try:
with open(args.path, 'rb') as f:
file_bytes = f.read()
instance = importer(args.path, file_bytes, entries, option_map)
break
except:
pass
if instance is None:
print("No suitable importer!")
exit(1)
new_entries = instance.parse()
with open(args.out, 'w') as f:
printer.print_entries(new_entries, file = f)
print('Outputed to ' + args.out)
exit(0)
file = parser.parse_one('''
2018/01/15 * "测试" "测试"
Assets:Test 300 CNY
Income:Test
''')
print(file.postings)
file.postings[0] = file.postings[0]._replace(units = file.postings[0].units._replace(number = 100))
print(file.postings[0])
data = printer.format_entry(file)
print(data)
|
[
"git@zsxsoft.com"
] |
git@zsxsoft.com
|
9807e0d279b139e81f7f4a461b64f14634c105c1
|
a0e603abe4855c7cf8c0247299e57dfe30b0f36e
|
/src/update_tags.py
|
1b55d73b72feba925f7600893acadf28b49c7ea6
|
[
"MIT"
] |
permissive
|
massens/alfred-notion
|
2e275b475cc08263986104298fe4c5e8100f067d
|
9582299d7b4022f2d0f8debf72827b840634eefd
|
refs/heads/master
| 2020-06-22T16:57:27.143506
| 2019-06-18T02:31:31
| 2019-06-18T02:31:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 987
|
py
|
#!/usr/bin/env -S PATH="${PATH}:/usr/local/bin" python3
import sys
import json
from notion_api import tagsDatabase
from config import tagsFilePath
try:
database = tagsDatabase()
results = database.default_query().execute()
tags = [{
"uid": row.id,
"title": row.title,
"variables": {"tagName": row.title},
"arg": row.get_browseable_url(),
"match": row.title,
"copy": row.title,
"largetype": row.title
} for row in results]
doneTag = [{
"uid": "done",
"title": "Done",
"variables": {"tagName": "Done"},
"arg": "Done",
"match": "Done",
"copy": "Done",
"largetype": "Done"
}]
with open(tagsFilePath(), "w") as outfile:
json.dump({"items": doneTag + tags}, outfile)
print(str(len(tags)) + " tags")
except Exception as e:
# Print out nothing on STDOUT (a missing value means the operation was unsuccessful)
sys.stderr.write(str(e))
|
[
"kevin.j.jalbert@gmail.com"
] |
kevin.j.jalbert@gmail.com
|
51bde64b6e8c44be8a7bca91c0d160b83b678fef
|
3d66286c210de64e52b0fa71acc0440bdd59fda7
|
/app/models/reviews.py
|
23bf4475ae9f8fdeff5b40d2ffd15e5d863b8e40
|
[
"MIT"
] |
permissive
|
YomZsamora/Watchlist
|
678f875d39d5ddac84623327b839ec47500331e5
|
d808f2b1fc569b7541f240a3b4f96256322ab863
|
refs/heads/main
| 2022-08-14T21:09:20.765769
| 2022-05-09T03:08:26
| 2022-05-09T03:08:26
| 485,275,242
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 915
|
py
|
class Reviews:
all_reviews = []
def __init__(self, movie_id, title, imageurl, review):
self.movie_id = movie_id
self.title = title
self.imageurl = imageurl
self.review = review
def save_review(self):
# save_review method that appends the review object to a class variable all_reviews that is an empty list.
Reviews.all_reviews.append(self)
@classmethod
def clear_reviews(cls):
# clears all the Items from the list.
Reviews.all_reviews.clear()
@classmethod
def get_reviews(cls, id):
response = []
for review in cls.all_reviews: # loops through all the reviews in the all_reviews list
if review.movie_id == id: # checks for reviews that have the same movie ID as the id passed.
response.append(review) # append those reviews to a new response list
return response
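# Minimal usage sketch (added for illustration; the movie id, title and text below
# are made up and not part of the original application):
#
#   demo = Reviews(550, "Fight Club", "https://example.com/poster.jpg", "Great movie.")
#   demo.save_review()
#   Reviews.get_reviews(550)   # -> [demo]
#   Reviews.clear_reviews()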
|
[
"samaurah@gmail.com"
] |
samaurah@gmail.com
|
3e3325a7c0a4e4cbc896466a9b9210bee03d20ac
|
e1021f41e4426ec326665da35ced1579a452d248
|
/TFVariable.py
|
650d3df2a0aa38de446132d43e7ccbc0aa3b52b0
|
[] |
no_license
|
weizhenzhao/Tensorflow
|
ccf47d68402e2c73df3e0e7c71c6bce0ded92c8e
|
cdbeb367cccc1db38440fb6abda7671a11734a6f
|
refs/heads/master
| 2021-01-20T00:20:43.723276
| 2017-06-25T02:41:59
| 2017-06-25T02:41:59
| 89,114,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,113
|
py
|
'''
Created on Apr 21, 2017
@author: P0079482
'''
#如何通过tf.variable_scope函数来控制tf.get_variable函数获取已经创建过的变量
#在名字为foo的命名空间内创建名字为v的变量
import tensorflow as tf
with tf.variable_scope("foo"):
v = tf.get_variable("v",shape=[1],initializer=tf.constant_initializer(1.0))
#因为在命名空间foo中已经存在名为v的变量,所有下面的代码将会报错:
#Variable foo/v already exists,
with tf.variable_scope("foo"):
v = tf.get_variable("v",[1])
#在生成上下文管理器时,将参数reuse设置为True.这样tf.get_variable函数将直接获取已经声明的变量
with tf.variable_scope("foo",reuse=True):
v1 = tf.get_variable("v",[1])
print(v==v1) #输出为True,代表v,v1代表的是相同的Tensorflow中的变量
#将参数reuse设置为True是,tf.variable_scope将只能获取已经创建过的变量。
#因为在命名空间bar中还没有创建变量v,所以下面的代码将会报错
with tf.variable_scope("bar",reuse=True):
v = tf.get_variable("v",[1])
#如果tf.variable_scope函数使用reuse=None或者reuse=False创建上下文管理器
#tf.get_variable操作将创建新的变量。
#如果同名的变量已经存在,则tf.get_variable函数将报错
#Tensorflow中tf.variable_scope函数是可以嵌套的
with tf.variable_scope("root"):
#可以通过tf.get_variable_scope().reuse函数来获取上下文管理器中reuse参数的值
print(tf.get_variable_scope().reuse) #输出False,即最外层reuse是False
with tf.variable_scope("foo",reuse=True): #新建一个嵌套的上下文管理器并指定reuse为True
print(tf.get_variable_scope().reuse) #输出True
with tf.variable_scope("bar"): #新建一个嵌套的上下文管理器,但不指定reuse,这时reuse的取值会和外面一层保持一致
print(tf.get_variable_scope().reuse) #输出True
print(tf.get_variable_scope().reuse) #输出False
#tf.variable_scope函数生成的上下文管理器也会创建一个Tensorflow中的命名空间
#在命名空间内创建的变量名称都会带上这个命名空间作为前缀
#所以tf.variable_scope函数除了可以控制tf.get_variable执行的功能之外
#这个函数也提供了一个管理命名空间的方式
v1 = tf.get_variable("v",[1])
print(v1.name)#输出v:0 "v"为变量的名称,":0"表示这个变量是生成变量这个运算的第一个结果
with tf.variable_scope("foo"):
v2 = tf.get_variable("v",[1])
print(v2.name)#输出foo/v:0 在tf.variable_scope中创建的变量,名称前面会
#加入命名空间的名称,并通过/来分隔命名空间的名称和变量的名称
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v3 = tf.get_variable("v",[1])
print(v3.name) #输出foo/bar/v:0 命名空间可以嵌套,同时变量的名称也会加入所有命名空间的名称作为前缀
v4 = tf.get_variable("v1",[1])
print(v4.name) #输出foo/v1:0 当命名空间退出之后,变量名称也就不会再被加入其前缀了
#创建一个名称为空的命名空间,并设置reuse=True
with tf.variable_scope("",reuse=True):
v5=tf.get_variable("foo/bar/v",[1])#可以直接通过带命名空间名称的变量名来获取其他命名空间下的变量。
print(v5==v3)
v6=tf.get_variable("foo/v1",[1])
print(v6==v4)
#通过tf.variable_scope和tf.get_variable函数,以下代码对inference函数的前向传播结果做了一些改进
def inference(input_tensor,reuse=False):
#定义第一层神经网络的变量和前向传播过程
with tf.variable_scope('layer1',reuse=reuse):
#根据传进来的reuse来判断是创建新变量还是使用已经创建好了。在第一次构造网络时需要创建新的变量,
#以后每次调用这个函数都直接使用reuse=True就不需要每次将变量传进来了
weights= tf.get_variable("weights",[INPUT_NODE,LAYER1_NODE],initializer=tf.truncated_normal_initializer(stddev=0.1))
biases= tf.get_variable("biases",[LAYER1_NODE],initializer=tf.constant_initializer(0.0))
layer1 = tf.nn.relu(tf.matmul(input_tensor,weights)+biases)
#类似地定义第二层神经网络的变量和前向传播过程
with tf.variable_scope('layer2',reuse=reuse):
weights=tf.get_variable("weights",[LAYER1_NODE,OUTPUT_NODE],initializer=tf.truncated_normal_initializer(stddev=0.1))
biases=tf.get_variable("biases",[OUTPUT_NODE],initializer=tf.constant_initializer(0.0))
layer2=tf.matmul(layer1,weights)+biases
#返回最后的前向传播结果
return layer2
x=tf.placeholder(tf.float32,[None,INPUT_NODE],name='x-input')
y=inference(x)
#在程序中需要使用训练好的神经网络进行推倒时,可以直接调用inference(new_x,True)
#Tensorflow模型持久化
import tensorflow as tf
v1=tf.Variable(tf.constant(1.0,shape=[1]),name="v1")
v2=tf.Variable(tf.constant(2.0,shape=[1]),name="v2")
result=v1+v2
init_op=tf.initialize_all_variables()
#声明tf.train.Saver类用于保存模型
saver=tf.train.Saver()
with tf.Session() as sess:
sess.run(init_op)
#将模型保存到/path/to/model/model.ckpt文件
saver.save(sess,"/path/to/model/model.ckpt")
#生成的文件
#model.ckpt.meta保存了TensorFlow计算图的结构
#model.ckpt这个文件保存了TensorFlow程序中每一个变量的取值
#checkpoint文件 保存了一个目录下所有的模型文件列表
#加载保存的文件
import tensorflow as tf
#使用和保存模型代码中一样的方式来声明变量
v1=tf.Variable(tf.constant(1.0,shape=[1]),name="v1")
v2=tf.Variable(tf.constant(2.0,shape=[1]),name="v2")
result=v1+v2
saver=tf.train.Saver()
with tf.Session() as sess:
#加载已经保存的模型,并通过已经保存的模型中变量的值来计算加法
saver.restore(sess,"/path/to/model/model.ckpt")
print(sess.run(result))
import tensorflow as tf
#直接加载持久化的图
saver=tf.train.import_meta_graph("/path/to/model/model.ckpt.meta")
with tf.Session() as sess:
saver.restore(sess,"/path/to/model/model.ckpt")
#通过张量的名称来获取张量
print(sess.run(tf.get_default_graph().get_tensor_by_name("add:0")))
#输出[ 3.]
#加载指定的变量
#可能之前有一个训练好的五层神经网络模型,但现在想尝试一个六层的神经网络
#那么可以将前面五层神经网络中的参数直接加载到新的模型,而仅仅将最后一层神经网络重新训练
#在加载模型的代码中使用saver=tf.train.Saver([v1])命令来构建tf.train.Saver类
v1=tf.Variable(tf.constant(1.0,shape=[1]),name="other-v1")
v2=tf.Variable(tf.constant(2.0,shape=[1]),name="other-v2")
#如果直接使用tf.train.Saver()来加载模型会报找不到的错误
#使用字典来重命名就可以加载原来的模型了,这个字典指定了原来名称为v1的变量现在加载到变量v1中(名称为other-v1)
#名称为v2的变量加载到变量v2中(名称为other-v2)
saver=tf.train.Saver({"v1":v1,"v2",v2})
#这样做的主要目的之一就是方便使用变量的滑动平均值
#给出了一个保存滑动平均模型的样例
import tensorflow as tf
v=tf.Variable(0,dtype=tf.float32,name="v")
#在没有申明滑动平均模型时只有一个变量v,所以下面的语句只会输出"v:0
for variables in tf.all_variables():
print(variables.name)
ema = tf.train.ExponentialMovingAverage(0.99)
maintain_averages_op=ema.apply(tf.all_variables())
#在申明滑动平均模型之后,Tensorflow会自动生成一个影子变量
#v/ExponentialMoving Average 于是下面的语句会输出
#"v:0" 和 "v/ExponentialMovingAverage:0"
for variables in tf.all_variables():
print(variables.name)
saver = tf.train.Saver()
with tf.Session() as sess:
init_op=tf.initialize_all_variables()
sess.run(init_op)
sess.run(tf.assign(v,10))
sess.run(maintain_averages_op)
#保存时Tensorflow会将v:0和v/ExponentialMovingAverage:0两个变量都存下来
saver.save(sess,"/path/to/model/model.ckpt")
print(sess.run([v,ema.average(v)])) #输出[10.0,0.099999905]
#一下代码给出了如何通过变量重命名直接读取变量的滑动平均值
v=tf.Variable(0,dtype=tf.float32,name="v")
#通过变量重命名将原来变量v的滑动平均值直接赋值给v
saver = tf.train.Saver({"v/ExponentialMovingAverage":v})
with tf.Session() as sess:
saver.restore(sess,"/path/to/model/model.ckpt")
print(sess.run(v))
#为了方便加载时重命名滑动平均变量,tf.train.ExponentialMovingAverage类提供了
#variables_to_restore函数来生成tf.train.Saver类所需要的变量重命名字典
import tensorflow as tf
v=tf.Variable(0,dtype=tf.float32,name="v")
ema=tf.train.ExponentialMovingAverage(0.99)
#通过使用variables_to_restore函数可以直接生成上面代码中提供的字典
#{"v/ExponentialMovingAverage":v}
print(ema.variables_to_restore())
saver = tf.train.Saver(ema.variables_to_restore())
with tf.Session() as sess:
saver.restore(sess,"/path/to/model/model.ckpt")
print(sess.run(v)) #输出0.099999905,即原来模型中变量v的滑动平均值
#下面代码给出了如何通过变量重命名直接读取变量的滑动平均值
#读取的变量v的值实际上是上面代码中变量v的滑动平均值
#通过这个方法就可以只用完全一样的代码来计算滑动平均模型前向传播的结果
v=tf.Variable(0,dtype=tf.float32,name="v")
#通过变量重命名将原来变量v的滑动平均值直接赋值给v
saver = tf.train.Saver({"v/ExponentialMovingAverage":v})
with tf.Session() as sess:
saver.restore(sess,"/path/to/model/model.ckpt")
print(sess.run(v)) #输出0.099999905 这个值就是原来模型中变量v的滑动平均值
import tensorflow as tf
from tensorflow.python.framework import graph_util
v1=tf.Variable(tf.constant(1.0,shape=[1]),name="v1")
v2=tf.Variable(tf.constant(2.0,shape=[2]),name="v2")
result=v1+v2
init_op=tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init_op)
#导出当前计算图的GraphDef部分,只需要这一个部分就可以完成从输入层到输出层的计算过程
graph_def=tf.get_default_graph().as_graph_def()
output_graph_def=graph_util.convert_variables_to_constants(sess,graph_def,['add'])
#将导出的模型存入文件
with tf.gfile.GFile("/path/to/model/combined_model.pb","wb") as f:
f.write(output_graph_def.SerializeToString())
#当只需要得到计算图中某个节点的取值时,这提供了一个更加方便的方法。
import tensorflow as tf
from tensorflow.python.platform import gfile
with tf.Session() as sess:
model_filename="/path/to/model/combined_model.pb"
#读取保存的模型文件,并将文件解析成对应的GraphDef Protocol Buffer
with gfile.FastGFile(model_filename,'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
#将graph_def中保存的图加载到当前的图中。
#return_elements=["add":0]给出了返回的张量的名称。在保存
#的时候给出的是计算节点的名称,所以为"add"。在加载的时候给出
#的是张量的名称,所以是add:0
result = tf.import_graph_def(graph_def,return_elements=["add:0"])
print(sess.run(result))
import tensorflow as tf
#tf.train.NewCheckpointReader可以读取checkpoint文件中保存的所有变量
reader = tf.train.NewCheckpointReader('/path/to/model/model.ckpt')
#获取所有变量列表。这个事一个从变量名到变量维度的字典
all_variables=reader.get_variable_to_shape_map()
for variable_name in all_variables:
#variable_name为变量名称,all_variable[variable_name]为变量的维度
print(variable_name,all_variables[variable_name])
#获取名称为v1的变量
print("Value for variable v1 is",reader.get_tensor("v1"))
|
[
"958904120@qq.com"
] |
958904120@qq.com
|
ed5507ca40f694aaeb3f0fd6c7cea9ccbffd46ff
|
0ef88e57246d46aac70e49905f8394f63b0874c7
|
/logger.py
|
1c0ce0ed70e53f95dc2bb7e05acc1cfdfa6d0066
|
[] |
no_license
|
liko006/Image_Segmentation_practice
|
06924a60cba11b936582b5188116830d8d6efb8e
|
f4e4d93d170f3959aa96e2398b0c68f276993eb7
|
refs/heads/main
| 2023-04-25T06:23:56.247648
| 2021-05-14T06:53:36
| 2021-05-14T06:53:36
| 359,327,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,061
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import os
import sys
__all__ = ['setup_logger']
# reference from: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/utils/logger.py
def setup_logger(name, save_dir, distributed_rank, filename="log.txt", mode='w'):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# don't log results for the non-master process
if distributed_rank > 0:
return logger
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
if save_dir:
if not os.path.exists(save_dir):
os.makedirs(save_dir)
fh = logging.FileHandler(os.path.join(save_dir, filename), mode=mode) # 'a+' for add, 'w' for overwrite
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
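# Minimal usage sketch (added for illustration; the logger name and directory are
# placeholders, not part of the original project):
#
#   logger = setup_logger("segmentation", "./runs/exp1", distributed_rank=0)
#   logger.info("training started")   # goes to stdout and ./runs/exp1/log.txt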
|
[
"noreply@github.com"
] |
liko006.noreply@github.com
|
b01109d3234d3e5a2c1000f7c0150a5e91f60ab6
|
46732d613208ee4096fbbd3fd74f22146471d1ce
|
/wangyiyun_songs&lyrics/all_singer歌手情绪分析/郝云/jieba分词并统计词频后输出结果到Excel和txt文档.py
|
f9eb64a2d25577b8f29969ec4dc7b43cf3e4d8fa
|
[] |
no_license
|
cassieeric/python_crawler
|
7cb02f612382801ae024e2cee70e0c2bcdba927c
|
6d2b4db3d34183d729f6fd30555c6d6f04514260
|
refs/heads/master
| 2022-11-30T20:30:50.031960
| 2022-11-27T02:53:22
| 2022-11-27T02:53:22
| 118,204,154
| 322
| 283
| null | 2022-12-21T09:33:08
| 2018-01-20T03:17:14
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,588
|
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import sys
import jieba
import jieba.analyse
import xlwt # 写入Excel表的库
# reload(sys)
# sys.setdefaultencoding('utf-8')
if __name__ == "__main__":
wbk = xlwt.Workbook(encoding='ascii')
sheet = wbk.add_sheet("wordCount") # Excel单元格名字
word_lst = []
key_list = []
for line in open('郝云歌词汇总_outputs.txt', encoding='gbk'): # 1.txt是需要分词统计的文档
item = line.strip('\n\r').split('\t') # 制表格切分
# print item
tags = jieba.analyse.extract_tags(item[0]) # jieba分词
for t in tags:
word_lst.append(t)
word_dict = {}
with open("wordCount_郝云歌词.txt", 'w') as wf2: # 打开文件
for item in word_lst:
if item not in word_dict: # 统计数量
word_dict[item] = 1
else:
word_dict[item] += 1
orderList = list(word_dict.values())
orderList.sort(reverse=True)
# print orderList
for i in range(len(orderList)):
for key in word_dict:
if word_dict[key] == orderList[i]:
wf2.write(key + ' ' + str(word_dict[key]) + '\n') # 写入txt文档
key_list.append(key)
word_dict[key] = 0
for i in range(len(key_list)):
sheet.write(i, 1, label=orderList[i])
sheet.write(i, 0, label=key_list[i])
wbk.save('wordCount_郝云歌词.xls') # 保存为 wordCount.xls文件
|
[
"noreply@github.com"
] |
cassieeric.noreply@github.com
|
8d192b1eceecea7cb7705301da9a7f46f6f2ab93
|
2aa82f6809da72301bd40ebbfc47a1470d8d340c
|
/log.py
|
0514f0627f43aee6ce3581d411d9e165188756ae
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
norangLemon/snuBot
|
ea5f47bfeab0e7e6ccc524c04efbe38df68207bb
|
02123052d9e53b6b2a8c2c304f97c670de8f01df
|
refs/heads/master
| 2021-01-09T20:52:15.538934
| 2019-05-22T10:48:25
| 2019-05-22T10:48:25
| 60,683,563
| 2
| 2
| null | 2019-05-22T10:48:26
| 2016-06-08T08:55:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,630
|
py
|
import logging
import logging.handlers
import sys
# log file 만들기
# '/'로 시작하는 command와 샤샤의 심심이 기능을 분리해서 로그를 남긴다
# 일반 채팅은 로그를 남기지 않는다
# 최상위 loger에게 stdout으로 출력하도록 한다
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s|%(name)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
# child logger는 file에 출력한다
# command log용 hander 생성
cmd_logger = logging.getLogger('cmdLoger')
cmd_fileHandler = logging.FileHandler('Logs/command.log')
cmd_streamHandler = logging.StreamHandler()
# 심심이 log용 handler 생성
chat_logger = logging.getLogger('chatLoger')
chat_fileHandler = logging.FileHandler('Logs/chat.log')
chat_streamHandler = logging.StreamHandler()
# 동등한 파일 형식 사용
# [파일명: 줄번호] <레벨>
# 메시지
formatter = logging.Formatter('\t[%(filename)s: %(lineno)s] <%(levelname)s> %(asctime)s\n%(message)s')
# 형식 적용, 핸들러를 로거에 추가, 레벨 설정
chat_fileHandler.setFormatter(formatter)
cmd_fileHandler.setFormatter(formatter)
chat_logger.addHandler(chat_fileHandler)
cmd_logger.addHandler(cmd_fileHandler)
chat_logger.addHandler(chat_streamHandler)
cmd_logger.addHandler(cmd_streamHandler)
cmd_logger.setLevel(logging.DEBUG)
chat_logger.setLevel(logging.DEBUG)
# 함수명 alias
cmd_prtErr = cmd_logger.error
cmd_prtLog = cmd_logger.debug
chat_prtErr = chat_logger.error
chat_prtLog = chat_logger.debug
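# Minimal usage sketch (added for illustration; the messages are placeholders):
#
#   cmd_prtLog("handled /help command")    # written to Logs/command.log
#   chat_prtErr("chat backend timed out")  # written to Logs/chat.log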
|
[
"pinethee@naver.com"
] |
pinethee@naver.com
|
e7be973356287b1f6f9f3adf7daae099cff5e85d
|
6fc13c46caf0b64f0e4b128378fb205dee87bd43
|
/gallery/migrations/0003_auto__add_field_galleryimage_description.py
|
db70b888fdd6a79acca5b359a38d95da13a072c8
|
[] |
no_license
|
wreckage/sammy-pjax
|
dc48b01d5cc48544d7a08cbd490ee428e4a84316
|
3b294fcfcd84892c3308551876bc8e4d156abbf3
|
refs/heads/master
| 2021-01-11T23:53:14.133047
| 2017-01-11T13:14:29
| 2017-01-11T13:14:29
| 78,640,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,439
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GalleryImage.description'
db.add_column(u'gallery_galleryimage', 'description',
self.gf('django.db.models.fields.CharField')(default='none', max_length=1000),
keep_default=False)
def backwards(self, orm):
# Deleting field 'GalleryImage.description'
db.delete_column(u'gallery_galleryimage', 'description')
models = {
u'gallery.gallery': {
'Meta': {'object_name': 'Gallery'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'gallery.galleryimage': {
'Meta': {'object_name': 'GalleryImage'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'gallery': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['gallery.Gallery']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
}
}
complete_apps = ['gallery']
|
[
"reubenurbina@gmail.com"
] |
reubenurbina@gmail.com
|
99d58e8cc76b55254b312749479d7bf54baae87e
|
8c9712b40d184fcb0ed1d2e1bc5369312f4086c4
|
/trojhackproj/ptfoapp/ptfoapp/settings.py
|
33dba0ee0a0d647528ab35c50c35625c4c59aa38
|
[] |
no_license
|
medhivya/2d4e-trial
|
6f738126d14a0eda8357f9fcba1caf859bd68426
|
d59c487d6ba534ab45a84f8f69536a6016666904
|
refs/heads/master
| 2020-04-26T11:24:47.178911
| 2019-03-03T11:13:36
| 2019-03-03T11:13:36
| 173,515,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,528
|
py
|
"""
Django settings for ptfoapp project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gs^^d-wu!rqxc&eerkbap9t_j@^rr8ljni%1-36xf^@8zeb+u@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'update.apps.UpdateConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ptfoapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ptfoapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# """ DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': 'mydatabase',
# 'USER': 'mydatabaseuser',
# 'PASSWORD': 'mypassword',
# 'HOST': '127.0.0.1',
# 'PORT': '5432',
# }
#} """
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"noreply@github.com"
] |
medhivya.noreply@github.com
|
b9930750f13f33f681ca4d714cfb85b0de95c5ea
|
e1585b905cbf76a344bc7b73dd14b445d3c5d70c
|
/packages/_LibreOffice/_LibreOffice.py
|
be5c9c6ab08e7d618f606d3a6d5aa5582c43402a
|
[
"MIT"
] |
permissive
|
camilleC/windows-installer
|
4584408bc6e3651d6e0907fd98985f17430d5bb7
|
2410240bed7559afaf458819ee1f9695ad647614
|
refs/heads/master
| 2021-01-16T22:46:50.910132
| 2012-09-04T22:50:06
| 2012-09-04T22:50:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
'''
@author:
'''
from ..defaultpackage.package import Package
class _LibreOffice(Package):
def __init__(self):
Package.__init__(self)
|
[
"nycteaa@students.wwu.edu"
] |
nycteaa@students.wwu.edu
|
0ae63594cd647e083be2093d5d31eba3b47fa930
|
714268a27bd4cc34ec053cb3d991012151554aad
|
/CodeChef/May Lunchtime/LOSTWKND.py
|
13d3ab6d379aba7ff6968174279243e54554f2d9
|
[] |
no_license
|
yashhR/competitive
|
2b649011c2cea74eea8d9646bcfafc73743651eb
|
37f2ec68b33828df4692bc23f28d532cb8d4a358
|
refs/heads/master
| 2022-11-10T04:53:47.634062
| 2020-06-22T16:43:03
| 2020-06-22T16:43:03
| 274,190,602
| 0
| 0
| null | 2020-06-22T16:36:02
| 2020-06-22T16:36:02
| null |
UTF-8
|
Python
| false
| false
| 449
|
py
|
t = int(input())
while t:
info = list(map(int, input().split()))
p = info[-1]
info.pop(-1)
work = list(map(lambda x: x*p, info))
for i in range(5):
if i == 4:
if work[i] > 24:
print("Yes")
else:
print("No")
break
elif work[i] > 24:
work[i+1] += work[i] - 24
elif work[i] < 24:
work[i+1] -= 24 - work[i]
t -= 1
|
[
"17131a05h5@gvpce.ac.in"
] |
17131a05h5@gvpce.ac.in
|
ca055970b0ed963a1c28b40c99b3a2c958095e72
|
1fc7fc8cc0ad49133ba9a4dae910fd7d6e9b242c
|
/pyqtgraph/examples/multiprocess.py
|
2e32b041aa44c70ff2198f1041646ce0ef6291df
|
[
"MIT"
] |
permissive
|
Yingzhang1122/DiffractionLimitedAnalysis
|
2a67ac2ac87e9fdaf9262a565cc717899e439561
|
6ea260b738a624962a329dcb7ae19ee048515edf
|
refs/heads/main
| 2023-06-03T16:12:15.684375
| 2021-05-26T18:47:40
| 2021-05-26T18:47:40
| 368,825,659
| 0
| 0
|
MIT
| 2021-05-19T10:11:17
| 2021-05-19T10:11:17
| null |
UTF-8
|
Python
| false
| false
| 1,539
|
py
|
# -*- coding: utf-8 -*-
import initExample ## Add path to library (just for examples; you do not need this)
import numpy as np
import pyqtgraph.multiprocess as mp
import pyqtgraph as pg
import time
print("\n=================\nStart Process")
proc = mp.Process()
import os
print("parent:", os.getpid(), "child:", proc.proc.pid)
print("started")
rnp = proc._import('numpy')
arr = rnp.array([1,2,3,4])
print(repr(arr))
print(str(arr))
print("return value:", repr(arr.mean(_returnType='value')))
print( "return proxy:", repr(arr.mean(_returnType='proxy')))
print( "return auto: ", repr(arr.mean(_returnType='auto')))
proc.join()
print( "process finished")
print( "\n=================\nStart ForkedProcess")
proc = mp.ForkedProcess()
rnp = proc._import('numpy')
arr = rnp.array([1,2,3,4])
print( repr(arr))
print( str(arr))
print( repr(arr.mean()))
proc.join()
print( "process finished")
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
app = pg.mkQApp("Multiprocess Example")
print( "\n=================\nStart QtProcess")
import sys
if (sys.flags.interactive != 1):
print( " (not interactive; remote process will exit immediately.)")
proc = mp.QtProcess()
d1 = proc.transfer(np.random.normal(size=1000))
d2 = proc.transfer(np.random.normal(size=1000))
rpg = proc._import('pyqtgraph')
plt = rpg.plot(d1+d2)
## Start Qt event loop unless running in interactive mode or using pyside.
#import sys
#if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
#QtGui.QApplication.instance().exec_()
|
[
"zengjie.xia7@gmail.com"
] |
zengjie.xia7@gmail.com
|
800ad800104a7a948b97070e18c41fc42e2b2e44
|
5b3140819d8a1b531746b57403e976c2ced0cf56
|
/foundation/test.py
|
6b5e8fb4bce38b122b00a34aa47a108aad2cca43
|
[] |
no_license
|
celcoco/PythonExercise
|
53ff756f0f970b58b8aada4dab833f858f2be5ff
|
5906d7752374757a98a8a7454b11b8a23d7cd239
|
refs/heads/master
| 2021-01-19T05:18:36.517714
| 2016-09-26T02:53:55
| 2016-09-26T02:53:55
| 63,924,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
def traverse(seq_len):
idx = 1
traversed = []
while len(traversed) < seq_len :
traversed.append(idx)
traversed.append(2*len(traversed) + 1)
idx += 2
if idx > seq_len:
idx -= seq_len
print(traversed, len(traversed))
sum = 0
for idx in traversed:
print(idx)
i = traversed.pop()
sum += (i * idx)
print(sum)
traverse(7)
|
[
"enle.chen@gmail.com"
] |
enle.chen@gmail.com
|
4a548acbd2e82977a69418ac05f42ad040b90671
|
c89a53455f295b777bcf1f7b0d373566bbd57356
|
/Vilgaxeye/mainfindpathfordemopathtwo20201207.py
|
8afbc5245f338e8eb837f6e7ed7a9607e5350088
|
[] |
no_license
|
KarnMatas/Vilgalaxy
|
9fe5133abcf8d60bae852848a578b69b3ca850ff
|
d7338fd5dfd98a26c0fce4dce331c2d281f53c90
|
refs/heads/main
| 2023-02-04T04:24:25.629964
| 2020-12-23T17:36:29
| 2020-12-23T17:36:29
| 313,986,716
| 0
| 1
| null | 2020-12-04T13:54:46
| 2020-11-18T16:05:48
|
Python
|
UTF-8
|
Python
| false
| false
| 13,664
|
py
|
import cv2
import math
import imutils
import numpy as np
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
from matplotlib import pyplot as plt
# from CardDetect import *
# cropsize = 40
# def resize(frame,cropsize):
# oldframe = frame[cropsize:-cropsize, cropsize:-cropsize]
# return oldframe
def map(x, in_min, in_max, out_min, out_max):
return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
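# Illustrative examples (added comment): map() is a linear rescale from one range to another, e.g.
#   map(5, 0, 10, 0, 100)          -> 50.0
#   map(128, 0, 255, 20.0, 10.0)   -> ~14.98  (an inverted output range is allowed)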
def edgedetect(frame):
# imgGrey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# newimg = frame[40:-86, 40:-55] # พอมีไฟแล้วมาแก้ cropsize
kernel = np.ones((3,3),np.uint8)/ 10
average = cv2.filter2D(frame, -1, kernel)
edge = cv2.Canny(average, 6, 98) # 10 71 # ส่องไฟ 6 98
return edge
def adjustment_markpath(frame):
# kernel = np.ones((2, 2), np.uint8) / 10
kernelc = np.ones((3, 3), np.uint8) / 10
opening = cv2.morphologyEx(frame, cv2.MORPH_CLOSE, kernelc)
dirate1 = cv2.dilate(opening, kernelc, iterations= 20) #31 มีไฟ 10
erode1 = cv2.erode(dirate1, kernelc, iterations= 15) #28 5 , 7
# cv2.imshow('test',erode1)
return erode1
def adjustment_chessboard(frame):
# kernel = np.ones((2, 2), np.uint8) / 10
kernelc = np.ones((3, 3), np.uint8) / 10
opening = cv2.morphologyEx(frame, cv2.MORPH_CLOSE, kernelc)
dirate1 = cv2.dilate(opening, kernelc, iterations= 9) # 8 มีไฟ 9
erode1 = cv2.erode(dirate1, kernelc, iterations= 25) # 28 25
return erode1
def findendp(frame):
pix = np.copy(frame)
for p in range(pix.shape[0]):
for q in range(pix.shape[1]):
if (pix[p][q] != 0):
pix[p][q] = 1
fakernelme = np.array([[1,1,1],
[1,10,1],
[1,1,1]])
filterme = cv2.filter2D(pix,-1,fakernelme)
return np.argwhere(filterme == 11)
pathcontours=[]
thinedpath=[]
markercontours=[]
centermarkers=[] # ใช้ [x,y] ได้เลย
finishcontours=[]
centerfinish=[]
X = []
Y = []
Z = []
minpix=[]
maxpix=[]
valuepic=[]
rawZ = []
cornerpoints_skel = []
end_skel=[]
sortpoint=[]
anglespath=[]
world_position=[]
rf_list=[]
def spreadline(endpoints,thispath,picgrey):
for i in range(len(centermarkers)): #endpoint สั่งสลับ [x,y] เป็น [y,x]
for j in range(len(endpoints)): # ep x , ep y
length = math.sqrt(pow((endpoints[j][1]-centermarkers[i][0]),2) + pow((endpoints[j][0]-centermarkers[i][1]),2))
print('dolength=',length)
if length <= 155.00:
# print(length)
markcolor = picgrey[endpoints[j][0],endpoints[j][1]]
cv2.circle(picgrey, (endpoints[j][1],endpoints[j][0]), 7, int(markcolor), -1)
# print('mark1=',markcolor)
cv2.line(thispath, (centermarkers[i][0],centermarkers[i][1]), (endpoints[j][1],endpoints[j][0]), (255,255,255), 1)
cv2.line(picgrey, (centermarkers[i][0],centermarkers[i][1]), (endpoints[j][1],endpoints[j][0]), int(markcolor), 10)
for i in range(len(centerfinish)): #endpoint สั่งสลับ [xcc,y] เป็น [y,x]
for j in range(len(endpoints)): # ep x , ep y
length = math.sqrt(pow((endpoints[j][1]-centerfinish[i][0]),2) + pow((endpoints[j][0]-centerfinish[i][1]),2))
# print('dolength2=',length)
if length <= 155.00:
# print(length)
markcolor2 = picgrey[endpoints[j][0],endpoints[j][1]]
cv2.circle(picgrey, (endpoints[j][1],endpoints[j][0]), 7, int(markcolor2), -1)
# print('mark2=',markcolor2)
cv2.line(thispath, (centerfinish[i][0],centerfinish[i][1]), (endpoints[j][1],endpoints[j][0]), (255,255,255), 1)
cv2.line(picgrey, (centerfinish[i][0],centerfinish[i][1]), (endpoints[j][1],endpoints[j][0]), int(markcolor2), 10)
def plotmypath(frame,picgrey,order): # picgrey = newimg
global rawZ,valuepic,temp
temp = np.argwhere(frame == 255)
x,y = temp.T
# print(len(temp))
# print("x=",len(x))
for j in range(len(temp)):
valuepic.append(picgrey[x[j], y[j]])
# X.append(j[1])
# Y.append(j[0])
rawZ.append(picgrey[x[j], y[j]])
# print(len(valuepic))
maxpix.append(max(valuepic))
minpix.append(min(valuepic))
valuepic = []
# print(maxpix)
for k in range(len(rawZ)):
# if (maxpix[i] - minpix[i]) > 10.0:
Z.append(map(rawZ[k], minpix[order], maxpix[order], 20.0, 10.0)) # สลับ 10 20
# print(rawZ)
# print(Z)
rawZ = []
def convert_coordinate(lstpixel):
yworld = (lstpixel[0] / 530) * 370
xworld = (lstpixel[1] / 530) * 370
yworld = (yworld - 370) * (-1)
xworld = (xworld - 370) * (-1)
return [xworld,yworld]
def PathFinder(frame):
global cornerpoints_skel
clone = frame.copy()[35:-35, 35:-35]
# cv2.imshow("120",clone)
# return clone
imgGrey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
newimg = imgGrey[35:-35, 35:-35] #40:-86, 40:-55
newgrey = newimg.copy()
denoiseimg = cv2.fastNlMeansDenoising(newgrey, None, 10, 7, 21)
cv2.imshow('raw',newimg)
keepedge = edgedetect(newimg)
adjustimg_markpath = adjustment_markpath(keepedge)
adjustimg_chessboard = adjustment_chessboard(keepedge)
contours, hierarchy = cv2.findContours(adjustimg_markpath, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
contours1, hierarchy1 = cv2.findContours(adjustimg_chessboard, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for path in range(len(contours)):
look = np.zeros(keepedge.shape, np.uint8)
cv2.drawContours(look,contours,path,(255,255,255),-1)
cv2.imshow('cnt',look)
cv2.waitKey(0)
if (cv2.contourArea(contours[path]) > 25000 and cv2.contourArea(contours[path]) < 50000) :
pathcontours.append(np.zeros(keepedge.shape, np.uint8))
cv2.drawContours(pathcontours[len(pathcontours)-1],contours,path,(255,255,255),-1)
cv2.imshow('thinpath',pathcontours[0])
cv2.waitKey(0)
elif (cv2.contourArea(contours[path]) > 10000 and cv2.contourArea(contours[path]) < 25000) :
# compute the center of the contour
M = cv2.moments(contours[path])
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
markercontours.append(np.zeros(keepedge.shape, np.uint8))
cv2.drawContours(markercontours[len(markercontours)-1],contours,path,(255,255,255),-1)
# cv2.circle(markercontours[len(markercontours)-1], (cX, cY), 7, (0, 0, 255), -1)
print(len(markercontours))
cv2.waitKey(0)
centermarkers.append([cX, cY])
Mcb = cv2.moments(contours1[0])
cXcb = int(Mcb["m10"] / Mcb["m00"])
cYcb = int(Mcb["m01"] / Mcb["m00"])
finishcontours.append(np.zeros(keepedge.shape, np.uint8))
cv2.drawContours(finishcontours[0],contours1,-1,(255,255,255),-1)
# cv2.circle(finishcontours[0], (cXcb, cYcb), 7, (0, 0, 255), -1)
centerfinish.append([cXcb,cYcb])
cv2.imshow('finishpls',finishcontours[0])
cv2.imshow('marker',markercontours[0])
cv2.waitKey(0)
for path in range(len(pathcontours)):
# gray = cv2.cvtColor(pathcontours[path], cv2.COLOR_BGR2GRAY)
thined = cv2.ximgproc.thinning(pathcontours[path])
thinedpath.append(thined)
endpoints = findendp(thined)
cv2.imshow('circle',thinedpath[0])
cv2.waitKey(0)
# gradientpath = cv2.bitwise_and(,)
################################## ต้องมาทำเป็นสำหรับหลาย path [0] เป็น [i]
spreadline(endpoints,thinedpath[path],newimg) # endp ,รูปที่ thined, รูปดั้งเดิมสีเทา
fullskel = cv2.bitwise_and(pathcontours[0],thinedpath[0])
# minus = cv2.addWeighted(fullskel,-1,thinedpath[0],1,0)
endpoints2= findendp(fullskel) # หา จุดปลายใหม่ของ เส้นที่ ตเิมความยาวั้ง คอนทัวทั้งหมดแล้ว
spreadline(endpoints2,fullskel,denoiseimg)
contours2, hierarchy2 = cv2.findContours(fullskel, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# print(len(contours2))
epsilon = 0.05*cv2.arcLength(contours2[0],True)
approx = cv2.approxPolyDP(contours2[0],epsilon,True)
approximg = np.zeros(keepedge.shape, np.uint8)
cv2.drawContours(approximg,approx,-1,(255,255,255),3)
for ap in range(len(approx)):
if list(approx[ap][0]) not in cornerpoints_skel:
cornerpoints_skel.append(list(approx[ap][0]))
for n in range(len(endpoints2)):
end_skel.append(list(endpoints2[n]))
# ปรับ พิกัดของ endpoints
for e in end_skel:
e.reverse()
for n in range(len(end_skel)):
if list(end_skel[n]) not in cornerpoints_skel:
cornerpoints_skel.append(list(end_skel[n]))
print("corner=",cornerpoints_skel)
cv2.imshow("approximg",approximg)
pcolor = 10
for p in range(len(cornerpoints_skel)):
cv2.circle(clone, (cornerpoints_skel[p][0],cornerpoints_skel[p][1]), 7, (255-(3*pcolor),255-(2*pcolor),255-(pcolor)), -1)
pcolor+=20
cv2.circle(clone, (422,100), 7, (0,255,0), -1)
##################### sort points in line ##############################################
startpoint = centermarkers[0]
print("start",startpoint)
sortpoint.append(startpoint)
# print(contours2[0][0])
count = 0
for p in range(len(contours2[0])):
for q in range(len(cornerpoints_skel)):
if count == 0:
if list(contours2[0][p][0]) == sortpoint[0] :
count = 1
if count == 1:
if list(contours2[0][p][0]) == cornerpoints_skel[q] :
# print(list(contours2[0][p][0]),cornerpoints_sskel[q])
if cornerpoints_skel[q] not in sortpoint:
sortpoint.append(cornerpoints_skel[q])
print(cornerpoints_skel[q])
if len(sortpoint) == len(cornerpoints_skel):
break
for p in range(len(contours2[0])):
for q in range(len(cornerpoints_skel)):
if list(contours2[0][p][0]) == cornerpoints_skel[q] :
if cornerpoints_skel[q] not in sortpoint:
sortpoint.append(cornerpoints_skel[q])
print(cornerpoints_skel[q])
if len(sortpoint) == len(cornerpoints_skel):
break
print("sort=",sortpoint)
qcolor = 10
clone2 = frame.copy()[35:-35, 35:-35]
for p in range(len(sortpoint)):
cv2.circle(clone2, (sortpoint[p][0],sortpoint[p][1]), 7, (255-(3*qcolor),255-(2*qcolor),255-(qcolor)), -1)
qcolor+=20
for poi in range(len(sortpoint)-1):
cv2.line(clone2, (sortpoint[poi][0],sortpoint[poi][1]), (sortpoint[poi+1][0],sortpoint[poi+1][1]), (255,255,255), 1)
cv2.circle(clone2, (sortpoint[3][0],sortpoint[3][1]), 7, (0,255,0), -1)
########### sortpoint เป็น list ของ เส้นทางที่เรียงจุดกันแล้ว
########################## หามุม ##############################
for p in range(len(sortpoint)-1):
ang = math.degrees(math.atan2(sortpoint[p+1][1]-sortpoint[p][1],sortpoint[p+1][0]-sortpoint[p][0]))
if ang < 0 :
ang += 360
# ang+=90
print('p',sortpoint[p])
print('p+1',sortpoint[p+1])
print('now-ang= ',ang)
anglespath.append(ang)
######################### เปลี่ยน coordinate ###################
for p in range(len(sortpoint)):
world_position.append(convert_coordinate(sortpoint[p]))
print("world_position",world_position)
for p in range(len(world_position)-1):
normy = math.sqrt(math.pow((world_position[p+1][1]-world_position[p][1]),2)+math.pow((world_position[p+1][0]-world_position[p][0]),2))
rf_list.append(normy)
print(rf_list)
print(newimg.shape)
################ plot 3d ######################
plotmypath(fullskel,denoiseimg,0)
cv2.imshow('grey',newimg)
cv2.imshow('denosie',denoiseimg)
cv2.imshow('point',clone)
cv2.imshow('point2',clone2)
fig = plt.figure()
ax = plt.axes(projection="3d")
x, y = temp.T
# ax.plot3D(X, Y, Z, 'gray')
ax.scatter3D(x,y, Z, c=Z, cmap='hsv')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
# x, y = temp.T
# plt.scatter(x,y)
plt.show()
cv2.waitKey(0)
# return adjustimg_chessboard
######################################## main ########################################################
# img = cv2.imread('fieldimages/myfield.png', cv2.IMREAD_COLOR)
img = cv2.imread('fieldimages/myfieldgogo.png', cv2.IMREAD_COLOR)
cv2.namedWindow('tuner')
def nothing(x):
pass
# create min
cv2.createTrackbar('min','tuner',10,200,nothing)
# create trackbars for max
cv2.createTrackbar('max','tuner',10,200,nothing)
# create tratrackbars for erosion contours
# cv2.createTrackbar('erodeCon','tuner',1,10,nothing)
# while(1):
# tune1 = cv2.getTrackbarPos('min','tuner')
# tune2 = cv2.getTrackbarPos('max','tuner')
# x = img.copy()
# func = PathFinder(x,tune1,tune2)
# cv2.imshow('tuner',func)
# cv2.imwrite('fieldimages/adjustfield.png', func)
# cv2.waitKey(10)
x = img.copy()
# func = PathFinder(x,tune1,tune2)
|
[
"56544166+KarnMatas@users.noreply.github.com"
] |
56544166+KarnMatas@users.noreply.github.com
|
430efca059ba8e99ce07e28c0118ee8db7567ddd
|
08f4bc2751ec9d9a312e8a11ddf79c86df94a4fa
|
/AV_control_host/build/catkin_generated/installspace/_setup_util.py
|
aa91c1058d2eacf79b43d0e77e1e68893df12082
|
[] |
no_license
|
physicsdolphin/Agile_Vehicle
|
4b2750d4dcad0a1dcba7db19be8fefaf5ff91459
|
c409f0e2e30769f19cf47f7163db6b1b8e073d46
|
refs/heads/main
| 2023-08-26T12:32:56.441728
| 2021-10-25T02:34:01
| 2021-10-25T02:34:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,355
|
py
|
#!/home/wbc/anaconda3/bin/python3
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This file generates shell code for the setup.SHELL scripts to set environment variables."""
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
PATH_TO_ADD_SUFFIX = ['bin']
if IS_WINDOWS:
# while catkin recommends putting dll's into bin, 3rd party packages often put dll's into lib
# since Windows finds dll's via the PATH variable, prepend it with path to lib
PATH_TO_ADD_SUFFIX.extend([['lib', os.path.join('lib', 'x86_64-linux-gnu')]])
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': PATH_TO_ADD_SUFFIX,
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python3/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
"""
Generate shell code to reset environment variables.
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
"""
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
value = _rollback_env_variable(unmodified_environ, key, subfolders)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolders):
"""
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolders: list of str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
"""
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
for subfolder in subfolders:
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
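# Illustrative example (not part of the original script; values are hypothetical):
# with environ = {'PATH': '/home/user/ws/devel/bin:/usr/bin',
#                 'CMAKE_PREFIX_PATH': '/home/user/ws/devel'},
# _rollback_env_variable(environ, 'PATH', ['bin']) would drop the workspace entry
# '/home/user/ws/devel/bin' and return '/usr/bin'.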
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
"""
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
    :param include_fuerte: Whether paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
"""
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
"""Generate shell code to prepend environment variables for the all workspaces."""
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted(key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH'):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
"""
    Return the prefix to prepend to the environment variable NAME, adding any path in ``paths`` without creating duplicate or empty items.
"""
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
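# For illustration only (assuming a POSIX shell, i.e. IS_WINDOWS is False):
#   comment('reset environment variables')     -> '# reset environment variables'
#   assignment('PATH', '/opt/ros/noetic/bin')  -> 'export PATH="/opt/ros/noetic/bin"'
#   prepend({'PATH': '/usr/bin'}, 'PATH', '/opt/ros/noetic/bin' + os.pathsep)
#       -> 'export PATH="/opt/ros/noetic/bin:$PATH"'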
def find_env_hooks(environ, cmake_prefix_path):
"""Generate shell code with found environment hooks for the all workspaces."""
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
parser.add_argument('--local', action='store_true', help='Only consider this prefix path and ignore other prefix path in the environment')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
if not args.local:
# environment at generation time
CMAKE_PREFIX_PATH = r'/home/wbc/SRT/AV_control/devel;/opt/ros/noetic'.split(';')
else:
# don't consider any other prefix path than this one
CMAKE_PREFIX_PATH = []
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
# CMAKE_PREFIX_PATH uses forward slash on all platforms, but __file__ is platform dependent
# base_path on Windows contains backward slashes, need to be converted to forward slashes before comparison
if os.path.sep != '/':
base_path = base_path.replace(os.path.sep, '/')
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
|
[
"glaciercoder@github.com"
] |
glaciercoder@github.com
|
cff59f646f6b18df9aba102a124e6b05941d94bf
|
cf88dbdda8803f77036fa51d1cc9968006754166
|
/tests/test_code_style.py
|
068bbc74436fd0e267247968d9cddb5a4c84cc8f
|
[
"MIT"
] |
permissive
|
saymedia/python-coal
|
b11cab7e0de92bd74ca35b25c0d11dff90fb5ae2
|
5aefaaaf56727a61cee7e37fae1e7f25ef97c952
|
refs/heads/master
| 2021-01-20T06:25:46.990699
| 2013-11-19T00:49:21
| 2013-11-19T00:49:21
| 14,349,835
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
import unittest
import pep8
import os.path
tests_dir = os.path.dirname(__file__)
modules_dir = os.path.abspath(os.path.join(tests_dir, "..", "coal"))
class TestCodeStyle(unittest.TestCase):
def test_pep8_conformance(self):
pep8style = pep8.StyleGuide()
result = pep8style.check_files([tests_dir, modules_dir])
self.assertEqual(
result.total_errors,
0,
"Found pep8 conformance issues",
)
|
[
"matkins@saymedia.com"
] |
matkins@saymedia.com
|
65cdeb847c52630545c176d506c77e08da15e5a7
|
3469a778edcc959050f54a0c68c27db6efa643e2
|
/D2/re_ip.py
|
e78b3e212c52e1454ed1493481cccf791001b6ef
|
[] |
no_license
|
ciscowh/Python-test
|
d8e3ddfac1725259f1c39c0ddbc9cca34cc719a1
|
0260aa2aac6c0a81e697c53b82930339df2ccf8c
|
refs/heads/master
| 2022-12-07T00:37:41.153998
| 2020-08-02T16:17:32
| 2020-08-02T16:17:32
| 281,024,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 658
|
py
|
import re
str1='Port-channel1.189 192.168.189.254 YES CONFIG up'
# parse interface name, IP address and status from the interface summary line
re_result = re.match(r'(\w+.\w+\d.\d\d\d)\s*(\d\d\d.\d\d\d.\d\d\d.\d\d\d)\s?\w+\s?\w+\s?(\w+)', str1).groups()
port = re_result[0]
IP = re_result[1]
status = re_result[2]
str_po = '接口'       # label text: "interface"
str_ip = 'IP地址'     # label text: "IP address"
str_status = '状态'   # label text: "status"
line1 = f'{str_po:<6}:{port:<10}'
line2 = f'{str_ip:<6}:{IP:<10}'
line3 = f'{str_status:<6}:{status:<10}'
print('-'*80)
print(line1)
print(line2)
print(line3)
# line1='{:8}:{}' .format('接口',str_port)
# line2='{:8}:{}' .format('IP地址',str_ip)
# line3='{:8}:{}' .format('状态',str_status)
#
#
# print('-'*100)
# print(line1)
# print(line2)
# print(line3)
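# With the sample line above, the regex yields:
#   port   = 'Port-channel1.189'
#   IP     = '192.168.189.254'
#   status = 'up'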
|
[
"95636521@qq.com"
] |
95636521@qq.com
|
abe62a0645c07773cd8b8c0817344736bb804b0a
|
624d5a364fb1a6f7c0b56e02429ac1280b116c02
|
/django_crud/urls.py
|
91724291b37e72b08fb1d45cd35d0a11f223a59e
|
[] |
no_license
|
yeojinhwang/django_crud
|
c167dd10ccd0947b030967b6f9540807e82f83ec
|
7b9fce1503c169f3e9b21b40c550459acdc2ce83
|
refs/heads/master
| 2020-04-23T14:45:37.223417
| 2019-02-19T07:21:14
| 2019-02-19T07:21:14
| 171,242,940
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 848
|
py
|
"""django_crud URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('boards/', include('boards.urls')),
path('jobs/', include('jobs.urls')),
]
|
[
"hwangyj0202@gmail.com"
] |
hwangyj0202@gmail.com
|
dbe51e99e91db517aeb0e986569300a370b3791b
|
d599358cc35b883e0ba007f58067b2e9818cd365
|
/3sum_Closet.py
|
69a4b51d8cfe22a1df6d6ae3da7c25c80578183f
|
[] |
no_license
|
San1357/Leetcode-july-challenge-2021
|
b4a992492a398480ba72719259963d27d30e2f36
|
0de1916dd84b82b0126a790c936655959fd26ad0
|
refs/heads/main
| 2023-06-28T14:12:04.455490
| 2021-07-31T15:48:31
| 2021-07-31T15:48:31
| 389,917,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 726
|
py
|
class Solution(object):
def threeSumClosest(self, nums, target):
result, min_diff = 0, float("inf")
nums.sort()
for i in reversed(range(2, len(nums))):
if i+1 < len(nums) and nums[i] == nums[i+1]:
continue
left, right = 0, i-1
while left < right:
total = nums[left]+nums[right]+nums[i]
if total < target:
left += 1
elif total > target:
right -= 1
else:
return target
if abs(total-target) < min_diff:
min_diff = abs(total-target)
result = total
return result
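# Illustrative check (not part of the original solution):
#   Solution().threeSumClosest([-1, 2, 1, -4], 1) -> 2
# (the triplet -1 + 2 + 1 gives the sum closest to the target)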
|
[
"noreply@github.com"
] |
San1357.noreply@github.com
|
3a4a81507b810e8c3de2c5d62a86f33c9cc9dba4
|
371c3eb56a0e0044f53209c457c7d1982511ccec
|
/server/venv/Scripts/pilprint.py
|
72ff8f5b6bd60a9b2789dee8906d1fa2e7ff1e78
|
[] |
no_license
|
oceanixinc/lingohop
|
1e4e9e2602691e8720d8e45ed0d682a0d9e65666
|
cb0595675fd31c0589757e5e0551e0ca6e1ac91b
|
refs/heads/master
| 2021-03-24T11:50:40.020683
| 2017-05-03T21:09:05
| 2017-05-03T21:09:05
| 66,260,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,628
|
py
|
#!d:\work\lingohop\server\venv\scripts\python.exe
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
import getopt
import os
import sys
import subprocess
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
title = os.path.splitext(os.path.split(filepath)[1])[0]
format = " (%dx%d "
if image.format:
format = " (" + image.format + " %dx%d "
return title + format % image.size + image.mode + ")"
if len(sys.argv) == 1:
print("PIL Print 0.3/2003-05-05 -- print image files")
print("Usage: pilprint files...")
print("Options:")
print(" -c colour printer (default is monochrome)")
print(" -d debug (show available drivers)")
print(" -p print via lpr (default is stdout)")
print(" -P <printer> same as -p but use given printer")
sys.exit(1)
try:
opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
print(v)
sys.exit(1)
printerArgs = [] # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
if o == "-d":
# debug: show available drivers
Image.init()
print(Image.ID)
sys.exit(1)
elif o == "-c":
# colour printer
monochrome = 0
elif o == "-p":
# default printer channel
printerArgs = ["lpr"]
elif o == "-P":
# printer channel
printerArgs = ["lpr", "-P%s" % a]
for filepath in argv:
try:
im = Image.open(filepath)
title = description(filepath, im)
if monochrome and im.mode not in ["1", "L"]:
im.draft("L", im.size)
im = im.convert("L")
if printerArgs:
p = subprocess.Popen(printerArgs, stdin=subprocess.PIPE)
fp = p.stdin
else:
fp = sys.stdout
ps = PSDraw.PSDraw(fp)
ps.begin_document()
ps.setfont("Helvetica-Narrow-Bold", 18)
ps.text((letter[0], letter[3]+24), title)
ps.setfont("Helvetica-Narrow-Bold", 8)
ps.text((letter[0], letter[1]-30), VERSION)
ps.image(letter, im)
ps.end_document()
if printerArgs:
fp.close()
except:
print("cannot print image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
|
[
"mrinalmech@gmail.com"
] |
mrinalmech@gmail.com
|
cea81827eb196d2bd0479583275edcbe40891b5b
|
93cc7d1297c202f561cb0faf1dbf852b6b4611fd
|
/sprites/metalgear.py
|
dab6de90ff64a9266044416b4f646df9a06d8a6e
|
[
"CC-BY-4.0"
] |
permissive
|
carloartieri/8bit_raspi_pixel_art_display
|
60ebbc1b4d5fc0f6eecef3bd0c579bbf7e8a9473
|
f8afc8a03460d07d1151b4cbd7bb62e35283fe35
|
refs/heads/master
| 2020-03-23T02:40:55.605446
| 2018-09-29T16:10:50
| 2018-09-29T16:10:50
| 140,986,084
| 11
| 0
| null | 2018-09-29T16:10:51
| 2018-07-15T00:03:57
|
Python
|
UTF-8
|
Python
| false
| false
| 7,344
|
py
|
import sys
sys.path.append("../")
from settings import (NES_PALETTE_HEX, animation_settings)
from core import sprite
SolidSnakeWalkRightGun01 = sprite(
palette = {
"b":NES_PALETTE_HEX[0, 13],
"g":NES_PALETTE_HEX[1, 9],
"s":NES_PALETTE_HEX[3, 7],
},
matrix = [
"x8b3x5",
"x7b5x4",
"x7b2g2b1x4",
"x5b4s2b1x4",
"x3b1g1b1g1b2g1s1b2x3",
"x2b1g3s1b1s1b5x2",
"x2b2g2s1g1s1b2s1b1g1b1x1",
"x1b1s1g1b1s1b1g1s1b2s1b1g1b1x1",
"x1b1s2b1s1g3b1g1s1g2b1x1",
"x1b1g1s1b2g4b2g1b2x1",
"x2b1s1g1b4g1b5x1",
"b3g1s1b11",
"b3g1s1b8s1b1x1",
"x4b9x3",
"x4b2g1s1g1b1g1b2x3",
"x4b1g4b1g2b1x3",
"x5b1g4b1g1b1x3",
"x6b2g2b1g1b1x3",
"x6b1g3b1g1b1x3",
"x6b2g2b2x4",
"x6b1g3b2x4",
"x6g1b1g1b3x4",
"x5b1g1b5x4",
"x4b4x1g1b2x4",
"x5b3x1b4x3",
"x6b2x2b6",
"x7b6x3",
"x9b3x4",
]
)
SolidSnakeWalkRightGun02 = sprite(
palette = {
"b":NES_PALETTE_HEX[0, 13],
"g":NES_PALETTE_HEX[1, 9],
"s":NES_PALETTE_HEX[3, 7],
},
matrix = [
"x8b3x5",
"x7b5x4",
"x7b2g2b1x4",
"x6b3s2b1x4",
"x4b2g1b2g1s1b2x3",
"x3b1g1b2s2b3g1b1x2",
"x2b1g2s1g1s1b1s1b2g1b1x2",
"x2b1g1s1g1b1s1b1s1b2s1b1x2",
"x1b3g1b1s1g2b1g1b3x2",
"x1b1s1g1b2s1g3b4x2",
"x1b1g1s2b4g1b6",
"b3g2s2b5g1s1b1x1",
"b5s2b7x2",
"x3b4g1b2g1b2x3",
"x3b2g1s1g2b2s1g1b1x2",
"x3b1g2s1g1b1g3s1g1b1x1",
"x4b1g4b3g3b1",
"x4b2g2b1x2b3g1b1",
"x3b1g4b1x3b1g2b1",
"x2b1g4b1x4b1g2b1",
"x1b1x1b1g1b2x5b1g1b2",
"b1x2g1b1x7b2g1b1",
"b1x1b2x6b3x1g1b1",
"b1g1b1x4b6x1b2",
"x1b1x2b10x1b1",
"x3b12x1",
"x3b11x2",
"x4b7x5",
]
)
SolidSnakeWalkRight01 = sprite(
palette = {
"b":NES_PALETTE_HEX[0, 13],
"g":NES_PALETTE_HEX[1, 9],
"s":NES_PALETTE_HEX[3, 7],
},
matrix = [
"x8b3x5",
"x7b5x4",
"x7b2g2b1x4",
"x3b1g1b4s2b1x4",
"x1b2g3b3g1s1b2x3",
"b1g1b1g1s1g1s1b1s1b2s1g1b1x2",
"g1s2b1g2s1g1b1s1b1s1g1b1x2",
"s2b2g1b1s1g1b1s1b1g2b2x1",
"b1s1g1b4g2b1g2b1s2b1",
"x1b1s2b2g5b1s1g2b1",
"x2g1s1b4g2b2g1b2x1",
"x3b4s1b1s1b3x3",
"x3b1g2b5x5",
"x3b1g1s1g3b2x5",
"x4g2s1g1b1g2b1x4",
"x4b1g4b1g2b1x3",
"x5b1g4b1g1b1x3",
"x6b2g2b1g1b1x3",
"x6b1g3b1g1b1x3",
"x6b2g2b2x4",
"x6b1g3b2x4",
"x6g1b1g1b3x4",
"x5b1g1b5x4",
"x4b4x1g1b2x4",
"x5b3x1b4x3",
"x6b2x2b6",
"x7b6x3",
"x9b3x4",
]
)
SolidSnakeWalkRight02 = sprite(
palette = {
"b":NES_PALETTE_HEX[0, 13],
"g":NES_PALETTE_HEX[1, 9],
"s":NES_PALETTE_HEX[3, 7],
},
matrix = [
"x8b3x5",
"x7b5x4",
"x7b2g2b1x4",
"x6b3s2b2x3",
"x4b2g1b2g1s1b2x3",
"x3b1g2s1b1s1b2g1b1x3",
"x2b1g1s1g2s1b1s1b1g1b1x3",
"x2b1g1b2g1s1g1b1g2b1x3",
"x3b1g1s1b4g1b1x4",
"x3b1s1g1b2s2b2x4",
"x3b1s3g3b2x4",
"x4b1g2b4g1b1x3",
"x5b3g1b1g3b1x2",
"x5b1g1s1g4s1g1b1x1",
"x4b1g2s1g2b1g2s1b1x1",
"x4b1g1s1g3b3g1b1x1",
"x4b1g4b3g3b1",
"x4b2g2b1x2b3g1b1",
"x3b1g4b1x3b1g2b1",
"x2b1g4b1x4b1g2b1",
"x1b1x1b1g1b2x5b1g1b2",
"b1x2g1b1x7b2g1b1",
"b1x1b2x6b3x1g1b1",
"b1g1b1x4b6x1b2",
"x1b1x2b10x1b1",
"x3b12x1",
"x3b11x2",
"x4b7x5",
]
)
MetalGearBG01 = sprite(
palette = {
"b":NES_PALETTE_HEX[0, 13],
"l":NES_PALETTE_HEX[2, 0],
"d":NES_PALETTE_HEX[1, 0],
"g":NES_PALETTE_HEX[2, 10],
"r":NES_PALETTE_HEX[0, 11],
},
matrix = [
"b11l1b4" + "b16",
"l9b2l5" + "l16",
"d10b1d3b1d1" + "d16",
"d9b2d2b2l1" + "d16",
"d9b2l1b4" + "d16",
"d9b2l1b2l1d1" + "d16",
"d12b2l1d1" + "d16",
"d9b1l1d1b2l1d1" + "d16",
"d16" + "d16",
"b2d5b1d8" + "d8b2d5b1",
"b1d3b1d5b2d4" + "d2b2d4b1d3b1d3",
"d2b1d4b1d1b2d1b3d1" + "d1b2d1b3d3b1d4b1",
"b3d1b1d2b7d1b1" + "b6d1b4d1b1d2b1",
"d2b7d1b1d1b1d1b2" + "b1d1b1d1b1d1b2d2b6",
"b4d1b2d1b8" + "b12d1b2d1",
"b32",
"b32",
"b1r6b2r6b1" + "b1r6b2r6b1",
"b1r1b5r1b1r1b5r1" + "b1r1b5r1b1r1b5r1",
"b1r1b5r1b1r1b5r1" + "b1r1b5r1b1r1b5r1",
"b1r1b5r1b1r1b5r1" + "b1r1b5r1b1r1b5r1",
"b1r1b5r1b1r1b5r1" + "b1r1b5r1b1r1b5r1",
"b1r1b5r1b1r1b5r1" + "b1r1b5r1b1r1b5r1",
"b2r6b2r6" + "b2r6b2r6",
"b16" + "b16",
"b1g7b1g7" + "b1g7b1g7",
"b1g1b5l1b1g1b5l1" + "b1g1b5l1b1g1b5l1",
"b1g1b1r4l1b1g1b1r4l1" + "b1g1b1r4l1b1g1b1r4l1",
"b1g1b1r4l1b1g1b1r4l1" + "b1g1b1r4l1b1g1b1r4l1",
"b1g1b1r4l1b1g1b1r4l1" + "b1g1b1r4l1b1g1b1r4l1",
"b1g1b1r4l1b1g1b1r4l1" + "b1g1b1r4l1b1g1b1r4l1",
"b1g1l6b1g1l6" + "b1g1l6b1g1l6",
]
)
MetalGearBG02 = sprite(
palette = {
"b":NES_PALETTE_HEX[0, 13],
"l":NES_PALETTE_HEX[3, 1],
"d":NES_PALETTE_HEX[1, 1],
"r":NES_PALETTE_HEX[1, 7],
"g":NES_PALETTE_HEX[2, 7],
"y":NES_PALETTE_HEX[0, 0],
},
matrix = [
"b1r2b3r5b3r2" + "r1b3g1r5y1b5",
"b1r1b1g1b2r5b5" + "b4g1r5y1b1y1b3",
"b2g2b2r5b1g4" + "g3b1g1r5y1b1y2b2",
"b1g3b2r5b5" + "b4g1r5y1b1y3b1",
"b1g2b2r5b3r3" + "r2b3g1r5y1b1y2b1",
"b1g2b2r1g1r1g1r1b6" + "b5g1r1g1r1g1r1y1b1y2b1",
"b1g2b2r1b1r1b1r1b1g5" + "g4b1g1r1b1r1b1r1y1b1y2b1",
"b1g2b2r5b6" + "b5g1r5y1b1y2b1",
"b1g1b2r5b3r4" + "r3b3g1r5y1b1y1b1",
"b1g1b2r5b7" + "b6g1r5y1b1y1b1",
"b1g1b2r5b1g6" + "g5b1g1r5y1b1y1b1",
"b1g1b2r5b7" + "b6g1r5y1b1y1b1",
"b3r5b3r5" + "r4b3g1r5y1b2",
"b3r1g1r1g1r1b8" + "b7g1r1g1r1g1r1y1b2",
"b3r1b1r1b1r1b1g7" + "g6b1g1r1b1r1b1r1y1b2",
"b3r5b8" + "b7g1r5y1b2",
"b2r5b2r7" + "r6b2g1r5y1b1",
"b2r5b2r7" + "r6b2g1r5y1b1",
"b2r5b2r7" + "r6b2g1r5y1b1",
"b16" + "b16",
"b16" + "b16",
"b2d13b1" + "b1d14b1",
"b1l1b12d1b1" + "b1d1b12d1b1",
"b1l1b12d1b1" + "b1d1b12d1b1",
"b1l1b1d1b10d1b1" + "b1d1b12d1b1",
"b1l1b1d2b9d1b1" + "b1d1b12d1b1",
"b1d1l4d9b1" + "b1l1d13b1",
"b1d6b9" + "b16",
"b16" + "b16",
"b1l14d1" + "b1l14d1",
"b1l1b12l1d1" + "b1l1b12l1d1",
"b1l1b1d11l1b1" + "b1l1b1d11l1b1",
]
)
metalgear_animation = animation_settings(
sprite_list=[[SolidSnakeWalkRightGun01,
SolidSnakeWalkRightGun02,],
[SolidSnakeWalkRight01,
SolidSnakeWalkRight02,],
],
bg_sprites=[MetalGearBG01,
MetalGearBG02,],
xoffs=[[0, 0],
[0, 0],
],
yoffs=[[-1, 0],
[-1, 0],
],
frame_time=0.040,
spbg_ratio=9,
center=True,
bg_scroll_speed=(1, 0),
cycles_per_char=5,
reversible=False,
)
|
[
"cartieri@guardanthealth.com"
] |
cartieri@guardanthealth.com
|
3ddc451d381de33b7a2ad5da04358f63c4d38f1e
|
612c128a31cd2b34534142fcb45c4bd83d17ab2c
|
/src/core_game/coregame_manager.py
|
696323e8c8f667e78215c4aa0179a1d6f6938970
|
[
"MIT"
] |
permissive
|
sskkrrttt/valorant-skin-cli
|
7c14559e69c78eeeaa5af2a88af98cba43c62a39
|
65c78990bada8ab963757fea2d0f4d55fa2569b3
|
refs/heads/master
| 2023-06-15T06:51:39.158404
| 2021-07-13T23:10:00
| 2021-07-13T23:10:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
'''
Async manager that calls coregame to update the session.
The session keeps track of in-match/map/other data about the live match.
'''
import asyncio
from .session import Session
# important imports or something
from ..flair_management.skin_manager.skin_manager import Skin_Manager
class Coregame_Manager:
def __init__(self,client):
self.client = client
self.skin_manager = Skin_Manager(self.client)
self.session = Session(self.client,self.skin_manager)
async def main_loop(self):
await self.session.update_presence()
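# Hypothetical usage sketch (the concrete `client` object comes from the wider
# application and is not shown here):
#
#   manager = Coregame_Manager(client)
#   while in_match:                        # some loop owned by the caller
#       await manager.main_loop()          # refresh session / presence data
#       await asyncio.sleep(poll_interval)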
|
[
"colinjoe9@gmail.com"
] |
colinjoe9@gmail.com
|
d410bec00a2a34da61f0af3a62839e82e95192d3
|
0abe3c336e8f8a6f807b0c43d4c0f2b1e98640c2
|
/pelix/remote/discovery/mdns.py
|
b49121ea060f46d1692945a999006b35aac3e395
|
[
"Apache-2.0"
] |
permissive
|
tcalmant/ipopo
|
0c9109b4e3e8fbc373c49d897f87a5e351428c76
|
1d0add361ca219da8fdf72bb9ba8cb0ade01ad2f
|
refs/heads/v1
| 2023-08-29T07:51:28.650303
| 2022-12-08T15:27:20
| 2022-12-08T16:01:25
| 4,015,794
| 67
| 34
|
Apache-2.0
| 2022-12-08T16:01:26
| 2012-04-13T12:53:13
|
Python
|
UTF-8
|
Python
| false
| false
| 14,493
|
py
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Pelix remote services: Zeroconf (mDNS) discovery and event notification
This module depends on the zeroconf package
:author: Thomas Calmant
:copyright: Copyright 2020, Thomas Calmant
:license: Apache License 2.0
:version: 1.0.1
..
Copyright 2020 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import json
import logging
import socket
# Zeroconf
import zeroconf
# iPOPO decorators
from pelix.ipopo.decorators import (
ComponentFactory,
Requires,
Provides,
Invalidate,
Validate,
Property,
)
import pelix.constants
# Remote services
import pelix.remote
import pelix.remote.beans as beans
from pelix.utilities import is_bytes, is_string, to_str
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory(pelix.remote.FACTORY_DISCOVERY_ZEROCONF)
@Provides(pelix.remote.SERVICE_EXPORT_ENDPOINT_LISTENER)
@Property("_rs_type", pelix.remote.PROP_ZEROCONF_TYPE, "_pelix-rs._tcp.local.")
@Property("_ttl", "zeroconf.ttl", 60)
@Requires("_access", pelix.remote.SERVICE_DISPATCHER_SERVLET)
@Requires("_registry", pelix.remote.SERVICE_REGISTRY)
class ZeroconfDiscovery(object):
"""
Remote services discovery and notification using the module zeroconf
"""
# Service type for the Pelix dispatcher servlet
DNS_DISPATCHER_TYPE = "_rs-dispatcher._tcp.local."
def __init__(self):
"""
Sets up the component
"""
# Imported endpoints registry
self._registry = None
# Dispatcher access
self._access = None
# Remote Service type
self._rs_type = None
# Zeroconf TTL
self._ttl = 60
# Framework UID
self._fw_uid = None
# Address of this framework
self._address = None
# Zeroconf
self._zeroconf = None # type: zeroconf.Zeroconf
self._browsers = []
# Endpoint UID -> ServiceInfo
self._export_infos = {}
# mDNS name -> Endpoint UID
self._imported_endpoints = {}
@Invalidate
def invalidate(self, _):
"""
Component invalidated
"""
# Stop listeners
for browser in self._browsers:
browser.cancel()
# Close Zeroconf
self._zeroconf.unregister_all_services()
self._zeroconf.close()
# Clean up
self._export_infos.clear()
self._zeroconf = None
self._fw_uid = None
_logger.debug("Zeroconf discovery invalidated")
@Validate
def validate(self, context):
"""
Component validated
"""
# Get the framework UID
self._fw_uid = context.get_property(pelix.constants.FRAMEWORK_UID)
# Get the host address
self._address = socket.inet_aton(
socket.gethostbyname(socket.gethostname())
)
# Prepare Zeroconf
self._zeroconf = zeroconf.Zeroconf()
# Register the dispatcher servlet as a service
self.__register_servlet()
# Listen to our types
self._browsers.append(
zeroconf.ServiceBrowser(
self._zeroconf, ZeroconfDiscovery.DNS_DISPATCHER_TYPE, self
)
)
self._browsers.append(
zeroconf.ServiceBrowser(self._zeroconf, self._rs_type, self)
)
_logger.debug("Zeroconf discovery validated")
@staticmethod
def _serialize_properties(props):
"""
Converts properties values into strings
"""
new_props = {}
for key, value in props.items():
if is_string(value):
new_props[key] = value
else:
try:
new_props[key] = json.dumps(value)
except ValueError:
new_props[key] = "pelix-type:{0}:{1}".format(
type(value).__name__, repr(value)
)
# FIXME: to simplify the usage with ECF, send single strings instead of
# arrays
for key in (
pelix.constants.OBJECTCLASS,
pelix.remote.PROP_IMPORTED_CONFIGS,
):
try:
new_props[key] = props[key][0]
except KeyError:
pass
return new_props
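# Example of the round trip performed by these two helpers (values are illustrative):
# {'objectClass': ['my.spec'], 'answer': 42} is serialized to
# {'objectClass': 'my.spec', 'answer': '42'} for the mDNS TXT record, and
# _deserialize_properties() turns '42' back into the integer 42 via json.loads().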
@staticmethod
def _deserialize_properties(props):
"""
Converts properties values into their type
"""
new_props = {}
for key, value in props.items():
key = to_str(key)
if is_bytes(value):
# Convert value to string if necessary
value = to_str(value)
try:
try:
new_props[key] = json.loads(value)
except (TypeError, ValueError):
if is_string(value) and value.startswith("pelix-type:"):
# Pseudo-serialized
value_type, value = value.split(":", 3)[2:]
if "." in value_type and value_type not in value:
# Not a builtin type...
_logger.warning(
"Won't work: %s (%s)", value, value_type
)
new_props[key] = eval(value)
else:
# String
new_props[key] = value
except Exception as ex:
_logger.error("Can't deserialize %s: %s", value, ex)
return new_props
def __register_servlet(self):
"""
Registers the Pelix Remote Services dispatcher servlet as a service via
mDNS
"""
# Get the dispatcher servlet access
access = self._access.get_access()
# Convert properties to be stored as strings
properties = {
"pelix.version": pelix.__version__,
pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID: self._fw_uid,
"pelix.access.port": access[0],
"pelix.access.path": access[1],
}
properties = self._serialize_properties(properties)
# Prepare the service type
svc_name = "{0}.{1}".format(
self._fw_uid, ZeroconfDiscovery.DNS_DISPATCHER_TYPE
)
# Prepare the mDNS entry
info = zeroconf.ServiceInfo(
ZeroconfDiscovery.DNS_DISPATCHER_TYPE, # Type
svc_name, # Name
self._address, # Access address
access[0], # Access port
properties=properties,
)
# Register the service
self._zeroconf.register_service(info, self._ttl)
def endpoints_added(self, endpoints):
"""
Multiple endpoints have been added
:param endpoints: A list of ExportEndpoint beans
"""
# Get the dispatcher servlet port
access_port = self._access.get_access()[0]
# Handle each one separately
for endpoint in endpoints:
self._endpoint_added(endpoint, access_port)
def _endpoint_added(self, exp_endpoint, access_port):
"""
A new service is exported
:param exp_endpoint: An ExportEndpoint bean
:param access_port: The dispatcher access port
"""
# Convert the export endpoint into an EndpointDescription bean
endpoint = beans.EndpointDescription.from_export(exp_endpoint)
# Get its properties
properties = endpoint.get_properties()
# Convert properties to be stored as strings
properties = self._serialize_properties(properties)
# Prepare the service name
svc_name = "{0}.{1}".format(
endpoint.get_id().replace("-", ""), self._rs_type
)
# Prepare the mDNS entry
info = zeroconf.ServiceInfo(
self._rs_type, # Type
svc_name, # Name
self._address, # Access address
access_port, # Access port
properties=properties,
)
self._export_infos[exp_endpoint.uid] = info
# Register the service
self._zeroconf.register_service(info, self._ttl)
@staticmethod
def endpoint_updated(endpoint, old_properties):
# pylint: disable=W0613
"""
An end point is updated
:param endpoint: The updated endpoint
:param old_properties: Previous properties of the endpoint
"""
# Not available...
# TODO: register a temporary service while the update is performed ?
return
def endpoint_removed(self, endpoint):
"""
An end point is removed
:param endpoint: Endpoint being removed
"""
try:
# Get the associated service info
info = self._export_infos.pop(endpoint.uid)
except KeyError:
# Unknown service
_logger.debug("Unknown removed endpoint: %s", endpoint)
else:
# Unregister the service
self._zeroconf.unregister_service(info)
def _get_service_info(self, svc_type, name, max_retries=10):
# type: (str, str, int) -> zeroconf.ServiceInfo
"""
Tries to get information about the given mDNS service
:param svc_type: Service type
:param name: Service name
:param max_retries: Number of retries before timeout
:return: A ServiceInfo bean
"""
info = None
retries = 0
while (
self._zeroconf is not None
and info is None
and retries < max_retries
):
# Try to get information about the service...
info = self._zeroconf.get_service_info(svc_type, name)
retries += 1
return info
def add_service(self, zeroconf_, svc_type, name):
"""
Called by Zeroconf when a record is updated
        :param zeroconf_: The Zeroconf instance that notifies of the
modification
:param svc_type: Service type
:param name: Service name
"""
# Get information about the service
info = self._get_service_info(svc_type, name)
if info is None:
_logger.warning(
"Timeout reading service information: %s - %s", svc_type, name
)
return
# Read properties
properties = self._deserialize_properties(info.properties)
try:
sender_uid = properties[pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID]
if sender_uid == self._fw_uid:
# We sent this message
return
except KeyError:
# Not a Pelix message
_logger.warning("Not a Pelix record: %s", properties)
return
if svc_type == ZeroconfDiscovery.DNS_DISPATCHER_TYPE:
# Dispatcher servlet found, get source info
address = to_str(socket.inet_ntoa(info.address))
port = info.port
self._access.send_discovered(
address, port, properties["pelix.access.path"]
)
elif svc_type == self._rs_type:
# Remote service
# Get the first available configuration
configuration = properties[pelix.remote.PROP_IMPORTED_CONFIGS]
if not is_string(configuration):
configuration = configuration[0]
# Ensure we have a list of specifications
specs = properties[pelix.constants.OBJECTCLASS]
if is_string(specs):
specs = [specs]
try:
# Make an import bean
endpoint = beans.ImportEndpoint(
properties[pelix.remote.PROP_ENDPOINT_ID],
properties[pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID],
[configuration],
None,
specs,
properties,
)
except KeyError as ex:
# Log a warning on incomplete endpoints
_logger.warning(
"Incomplete endpoint description, missing %s: %s",
ex,
properties,
)
return
else:
# Register the endpoint
if self._registry.add(endpoint):
# Associate the mDNS name to the endpoint on success
self._imported_endpoints[name] = endpoint.uid
def remove_service(self, zeroconf_, svc_type, name):
"""
Called by Zeroconf when a record is removed
        :param zeroconf_: The Zeroconf instance that notifies of the
modification
:param svc_type: Service type
:param name: Service name
"""
if svc_type == self._rs_type:
# Get information about the service
try:
# Get the stored endpoint UID
uid = self._imported_endpoints.pop(name)
except KeyError:
# Unknown service
return
else:
# Remove it
self._registry.remove(uid)
elif svc_type == ZeroconfDiscovery.DNS_DISPATCHER_TYPE:
# A dispatcher servlet is gone
fw_uid = name.split(".", 1)[0]
if fw_uid == self._fw_uid:
# Local message: ignore
return
# Remote framework is lost
self._registry.lost_framework(fw_uid)
|
[
"thomas.calmant@gmail.com"
] |
thomas.calmant@gmail.com
|
80a16c5d0c1017c5a638a61803ede4759b34201e
|
bbbe070b22d97da33bb19abd22c79d4ea1f9e409
|
/django_app/Tipo_de_Ingrediente/admin.py
|
1c1a2ef128e2302f976521844f8b4eda05f153b2
|
[] |
no_license
|
danielyoshizawa/projeto_db_subway
|
1928f7c999c8087858619a4a8382c55ae1538b80
|
ead6959297487db29c9dbdbcb1dde5ddd6f84eed
|
refs/heads/master
| 2021-01-12T14:17:57.828602
| 2016-11-28T18:01:52
| 2016-11-28T18:01:52
| 68,964,762
| 0
| 1
| null | 2016-11-22T20:06:12
| 2016-09-22T21:45:30
|
Ruby
|
UTF-8
|
Python
| false
| false
| 163
|
py
|
from django.contrib import admin
from Tipo_de_Ingrediente.models import Tipo_de_Ingrediente
# Register your models here.
admin.site.register(Tipo_de_Ingrediente)
|
[
"yoshidanielcwb@gmail.com"
] |
yoshidanielcwb@gmail.com
|
86f86654dcc045f2c536af73fa39685c25c5c29d
|
0184fa50190412dd2cf5eb1da0305b43259f9a72
|
/productos/migrations/0001_initial.py
|
4e8b6d962e4e003163afc495a568939bb5266772
|
[] |
no_license
|
CARocha/mcampesino
|
20c88975e992aecdf7371e760d76d5a46883565f
|
79dae67c2c2cbad5167a624a3f89a8117b5cb8e4
|
refs/heads/master
| 2021-01-19T00:41:08.919529
| 2013-04-05T16:12:36
| 2013-04-05T16:12:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,691
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ProductosFrescos'
db.create_table('productos_productosfrescos', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=200)),
('unidad', self.gf('django.db.models.fields.CharField')(max_length=15, null=True, blank=True)),
('picture', self.gf('mcampesino.thumbs.ImageWithThumbsField')(max_length=100, null=True, blank=True)),
))
db.send_create_signal('productos', ['ProductosFrescos'])
# Adding model 'ProductosProcesados'
db.create_table('productos_productosprocesados', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=200)),
('unidad', self.gf('django.db.models.fields.CharField')(max_length=15, null=True, blank=True)),
('picture', self.gf('mcampesino.thumbs.ImageWithThumbsField')(max_length=100, null=True, blank=True)),
))
db.send_create_signal('productos', ['ProductosProcesados'])
def backwards(self, orm):
# Deleting model 'ProductosFrescos'
db.delete_table('productos_productosfrescos')
# Deleting model 'ProductosProcesados'
db.delete_table('productos_productosprocesados')
models = {
'productos.productosfrescos': {
'Meta': {'object_name': 'ProductosFrescos'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'picture': ('mcampesino.thumbs.ImageWithThumbsField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'unidad': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'})
},
'productos.productosprocesados': {
'Meta': {'object_name': 'ProductosProcesados'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'picture': ('mcampesino.thumbs.ImageWithThumbsField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'unidad': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['productos']
|
[
"crocha09.09@gmail.com"
] |
crocha09.09@gmail.com
|
eb8e0958019cdf56a28622415948ecb82da8035c
|
074d417b3e468562bfb8ae6a1ec09a9e86bdc391
|
/.ipynb_checkpoints/app-checkpoint.py
|
a60d526c9317b5da096937d874c6d209cc9e148a
|
[] |
no_license
|
hamzachataoui/NLP-for-arabic_dialect_Detection-TopicDetection-SentimentAnalysis
|
6e7d2176d6520d4716c666867a30005088af8d06
|
94d2e156fa563890c1aff9792f23b7ea77e4b7d7
|
refs/heads/main
| 2023-08-05T01:55:38.786053
| 2021-09-25T10:36:48
| 2021-09-25T10:36:48
| 308,471,554
| 6
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,820
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
import re
app = Flask(__name__)
lr = pickle.load(open('lr.pkl', 'rb'))
mulNb = pickle.load(open('MultinomialNB_topic.pkl', 'rb'))
nb = pickle.load(open('nb.pkl', 'rb'))
vectorizer = pickle.load(open('vectorizer.pkl', 'rb'))
topicVect = pickle.load(open('tfidf_topic.pkl', 'rb'))
dialectVect = pickle.load(open('tfidf.pkl', 'rb'))
@app.route('/')
def home():
return render_template('index.html',variable=None, variables=None)
def cleanPunc(sentence):
cleaned = re.sub(r'[?|؟|،|:|!|\'|"]',r'',sentence)
cleaned = re.sub(r'[.|-|,|)|(|\|/]',r' ',cleaned)
cleaned = cleaned.strip()
cleaned = cleaned.replace("\n"," ")
cleaned = re.sub(r'^هه+', '', cleaned)
return cleaned
def remove_emoji(string):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002500-\U00002BEF" # chinese char
u"\U00002702-\U000027B0"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
u"\U0001f926-\U0001f937"
u"\U00010000-\U0010ffff"
u"\u2640-\u2642"
u"\u2600-\u2B55"
u"\u200d"
u"\u23cf"
u"\u23e9"
u"\u231a"
u"\ufe0f" # dingbats
u"\u3030"
"]+", flags=re.UNICODE)
string = emoji_pattern.sub(r'', string)
return string
def clean_NonArabs(Twits):
Twits = re.sub(r'[A-Za-z0-9]+', '', Twits)
return Twits
def stopWords(text):
stemSentence = ""
for word in str(text).split():
if word not in arb_stopwords:
stemSentence += word
stemSentence += " "
return stemSentence
# +
arb_stopwords = nltk.corpus.stopwords.words('arabic')
c = pd.read_excel('stop.xlsx')
c = c['Unnamed: 1'].tolist()
arb_stopwords.extend(c)
# -
from nltk.stem import ISRIStemmer
stemmer = ISRIStemmer()
def stemming(sentence):
stemSentence = ""
for word in sentence.split():
stem = stemmer.stem(word)
stemSentence += stem
stemSentence += " "
stemSentence = stemSentence.strip()
return stemSentence
@app.route('/predict',methods=['POST'])
def predict():
tweet = request.form['text']
result = list()
result.append(dict())
result[0]['tweet'] = tweet
sentiment_tweet = stopWords(tweet)
sentiment_tweet = stemming(sentiment_tweet)
sentiment_tweet = cleanPunc(sentiment_tweet)
sentiment_vectTweet = vectorizer.transform([sentiment_tweet])
prediction = lr.predict(sentiment_vectTweet).toarray()[0]
proba = lr.predict_proba(sentiment_vectTweet).toarray()[0]
result[0]['sentiment'] = {}
if prediction[0] == 1:
result[0]['sentiment']["Negative"] = round(proba[0]*100, 2)
if prediction[1] == 1:
result[0]['sentiment']["Neutre"] = round(proba[1]*100, 2)
if prediction[2] == 1:
result[0]['sentiment']["Positive"] = round(proba[2]*100, 2)
topic_tweet = remove_emoji(tweet)
#topic_tweet = stopWords(tweet)
#topic_tweet = stemming(topic_tweet)
topic_tweet = cleanPunc(topic_tweet)
#topic_tweet = clean_NonArabs(topic_tweet)
topic_vectTweet = topicVect.transform([topic_tweet])
topic_prediction = mulNb.predict(topic_vectTweet)
topic_proba = mulNb.predict_proba(topic_vectTweet)[0]
result[0]['topic'] = {}
if topic_prediction == "autre":
result[0]['topic']["autre"] = round(topic_proba[0]*100, 2)
if topic_prediction == "politique":
result[0]['topic']["politique"] = round(topic_proba[1]*100, 2)
if topic_prediction == "sante":
result[0]['topic']["sante"] = round(topic_proba[2]*100, 2)
if topic_prediction == "social":
result[0]['topic']["social"] = round(topic_proba[3]*100, 2)
if topic_prediction == "sport":
result[0]['topic']["sport"] = round(topic_proba[4]*100, 2)
if topic_prediction == "économique":
result[0]['topic']["économique"] = round(topic_proba[5]*100, 2)
#dialect_tweet = remove_emoji(tweet)
dialect_tweet = stopWords(tweet)
dialect_tweet = stemming(dialect_tweet)
dialect_tweet = cleanPunc(dialect_tweet)
#dialect_tweet = clean_NonArabs(dialect_tweet)
dialect_vectTweet = dialectVect.transform([dialect_tweet])
dialect_prediction = nb.predict(dialect_vectTweet)
dialect_proba = nb.predict_proba(dialect_vectTweet)[0]
result[0]['dialect'] = {}
if dialect_prediction == "Algerian":
result[0]['dialect']["Algerian"] = round(dialect_proba[0]*100, 2)
if dialect_prediction == "Lebanon":
result[0]['dialect']["Lebanon"] = round(dialect_proba[1]*100, 2)
if dialect_prediction == "Morocco":
result[0]['dialect']["Morocco"] = round(dialect_proba[2]*100, 2)
if dialect_prediction == "Tunisian":
result[0]['dialect']["Tunisian"] = round(dialect_proba[3]*100, 2)
if dialect_prediction == "egypt":
result[0]['dialect']["egypt"] = round(dialect_proba[4]*100, 2)
return render_template('index.html', variable=result, variables=None)
@app.route('/predictFile',methods=['POST'])
def predictFile():
tweets = request.files['csvFile']
data = pd.read_excel(tweets)
result = list()
sentiment_tweets = pd.DataFrame()
sentiment_tweets['Twits'] = data['Twits'].apply(stopWords)
sentiment_tweets['Twits'] = sentiment_tweets['Twits'].apply(stemming)
sentiment_tweets['Twits'] = sentiment_tweets['Twits'].apply(cleanPunc)
vectTweets = vectorizer.transform(sentiment_tweets['Twits'])
sentiment_predictions = lr.predict(vectTweets).toarray()
sentiment_probas = lr.predict_proba(vectTweets).toarray()
topic_tweets = pd.DataFrame()
topic_tweets['Twits'] = data['Twits'].apply(remove_emoji)
topic_tweets['Twits'] = topic_tweets['Twits'].apply(cleanPunc)
topic_vectTweets = topicVect.transform(topic_tweets['Twits'])
topic_predictions = mulNb.predict(topic_vectTweets)
topic_probas = mulNb.predict_proba(topic_vectTweets)
dialect_tweets = pd.DataFrame()
#dialect_tweets['Twits'] = data['Twits'].apply(stopWords)
#dialect_tweets['Twits'] = dialect_tweets['Twits'].apply(stemming)
dialect_tweets['Twits'] = data['Twits'].apply(cleanPunc)
dialect_vectTweets = dialectVect.transform(dialect_tweets['Twits'])
dialect_predictions = nb.predict(dialect_vectTweets)
dialect_probas = nb.predict_proba(dialect_vectTweets)
for i, prediction in enumerate(sentiment_predictions):
tmp = {}
tmp['tweet'] = data.iloc[i][0]
tmp['sentiment'] = {}
tmp['topic'] = {}
tmp['dialect'] = {}
if prediction[0] == 1:
tmp['sentiment']["Negative"] = round(sentiment_probas[i][0]*100, 2)
if prediction[1] == 1:
tmp['sentiment']["Neutre"] = round(sentiment_probas[i][1]*100, 2)
if prediction[2] == 1:
tmp['sentiment']["Positive"] = round(sentiment_probas[i][2]*100, 2)
if topic_predictions[i] == "autre":
tmp['topic']["autre"] = round (topic_probas[i][0]*100, 2)
if topic_predictions[i] == "politique":
tmp['topic']["politique"] = round (topic_probas[i][1]*100, 2)
if topic_predictions[i] == "sante":
tmp['topic']["sante"] = round (topic_probas[i][2]*100, 2)
if topic_predictions[i] == "social":
tmp['topic']["social"] = round (topic_probas[i][3]*100, 2)
if topic_predictions[i] == "sport":
tmp['topic']["sport"] = round (topic_probas[i][4]*100, 2)
if topic_predictions[i] == "économique":
tmp['topic']["économique"] = round (topic_probas[i][5]*100, 2)
if dialect_predictions[i] == "Algerian":
tmp['dialect']["Algerian"] = round(dialect_probas[i][0]*100, 2)
if dialect_predictions[i] == "Lebanon":
tmp['dialect']["Lebanon"] = round(dialect_probas[i][1]*100, 2)
if dialect_predictions[i] == "Morocco":
tmp['dialect']["Morocco"] = round(dialect_probas[i][2]*100, 2)
if dialect_predictions[i] == "Tunisian":
tmp['dialect']["Tunisian"] = round(dialect_probas[i][3]*100, 2)
if dialect_predictions[i] == "egypt":
tmp['dialect']["egypt"] = round(dialect_probas[i][4]*100, 2)
result.append(tmp)
return render_template('index.html',variable=None, variables=result)
if __name__ == "__main__":
app.run(debug=False)
# leftover notebook scratch cell; exercises the sentiment model (lr) directly
vectTweet = vectorizer.transform(["tweet"])
prediction = lr.predict(vectTweet).toarray()[0]
proba = lr.predict_proba(vectTweet).toarray()[0]
result = list()
result.append(dict())
result[0]['tweet'] = "de"
result[0]['sentiment'] = {}
if 1 == 1:
result[0]['sentiment']["Negative"] = round(proba[0]*100, 2)
if 1 == 1:
result[0]['sentiment']["Neutre"] = round(proba[1]*100, 2)
if prediction[2] == 1:
result[0]['sentiment']["Positive"] = round(proba[2]*100, 2)
teest = [{'tweet': 'de', 'sentiment': {'Negative': 0.0, 'Neutre': 0.98}}]
dd = pd.DataFrame(['cc','dd'])
dd.iloc[1][0]
|
[
"noreply@github.com"
] |
hamzachataoui.noreply@github.com
|
88003dac9146455ec30d6d23ac9b2b742b15c289
|
a7b21e187141b19ecd1163ddbc99cb2925abca6e
|
/ex/pyhxbc/tsUserv.py
|
327a739f2be0b9fb43377df2b6bc770f92d46ab6
|
[] |
no_license
|
zhangyue0503/python
|
b387b955aafeecad254e1bdc0b2115c94ca6667b
|
4b7014d80b325eed37af9f280f7c92a5edcdbb19
|
refs/heads/master
| 2023-04-04T19:45:31.190018
| 2023-03-20T06:57:36
| 2023-03-20T06:57:36
| 70,409,084
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
#!/usr/bin/env python
from socket import *
from time import ctime
HOST = ''
PORT = 21568
BUFSIZ = 1024
ADDR = (HOST,PORT)
udpSerSock = socket(AF_INET,SOCK_DGRAM)  # datagram socket: recvfrom/sendto below are UDP-style
udpSerSock.bind(ADDR)
while True:
print 'waiting for message...'
data, addr = udpSerSock.recvfrom(BUFSIZ)
udpSerSock.sendto('[%s] %s' % (ctime(), data), addr)
print '...received from and returned to:', addr
udpSerSock.close()
|
[
"zhangyue0503@hotmail"
] |
zhangyue0503@hotmail
|
92fca1268b7584330755792319953fa74a501063
|
86dc940f511c5550447acb0a44b2fd845ad64db5
|
/dymos/transcriptions/solve_ivp/components/solve_ivp_control_group.py
|
01a47867dad5a126c387673fbc10327c6f975ba6
|
[
"Apache-2.0"
] |
permissive
|
thearn/dymos
|
5900c9e456d4bed32082aa787baff63ab9caf8b0
|
1f36a4472fdeb93d337904955c012f254a5db06c
|
refs/heads/master
| 2020-04-06T15:23:05.374083
| 2019-11-26T14:51:01
| 2019-11-26T14:51:01
| 157,576,764
| 0
| 0
| null | 2018-11-14T16:20:44
| 2018-11-14T16:20:43
| null |
UTF-8
|
Python
| false
| false
| 9,922
|
py
|
from __future__ import print_function, division
from six import string_types, iteritems
import numpy as np
from scipy.linalg import block_diag
import openmdao.api as om
from ...grid_data import GridData
from dymos.utils.misc import get_rate_units
from ....utils.lagrange import lagrange_matrices
class SolveIVPControlInterpComp(om.ExplicitComponent):
"""
    Compute the approximated control values and rates at the output nodes, given the values of the control at the control input nodes.
Notes
-----
.. math::
u = \\left[ L \\right] u_d
\\dot{u} = \\frac{d\\tau_s}{dt} \\left[ D \\right] u_d
\\ddot{u} = \\left( \\frac{d\\tau_s}{dt} \\right)^2 \\left[ D_2 \\right] u_d
where
:math:`u_d` are the values of the control at the control discretization nodes,
:math:`u` are the values of the control at all nodes,
:math:`\\dot{u}` are the time-derivatives of the control at all nodes,
:math:`\\ddot{u}` are the second time-derivatives of the control at all nodes,
:math:`L` is the Lagrange interpolation matrix,
:math:`D` is the Lagrange differentiation matrix,
and :math:`\\frac{d\\tau_s}{dt}` is the ratio of segment duration in segment tau space
[-1 1] to segment duration in time.
"""
def initialize(self):
self.options.declare('control_options', types=dict,
desc='Dictionary of options for the dynamic controls')
self.options.declare('time_units', default=None, allow_none=True, types=string_types,
desc='Units of time')
self.options.declare('grid_data', types=GridData, desc='Container object for grid info')
self.options.declare('output_nodes_per_seg', default=None, types=(int,), allow_none=True,
                             desc='If None, results are provided at all nodes within each '
'segment. If an int (n) then results are provided at n '
'equally distributed points in time within each segment.')
# Save the names of the dynamic controls/parameters
self._dynamic_names = []
self._input_names = {}
self._output_val_names = {}
self._output_val_all_names = {}
self._output_rate_names = {}
self._output_rate2_names = {}
def _setup_controls(self):
control_options = self.options['control_options']
num_nodes_all = self.num_nodes_all
num_nodes_output = self.num_nodes_output
num_control_input_nodes = self.options['grid_data'].subset_num_nodes['control_input']
time_units = self.options['time_units']
for name, options in iteritems(control_options):
self._input_names[name] = 'controls:{0}'.format(name)
self._output_val_all_names[name] = 'control_values_all:{0}'.format(name)
self._output_val_names[name] = 'control_values:{0}'.format(name)
self._output_rate_names[name] = 'control_rates:{0}_rate'.format(name)
self._output_rate2_names[name] = 'control_rates:{0}_rate2'.format(name)
shape = options['shape']
input_shape = (num_control_input_nodes,) + shape
all_shape = (num_nodes_all,) + shape
output_shape = (num_nodes_output,) + shape
units = options['units']
rate_units = get_rate_units(units, time_units)
rate2_units = get_rate_units(units, time_units, deriv=2)
self._dynamic_names.append(name)
self.add_input(self._input_names[name], val=np.ones(input_shape), units=units)
self.add_output(self._output_val_all_names[name], shape=all_shape, units=units)
self.add_output(self._output_val_names[name], shape=output_shape, units=units)
self.add_output(self._output_rate_names[name], shape=output_shape, units=rate_units)
self.add_output(self._output_rate2_names[name], shape=output_shape,
units=rate2_units)
def setup(self):
output_nodes_per_seg = self.options['output_nodes_per_seg']
time_units = self.options['time_units']
gd = self.options['grid_data']
num_seg = gd.num_segments
num_nodes_all = gd.subset_num_nodes['all']
if output_nodes_per_seg is None:
num_nodes_output = num_nodes_all
else:
num_nodes_output = num_seg * output_nodes_per_seg
self.add_input('dt_dstau', shape=num_nodes_output, units=time_units)
self.val_jacs = {}
self.rate_jacs = {}
self.rate2_jacs = {}
self.val_jac_rows = {}
self.val_jac_cols = {}
self.rate_jac_rows = {}
self.rate_jac_cols = {}
self.rate2_jac_rows = {}
self.rate2_jac_cols = {}
self.sizes = {}
self.num_nodes_all = num_nodes_all
self.num_nodes_output = num_nodes_output
num_disc_nodes = gd.subset_num_nodes['control_disc']
num_input_nodes = gd.subset_num_nodes['control_input']
# Find the indexing matrix that, multiplied by the values at the input nodes,
# gives the values at the discretization nodes
L_id = np.zeros((num_disc_nodes, num_input_nodes), dtype=float)
L_id[np.arange(num_disc_nodes, dtype=int),
gd.input_maps['dynamic_control_input_to_disc']] = 1.0
# Matrices L_do and D_do interpolate values and rates (respectively) at output nodes from
# values specified at control discretization nodes.
L_da, _ = gd.phase_lagrange_matrices('control_disc', 'all')
L_do_blocks = []
D_do_blocks = []
for iseg in range(num_seg):
i1, i2 = gd.subset_segment_indices['control_disc'][iseg, :]
indices = gd.subset_node_indices['control_disc'][i1:i2]
nodes_given = gd.node_stau[indices]
if output_nodes_per_seg is None:
i1, i2 = gd.subset_segment_indices['all'][iseg, :]
indices = gd.subset_node_indices['all'][i1:i2]
nodes_eval = gd.node_stau[indices]
else:
nodes_eval = np.linspace(-1, 1, output_nodes_per_seg)
L_block, D_block = lagrange_matrices(nodes_given, nodes_eval)
L_do_blocks.append(L_block)
D_do_blocks.append(D_block)
L_do = block_diag(*L_do_blocks)
D_do = block_diag(*D_do_blocks)
self.L = np.dot(L_do, L_id)
self.L_all = np.dot(L_da, L_id)
self.D = np.dot(D_do, L_id)
# Matrix D_dd interpolates rates at discretization nodes from values given at control
# discretization nodes.
_, D_dd = gd.phase_lagrange_matrices('control_disc', 'control_disc')
# Matrix D2 provides second derivatives at output nodes given values at input nodes.
self.D2 = np.dot(D_do, np.dot(D_dd, L_id))
self._setup_controls()
self.set_check_partial_options('*', method='cs')
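# Note on the block-diagonal assembly above (illustrative): for two segments with
# 2x2 per-segment matrices A and B, scipy.linalg.block_diag(A, B) returns the 4x4
# matrix [[A, 0], [0, B]], so each segment's outputs depend only on that
# segment's control discretization nodes.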
def compute(self, inputs, outputs):
control_options = self.options['control_options']
for name, options in iteritems(control_options):
u = inputs[self._input_names[name]]
a = np.tensordot(self.D, u, axes=(1, 0)).T
b = np.tensordot(self.D2, u, axes=(1, 0)).T
# divide each "row" by dt_dstau or dt_dstau**2
outputs[self._output_val_names[name]] = np.tensordot(self.L, u, axes=(1, 0))
outputs[self._output_val_all_names[name]] = np.tensordot(self.L_all, u, axes=(1, 0))
outputs[self._output_rate_names[name]] = (a / inputs['dt_dstau']).T
outputs[self._output_rate2_names[name]] = (b / inputs['dt_dstau'] ** 2).T
class SolveIVPControlGroup(om.Group):
def initialize(self):
self.options.declare('control_options', types=dict,
desc='Dictionary of options for the dynamic controls')
self.options.declare('time_units', default=None, allow_none=True, types=string_types,
desc='Units of time')
self.options.declare('grid_data', types=GridData, desc='Container object for grid info')
self.options.declare('output_nodes_per_seg', default=None, types=(int,), allow_none=True,
                             desc='If None, results are provided at all nodes within each '
'segment. If an int (n) then results are provided at n '
'equally distributed points in time within each segment.')
def setup(self):
gd = self.options['grid_data']
control_options = self.options['control_options']
time_units = self.options['time_units']
if len(control_options) < 1:
return
opt_controls = [name for (name, opts) in iteritems(control_options) if opts['opt']]
if len(opt_controls) > 0:
ivc = self.add_subsystem('indep_controls', subsys=om.IndepVarComp(),
promotes_outputs=['*'])
self.add_subsystem(
'control_interp_comp',
subsys=SolveIVPControlInterpComp(time_units=time_units, grid_data=gd,
control_options=control_options,
output_nodes_per_seg=self.options['output_nodes_per_seg']),
promotes_inputs=['*'],
promotes_outputs=['*'])
for name, options in iteritems(control_options):
if options['opt']:
num_input_nodes = gd.subset_num_nodes['control_input']
ivc.add_output(name='controls:{0}'.format(name),
val=options['val'],
shape=(num_input_nodes, np.prod(options['shape'])),
units=options['units'])
|
[
"noreply@github.com"
] |
thearn.noreply@github.com
|
dda4965db6bc90ff683653d69741ba78636b1639
|
ded519cf89b578109b2874b425ac99bac0470e13
|
/tests/test_malicious_uploads.py
|
b7e5c32a18d643c2719337e64dc92dd5221f0fbe
|
[] |
no_license
|
Tubbz-alt/arxiv-filemanager
|
6fa21225a0c75f86c008fe57405d0a04568a9916
|
59c72d037518f343753145148461fbcb4aeb34bc
|
refs/heads/master
| 2022-04-12T04:20:03.821659
| 2019-06-05T14:08:47
| 2019-06-05T14:08:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
"""Tests specifically focused on security vulnerabilities."""
import os
from unittest import TestCase, mock
from datetime import datetime
import tempfile
from werkzeug.datastructures import FileStorage
from werkzeug.utils import secure_filename
import shutil
from filemanager.process import upload
TEST_FILES_DIRECTORY = os.path.join(os.getcwd(), 'tests/test_files_upload')
class TestRelativePaths(TestCase):
"""Test uploaded archives that include relative paths."""
@mock.patch(f'{upload.__name__}._get_base_directory')
def test_relative_path(self, mock_get_base_dir):
"""Uploaded tarball contains a relative path two levels up."""
UPLOAD_BASE_DIRECTORY = tempfile.mkdtemp()
mock_get_base_dir.return_value = UPLOAD_BASE_DIRECTORY
file_path = os.path.join(TEST_FILES_DIRECTORY, 'relative_path.tar.gz')
with open(file_path, 'rb') as fp:
file = FileStorage(fp)
# Now create upload instance
u = upload.Upload(12345)
# Process upload
u.process_upload(file)
self.assertNotIn('ir.png', os.listdir(UPLOAD_BASE_DIRECTORY),
'File should be prevented from escaping upload'
' workspace.')
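# For reference, a fixture like 'relative_path.tar.gz' could plausibly be built as
# follows (hypothetical sketch; the actual test asset ships with the repository):
#
#   import tarfile
#   with tarfile.open('relative_path.tar.gz', 'w:gz') as tar:
#       tar.add('ir.png', arcname='../../ir.png')   # path escaping the workspace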
|
[
"brp53@cornell.edu"
] |
brp53@cornell.edu
|
fc71f5ef9510879b616faee6fc4371a43f8b7e01
|
7c292ca8a627b96eb74f9b9dfbb47dc2bbaeef24
|
/Parciales/TallerGrupal.py
|
3e5ae974c029adf760e2f5ef86d98f4c1b46aa93
|
[] |
no_license
|
DuvanDu/ProgramacionI
|
b2469a0fba12f8ace520b265cc738da4339beafd
|
71869d74698157ddb048076936d0e48fb75789bc
|
refs/heads/main
| 2023-05-26T23:27:58.824162
| 2021-05-27T12:56:51
| 2021-05-27T12:56:51
| 335,283,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,132
|
py
|
class ElementosDigitales():
def __init__(self, nombreEntrada, creadorEntrada, fechapuEntrada):
self.nombre = nombreEntrada
self.creador = creadorEntrada
self.fechapu = fechapuEntrada
def mostrarAtributos(self):
print(f'''El nombre del autor {self.nombre}
Su creador es {self.creador}
Su fecha de publicacion {self.fechapu}
''')
class Usuario():
def __init__(self, nombreEntrada, edadEntrada, sexoEntrada, nacionalidadEntrada):
self.nombre = nombreEntrada
self.edad = edadEntrada
self.sexo = sexoEntrada
self.nacionalidad = nacionalidadEntrada
def mostrarAtributos(self):
print(f'''El nombre del usuario {self.nombre}
Su edad es {self.edad}
Sexo {self.sexo}
Su nacionalidad es {self.nacionalidad}
''')
def cancion(self,nombreCancion):
'''Says that the user is listening to a song'''
print(f'Hola soy {self.nombre} y estoy escuchando {nombreCancion}')
class Pagina():
def __init__(self, tipoEntrada, formatoEntrada, fechapuEntrada):
self.tipo = tipoEntrada
self.formato = formatoEntrada
self.fechapu = fechapuEntrada
def mostrarAtributos(self):
print(f'''Su tipo de entrada es {self.tipo}
Su formato es {self.formato}
Se publico el {self.fechapu}
''')
class Cancion(ElementosDigitales):
def __init__(self, nombreEntrada, creadorEntrada, fechapuEntrada, generoEntrada, duracionEntrada):
ElementosDigitales.__init__(self, nombreEntrada, creadorEntrada, fechapuEntrada)
self.genero = generoEntrada
self.duracion = duracionEntrada
def nueCancion(self,nombreCancion, fecha):
'''Says that the user is listening to a song'''
print(f'Hola soy {self.nombre} y estoy escuchando {nombreCancion} de {fecha} ')
def bucleCancion(self, cantidadRepro, nombreCancion):
for i in range (cantidadRepro):
print(f'{nombreCancion} sonando {i+1} vez')
class Artista(Usuario):
def __init__(self, nombreEntrada, edadEntrada, sexoEntrada, nacionalidadEntrada, generoEntrada, numeroCanEntrada, numeroAlbEntrada):
Usuario.__init__(self, nombreEntrada, edadEntrada, sexoEntrada, nacionalidadEntrada)
self.genero = generoEntrada
self.numeroCan = numeroCanEntrada
self.numeroAlb = numeroAlbEntrada
def concierto(self,nombreCiudad):
'''Says that the artist will give a concert in the given city'''
print(f'Hola soy {self.nombre} y dare un concierto en {nombreCiudad}')
class Favoritos(Pagina):
def __init__(self, tipoEntrada, formatoEntrada, fechapuEntrada, favoritosComEntrada, listaFavEntrada, fechaUpEntrada):
Pagina.__init__(self, tipoEntrada, formatoEntrada, fechapuEntrada)
self.favoritosCom = favoritosComEntrada
self.listaFav = listaFavEntrada
self.fechaUp = fechaUpEntrada
#-----Team members: Mariana Villegas and Duvan Duque-----#
|
[
"duque.duvan@uces.edu.co"
] |
duque.duvan@uces.edu.co
|