blob_id (stringlengths 40..40) | directory_id (stringlengths 40..40) | path (stringlengths 2..616) | content_id (stringlengths 40..40) | detected_licenses (listlengths 0..69) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5..118) | snapshot_id (stringlengths 40..40) | revision_id (stringlengths 40..40) | branch_name (stringlengths 4..63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64 2.91k..686M, nullable) | star_events_count (int64 0..209k) | fork_events_count (int64 0..110k) | gha_license_id (stringclasses, 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 213 values) | src_encoding (stringclasses, 30 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64 2..10.3M) | extension (stringclasses, 246 values) | content (stringlengths 2..10.3M) | authors (listlengths 1..1) | author_id (stringlengths 0..212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f8f355ac590a2f2b29218b8123e6de75f99bdb76
|
81d3d194cb2545916fc12e47eb9bc9471999d226
|
/spokehub/wsgi.py
|
d5c0380350e9978efcc2288af6a76de7b4f68a78
|
[] |
no_license
|
thraxil/spokehub
|
91e88278bfccbff8c636221e13fec5a72b7b174b
|
e1ac0a335d454719af6b29ae1e7c8aae0b69a425
|
refs/heads/master
| 2021-07-12T20:06:44.398113
| 2021-03-15T16:08:08
| 2021-03-17T10:46:29
| 16,827,151
| 1
| 2
| null | 2021-03-28T11:08:51
| 2014-02-14T04:37:29
|
Python
|
UTF-8
|
Python
| false
| false
| 189
|
py
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"spokehub.settings")
application = get_wsgi_application()
|
[
"anders@columbia.edu"
] |
anders@columbia.edu
|
dba74e1793f65d06a2685f1f3cecc728ed06726e
|
5d029426b972b94ed5e9bd587a1336cfbd8655cf
|
/app/__init__.py
|
5ebcb695608297970eaeb1ec7db3cd474b9ae7f3
|
[] |
no_license
|
jkhostopia/shShorts
|
ee03bebd938b6f02b01cab4b022f2752726eabd9
|
dd2e3081a3e139e3fe2a03caa4bae8c6937e6cf6
|
refs/heads/master
| 2022-12-11T18:57:51.174069
| 2020-08-19T04:38:45
| 2020-08-19T04:38:45
| 288,174,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 653
|
py
|
from flask import Flask
app = Flask(__name__)
from app import routes
import logging
from logging.handlers import RotatingFileHandler
import os
if not app.debug:
# ...
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/apperror.log', maxBytes=10485760,
backupCount=10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('Shell Shorts')
|
[
"noreply@github.com"
] |
jkhostopia.noreply@github.com
|
c5f5d783e7178823a11129913b70eeaaff7aacee
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/ocr-numbers/38ce19b8e031425f9401126d45985f54.py
|
4d4e69b832039edf020fe93f081b0cad9f3c79f9
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
raw_nums = [
' _ ', #' _ _ _ _ _ _ _ ',
'| | |', #' _| _||_||_ |_ ||_||_|',
'|_| |', #'|_ _| | _||_| ||_| _|',
' ', #' '
]
max_num = 1
num_grid = []
for ix in range(max_num+1):
num_grid.append( [ s[ix*3:(ix+1)*3] for s in raw_nums ] )
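# num_grid[ix] is the 4-line, 3-character-wide glyph for digit ix (only 0 and 1 are encoded above)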
def cmp_str(l1, l2):
for ix in range(4):
if l1[ix] != l2[ix]:
return False
return True
def number(l):
if len(l) != 4:
raise ValueError('not enough lines')
for line in l:
if len(line) != 3:
raise ValueError('Line too short')
for ix in range(max_num+1):
if cmp_str(l, num_grid[ix]):
return str(ix)
return '?'
def grid(num):
num = int(num)
if num > max_num:
raise ValueError('not implemented yet')
return num_grid[num]
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
1ee2c923e5c6c91fec4bb361065d5dec791dea0c
|
2c55f64836673c45e88adac4bbdd5fc5763a06ff
|
/chapter05playingwithsetsandprobability.py
|
ae1cc82207c759fb0861794139d7616d183fc395
|
[] |
no_license
|
raymondmar61/pythondoingmathwithpython
|
d6dc9f114cc0948a9b8b5283ab349f8e7d75052e
|
d18dce5bdb7e8931b32ac39e687d8a2b33bdcef2
|
refs/heads/master
| 2020-05-18T11:00:02.215981
| 2019-05-29T02:20:34
| 2019-05-29T02:20:34
| 184,365,328
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,478
|
py
|
#Chapter 5 Playing With Sets And Probability
#A set is a collection of distinct objects often called elements or members. No two members of a set are the same. In mathematical notation, a set is enclosed in curly brackets; e.g. {2, 4, 6}
from sympy import FiniteSet
from fractions import Fraction
firstset = FiniteSet(2, 4, 6)
print(firstset) #print {2, 4, 6}
secondset = FiniteSet(1, 1.5, Fraction(1, 5))
print(secondset) #print {1/5, 1, 1.5}
thirdset = FiniteSet(1, 1.5, 3)
print(len(thirdset)) #print 3
print(3 in thirdset) #print True
print(568 in thirdset) #print False
fourthset = FiniteSet()
print(fourthset) #print EmptySet()
numberslist = [1, 99, 840]
fifthset = FiniteSet(*numberslist)
print(fifthset) #print {1, 99, 840}
print(set(numberslist)) #print {840, 1, 99}
#sets ignore repeats of a member and don't keep track of the order
numberslist2 = [1, 99, 7392, 99]
sixthset = FiniteSet(*numberslist2)
print(sixthset) #print {1, 99, 7392}
for eachsixthset in sixthset:
print(eachsixthset) #print 1\n 99\n 7392
#A set is a subset of another set if all the members are also members of the other set. Remember, all.
seventhset = FiniteSet(999, 439, 20984)
eigthset = FiniteSet(999, 69, 48)
ninthset = FiniteSet(999, 69)
print(seventhset.is_subset(eigthset)) #print False
print(eigthset.is_subset(seventhset)) #print False
print(ninthset.is_subset(eigthset)) #print True
#A set is a superset of another set if it contains all of that set's members. Remember, contains.
print(ninthset.is_superset(eigthset)) #print False
print(eigthset.is_superset(ninthset)) #print True
#The power set is the set of all possible subsets
tenthset = FiniteSet(20, 55, 41, 98)
print(tenthset.powerset()) #print {EmptySet(), {20}, {41}, ..., {20, 55, 98}, {41, 55, 98}, {20, 41, 55, 98}}
seventhset = FiniteSet(999, 439, 20984)
eigthset = FiniteSet(999, 69, 48)
ninthset = FiniteSet(999, 69)
print(seventhset.is_proper_subset(eigthset)) #print False
print(eigthset.is_proper_subset(seventhset)) #print False
print(ninthset.is_proper_subset(eigthset)) #print True
print(ninthset.is_proper_superset(eigthset)) #print False
print(eigthset.is_proper_superset(ninthset)) #print True
tenthset = FiniteSet(1, 2, 3)
eleventhset = FiniteSet(2, 4, 6)
print(tenthset.union(eleventhset)) #print {1, 2, 3, 4, 6}
print(tenthset.intersect(eleventhset)) #print {2}
#we can apply union and intersect to more than two sets.
tenthset = FiniteSet(1, 2, 3)
eleventhset = FiniteSet(2, 4, 6)
twelthset = FiniteSet(3, 5, 7)
print(tenthset.union(eleventhset).union(twelthset)) #print {1, 2, 3, 4, 5, 6, 7}
print(tenthset.intersect(eleventhset).intersect(twelthset)) #print EmptySet()
#The cartesian product creates a set that consists of all possible pairs made by taking an element from each set.
print(tenthset*eleventhset) #print {1, 2, 3} x {2, 4, 6}
tentheleventh = tenthset*eleventhset
for eachtentheleventh in tentheleventh:
print(eachtentheleventh)
'''
(1, 2)
(1, 4)
(1, 6)
(2, 2)
(2, 4)
(2, 6)
(3, 2)
(3, 4)
(3, 6)
'''
#If we apply the exponential operator to a set, we get the Cartesian product of the set times itself the specified number of times.
tenthsetpowerthree = tenthset**3
print(tenthsetpowerthree) #print {1, 2, 3} x {1, 2, 3} x {1, 2, 3}
for eachtenthsetpowerthree in tenthsetpowerthree:
print(eachtenthsetpowerthree)
'''
(1, 1, 1)
(1, 1, 2)
(1, 1, 3)
(1, 2, 1)
(1, 2, 2)
(1, 2, 3)
(1, 3, 1)
(1, 3, 2)
(1, 3, 3)
(2, 1, 1)
(2, 1, 2)
(2, 1, 3)
(2, 2, 1)
(2, 2, 2)
(2, 2, 3)
(2, 3, 1)
(2, 3, 2)
(2, 3, 3)
(3, 1, 1)
(3, 1, 2)
(3, 1, 3)
(3, 2, 1)
(3, 2, 2)
(3, 2, 3)
(3, 3, 1)
(3, 3, 2)
(3, 3, 3)
'''
print("\n")
#Uniform probability all outcomes equally likely
#Probability terms. An experiment is the test we want to perform; a single run of it is called a trial. A sample space S is the set of all possible outcomes. An event E is a set of outcomes, i.e. a subset of S.
def uniformdistributionprobability(event, samplespace):
return len(event)/len(samplespace)
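#e.g. P(rolling a 3) = |{3}| / |{1,2,3,4,5,6}| = 1/6 ≈ 0.1667, as the first printout below shows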
#probability roll a 3 on a six-sided die
samplespace = [1, 2, 3, 4, 5, 6]
event = [3]
print(uniformdistributionprobability(event, samplespace)) #print 0.16666666666666666
#probability roll a prime number or an odd number
samplespace = [1, 2, 3, 4, 5, 6]
primenumber = {2, 3, 5}
oddnumber = {1, 3, 5}
event = primenumber.union(oddnumber)
print(event) #print {1, 2, 3, 5}
print(uniformdistributionprobability(event, samplespace)) #print 0.6666666666666666
#probability roll a prime number and an odd number
samplespace = [1, 2, 3, 4, 5, 6]
primenumber = FiniteSet(2, 3, 5)
oddnumber = [1, 3, 5]
oddnumber = FiniteSet(*oddnumber)
event = primenumber.intersect(oddnumber)
print(event) #print {3, 5}
print(uniformdistributionprobability(event, samplespace)) #print 0.3333333333333333
#random numbers
from random import randint
print(randint(1,6)) #prints random number between 1 and 6 inclusive
def rolladicetototalscore(totalscore):
diceroll = 0
rollnumber = 1
while diceroll <= totalscore:
diceroll = diceroll + randint(1,6)
if diceroll >= totalscore:
return rollnumber, diceroll
else:
rollnumber += 1
totalscore = 20
rollnumber, score = rolladicetototalscore(totalscore) #call once so both printed values come from the same run
print("Score of {} reached in {} rolls.".format(score, rollnumber)) #print Score of 20 reached in 6 rolls. RM: number of rolls is random because diceroll = diceroll + randint(1,6)
def targetscoreprobability(sideddie, numberofrolls, targetscore):
diesides = [n for n in range(1,sideddie+1)]
diesides = FiniteSet(*diesides)
#print("Dice sides",diesides)
samplesize = diesides**numberofrolls
#print("Sample Size",samplesize)
successfulrolls = [] #RM: I could use a counter for successfulrolls. I choose to count the targetscores in a list to comprehend.
for eachsamplesize in samplesize:
#print(eachsamplesize, sum(eachsamplesize))
if sum(eachsamplesize) == targetscore:
#print(eachsamplesize, sum(eachsamplesize))
successfulrolls.append(eachsamplesize)
successfuloutcomescount = len(successfulrolls)
samplesizecount = len(samplesize)
probability = successfuloutcomescount/samplesizecount
return probability
sideddie = 6
numberofrolls = 5
targetscore = 25
print("Probability of exact target score {} rolling a {}-sided die {} times is {}.".format(targetscore, sideddie, numberofrolls, targetscoreprobability(sideddie,numberofrolls,targetscore))) #print Probability of exact target score 25 rolling a 6-sided die 5 times is 0.016203703703703703.
sideddie = 20
numberofrolls = 4
targetscore = 80
print("Probability of exact target score {} rolling a {}-sided die {} times is {}.".format(targetscore, sideddie, numberofrolls, targetscoreprobability(sideddie,numberofrolls,targetscore))) #print Probability of exact target score 80 rolling a 20-sided die 4 times is 6.25e-06.
#Nonuniform probability outcomes nonequally likely. Draw a probability number line representing the possible outcomes.
from random import random
print(random()) #prints a random floating-point number in [0, 1)
#probability a coin is heads 2/3 and tails 1/3
def flipcoin():
#2/3 heads, 1/3 tails
coinflip = random()
if coinflip < 2/3:
return "Heads"
else:
return "Tails"
for n in range(0,19):
print(flipcoin())
'''
Heads
Heads
Heads
Heads
Heads
Tails
Heads
Heads
Heads
Heads
Heads
Heads
Tails
Heads
Heads
Heads
Tails
Heads
Heads
'''
# dollarbills = [5, 10, 20, 50]
# probabilities = [1/6, 1/6, 1/3, 1/3]
# dollarbillsprobability = []
# initializeprobability = 0
# for p in probabilities:
# initializeprobability+=p
# dollarbillsprobability.append(initializeprobability)
# print(dollarbillsprobability)
def fictionalfreeatm():
dollarbills = [5, 10, 20, 50]
probabilities = [1/6, 1/6, 1/3, 1/3]
dollarbillsprobability = []
initializeprobability = 0
for p in probabilities:
initializeprobability+=p
dollarbillsprobability.append(initializeprobability)
#print(dollarbillsprobability)
probability = random()
for index, eachdollarbillsprobability in enumerate(dollarbillsprobability):
#print(index, eachdollarbillsprobability)
if probability < eachdollarbillsprobability:
return probability, index, dollarbills[index]
atmvisits = 20
counter = 1
amountwithdrawn = 0
while counter <= atmvisits:
withdrawal = fictionalfreeatm()
print(withdrawal)
amountwithdrawn+=withdrawal[2]
counter+=1
print(amountwithdrawn)
'''
(0.10568177477900953, 0, 5)
(0.6719005340142075, 3, 50)
(0.24341150124107103, 1, 10)
(0.4094235719966657, 2, 20)
(0.6714461798290673, 3, 50)
print(amountwithdrawn)-->135
'''
|
[
"raym61@hotmail.com"
] |
raym61@hotmail.com
|
b31eceb37b5b7e829c53b6e59b770a0e8061880f
|
1a0b516452dc37f5686cc87add50cae6b3830cb8
|
/backend/app/__init__.py
|
1eeb5150e865400a97494dac6bdc41a22af82a38
|
[] |
no_license
|
findcongwang/iot_data_collector_example
|
b2119434210816299599bdbe72eb65ad90219604
|
8d78d26bb98909897a1ed7c8f9fd671cfa5ddec6
|
refs/heads/main
| 2023-02-28T20:37:26.537199
| 2021-02-09T23:53:34
| 2021-02-09T23:53:34
| 337,159,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
import json
from datetime import datetime
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from pymemcache.client.base import Client
app = Flask(__name__)
app.config.from_object('config.Config')
db = SQLAlchemy()
db.init_app(app)
from .models import DeviceData
"""
We use memcached to cache the top 10 devices per feature on write.
Dashboard interval is one of ["all_time", "past_minute", "past_hour"]
Memcached = {
<feature>_<interval>: {timestamp, minheap}
}
"""
intervals = ["all_time", "past_minute", "past_hour"]
memcached = Client('localhost')
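# pre-seed an empty cache entry per (feature, interval) key so later reads always find a value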
for feature in DeviceData.features():
for itv in intervals:
key = "_".join([feature, itv])
# initialize if nothing cached
if memcached.get(key, None) is None:
ts = datetime.fromtimestamp(0).isoformat()
memcached.set(
"_".join([feature, itv]),
json.dumps({"timestamp": ts, "minmaxes": []})
)
with app.app_context():
db.create_all()
from . import routes
|
[
"findcongwang@gmail.com"
] |
findcongwang@gmail.com
|
c60f1607a09f2deb1dc277f57c1ce10a9fc7d54f
|
3acf7a22b1d68ab238b713da46788c9ffdd5b96e
|
/src/huapa_deep_res_bi/code/data_helpers.py
|
8c510fceaddbc185525e6fea933e570904a886b5
|
[] |
no_license
|
ethnhe/DeepNeurlNetSentimentClassification
|
245bb6737c050699753f8e25667a47b6fcf247f4
|
e181676761233434322c3cfcaa013b2ed385bfe7
|
refs/heads/master
| 2020-04-10T03:46:01.461070
| 2018-12-07T06:32:32
| 2018-12-07T06:32:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,007
|
py
|
#-*- coding: utf-8 -*-
#author: Zhen Wu
import numpy as np
import pickle
import os
import nori2 as nori
from tqdm import tqdm
def load_embedding(embedding_file_path, corpus, embedding_dim):
    wordset = set()
for line in corpus:
line = line.strip().split()
for w in line:
wordset.add(w.lower())
    words_dict = dict()
    word_embedding = []
    index = 1
words_dict['$EOF$'] = 0 #add EOF
word_embedding.append(np.zeros(embedding_dim))
with open(embedding_file_path, 'rb') as f:
word_emb_dict = pickle.loads(f.read())
print(len(word_emb_dict))
for index, line in enumerate(word_emb_dict,start=1):
embedding = line[1]
word_embedding.append(embedding)
words_dict[line[0]] = index
"""
with open(embedding_file_path, 'r') as f:
for line in f:
check = line.strip().split()
if len(check) == 2: continue
line = line.strip().split()
if line[0] not in wordset: continue
embedding = [float(s) for s in line[1:]]
word_embedding.append(embedding)
words_dict[line[0]] = index
index +=1
"""
return np.asarray(word_embedding), words_dict
def fit_transform(x_text, words_dict, max_sen_len, max_doc_len):
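    # pads/clips each document to max_doc_len sentences of max_sen_len word ids;
    # returns the id grid plus the true per-sentence and per-document lengths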
x, sen_len, doc_len = [], [], []
for index, doc in enumerate(x_text):
t_sen_len = [0] * max_doc_len
t_x = np.zeros((max_doc_len, max_sen_len), dtype=int)
sentences = doc.split('<sssss>')
i = 0
for sen in sentences:
j = 0
for word in sen.strip().split():
if j >= max_sen_len:
break
if word not in words_dict: continue
t_x[i, j] = words_dict[word]
j += 1
t_sen_len[i] = j
i += 1
if i >= max_doc_len:
break
doc_len.append(i)
sen_len.append(t_sen_len)
x.append(t_x)
return np.asarray(x), np.asarray(sen_len), np.asarray(doc_len)
class Dataset(object):
def __init__(self, data_file):
self.t_usr = []
self.t_prd = []
self.t_hlp = []
self.t_tme = []
self.t_label = []
self.t_docs = []
self.t_sums = []
self.nf = nori.Fetcher()
nori_path = data_file.replace(".ss", ".nori.list")
if os.path.exists(nori_path):
nid_list = [i.strip() for i in open(nori_path, 'r').readlines()]
for nid in nid_list:
line = pickle.loads(self.nf.get(nid))
self.t_usr.append(line[0])
self.t_prd.append(line[1])
self.t_label.append(line[2])
self.t_docs.append(line[3])
self.t_tme.append(line[4])
self.t_hlp.append(line[6])
else:
with nori.open(nori_path.replace(".list",""), 'w') as nw:
nid_list = []
with open(data_file, 'r') as f:
idx = 0
for line in f:
# line = line.strip().decode('utf8', 'ignore').split('\t\t')
line = line.strip().split('\t\t')
if idx == 0 :
print("one input data line: ", line)
idx = 2
print("length:", len(line))
if len(line) < 8:
continue
line = [i.strip().lower() for i in line]
line[2] = int(float(line[2])) - 1
nid = nw.put(pickle.dumps(line))
nid_list.append(nid)
self.t_usr.append(line[0])
self.t_prd.append(line[1])
self.t_label.append(line[2])
self.t_docs.append(line[3])
self.t_tme.append(line[4])
self.t_hlp.append(line[6])
# self.t_sums.append(line[8].strip().lower())
os.system('nori speedup {} --on'.format(nori_path.replace(".list","")))
with open(nori_path, 'w') as of:
for nid in nid_list:
print(nid, file = of)
self.data_size = len(self.t_docs)
self.sum_size = len(self.t_sums)
def get_usr_prd_hlp_tme_dict(self):
usrdict, prddict, hlpdict, tmedict = dict(), dict(), dict(), dict()
usridx, prdidx, hlpidx, tmeidx = 0, 0, 0, 0
for u in self.t_usr:
if u not in usrdict:
usrdict[u] = usridx
usridx += 1
for p in self.t_prd:
if p not in prddict:
prddict[p] = prdidx
prdidx += 1
for h in self.t_hlp:
if h not in hlpdict:
hlpdict[h] = hlpidx
hlpidx += 1
for t in self.t_tme:
if t not in tmedict:
tmedict[t] = tmeidx
tmeidx += 1
return usrdict, prddict, hlpdict, tmedict
def genBatch(self, usrdict, prddict, hlpdict, tmedict, wordsdict, batch_size, max_sen_len, max_doc_len, n_class):
self.epoch = int(len(self.t_docs) / batch_size)
if len(self.t_docs) % batch_size != 0:
self.epoch += 1
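        # self.epoch is really the number of batches per pass (the last batch may be partial)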
self.usr = []
self.prd = []
self.hlp = []
self.tme = []
self.label = []
self.docs = []
self.sen_len = []
self.doc_len = []
self.sums = []
self.ssen_len = []
self.sdoc_len = []
for i in range(self.epoch):
self.usr.append(np.asarray(list(map(lambda x: usrdict.get(x, len(usrdict)), self.t_usr[i*batch_size:(i+1)*batch_size])), dtype=np.int32))
self.prd.append(np.asarray(list(map(lambda x: prddict.get(x, len(prddict)), self.t_prd[i*batch_size:(i+1)*batch_size])), dtype=np.int32))
self.hlp.append(np.asarray(list(map(lambda x: hlpdict.get(x, len(hlpdict)), self.t_hlp[i*batch_size:(i+1)*batch_size])), dtype=np.int32))
self.tme.append(np.asarray(list(map(lambda x: tmedict.get(x, len(tmedict)), self.t_tme[i*batch_size:(i+1)*batch_size])), dtype=np.int32))
self.label.append(np.eye(n_class, dtype=np.float32)[self.t_label[i*batch_size:(i+1)*batch_size]])
b_docs, b_sen_len, b_doc_len = fit_transform(self.t_docs[i*batch_size:(i+1)*batch_size],
wordsdict, max_sen_len, max_doc_len)
# b_sums, b_ssen_len, b_sdoc_len = fit_transform(self.t_sums[i*batch_size:(i+1)*batch_size],
# wordsdict, max_ssen_len, max_sdoc_len)
self.docs.append(b_docs)
self.sen_len.append(b_sen_len)
self.doc_len.append(b_doc_len)
"""
self.sums.append(b_sums)
self.ssen_len.append(b_ssen_len)
self.sdoc_len.append(b_sdoc_len)
"""
def batch_iter(self, usrdict, prddict, hlpdict, tmedict, wordsdict, n_class, batch_size, num_epochs, max_sen_len, max_doc_len, shuffle=True):
data_size = len(self.t_docs)
num_batches_per_epoch = int(data_size / batch_size) + \
(1 if data_size % batch_size else 0)
self.t_usr = np.asarray(self.t_usr)
self.t_prd = np.asarray(self.t_prd)
self.t_hlp = np.asarray(self.t_hlp)
self.t_tme = np.asarray(self.t_tme)
self.t_label = np.asarray(self.t_label)
self.t_docs = np.asarray(self.t_docs)
for epoch in range(num_epochs):
# Shuffle the data at each epoch
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
self.t_usr = self.t_usr[shuffle_indices]
self.t_prd = self.t_prd[shuffle_indices]
self.t_hlp = self.t_hlp[shuffle_indices]
self.t_tme = self.t_tme[shuffle_indices]
self.t_label = self.t_label[shuffle_indices]
self.t_docs = self.t_docs[shuffle_indices]
for batch_num in range(num_batches_per_epoch):
start = batch_num * batch_size
end = min((batch_num + 1) * batch_size, data_size)
usr = map(lambda x: usrdict[x], self.t_usr[start:end])
prd = map(lambda x: prddict[x], self.t_prd[start:end])
hlp = map(lambda x: hlpdict[x], self.t_hlp[start:end])
tme = map(lambda x: tmedict[x], self.t_tme[start:end])
label = np.eye(n_class, dtype=np.float32)[self.t_label[start:end]]
docs, sen_len, doc_len = fit_transform(self.t_docs[start:end], wordsdict, max_sen_len, max_doc_len)
batch_data = zip(usr, prd, hlp, tme, docs, label, sen_len, doc_len)
yield batch_data
|
[
"heyisheng@megvii.com"
] |
heyisheng@megvii.com
|
db6d810b48a8953558ca2300f85cdbda928d94e5
|
bdec175f02173938f99e546e772ce8b3730a3f48
|
/basics/ex94.py
|
f4c921e18b6d1616f1f2cef7e1821cb959a77f74
|
[] |
no_license
|
hmunduri/MyPython
|
99f637f6665a733903968047aa46b763d9557858
|
af26f3a4ffb9b786d682114635b432480010ffc8
|
refs/heads/master
| 2020-03-09T13:13:59.873228
| 2018-04-20T22:33:30
| 2018-04-20T22:33:30
| 128,805,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28
|
py
|
x = b'Abcd'
print(tuple(x))
|
[
"root@himagiri0275.mylabserver.com"
] |
root@himagiri0275.mylabserver.com
|
ccbf9eca6a67c56757f628f48ebba4f1013ff7c8
|
c9da94f3f010bfe415e4e805e037e5558b06eb11
|
/polls/urls.py
|
f470b7a47fc8252cfccb05eea8f60f8608eb24c9
|
[] |
no_license
|
nam2582/hanvit_basic
|
1b807456aa09918393bba5852b3ca30c4626720c
|
22d5ae48eb39be551d2dfac013a7415056d7678b
|
refs/heads/master
| 2020-06-17T19:49:33.489942
| 2016-12-01T16:01:54
| 2016-12-01T16:01:54
| 74,974,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^(?P<question_id>\d+)/$', views.detail, name='detail'),
url(r'^(?P<question_id>\d+)/vote/$', views.vote, name='vote'),
url(r'^(?P<question_id>\d+)/results/$', views.results, name='results'),
]
|
[
"sangwoonam@SANGWOOui-Mac-mini.local"
] |
sangwoonam@SANGWOOui-Mac-mini.local
|
a0ac56290b54da96b8e6193865496d39b06a3b95
|
a076650ace6022949665b900afb0ef0cd4d5509a
|
/python/Lib/site-packages/grpc/_common.py
|
bd1a4b8112bad741dabf10dec10298ec6c71a45d
|
[] |
no_license
|
Gouet/Keras_loaded_model
|
5829b1179accc7c34aaef3d75fffb5f10fed8e31
|
512b291ad55d3e7ee31cfc75670d296a75550d64
|
refs/heads/master
| 2021-09-29T14:36:29.771088
| 2018-11-25T15:42:04
| 2018-11-25T15:42:04
| 154,397,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,650
|
py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared implementation."""
import logging
import six
import grpc
from grpc._cython import cygrpc
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = {
cygrpc.ConnectivityState.idle:
grpc.ChannelConnectivity.IDLE,
cygrpc.ConnectivityState.connecting:
grpc.ChannelConnectivity.CONNECTING,
cygrpc.ConnectivityState.ready:
grpc.ChannelConnectivity.READY,
cygrpc.ConnectivityState.transient_failure:
grpc.ChannelConnectivity.TRANSIENT_FAILURE,
cygrpc.ConnectivityState.shutdown:
grpc.ChannelConnectivity.SHUTDOWN,
}
CYGRPC_STATUS_CODE_TO_STATUS_CODE = {
cygrpc.StatusCode.ok: grpc.StatusCode.OK,
cygrpc.StatusCode.cancelled: grpc.StatusCode.CANCELLED,
cygrpc.StatusCode.unknown: grpc.StatusCode.UNKNOWN,
cygrpc.StatusCode.invalid_argument: grpc.StatusCode.INVALID_ARGUMENT,
cygrpc.StatusCode.deadline_exceeded: grpc.StatusCode.DEADLINE_EXCEEDED,
cygrpc.StatusCode.not_found: grpc.StatusCode.NOT_FOUND,
cygrpc.StatusCode.already_exists: grpc.StatusCode.ALREADY_EXISTS,
cygrpc.StatusCode.permission_denied: grpc.StatusCode.PERMISSION_DENIED,
cygrpc.StatusCode.unauthenticated: grpc.StatusCode.UNAUTHENTICATED,
cygrpc.StatusCode.resource_exhausted: grpc.StatusCode.RESOURCE_EXHAUSTED,
cygrpc.StatusCode.failed_precondition: grpc.StatusCode.FAILED_PRECONDITION,
cygrpc.StatusCode.aborted: grpc.StatusCode.ABORTED,
cygrpc.StatusCode.out_of_range: grpc.StatusCode.OUT_OF_RANGE,
cygrpc.StatusCode.unimplemented: grpc.StatusCode.UNIMPLEMENTED,
cygrpc.StatusCode.internal: grpc.StatusCode.INTERNAL,
cygrpc.StatusCode.unavailable: grpc.StatusCode.UNAVAILABLE,
cygrpc.StatusCode.data_loss: grpc.StatusCode.DATA_LOSS,
}
STATUS_CODE_TO_CYGRPC_STATUS_CODE = {
grpc_code: cygrpc_code
for cygrpc_code, grpc_code in six.iteritems(
CYGRPC_STATUS_CODE_TO_STATUS_CODE)
}
def encode(s):
if isinstance(s, bytes):
return s
else:
return s.encode('ascii')
def decode(b):
if isinstance(b, str):
return b
else:
try:
return b.decode('utf8')
except UnicodeDecodeError:
_LOGGER.exception('Invalid encoding on %s', b)
return b.decode('latin1')
def _transform(message, transformer, exception_message):
if transformer is None:
return message
else:
try:
return transformer(message)
except Exception: # pylint: disable=broad-except
_LOGGER.exception(exception_message)
return None
def serialize(message, serializer):
return _transform(message, serializer, 'Exception serializing message!')
def deserialize(serialized_message, deserializer):
return _transform(serialized_message, deserializer,
'Exception deserializing message!')
def fully_qualified_method(group, method):
return '/{}/{}'.format(group, method)
|
[
"victor.gouet@epitech.eu"
] |
victor.gouet@epitech.eu
|
b77de00da770cac07e6c97f20b6913735e20eece
|
b1af2ba3fd1eb7534d0085e34f9df26d3ef73766
|
/trees.py
|
72d38e7597e7977906ffd8febc36258535bd9dbd
|
[] |
no_license
|
StopImpossible/machine_learning_in_action
|
22b713d0202e3540cffa48f90ffa6a2e49054c30
|
8d06c9a25b30844b651c0216fa210051dda60dd4
|
refs/heads/master
| 2016-09-06T15:11:37.415459
| 2015-04-23T06:01:27
| 2015-04-23T06:01:27
| 34,123,132
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,972
|
py
|
from math import log
import operator
def calc_shannon_ent(data_set):
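    # Shannon entropy: H = -sum(p * log2(p)) over the class-label distribution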
num_entries = len(data_set)
label_counts = {}
for feat_vec in data_set:
current_label = feat_vec[-1]
if current_label not in label_counts.keys():
label_counts[current_label] = 0
label_counts[current_label] += 1
shanno_ent = 0.0
for key in label_counts:
prob = float(label_counts[key]) / num_entries
shanno_ent -= prob * log(prob, 2)
return shanno_ent
def create_data_set():
data_set = [
[1, 1, 'Yes'],
[1, 1, 'Yes'],
[1, 0, 'No'],
[0, 1, 'No'],
[0, 1, 'No']
]
labels = ['no surfacing', 'flippers']
return data_set, labels
def split_data_set(data_set, axis, value):
ret_data_set = []
for feat_vec in data_set:
if feat_vec[axis] == value:
reduced_feat_vec = feat_vec[:axis]
reduced_feat_vec.extend(feat_vec[axis + 1:])
ret_data_set.append(reduced_feat_vec)
return ret_data_set
def choose_best_feature_to_split(data_set):
num_feature = len(data_set[0]) - 1
base_entropy = calc_shannon_ent(data_set)
    print(base_entropy)
best_info_gain = 0.0
best_feature = -1
for i in range(num_feature):
feat_list = [example[i] for example in data_set]
unique_vals = set(feat_list)
new_entropy = 0.0
for value in unique_vals:
            sub_data_set = split_data_set(data_set, i, value)
prob = len(sub_data_set) / float(len(data_set))
new_entropy += prob * calc_shannon_ent(sub_data_set)
info_gain = base_entropy - new_entropy
        if info_gain > best_info_gain:
best_info_gain = info_gain
best_feature = i
return best_feature
def majority_cnt(class_list):
    class_count = {}
    for vote in class_list:
        if vote not in class_count:
            class_count[vote] = 0
        class_count[vote] += 1
    sorted_class_count = sorted(class_count.items(),
                                key=operator.itemgetter(1),
                                reverse=True)
    # return the most common class label
    return sorted_class_count[0][0]
def create_tree(data_set, labels):
class_list = [example[-1] for example in data_set]
if class_list.count(class_list[0]) == len(class_list):
return class_list[0]
if len(data_set[0]) == 1:
return majority_cnt(class_list)
best_feature = choose_best_feature_to_split(data_set)
best_feature_label = labels[best_feature]
my_tree = {best_feature_label:{}}
del(labels[best_feature])
feat_values = [example[best_feature] for example in data_set]
unique_vals = set(feat_values)
for value in unique_vals:
sub_labels = labels[:]
        my_tree[best_feature_label][value] = create_tree(split_data_set(data_set, best_feature, value), sub_labels)
return my_tree
my_data, labels = create_data_set()
my_tree = create_tree(my_data, labels)
print(my_tree)
|
[
"wkwinchi45@icloud.com"
] |
wkwinchi45@icloud.com
|
4597dc33d08af74ebcb534d0e8370982daa1bc0e
|
a970773ac578c5a73b6d67a5d1915aca01b59262
|
/Instance_Class_Static_MethodsAll.py
|
a0591ad126cd44942b6457e9cbf629c6d74cb796
|
[] |
no_license
|
amitagrahari2512/Python-OOPS-Basics
|
01baeb130df9ba55e328ff8d56a86ba72e1451ba
|
cf1889b6c013b3201d1e08f14035687e2783400e
|
refs/heads/master
| 2020-12-21T04:41:26.191797
| 2020-01-29T18:53:50
| 2020-01-29T18:53:50
| 236,310,461
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
print("We can use same name as instance variable and class variable , but we need not to use it"
"because it is confusing")
print("We can call class method and static method with object also.")
print("Here we use school as as Instance Variable as well as Class Variable")
class Student:
school = "IndianSchool"
def __init__(self,m1,m2,m3,school):
self.m1 = m1
self.m2 = m2
self.m3 = m3
self.school = school
def avg(self):
return (self.m1+self.m2+self.m3)/3
@classmethod
def schoolName(cls):
return cls.school
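    # note: cls.school reads the class attribute; self.school set in __init__ shadows it per instance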
@staticmethod
def schoolInfo():
print("This is a static method.Here we not work on instance variable or class variable")
s1 = Student(10,20,30,"A")
s2 = Student(100,200,300,"B")
print(s1.avg())
print("We can use same name as instance variable and class variable , but we need not to use it"
"because it is confusing")
print("We can call class method and static method with object also.")
print("Student.schoolName()",Student.schoolName())
print("s1.schoolName() : {} , s1.school : {}, ".format(s1.schoolName(),s1.school))
print("s2.schoolName() : {} , s2.school : {}, ".format(s2.schoolName(),s2.school))
Student.schoolInfo()
s1.schoolInfo()
s2.schoolInfo()
|
[
"amit.agrahari@nagarro.com"
] |
amit.agrahari@nagarro.com
|
4d2040cfd857462d9177e04a474cd0ec2c7dffc0
|
eaeb9a6ed7f55512567f77acb26a41c9ac33d88f
|
/9/test_day9.py
|
65d6c3f6fec67e023a5950c32a9bf28b5c7a67b2
|
[] |
no_license
|
apriljgranzow/advent-of-code-2017
|
07f066a1904c8631bf18c63b84b0daeadf275bcc
|
5717372e0e37c799baa57659aba63ec22650183d
|
refs/heads/master
| 2020-04-21T00:23:11.234313
| 2019-03-09T04:10:14
| 2019-03-09T04:10:14
| 169,194,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
import solution as s
def test_part_one():
examples = {
r'{}' : 1,
r'{{{}}}' : 6,
r'{{},{}}' : 5,
r'{{{},{},{{}}}}' : 16,
r'{<a>,<a>,<a>,<a>}' : 1,
r'{{<ab>},{<ab>},{<ab>},{<ab>}}' : 9,
r'{{<!!>},{<!!>},{<!!>},{<!!>}}' : 9,
r'{{<a!>},{<a!>},{<a!>},{<ab>}}' : 3
}
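    # each raw stream maps to its expected total group score (AoC 2017 day 9)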
for elem in examples:
assert s.part_one(elem) == examples[elem]
def test_part_two():
examples = {
'<>' : 0,
'<random characters>' : 17,
'<<<<>' : 3,
'<{!>}>' : 2,
'<!!>' : 0,
'<!!!>>' : 0,
'<{o"i!a,<{i<a>' : 10
}
for elem in examples:
assert s.part_two(elem) == examples[elem]
|
[
"4971311+apriljgranzow@users.noreply.github.com"
] |
4971311+apriljgranzow@users.noreply.github.com
|
da69b3e7f67a20803ea74942b5394c331f4ef4ab
|
cf3eee09420844f3d42579c74a6fe0428ed5daa9
|
/quickparse/__init__.py
|
961b0fecf938cb5b7cce718a783d977314dc5f5d
|
[
"MIT"
] |
permissive
|
silkyanteater/quickparse
|
c29b1fe47734661e6fbbf92787ffdf0a2db61123
|
f2d8edee0caee38a8392abc38b5119c6a009ff99
|
refs/heads/master
| 2021-07-03T06:26:00.083976
| 2021-02-27T16:08:18
| 2021-02-27T16:10:32
| 222,091,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 107
|
py
|
__version__ = '0.9.0'
__license__ = "MIT"
__author__ = 'silkyanteater'
from .quickparse import QuickParse
|
[
"cyclopesrufus@gmail.com"
] |
cyclopesrufus@gmail.com
|
26386fef2eaa634783368e84dcf3465711c3750b
|
265af0af6ef3e99ae07aa59aadf9ee1f59785667
|
/samples/openapi3/client/3_0_3_unit_test/python/unit_test_api/paths/response_body_post_not_response_body_for_content_types/post.pyi
|
820f6bcd9afba06d77aebe1111320bb40f03f800
|
[
"Apache-2.0"
] |
permissive
|
ingenovishealth/openapi-generator
|
0a936b884f7554639dd73eb389a14898101b819a
|
22beeaac4e9edac5c886a6b2078afbacfeaef102
|
refs/heads/master
| 2023-02-09T03:17:32.449794
| 2023-02-01T04:27:27
| 2023-02-01T04:27:27
| 218,272,372
| 0
| 0
|
Apache-2.0
| 2023-09-12T14:00:27
| 2019-10-29T11:35:27
|
Java
|
UTF-8
|
Python
| false
| false
| 8,388
|
pyi
|
# coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from unit_test_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
class SchemaFor200ResponseBodyApplicationJson(
schemas.ComposedSchema,
):
class MetaOapg:
not_schema = schemas.IntSchema
def __new__(
cls,
*_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ],
_configuration: typing.Optional[schemas.Configuration] = None,
**kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
) -> 'SchemaFor200ResponseBodyApplicationJson':
return super().__new__(
cls,
*_args,
_configuration=_configuration,
**kwargs,
)
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
@typing.overload
def _post_not_response_body_for_content_types_oapg(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def _post_not_response_body_for_content_types_oapg(
self,
skip_deserialization: typing_extensions.Literal[True],
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def _post_not_response_body_for_content_types_oapg(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def _post_not_response_body_for_content_types_oapg(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
"""
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
used_path = path.value
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
response = self.api_client.call_api(
resource_path=used_path,
method='post'.upper(),
headers=_headers,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class PostNotResponseBodyForContentTypes(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
@typing.overload
def post_not_response_body_for_content_types(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def post_not_response_body_for_content_types(
self,
skip_deserialization: typing_extensions.Literal[True],
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def post_not_response_body_for_content_types(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def post_not_response_body_for_content_types(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._post_not_response_body_for_content_types_oapg(
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForpost(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
@typing.overload
def post(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def post(
self,
skip_deserialization: typing_extensions.Literal[True],
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def post(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def post(
self,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._post_not_response_body_for_content_types_oapg(
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
|
[
"noreply@github.com"
] |
ingenovishealth.noreply@github.com
|
43fbcc6ea2fcc49ceabaa059322ca7f75d31c926
|
1c4271cd8bbe50dbc490166e38dc21754f81bfc8
|
/api/projects/models.py
|
08c7fbe8ae65fc2c98c4c12b6b06fce3e86a2ee8
|
[] |
no_license
|
urbrob/django-graphql-template
|
96b9873612992a4f6fbd851a4c27e4414c59a458
|
b9106e220400cfbc09bc240b60e6f8fc2506f6c2
|
refs/heads/master
| 2020-05-20T05:10:30.343821
| 2019-05-10T21:13:21
| 2019-05-10T21:13:21
| 185,396,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
from django.db import models
from users.models import User
class Membership(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='memberships')
project = models.ForeignKey('Project', on_delete=models.CASCADE, related_name='memberships')
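# Membership is an explicit through table, so extra per-membership fields (e.g. a role) could be added later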
class Project(models.Model):
users = models.ManyToManyField(User, through=Membership, related_name='projects')
name = models.CharField(max_length=128)
|
[
"noreply@github.com"
] |
urbrob.noreply@github.com
|
4c0c384fe274f0ac2032a6b13cff9f90adcaf2da
|
193109693cfabc95bb929846c3050a979b468a54
|
/_solutions/spaceShooter/src/ship.py
|
0c9d6d40730331d136271887c708f50579497a01
|
[] |
no_license
|
jtmcg/game-design-course-code
|
a84da963a4ccfc40d84e6cf4a43d0921c4aeedc0
|
cba3e4c0f34c566702af580883f023156697acca
|
refs/heads/master
| 2021-08-22T03:22:35.905407
| 2021-07-24T01:59:19
| 2021-07-24T01:59:19
| 215,602,586
| 0
| 19
| null | 2021-07-06T00:58:19
| 2019-10-16T17:12:03
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,369
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 22 14:17:47 2019
@author: J. Tyler McGoffin
"""
import pygame
class Ship:
def __init__(self, WINDOWWIDTH, WINDOWHEIGHT): #These are not global variables here but arguments passed to this class.
self.image = pygame.transform.scale(pygame.image.load("ArtAssets7/ship.png"), (80,80)) #Resize as loaded
self.rect = self.image.get_rect()
self.leftLimit = 10
self.rightLimit = WINDOWWIDTH - 10
self.topLimit = 10
self.bottomLimit = WINDOWHEIGHT - 10
self.moveSpeed = 5 #pixels per frame
self.setStartPos()
def move(self, left, right, up, down):
if left and self.rect.left >= self.leftLimit:
self.rect.left -= self.moveSpeed
if right and self.rect.right <= self.rightLimit:
self.rect.left += self.moveSpeed
if up and self.rect.top >= self.topLimit:
self.rect.top -= self.moveSpeed
if down and self.rect.bottom <= self.bottomLimit:
self.rect.top += self.moveSpeed
def setStartPos(self):
#spawns ship in start position. Using Center Point for control
xCoord = (self.rightLimit + self.leftLimit) / 2
yCoord = self.bottomLimit - self.rect.height/2
self.rect.center = (xCoord, yCoord)
|
[
"jtmcgoffin@gmail.com"
] |
jtmcgoffin@gmail.com
|
ec5283a4e7489ec15806353bf65b1188b13172ac
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/detection/GFocalV2/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py
|
5b57cd1a5a4962e965c8d00442b67d775370c2aa
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 706
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
# learning policy
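# step decay: drop the learning rate at epochs 16 and 22 of the 24-epoch (2x) schedule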
lr_config = dict(step=[16, 22])
total_epochs = 24
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
8e6105dd44eadbc46d92fa738c6aa7b756063a5e
|
7bee97304a639d4669e07c01fbd0d7a41c81b1fa
|
/ch2sql/tools/hit_ltp.py
|
109c6969ba3aa6e695b63c59f4e3f4ae312136f2
|
[] |
no_license
|
we1l1n/ch2sql
|
f038825b95b1ca50d26438eb45ae5b68f158aabb
|
5b7035c56c96fdc78b0ee2adadeab018c1c5b0fc
|
refs/heads/master
| 2020-04-17T08:26:13.092545
| 2018-04-20T16:04:32
| 2018-04-20T16:04:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,987
|
py
|
import jieba
import os
class LtpParser(object):
"""
    Wrapper around the HIT LTP toolkit. After a git clone the code here
    needs editing: point ltp_path at the directory where the LTP model
    files are stored on the local machine. The model files are large and
    cannot be uploaded with the project.
"""
default_ltp_api_key = "s1n5k7M9i5zXTqamAy3V1U7CkwskygraFX5fpKyH"
    # edit this path for your local machine
ltp_path = "/Users/alexsun/codes/nlp/ltp_practice/ltp_model/ltp_data"
@staticmethod
def cutting(sentence, table=None):
"""
        :param sentence: the input query sentence
        :param table: the Table object for the database table
        :return: the word-segmentation result
"""
        # extend jieba's dictionary with the table's column names and sample values so table keywords are never split apart
if table is None:
return list(jieba.cut(sentence))
suggest_words = list(table.get_column_names())
for column in table:
if column.data_type != "text":
continue
tmp = column.values_sample(100)
for v in tmp:
suggest_words.append(v)
for word in suggest_words:
if word != " " and word is not None and type(word) == str:
jieba.suggest_freq(word, True)
ans = list(jieba.cut(sentence, HMM=True))
return ans
@staticmethod
def pos_tagging(cutting_list):
pos_model_path = os.path.join(LtpParser.ltp_path, 'pos.model')
from pyltp import Postagger
pos_tagger = Postagger()
pos_tagger.load(pos_model_path)
tags = pos_tagger.postag(cutting_list)
pos_tagger.release()
return tags
@staticmethod
def entity_recognize(cutting_list, tagging_list):
ner_model_path = os.path.join(LtpParser.ltp_path, 'ner.model')
from pyltp import NamedEntityRecognizer
recognizer = NamedEntityRecognizer()
recognizer.load(ner_model_path)
ne_tags = recognizer.recognize(cutting_list, tagging_list)
recognizer.release()
return ne_tags
@staticmethod
def dependency_parsing(cutting_list, tagging_list):
"""
        Dependency parsing.
        :param cutting_list: the word-segmentation list
        :param tagging_list: the POS-tag list
        :return: the dependency-parse result
"""
        # dependency parsing
par_model_path = os.path.join(LtpParser.ltp_path, 'parser.model')
from pyltp import Parser
parser = Parser()
parser.load(par_model_path)
arcs = parser.parse(cutting_list, tagging_list)
parser.release()
return arcs
@staticmethod
def getting_by_cloud(sentence, api_key=default_ltp_api_key, pattern='dp'):
"""
        Call the LTP language cloud for word segmentation, POS tagging,
        named entity recognition, etc.
        http://api.ltp-cloud.com/
        (used during testing; the cloud API cannot see the updated local
        segmentation dictionary)
        :param sentence:
        :param api_key:
        :param pattern: ws: word segmentation, pos: POS tagging, ner: named entity recognition,
            dp: dependency parsing, srl: semantic role labeling
:return:
"""
        import urllib.request
        import urllib.parse
url_get_base = u"http://api.ltp-cloud.com/analysis/?"
args = {
'api_key': api_key,
'text': sentence,
'pattern': pattern,
'format': 'plain'
}
url = url_get_base + urllib.parse.urlencode(args)
# print(url)
result = urllib.request.urlopen(url)
content = result.read().strip()
return content.decode('utf-8')
@staticmethod
def getting_all_by_cloud(sentence):
patterns = ['ws', 'pos', 'ner', 'dp', 'srl']
for pattern in patterns:
result = LtpParser.getting_by_cloud(sentence, pattern=pattern)
print('pattern type:{}'.format(pattern))
print(result)
if __name__ == "__main__":
test = LtpParser
s = "查询大于1000的销售员"
print(list(test.pos_tagging(test.cutting(s))))
|
[
"suncun1995@gmail.com"
] |
suncun1995@gmail.com
|
0a50c8c89a18036424915cec2fce88ceb60f74c0
|
a2f8c4048edd7826b2584f6aea0e121eb68f57e3
|
/003_abstract_factory/abstract_factory.py
|
e67a5c74d9d7c10ef289c9f554a69ab98ad85237
|
[] |
no_license
|
kelvmg/python-oop-design-patterns
|
9aca038319f9fa10f64ad1e2715eb06b2e583910
|
dae397f8f47ca9763a15a2698d00627fd55b5ade
|
refs/heads/master
| 2020-03-29T17:49:31.780428
| 2018-07-29T06:25:02
| 2018-07-29T06:25:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,282
|
py
|
# === abstract shape classes ===
class Shape2DInterface:
def draw(self): pass
class Shape3DInterface:
def build(self): pass
class TouchInterface:
def touch(self): pass
# === concrete shape classes ===
class Circle(Shape2DInterface):
def draw(self):
print('Circle.draw')
class Square(Shape2DInterface):
def draw(self):
print('Square.draw')
class Sphere(Shape3DInterface):
def build(self):
print('Sphere.build')
class Slide(TouchInterface, Shape3DInterface):
def touch(self):
print('touching the Slide')
def build(self):
print('building the Slide...')
class Panel(TouchInterface, Shape2DInterface):
def touch(self):
print('touching the Panel')
def draw(self):
print('Drawing the Panel')
class Cube(Shape3DInterface):
def build(self):
print('Cube.build')
# === Abstract shape factory ===
class ShapeFactoryInterface:
    @staticmethod
    def getShape(sides): pass
# === Concrete shape factories ===
class Shape2DFactory(ShapeFactoryInterface):
@staticmethod
def getShape(sides):
if sides == 1:
return Circle()
if sides == 4:
return Square()
        assert 0, 'Bad 2D shape creation: shape factory not defined for {} sides'.format(sides)
class Shape3DFactory(ShapeFactoryInterface):
@staticmethod
def getShape(sides):
""" here, sides refers to the number faces"""
if sides == 1:
return Sphere()
if sides == 6:
return Cube()
        assert 0, 'Bad 3D shape creation: shape factory not defined for {} sides'.format(sides)
class ShapeTouchFactory(ShapeFactoryInterface):
@staticmethod
def getShape(sides):
if sides == 1:
return Slide()
if sides == 2:
return Panel()
        assert 0, 'Bad touch request: shape factory not defined for {} sides'.format(sides)
shape2DFactory = Shape2DFactory()
circle = shape2DFactory.getShape(1)
print(circle)
circle.draw()
shape3DFactory = Shape3DFactory()
sphere = shape3DFactory.getShape(1)
print(sphere)
sphere.build()
touchFactory = ShapeTouchFactory()
slide = touchFactory.getShape(1)
slide.touch()
slide.build()
panel = touchFactory.getShape(2)
panel.touch()
panel.draw()
|
[
"evgeniy.poznyak@gmail.com"
] |
evgeniy.poznyak@gmail.com
|
7008b57841c98a0fce3652dbb38c176adda03cb3
|
563763d12d6cee291ea13291b165a57a5d13b4a1
|
/conexiones_plot.py
|
332c6d957e9e6b76cc6d03168efa5ef01c31b8f2
|
[] |
no_license
|
evalvarez12/qubits-CUDA
|
e228462d35ac6aff7fa18d0bb329421d24535e82
|
22cc1558c56dc3a3a4c56dd5b2e82584b80fdcc2
|
refs/heads/master
| 2021-01-10T18:11:18.320208
| 2015-12-23T16:43:15
| 2015-12-23T16:43:15
| 31,934,735
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
from pylab import *
lines=['o-','<-','p-','v-','*-','^-','D-','h-','>-','H-','d-','x-','s-','+-']
gammas2=[]
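# one gamma per curve: gamma = 0.75 * 6/sqrt(i), rounded to 3 decimals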
for i in range(1,15):
gammas2+=[round(.75*(6./sqrt(double(i))),3)]
#gammas2=[.75,.75,.75,.75,.75,.75,.75,.75,.75,.75,.75,.75,.75,.75]
l=0
g=0
for i in range(1,15) :
a=loadtxt("pur_conexiones-max-2-"+str(i)+".dat")
plot(a,lines[l],markevery=50,label="$"+str(i)+"$ $ \gamma= "+str(gammas2[g])+"$")
g+=1
l+=1
legend()
xlabel("$t$")
ylabel("$P$")
show()
|
[
"evalvarez12@gmail.com"
] |
evalvarez12@gmail.com
|
44b87f3143af69381447ea5ce8faf1a769439035
|
ffadf9541d01cf9af20c419759d48b1eb01bfd35
|
/pachong/PCdemo1/day13/quanbenspider/quanbenspider/pipelines.py
|
ca48a3c9f540a7f32550cc34dd2c6201c8bb0dcd
|
[] |
no_license
|
1987617587/lsh_py
|
b1bb1016eaafcba03bbc4a5310c1db04ae227af4
|
80eb5175cd0e5b3c6c5e2ebb906bb78d9a8f9e0d
|
refs/heads/master
| 2021-01-02T05:14:31.330287
| 2020-06-20T05:18:23
| 2020-06-20T05:18:23
| 239,498,994
| 2
| 1
| null | 2020-06-07T23:09:56
| 2020-02-10T11:46:47
|
Python
|
UTF-8
|
Python
| false
| false
| 903
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import codecs
import csv
# scrape each novel's name, url, category, author, update time and status
class QuanbenspiderPipeline(object):
def __init__(self):
'''
        Initialize: open the csv file and write the header row.
'''
self.file = codecs.open('quanben.csv', 'w', encoding='utf-8')
self.wr = csv.writer(self.file)
        self.wr.writerow(['小说名称', '链接', '类别', '作者', '更新时间', '状态'])  # novel name, link, category, author, update time, status
def process_item(self, item, spider):
self.wr.writerow(
[item['name'], item['url'], item['category'], item['author'], item['update_time'], item['status']])
return item
def close_spider(self, spider):
self.file.close()
|
[
"1987617587@qq.com"
] |
1987617587@qq.com
|
286c67711c42fd4b7334736bdf01b59616956cdd
|
3869cbd5ee40e2bab5ca08b80b48115a7b4c1d5a
|
/Python-3/basic_examples/strings/raw_strings.py
|
c40bdec7c3805cc503e499724f940fd1e7e56ab2
|
[
"MIT"
] |
permissive
|
Tecmax/journaldev
|
0774c441078816f22edfd68286621493dd271803
|
322caa8e88d98cfe7c71393bcd2a67cf77368884
|
refs/heads/master
| 2020-07-08T04:05:03.028015
| 2019-08-12T09:17:48
| 2019-08-12T09:17:48
| 203,559,030
| 0
| 1
|
MIT
| 2019-08-21T10:13:47
| 2019-08-21T10:13:47
| null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
s = 'Hi\nHello'
print(s)
raw_s = r'Hi\nHello'
print(raw_s)
# in a raw string a backslash still escapes quotes (' and ") but the backslash stays in the result
# a raw string can't be a single \ and can't end with an odd number of backslashes
raw_s = r'\''
print(raw_s)
raw_s = r'ab\\'
print(raw_s)
raw_s = R'\\\"' # prefix can be 'R' or 'r'
print(raw_s)
|
[
"pankaj.0323@gmail.com"
] |
pankaj.0323@gmail.com
|
de62eaf7c965d41ffa16ea1de8bf158d0b284ce5
|
c03be2e7744a55699d714281eb088449c55b2fa7
|
/pytorch_tutorial/evaluation/evaluator.py
|
7f564ae119e961d6b434d7e8bf5d138823b46043
|
[] |
no_license
|
kanada0727/pytorch-tutorial
|
75cde755ce949d8b06e441b6cfae69c457bafb57
|
8a9270f319eb94bdaaffcde0f1760e0ce9f0f885
|
refs/heads/master
| 2023-03-09T06:23:21.430697
| 2021-02-23T14:42:24
| 2021-02-23T14:42:24
| 341,565,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 846
|
py
|
import torch
import torch.nn.functional as F
from .evaluation_result import EvaluationResult
from .inference_result import InferenceResult
class Evaluator:
@classmethod
def run(cls, result: InferenceResult) -> EvaluationResult:
predictions = cls._predict_label(result.hidden_values)
accuracy = cls._calc_accuracy(predictions, result.labels)
loss = F.nll_loss(result.hidden_values, result.labels)
return EvaluationResult(
predictions=predictions,
accuracy=accuracy,
loss=loss,
**result.to_dict(),
)
    @staticmethod
    def _predict_label(hidden_values):
        # argmax over the class dimension
        return torch.max(hidden_values, 1).indices
@staticmethod
def _calc_accuracy(predictions, targets):
acc = torch.sum(predictions == targets.data) / targets.shape[0]
return acc
|
[
"kanada@pcl.cs.waseda.ac.jp"
] |
kanada@pcl.cs.waseda.ac.jp
|
5978a0ea9a20cdbbfe82ea47009adb0e11d194d1
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/dockerized-gists/297c01d677ddd711c0ebf0da56c10879/snippet.py
|
d84a511da730479880f9f7de09dc84a48c7552c9
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 4,540
|
py
|
#import gevent
#from ghost import Ghost, Session
import requests
import re
import json
import time
import socket
#from gevent import monkey; monkey.patch_all()
timeout = 90
socket.setdefaulttimeout(timeout)
#from ghost import Ghost
#ghost = Ghost()
class toolCli(object):
def __init__(self):
pass
@staticmethod
def check_port(addr, port, retry=3):
while True:
r = requests.get('http://120.26.91.33:5000/check?ip=%s&port=%s' % (addr, port), timeout=10)
            if 'bad' in r.text:
if retry > 0:
retry -= 1
continue
return False
return True
class OpCli(object):
def __init__(self, cookie):
self.session = requests.session()
self.session.headers['user-agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
self.h = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
self.cookie = cookie
self.session.headers['cookie'] = cookie
def manage(self, vm_id):
r = self.session.get('https://panel.op-net.com/onecloud/%s/manage' % vm_id)
if 'IP: </b>' not in r.text:
return None
return {
'ip': re.search('IP: </b>([\d.]+)', r.text).group(1),
'csrf_token': re.search('"([a-z0-9]{40})"', r.text).group(1)
}
def destroy(self, vm_id, csrf_token):
data = {
'server_id': vm_id,
'action': 'destroy_vm',
'csrf_token': csrf_token
}
r = self.session.post('https://panel.op-net.com/src/onecloud_manager.php', data)
return 'success' in r.text
def open(self, vm_id, plan='Plan 01', location='14', os='linux-ubuntu-16.04-server-x86_64-min-gen2-v1'):
# get csrf_token
data = {
'x': 25,
'y': 21,
'vm_id': vm_id
}
r = self.session.post('https://panel.op-net.com/cloud/open', data, timeout=timeout)
r = re.search('"([a-z0-9]{40})"', r.text)
if not r:
raise Exception('open vm error: fetch token failed.')
# create vm
data = {
'plan': plan,
'location': location,
'os': os,
'vm_id': vm_id,
'hostname': 'a.b.c',
'root': '',
'csrf_token': r.group(1)
}
r = self.session.post('https://panel.op-net.com/cloud/open', data, timeout=timeout, allow_redirects=False)
if r.status_code != 302:
r = re.search('class="message error">([\S\s]+)?</h4>', r.text)
if not r:
                raise Exception('open vm error: unknown reason.')
raise Exception('open vm error: %s' % r.group(1).strip())
######################################
vm_id = '70550'
plan = 'Plan 01'
location = '13'
cookie = 'id=0b52a5ab6c1969a254597047ce; _gat=1; _ga=GA1.2.2084353983.1511673858; _gid=GA1.2.496062921.1511794137; tz=8'
######################################
done = False
if toolCli.check_port('58.222.18.30', 80) is not True: print('tool api test failed')
def buybuybuy(tid):
cli = OpCli(cookie)
t = 3
global done
while True:
try:
if done:
return
print('check vm')
m = cli.manage(vm_id)
if m:
# if vm created
print('tid %s vm already created ip:%s' % (tid, m['ip']))
if not toolCli.check_port(m['ip'], 22):
# ip block
print('vm ip:%s blocked begin destroy ...' % m['ip'])
cli.destroy(vm_id, m['csrf_token'])
time.sleep(30)
continue
print('vm created success and ip not block by gfw.')
done = True
return
print('tid %s vm open ...' % tid)
cli.open(vm_id, plan=plan, location=location)
print('vm open successed wait vm init.') # open success wait created
time.sleep(2 * 60)
except Exception as e:
time.sleep(60)
while True:
try:
time.sleep(10)
#cli.bypass2()
break
except:
time.sleep(60)
buybuybuy(1)
#gevent.joinall([gevent.spawn(buybuybuy, i) for i in range(1)])
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
6b04ce943aa3324f00cd86f969139f23f1c45f7e
|
3fc1e29931c0df8cc3a43e9cb7fa5de035c8aa51
|
/lesson_2/1_6.py
|
2f2b0e477f2ac3a5e2de96329727abb0dd387dea
|
[] |
no_license
|
KseniaTrox/python
|
8e2cba9ab9cd6d9a039466f3b4d925d049b20772
|
75b652e24986ab9fd5f28e7a1bcb97600b969a53
|
refs/heads/main
| 2023-02-01T05:47:45.136081
| 2020-12-17T15:23:57
| 2020-12-17T15:23:57
| 310,033,796
| 1
| 1
| null | 2020-11-09T18:48:22
| 2020-11-04T14:55:10
|
Python
|
UTF-8
|
Python
| false
| false
| 766
|
py
|
n_list = []
p_list = []
s_list = []
a_list = []
i = 0
a = int(input("Введите кол-во товарных позиций "))
while i <= a - 1:
if i <= a - 1:
n = input("Введите название")
p = input("Введите цену ")
s = int(input("Введите количество "))
n_list.append(n)
p_list.append(p)
s_list.append(s)
n_dict = {"название": n_list}
p_dict = {"цена": p_list}
s_dict = {"количество": s_list}
b = tuple([i + 1, {"название": n, "цена": p, "количество ед": s }])
a_list.append(b)
i = i + 1
else:
for z in a_list:
print(z)
print(n_dict)
print(p_dict)
print(s_dict)
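# Example run (one item: name "tea", price "10", quantity 2) prints:
# (1, {'name': 'tea', 'price': '10', 'quantity': 2})
# {'name': ['tea']}
# {'price': ['10']}
# {'quantity': [2]}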
|
[
"troxksenija@gmail.com"
] |
troxksenija@gmail.com
|
c74b44993f4b8a578758c9b278a553f18464054c
|
42a8553f596fba8c438b9d4576f6015b9e8e5135
|
/tpRigToolkit/tools/musclespline/core/model.py
|
0b403831a9bfdc7d395c4e07df5ccd37988f6e61
|
[
"MIT"
] |
permissive
|
liangjin2007/tpRigToolkit-tools-musclespline
|
31bffce2bd450b79cedbcb1930decd2510730577
|
6a53080d8a4d866105fac44c2518769293f46c05
|
refs/heads/master
| 2023-05-18T16:01:25.864254
| 2021-03-26T04:08:58
| 2021-03-26T04:08:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,352
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Muscle Spline widget model class implementation
"""
from __future__ import print_function, division, absolute_import
from Qt.QtCore import QObject, Signal
class MuscleSplineModel(QObject, object):
nameChanged = Signal(str)
sizeChanged = Signal(float)
insertionControlsChanged = Signal(int)
controlTypeChanged = Signal(str)
drivenJointsChanged = Signal(int)
drivenTypeChanged = Signal(str)
constraintMidControlsChanged = Signal(bool)
lockControlsScaleChanged = Signal(bool)
lockJiggleAttributesChanged = Signal(bool)
enableAdvancedChanged = Signal(bool)
controlSuffixChanged = Signal(str)
jointSuffixChanged = Signal(str)
groupSuffixChanged = Signal(str)
drivenSuffixChanged = Signal(str)
createSetsChanged = Signal(bool)
mainMuscleSetNameChanged = Signal(str)
muscleSetSuffixChanged = Signal(str)
muscleSplineNameChanged = Signal(str)
controlsGroupSuffixChanged = Signal(str)
jointsGroupSuffixChanged = Signal(str)
rootGroupSuffixChanged = Signal(str)
autoGroupSuffixChanged = Signal(str)
def __init__(self):
super(MuscleSplineModel, self).__init__()
self._insertion_types = ['cube', 'circleY', 'null']
self._driven_types = ['joint', 'circleY', 'null']
self._name = 'Char01_Spine'
self._size = 1.0
self._insertion_controls = 3
self._control_type = self._insertion_types[0]
self._driven_joints = 5
self._driven_type = self._driven_types[0]
self._constraint_mid_controls = False
self._lock_controls_scale = True
self._lock_jiggle_attributes = False
self._enable_advanced = False
self._control_suffix = 'ctrl'
self._joint_suffix = 'jnt'
self._group_suffix = 'grp'
self._driven_suffix = 'drv'
self._create_sets = True
self._main_muscle_set_name = 'setMUSCLERIGS'
self._muscle_set_suffix = 'RIG'
self._muscle_spline_name = 'muscleSpline'
self._controls_group_suffix = 'ctrls'
self._joints_group_suffix = 'joints'
self._root_group_suffix = 'root'
self._auto_group_suffix = 'auto'
@property
def insertion_types(self):
return self._insertion_types
@property
def driven_types(self):
return self._driven_types
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = str(value)
self.nameChanged.emit(self._name)
@property
def size(self):
return self._size
@size.setter
def size(self, value):
self._size = float(value)
self.sizeChanged.emit(self._size)
@property
def insertion_controls(self):
return self._insertion_controls
@insertion_controls.setter
def insertion_controls(self, value):
self._insertion_controls = int(value)
self.insertionControlsChanged.emit(self._insertion_controls)
@property
def control_type(self):
return self._control_type
@control_type.setter
def control_type(self, value):
self._control_type = str(value)
self.controlTypeChanged.emit(self._control_type)
@property
def driven_joints(self):
return self._driven_joints
@driven_joints.setter
def driven_joints(self, value):
self._driven_joints = int(value)
self.drivenJointsChanged.emit(self._driven_joints)
@property
def driven_type(self):
return self._driven_type
@driven_type.setter
def driven_type(self, value):
self._driven_type = str(value)
self.drivenTypeChanged.emit(self._driven_type)
@property
def constraint_mid_controls(self):
return self._constraint_mid_controls
@constraint_mid_controls.setter
def constraint_mid_controls(self, flag):
self._constraint_mid_controls = bool(flag)
self.constraintMidControlsChanged.emit(self._constraint_mid_controls)
@property
def lock_controls_scale(self):
return self._lock_controls_scale
@lock_controls_scale.setter
def lock_controls_scale(self, flag):
self._lock_controls_scale = bool(flag)
self.lockControlsScaleChanged.emit(self._lock_controls_scale)
@property
def lock_jiggle_attributes(self):
return self._lock_jiggle_attributes
@lock_jiggle_attributes.setter
def lock_jiggle_attributes(self, flag):
self._lock_jiggle_attributes = bool(flag)
self.lockJiggleAttributesChanged.emit(self._lock_jiggle_attributes)
@property
def enable_advanced(self):
return self._enable_advanced
@enable_advanced.setter
def enable_advanced(self, flag):
self._enable_advanced = bool(flag)
self.enableAdvancedChanged.emit(self._enable_advanced)
@property
def control_suffix(self):
return self._control_suffix
@control_suffix.setter
def control_suffix(self, value):
self._control_suffix = str(value)
self.controlSuffixChanged.emit(self._control_suffix)
@property
def joint_suffix(self):
return self._joint_suffix
@joint_suffix.setter
def joint_suffix(self, value):
self._joint_suffix = str(value)
self.jointSuffixChanged.emit(self._joint_suffix)
@property
def group_suffix(self):
return self._group_suffix
@group_suffix.setter
def group_suffix(self, value):
self._group_suffix = str(value)
self.groupSuffixChanged.emit(self._group_suffix)
@property
def driven_suffix(self):
return self._driven_suffix
@driven_suffix.setter
def driven_suffix(self, value):
self._driven_suffix = str(value)
self.drivenSuffixChanged.emit(self._driven_suffix)
@property
def create_sets(self):
return self._create_sets
@create_sets.setter
def create_sets(self, flag):
self._create_sets = bool(flag)
self.createSetsChanged.emit(self._create_sets)
@property
def main_muscle_set_name(self):
return self._main_muscle_set_name
@main_muscle_set_name.setter
def main_muscle_set_name(self, value):
self._main_muscle_set_name = str(value)
self.mainMuscleSetNameChanged.emit(self._main_muscle_set_name)
@property
def muscle_set_suffix(self):
return self._muscle_set_suffix
@muscle_set_suffix.setter
def muscle_set_suffix(self, value):
self._muscle_set_suffix = str(value)
self.muscleSetSuffixChanged.emit(self._muscle_set_suffix)
@property
def muscle_spline_name(self):
return self._muscle_spline_name
@muscle_spline_name.setter
def muscle_spline_name(self, value):
self._muscle_spline_name = str(value)
self.muscleSplineNameChanged.emit(self._muscle_spline_name)
@property
def controls_group_suffix(self):
return self._controls_group_suffix
@controls_group_suffix.setter
def controls_group_suffix(self, value):
self._controls_group_suffix = str(value)
self.controlsGroupSuffixChanged.emit(self._controls_group_suffix)
@property
def joints_group_suffix(self):
return self._joints_group_suffix
@joints_group_suffix.setter
def joints_group_suffix(self, value):
self._joints_group_suffix = str(value)
self.jointsGroupSuffixChanged.emit(self._joints_group_suffix)
@property
def root_group_suffix(self):
return self._root_group_suffix
@root_group_suffix.setter
def root_group_suffix(self, value):
self._root_group_suffix = str(value)
self.rootGroupSuffixChanged.emit(self._root_group_suffix)
@property
def auto_group_suffix(self):
return self._auto_group_suffix
@auto_group_suffix.setter
def auto_group_suffix(self, value):
self._auto_group_suffix = str(value)
self.autoGroupSuffixChanged.emit(self._auto_group_suffix)
def get_properties_dict(self):
properties_dict = dict()
        properties = [prop_name for prop_name, obj in self.__class__.__dict__.items() if isinstance(obj, property)]
for property_name in properties:
properties_dict[property_name] = getattr(self, property_name)
return properties_dict
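# Usage sketch: each setter coerces its value and emits the matching signal,
# so a view can bind to e.g. sizeChanged; get_properties_dict() snapshots
# every property.
# model = MuscleSplineModel()
# model.size = 2.0                    # emits sizeChanged(2.0)
# print(model.get_properties_dict())  # {'name': 'Char01_Spine', 'size': 2.0, ...}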
|
[
"tpovedatd@gmail.com"
] |
tpovedatd@gmail.com
|
9ad7a30ae6326a0aef3ccb5a8131813818356190
|
3351c699aab0be1e63bd5a420476d3e6524977cd
|
/djangonautic/articles/urls.py
|
a80948ffb86ca348836b8791362dffe9c4df9398
|
[] |
no_license
|
liucaihs/django-djangonauts
|
a0fc88db1008827193d2ac6a43c7f46190371158
|
337302b9a6f0eebd14149ba3e2824d0e340571ea
|
refs/heads/master
| 2021-01-24T04:20:11.730769
| 2018-02-26T03:50:38
| 2018-02-26T03:50:38
| 122,931,272
| 1
| 0
| null | 2018-02-26T07:34:50
| 2018-02-26T07:34:50
| null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
from django.urls import path
from . import views
app_name = 'articles'
urlpatterns = [
path('', views.article_list, name="list"),
path('create/', views.article_create, name="create"),
path('<slug>/', views.article_detail, name="detail"),
]
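# Note: 'create/' must stay above '<slug>/'; URL patterns are tried in order,
# and the slug converter would otherwise capture the literal path 'create/'.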
|
[
"thompson.phan@outlook.com"
] |
thompson.phan@outlook.com
|
130730c6f8a0fa94412c76922b9d4a89b233c57a
|
7136e5242793b620fa12e9bd15bf4d8aeb0bfe7a
|
/examples/adspygoogle/dfp/v201101/update_placements.py
|
73226320411276ac20a312150e1bfd8c75ee33f3
|
[
"Apache-2.0"
] |
permissive
|
hockeyprincess/google-api-dfp-python
|
534519695ffd26341204eedda7a8b50648f12ea9
|
efa82a8d85cbdc90f030db9d168790c55bd8b12a
|
refs/heads/master
| 2021-01-10T10:01:09.445419
| 2011-04-14T18:25:38
| 2011-04-14T18:25:38
| 52,676,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,942
|
py
|
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates all placements to allow for AdSense targeting up to
the first 500. To determine which placements exist,
run get_all_placements.py."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.append(os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle.dfp.DfpClient import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service. By default, the request is always made against
# sandbox environment.
placement_service = client.GetPlacementService(
'https://sandbox.google.com', 'v201101')
inventory_service = client.GetInventoryService(
'https://sandbox.google.com', 'v201101')
# Get the root ad unit by statement.
root_ad_unit_id = inventory_service.GetAdUnitsByStatement(
{'query': 'WHERE parentId IS NULL LIMIT 1'})[0]['results'][0]['id']
# Create a statement to select first 500 placements.
filter_statement = {'query': 'LIMIT 500'}
# Get placements by statement.
placements = placement_service.GetPlacementsByStatement(
filter_statement)[0]['results']
if placements:
# Update each local placement object by enabling AdSense targeting.
for placement in placements:
if not placement['targetingDescription']:
placement['targetingDescription'] = 'Generic description'
placement['targetingAdLocation'] = 'All images on sports pages.'
placement['targetingSiteName'] = 'http://code.google.com'
placement['isAdSenseTargetingEnabled'] = 'true'
# Update placements remotely.
placements = placement_service.UpdatePlacements(placements)
# Display results.
if placements:
for placement in placements:
ad_unit_ids = ''
if 'targetedAdUnitIds' in placement:
ad_unit_ids = ', '.join(placement['targetedAdUnitIds'])
print ('Placement with id \'%s\', name \'%s\', and AdSense targeting '
'enabled \'%s\' was updated.'
% (placement['id'], placement['name'],
placement['isAdSenseTargetingEnabled']))
  else:
    print('No placements were updated.')
else:
  print('No placements found to update.')
|
[
"api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138"
] |
api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138
|
7b53455d083b8f5047bf14ffb1c20ad89b9727f7
|
7adb37b4124de158cdc4e9b37a69e49139ae19d3
|
/dp6_protoype.py
|
0f7a72d3018b9577822602beecc669e249aa9c26
|
[] |
no_license
|
joysn/python-design-patterns
|
55c5c377cb570e598dcf43d577ac0aac594bb97e
|
41bd148663c16d0fa3e3dc9e4eb085a17c9f717a
|
refs/heads/master
| 2020-04-18T21:25:53.775690
| 2019-04-29T06:42:41
| 2019-04-29T06:42:41
| 167,765,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,362
|
py
|
from copy import deepcopy
class Car:
def __init__(self):
self.__wheels = list()
self.__engine = None
self.__body = None
def setBody(self,body):
self.__body = body
def attachWheel(self,wheel):
self.__wheels.append(wheel)
def setEngine(self, engine):
self.__engine = engine
    def specification(self):
print("body: %s" % self.__body.shape)
print("engine horsepower : %d" % self.__engine.horsepower)
print("tire size : %d \'" % self.__wheels[0].size)
def clone(self):
return deepcopy(self)
class Wheel:
size = None
class Engine:
horsepower = None
class Body:
shape = None
class Director:
__builder = None
def setBuilder(self,builder):
self.__builder = builder
def getCar(self):
car = Car()
# first get the body
body = self.__builder.getBody()
car.setBody(body)
engine = self.__builder.getEngine()
car.setEngine(engine)
i = 0
while i < 4:
wheel = self.__builder.getWheel()
car.attachWheel(wheel)
i += 1
return car
class BuilderInterface:
def getWheel(self): pass
def getBody(self): pass
def getEngine(self): pass
class JeepBuilder(BuilderInterface):
def getWheel(self):
wheel = Wheel()
wheel.size = 22
return wheel
def getEngine(self):
engine = Engine()
engine.horsepower = 400
return engine
def getBody(self):
body = Body()
body.shape = "SUV"
return body
class NissanBuilder(BuilderInterface):
def getWheel(self):
wheel = Wheel()
wheel.size = 16
return wheel
def getEngine(self):
engine = Engine()
engine.horsepower = 100
return engine
def getBody(self):
body = Body()
body.shape = "hatchback"
return body
d = Director()
d.setBuilder(JeepBuilder())
jeep = d.getCar()
jeep.specification()
jeep2 = jeep.clone()
jeep2.specification()
from copy import deepcopy
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
    def __str__(self):
        return "({}, {})".format(self.x, self.y)
def move(self, x, y):
self.x += x
self.y += y
def clone(self, move_x, move_y):
obj = deepcopy(self)
obj.move(move_x, move_y)
return obj
p0 = Point(0, 0)
print(p0)
p1 = p0.clone(1, 3)
print(p1)
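# The same director assembles a different car when given another builder:
d.setBuilder(NissanBuilder())
nissan = d.getCar()
nissan.specification()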
|
[
"noreply@github.com"
] |
joysn.noreply@github.com
|
6bf3c25eed6593fd3c32fea8f4a22d1f3e8f9f35
|
5ee470ec919605ba8b642d89e8ff64c7afe1d261
|
/pypmf/pmf/plot.py
|
3c3ed28f743f3427f4f30b3decea9d98fbd27efb
|
[] |
no_license
|
ageil/parallel-pmf
|
1e561b756d9dbb3e5b525c4d238c730b99cf6763
|
b1a42840fcded5bd86e4326984ca40fc4e5b52c5
|
refs/heads/main
| 2023-03-29T08:03:59.192114
| 2021-04-13T04:36:41
| 2021-04-13T04:36:41
| 345,513,479
| 0
| 1
| null | 2021-04-11T23:04:51
| 2021-03-08T02:59:47
|
C++
|
UTF-8
|
Python
| false
| false
| 7,155
|
py
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
from matplotlib import colors
from matplotlib.patches import FancyArrowPatch
from sklearn.manifold import TSNE
from mayavi import mlab
# Reference:
# https://stackoverflow.com/questions/58903383/fancyarrowpatch-in-3d
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, **kwargs):
FancyArrowPatch.__init__(self, posA=(0, 0), posB=(0, 0), **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, self.axes.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
def loss(df_loss, outdir, display=True):
    sns.set_theme()
    plt.figure(figsize=(10, 8))
    plt.suptitle('Log-likelihood')
    sns.lineplot(data=df_loss, x='Epoch', y='Loss', marker="o")
    # save the figure so the outdir argument is actually used
    plt.savefig(os.path.join(outdir, 'loss.png'))
    if display:
        plt.show()
def _norm_vector(vec):
return vec / np.linalg.norm(vec)
def _set_mlab_axis(scale, bg_color, axis_color):
len_axis = 2*scale
mlab.figure(bgcolor=bg_color, size=(int(scale * 0.8), int(scale * 0.6)))
mlab.plot3d([-len_axis, len_axis], [0, 0], [0, 0], color=axis_color, tube_radius=10.)
mlab.plot3d([0, 0], [-len_axis, len_axis], [0, 0], color=axis_color, tube_radius=10.)
mlab.plot3d([0, 0], [0, 0], [-len_axis, len_axis], color=axis_color, tube_radius=10.)
mlab.text3d(len_axis + 50, -50, +50, 'Attr_1', color=axis_color, scale=100.)
mlab.text3d(0, len_axis + 50, +50, 'Attr_2', color=axis_color, scale=100.)
mlab.text3d(0, -50, len_axis + 50, 'Attr_3', color=axis_color, scale=100.)
def tsne(df):
sns.set_theme()
fig = plt.figure(figsize=(80, 60))
ax = fig.add_subplot(132, projection='3d')
ax.scatter(df['attr_1'], df['attr_2'], df['attr_3'],
c=df['cluster'], s=10)
ax.set_title('t-SNE 3D', fontsize=30)
# legend
n_clusters = len(np.unique(df['cluster']))
cmap = sns.color_palette("rocket", n_colors=n_clusters)
markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o', linestyle='') for color in cmap]
ax.legend(markers, np.arange(n_clusters), fontsize='xx-large')
plt.show()
@mlab.show
def tsne_interactive(df, scale=1000, ratio=100):
labels = np.unique(df['cluster'])
n_clusters = len(labels)
cmap = sns.color_palette('viridis', n_colors=n_clusters)
white = colors.to_rgb('white')
black = colors.to_rgb('black')
# set axis
_set_mlab_axis(scale*2, bg_color=white, axis_color=black)
# scatter plot
for label, c in zip(labels, cmap):
df_label = df[df['cluster'] == label]
mlab.points3d(df_label['attr_1'] * ratio, df_label['attr_2'] * ratio, df_label['attr_3'] * ratio,
color=c, scale_factor=30)
def arrow(vectors):
fig = plt.figure(figsize=(20, 15))
ax = fig.add_subplot(111, projection='3d')
cmap = {0: 'r', 1: 'g', 2: 'b'}
# plot axes
x_axis = Arrow3D([-2, 2], [0, 0], [0, 0], lw=1, linestyle='dotted', arrowstyle="-", color='k')
y_axis = Arrow3D([0, 0], [-2, 2], [0, 0], lw=1, linestyle='dotted', arrowstyle="-", color='k')
z_axis = Arrow3D([0, 0], [0, 0], [-2, 2], lw=1, linestyle='dotted', arrowstyle="-", color='k')
ax.add_artist(x_axis)
ax.add_artist(y_axis)
ax.add_artist(z_axis)
# plot vectors
for vec in vectors:
vec = _norm_vector(vec)
g = Arrow3D([0, vec[0]], [0, vec[1]], [0, vec[2]],
mutation_scale=10, lw=1, arrowstyle="-|>", color=cmap[np.argmax(np.abs(vec))])
ax.add_artist(g)
ax.set_xlabel('attr_1', fontsize=20, labelpad=20)
ax.set_ylabel('attr_2', fontsize=20, labelpad=20)
ax.set_zlabel('attr_3', fontsize=20, labelpad=20)
ax.tick_params(labelsize=10)
ax.axes.set_xlim3d(-1, 1)
ax.axes.set_ylim3d(-1, 1)
ax.axes.set_zlim3d(-1, 1)
ax.set_title('Visualization of latent beta vectors (items)', fontsize=30)
plt.show()
def arrow_joint(vec_users, vec_items):
fig = plt.figure(figsize=(20, 15))
ax = fig.add_subplot(111, projection='3d')
# plot axes
x_axis = Arrow3D([-2, 2], [0, 0], [0, 0], lw=1, linestyle='dotted', arrowstyle="-", color='k')
y_axis = Arrow3D([0, 0], [-2, 2], [0, 0], lw=1, linestyle='dotted', arrowstyle="-", color='k')
z_axis = Arrow3D([0, 0], [0, 0], [-2, 2], lw=1, linestyle='dotted', arrowstyle="-", color='k')
ax.add_artist(x_axis)
ax.add_artist(y_axis)
ax.add_artist(z_axis)
# plot vectors
for vec in vec_users:
vec = _norm_vector(vec)
g = Arrow3D([0, vec[0]], [0, vec[1]], [0, vec[2]],
mutation_scale=10, lw=1, arrowstyle="-|>", color='r')
ax.add_artist(g)
for vec in vec_items:
vec = _norm_vector(vec)
g = Arrow3D([0, vec[0]], [0, vec[1]], [0, vec[2]],
mutation_scale=10, lw=1, arrowstyle="-|>", color='g')
ax.add_artist(g)
ax.axes.set_xlim3d(-1, 1)
ax.axes.set_ylim3d(-1, 1)
ax.axes.set_zlim3d(-1, 1)
ax.set_title('Joint visualization of latent theta (user) & beta (item) vectors', fontsize=30)
plt.show()
@mlab.show
def arrow_interactive(vectors, names, show_title=False, is_similar=False, scale=1000):
cmap = {0: 'r', 1: 'g', 2: 'b'}
black = colors.to_rgb('black')
white = colors.to_rgb('white')
# set axis
_set_mlab_axis(scale, bg_color=white, axis_color=black)
if is_similar:
title_x, title_y, title_z = -500, scale*2+200, scale*2+200
for vec, name in zip(vectors, names):
vec = _norm_vector(vec)
color = colors.to_rgb(cmap[np.argmax(np.abs(vec))])
vec = scale * np.array(vec)
mlab.plot3d([0, vec[0]], [0, vec[1]], [0, vec[2]], color=color, tube_radius=5.)
if show_title:
if is_similar:
mlab.text3d(title_x, title_y, title_z, name, color=black, scale=30)
title_z -= 100
else:
mlab.text3d(1.05*vec[0] , 1.05*vec[1]*1.1, 1.05*vec[2], name, color=black, scale=30)
@mlab.show
def arrow_joint_interactive(vec_users, vec_items, names, show_title=False, scale=1000):
black = colors.to_rgb('black')
white = colors.to_rgb('white')
red = colors.to_rgb('red')
green = colors.to_rgb('green')
# set axis
_set_mlab_axis(scale, bg_color=white, axis_color=black)
# plot user vectors
for vec in vec_users:
vec = _norm_vector(vec)
vec = scale * np.array(vec)
mlab.plot3d([0, vec[0]], [0, vec[1]], [0, vec[2]], color=red, tube_radius=5.)
title_x, title_y, title_z = -500, scale*2+200, scale*2+200
# plot item vectors
for vec, name in zip(vec_items, names):
vec = _norm_vector(vec)
vec = scale * np.array(vec)
mlab.plot3d([0, vec[0]], [0, vec[1]], [0, vec[2]], color=green, tube_radius=5.)
if show_title:
mlab.text3d(title_x, title_y, title_z, name, color=black, scale=30)
title_z -= 50
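# Usage sketch (hypothetical data; assumes a DataFrame with float columns
# attr_1..attr_3 and an integer cluster column, as the functions above expect):
# import pandas as pd
# df = pd.DataFrame({'attr_1': [.1, .9], 'attr_2': [.2, .8],
#                    'attr_3': [.3, .7], 'cluster': [0, 1]})
# tsne(df)               # static matplotlib 3D scatter
# tsne_interactive(df)   # interactive mayavi scene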
|
[
"yj2589@columbia.edu"
] |
yj2589@columbia.edu
|
26226def711d3e6653809608ae376361766fc41d
|
06e2fb156b32676f51bb575b5b53cae44c3f45db
|
/tests/data/arrays/Translation/arrays-basic-01.py
|
ca31e0bc8e7bd6e596ac40687e98dd3b4eb1ebef
|
[
"Apache-2.0"
] |
permissive
|
hlim1/delphi
|
2d1d6d33b1b9c123dc2ff00d5cc34613eb132ec3
|
c91044fb696436ea8266ad5f6cd6447dcc4f005e
|
refs/heads/master
| 2020-04-17T05:27:07.678543
| 2019-01-13T18:25:03
| 2019-01-13T18:25:03
| 166,279,487
| 0
| 0
|
Apache-2.0
| 2019-01-17T18:57:52
| 2019-01-17T18:57:51
| null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
import sys
from fortran_format import *
from for2py_arrays import *
def main():
arr = Array([(1,10)])
for i in range(1,10+1):
arr.set((i,), i*i)
fmt_obj = Format(['I5'])
for i in range(1,10+1):
val = arr.get((i,))
sys.stdout.write(fmt_obj.write_line([val]))
main()
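# Expected behavior: writes the squares 1, 4, 9, ..., 100, each rendered with
# Fortran-style 'I5' formatting (right-justified in a five-character field).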
|
[
"saumya.debray@gmail.com"
] |
saumya.debray@gmail.com
|
2565f7be4f8bfe16b0835c4058972e7f26185cb3
|
f7e722a8d9aa53218316e857cb0a97b85391583b
|
/docs/conf.py
|
017bfbff0d0a8a7db26c8ee072ef0623d0ff4718
|
[
"MIT"
] |
permissive
|
algobot76/dekit
|
e84b355829eda9cce6b1123b16a251c78a20d5a9
|
37a25a570bc029e5d1f74953350deb387a8aaf25
|
refs/heads/master
| 2020-08-16T05:17:45.661679
| 2019-10-16T09:32:10
| 2019-10-16T09:32:10
| 215,459,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
import dekit
# Project
project = 'Dekit'
copyright = '2019 Kaitian Xie'
author = 'Kaitian Xie'
version = dekit.__version__
release = dekit.__version__
# General
master_doc = 'index'
extensions = ["sphinx.ext.autodoc"]
exclude_patterns = ['_build']
# HTML
html_theme = 'default'
html_static_path = ['_static']
html_title = f'Dekit Documentation ({version})'
html_show_sourcelink = False
# LaTex
latex_documents = [
(master_doc, f'dekit-{version}.tex', html_title, author, 'manual'),
]
|
[
"xkaitian@gmail.com"
] |
xkaitian@gmail.com
|
0a5a3be286540d32ce476c0bccfe369d8d4d3b4f
|
b29cb86bb3f10dd1df96f288c21b9f9af1bc0fe4
|
/learrn_redis.py
|
4ee0191b2bfad5c8683dce3b1858f1ae291646f2
|
[] |
no_license
|
ccs258/python_code
|
e1b54a9652f42fc60eb57f066d339acfe7c6ec31
|
825843f19733814f46e35729543383a6cdab6af8
|
refs/heads/master
| 2020-03-27T18:59:37.487428
| 2020-02-24T07:41:07
| 2020-02-24T07:41:07
| 146,958,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
# -*- coding: utf-8 -*-
# @Time    : 19-1-23 11:12 PM
# @Author : ccs
kwargs = {}
kwargs.update({
'connection_class': 'SSLConnection',
'ssl_keyfile': 'ssl_keyfile',
'ssl_certfile': 'ssl_certfile',
'ssl_cert_reqs': 'ssl_cert_reqs',
'ssl_ca_certs': 'ssl_ca_certs',
})
print(kwargs)
shard_hint = kwargs.pop('shard_hint', None)
connection_class = kwargs.pop('connection_class')
print(shard_hint)  # prints None
print(connection_class)  # prints SSLConnection
#### Different layers raise different kinds of exceptions.
# The block below is an excerpt of the redis client's socket-reading
# internals (not runnable on its own): low-level socket errors are caught
# and re-raised as redis TimeoutError / ConnectionError.
while response is False:
try:
if HIREDIS_USE_BYTE_BUFFER:
bufflen = recv_into(self._sock, self._buffer)
if bufflen == 0:
raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
else:
buffer = recv(self._sock, socket_read_size)
# an empty string indicates the server shutdown the socket
if not isinstance(buffer, bytes) or len(buffer) == 0:
raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
except socket.timeout:
raise TimeoutError("Timeout reading from socket")
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError("Error while reading from socket: %s" %
(e.args,))
# Redis-protocol data access, by analogy with a database:
# establish connections, manage a connection pool, close connections.
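# A minimal runnable analogue of the notes above (assumes a local Redis
# server on the default port and the redis-py package):
# import redis
# pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
# r = redis.Redis(connection_pool=pool)
# r.set('greeting', 'hello')
# print(r.get('greeting'))  # b'hello'
# pool.disconnect()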
|
[
"1154911548@qq.com"
] |
1154911548@qq.com
|
16b78aa288d25076c0b793601b11a5a7595dbaeb
|
ee426299951e9d9c7f997690db8efb92f55552cc
|
/hexeditor/read_write_functions.py
|
c7956846bf7540d139956b310351df5cff3c8f99
|
[] |
no_license
|
MemeDealer5000/Hex
|
6d599f0638498c1e1570c41babba8271f6a204a6
|
2e5814d59ee4b4ab085fd74606265451054230cc
|
refs/heads/master
| 2020-12-27T13:36:15.136199
| 2020-02-03T08:46:10
| 2020-02-03T08:46:10
| 237,920,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,201
|
py
|
import sys
import curses
import os
def read_bytes(file_path):
bytes_arr = None
with open(file_path, 'rb') as f:
bytes_arr = bytes(f.read())
print(bytes_arr)
return bytes_arr
def write_bytes(file_path, bytes_arr):
with open(file_path, 'wb') as f:
f.write(bytes_arr)
def read_data(file_path):
if not os.path.isfile(file_path):
        raise AssertionError("file not found: %s" % file_path)
print("OK")
bytes_arr = read_bytes(file_path)
data = [ b for b in bytes_arr ]
return data
def write_data(file_path, data):
    # data is a list of ints; convert back to bytes for the binary write
    # (joining chr(b) into a str fails on a file opened in 'wb' mode)
    write_bytes(file_path, bytes(bytearray(data)))
def join_bytes(data, byte_count, little_endian=True):
while len(data) < byte_count:
data.append(0)
if little_endian:
return sum([ data[i] << 8 * i for i in range(0, byte_count) ])
else:
result = 0
for i in range(0, byte_count):
result = (result << 8) | data[i]
return result
def split_bytes(data, byte_count, little_endian=True):
if little_endian:
return [ (data >> i * 8) & 0xff for i in range(0, byte_count) ]
else:
return [ (data >> i * 8) & 0xff for i in range(byte_count - 1, -1, -1) ]
def rep_data(data, byte_count):
return ('%0' + str(byte_count * 2) + 'X') % data
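# Round-trip example for the helpers above:
#   split_bytes(0x1234, 2)        -> [0x34, 0x12]   (little-endian)
#   join_bytes([0x34, 0x12], 2)   -> 0x1234
#   rep_data(0x1234, 2)           -> '1234'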
|
[
"noreply@github.com"
] |
MemeDealer5000.noreply@github.com
|
ea0f332a7caabaa77840c25f9acd4415d2b94b9e
|
8b4394811f6e6929321f28b3127c0ff222018df6
|
/polls/urls.py
|
c63d528c0f3b4e626213972bb91f37174736efc2
|
[] |
no_license
|
akshay-testpress/pollsWebsite
|
d654e16260926db64deba24c9280392508be20c6
|
cdcf6eb1b3a797ace8658fd280793bd6a963598c
|
refs/heads/master
| 2020-04-01T16:57:30.608857
| 2018-10-17T11:01:56
| 2018-10-17T11:01:56
| 153,406,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 111
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url("",views.index, name="index"),
]
|
[
"akshaykumar@testpress.in"
] |
akshaykumar@testpress.in
|
d7a081e916149815ac3db674dd1b085dabded240
|
88399431f86dea162b8d67fc14a2b84613f88197
|
/data/split_data.py
|
1ba72cf4e790d67a947f63ec36aa05256bf30dc7
|
[] |
no_license
|
hugcis/cyk-parser
|
0e92ebf02400f60600db7cc6a67af735bd182169
|
fa630cd73ddbace0a1319063a8a4ee317633e50d
|
refs/heads/master
| 2020-04-29T11:12:05.231835
| 2019-03-17T17:34:07
| 2019-03-17T17:34:07
| 176,089,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 600
|
py
|
import numpy as np
np.random.seed(333)
with open('sequoia-corpus+fct.mrg_strict') as f:
    all_lines = np.array(f.read().split('\n'))
index = np.arange(len(all_lines))
np.random.shuffle(index)
train_set = all_lines[index[:int(0.8 * len(index))]]
val_set = all_lines[index[int(0.8 * len(index)):int(0.9 * len(index))]]
test_set = all_lines[index[int(0.9 * len(index)):]]
with open('train_data.mrg_strict', 'w') as f:
f.write('\n'.join(train_set))
with open('val_data.mrg_strict', 'w') as f:
f.write('\n'.join(val_set))
with open('test_data.mrg_strict', 'w') as f:
f.write('\n'.join(test_set))
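# With N input lines the shuffled split sizes are floor(0.8*N),
# floor(0.9*N) - floor(0.8*N), and N - floor(0.9*N): roughly an 80/10/10
# train/val/test split.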
|
[
"hmj.cisneros@gmail.com"
] |
hmj.cisneros@gmail.com
|
ffba9b8f6d3f6df467973fb518836842e53c1563
|
13a4777d8e4fe38f0e70ba5014cc39030c5f9208
|
/466HW1.py
|
761efc5112ec511c3c47fafda236cc407c274191
|
[] |
no_license
|
MAT466Abbas/Homework1
|
72e684ea3df87ed62fb7fe146bb326d78263b30b
|
2c8d145d64bcf7dc4a3c81a00fcb88e91ffed4ca
|
refs/heads/master
| 2020-12-27T15:03:13.476993
| 2020-02-03T11:04:02
| 2020-02-03T11:04:02
| 237,945,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,666
|
py
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from math import *
#################################Yield-to-Maturity############################################
#dictionary in which keys are start day numbers and values are ytm arrays
#one dictionary for each curve we want to make
Y_Arrays = {}
#split 5 years into 6 month periods
T = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0])
#yield values (one array for each start day) corresponding the time to maturity value
#array T above
y2 = np.array([0.009, 0.0073, 0.0076, 0.0072, 0.0075, 0.0072, 0.0072, 0.0071, 0.0079, 0.0071])
y3 = np.array([0.009, 0.0072, 0.0075, 0.0071, 0.0074, 0.007, 0.0071, 0.007, 0.0077, 0.007])
y6 = np.array([0.009, 0.0071, 0.0073, 0.007, 0.0073, 0.007, 0.007, 0.0068, 0.0073, 0.0068])
y7 = np.array([0.009, 0.0071, 0.0074, 0.0071, 0.0074, 0.007, 0.0071, 0.007, 0.0075, 0.0069])
y8 = np.array([0.009, 0.0072, 0.0074, 0.0071, 0.0074, 0.007, 0.0071, 0.007, 0.0074, 0.0069])
y9 = np.array([0.009, 0.0073, 0.0076, 0.0072, 0.0075, 0.0071, 0.0072, 0.007, 0.0076, 0.0071])
y10 = np.array([0.009, 0.0075, 0.0076, 0.0072, 0.0075, 0.0072, 0.0072, 0.0071, 0.0076, 0.0071])
y13 = np.array([0.009, 0.0074, 0.0076, 0.0073, 0.0076, 0.0073, 0.0073, 0.0071, 0.0076, 0.0071])
y14 = np.array([0.01, 0.0075, 0.0076, 0.0073, 0.0076, 0.0072, 0.0072, 0.007, 0.0076, 0.007])
y15 = np.array([0.01, 0.0074, 0.0075, 0.0072, 0.0076, 0.0071, 0.0071, 0.007, 0.0075, 0.007])
#iterate through the days
for day in range(2,16):
if (day==4 or day==5 or day==11 or day==12):
continue
#6 month yields for this day
yields = eval("y"+str(day))
#we will have a time array for each 6 month period. The corresponding yield
#array will be obtained by linearly interpolating between the yield values
#at the endpoints of the time array. This will give us 10 pairs of time/yield
#arrays. They will be placed in the following lists.
t_arrays = []
y_arrays = []
for i in range(0,9):
t = np.zeros(10+1)
y = np.zeros(10+1)
t[0] = T[i]
t[-1] = T[i+1]
for k in range(1, 10):
t[k]=t[k-1]+((t[-1]-t[0])/10)
t_arrays.append(t)
y[0]=yields[i]
y[-1]=yields[i+1]
#slope
m = (y[-1]-y[0])/(t[-1]-t[0])
for k in range(1, 10):
y[k]=y[k-1]+m*((t[-1]-t[0])/10)
y_arrays.append(y)
#concatenate all arrays in t_arrays into one (same for y_arrays) to get 5 year
#time and corresponding ytm arrays for this start day
for i in range(0,len(t_arrays)):
if (i==0):
T_day = t_arrays[i]
Y_day = y_arrays[i]
else:
T_day = np.concatenate([T_day, t_arrays[i]])
Y_day = np.concatenate([Y_day, y_arrays[i]])
#array that will hold Time-to-maturity, which will be the horizontal axis of all
#our plots (do this only once)
if (day==2):
T_Array = T_day
Y_Arrays[str(day)]=Y_day
#plot yield curves
plt.figure(1)
for day in range(2,16):
if (day==4 or day==5 or day==11 or day==12):
continue
plt.plot(T_Array, Y_Arrays[str(day)], label='Ytm curve for ' + str(day) + '-Jan-2020')
plt.legend(prop={"size":5})
plt.xlabel('Time to maturity (in years)')
plt.ylabel('Yield to maturity (in %)')
plt.title('Yield-to-Maturity vs Time-to-Maturity curve')
##########################################################################################
############################### Forward Rate Curve########################################
#define a list of forward rate matrices for each day (this will be useful later)
f_rates = []
plt.figure(2)
for day in range(2,16):
if (day==4 or day==5 or day==11 or day==12):
continue
#the index for 1 year (and its corresponding ytm) is 11. Here we cut out the
#first year and its corresponding yields. These will be used for 1 year
#forward rate calculation
yields = Y_Arrays[str(day)][12:]
time = T_Array[12:]
#if (day==2):
# print time[0], time[20], time[42], time[65], time[86]
#yield at one year
yield_1=Y_Arrays[str(day)][11]
    #apply the forward rate formula
forward_rate=(time*yields-yield_1)/(time-1)
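    # Derivation: with continuously compounded zero yields,
    # t*y(t) = 1*y(1) + (t-1)*f(1,t), hence f(1,t) = (t*y(t) - y(1)) / (t - 1).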
f_rates.append(forward_rate)
plt.plot(time, forward_rate, label='Forward Rate for ' + str(day) + '-Jan-2020')
plt.legend(loc='upper center', prop={"size":5})
plt.xlabel('Time (in years)')
plt.ylabel('Forward rate (in %)')
plt.title('1-Year Forward rate curve')
#########################################################################################
#############################Covariance Matrices#########################################
#let us begin by defining the matrix whose elements are r_{i,j}, as described
#in Q5 in the report. We do this manually
R = np.array([[0.0073,0.0072,0.0071,0.0071,0.0072,0.0073,0.0075,0.0074,0.0075,0.0074]
,[0.0072,0.0071,0.007,0.0071,0.0071,0.0072,0.0072,0.0073,0.0073,0.0072]
,[0.0072,0.007,0.007,0.007,0.007,0.0071,0.0072,0.0073,0.0072,0.0071]
,[0.0071,0.007,0.068,0.007,0.007,0.007,0.0071,0.0071,0.007,0.007]
,[0.0071,0.007,0.068,0.069,0.069,0.0071,0.0071,0.0071,0.007,0.007]])
#We now compute matrix X, whose elements are X_{i,j}, as defined in Q5
X =np.zeros((5,9))
for i in range(0,5):
for j in range(0,9):
X[i][j]=np.log(R[i][j+1]/R[i][j])
cov_X = np.cov(X)
#we now do the same thing with the one-year forward rate
#we define the matrix F, whose elements are f_{i,j}, as defined in Q5
F=np.zeros((5,10))
#In each forward rate matrix (for each start day), looking at the 1-st, 21st,
#43rd,66th, and 87th elements gives us the 1-1, 1-2, 1-3, 1-4 and 1-5 forward
#rates (these were found by visually inspecting arrays in the code). This is
#the column of F corresponding to that day.
for j in range(0, len(f_rates)):
T=f_rates[j]
F[:,j]=np.array([T[0], T[20], T[42], T[65], T[86]])
#We now compute matrix Y, whose elements are Y_{i,j}, as defined in Q5
Y =np.zeros((5,9))
for i in range(0,5):
for j in range(0,9):
Y[i][j]=np.log(F[i][j+1]/F[i][j])
cov_Y=np.cov(Y)
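# Display both figures when the script is run directly (without an explicit
# show() or savefig() the two matplotlib figures are never rendered).
plt.show()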
|
[
"noreply@github.com"
] |
MAT466Abbas.noreply@github.com
|
8f1fa0b820a4000eeae6b856c1afb201dd2649df
|
3ec86b5ae807f3e7d50e7e68a617cb1673e2f209
|
/utils/__init__.py
|
ecdc10742edce6fc66fd0a231a33a25c4debf20b
|
[] |
no_license
|
xiaohanzai/Illustris
|
6b9a408efed0168b20d62f8a22689e6e38890412
|
8ef952dbc7670fa3f8f2ed3321f5af3b9ce137f7
|
refs/heads/master
| 2021-09-06T04:55:32.398321
| 2018-02-02T14:09:51
| 2018-02-02T14:09:51
| 115,231,116
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 101
|
py
|
__all__ = ['equalNumBin', 'paths', 'util_general', 'util_illustris', 'util_shape']
from . import *
|
[
"wuxiaohan.a@gmail.com"
] |
wuxiaohan.a@gmail.com
|
2ca5bebe5e2ebd2611e4c3dfa88d9e2dffeb823d
|
b95e6288a91bd620b8c8be8641ca9b4c2460ae33
|
/src/Producer.py
|
9fa776535c9deebfa3520cb751fb7e5c404f4977
|
[] |
no_license
|
pkmoore/rrapper
|
dcebc52eb600a01ea92611d96785bfd53b41b799
|
e0ef7b8a735f536baa07093c0ecf339c21f7bc43
|
refs/heads/master
| 2021-04-12T08:15:53.167169
| 2019-10-01T05:23:56
| 2019-10-01T05:23:56
| 125,935,155
| 4
| 10
| null | 2020-05-24T20:56:06
| 2018-03-19T23:51:06
|
Python
|
UTF-8
|
Python
| false
| false
| 3,253
|
py
|
"""
<Program Name>
Producer
<Started>
July 2019
<Author>
Junjie Ge
<Purpose>
  Instance of posix_omni_parser that populates the syscall_objects and trace
  lists in the trace manager
"""
import logging
from posix_omni_parser.parsers.StraceParser import StraceParser
import consts
import exceptions
class Producer:
def __init__(self, trace_file, pickle_file, tm):
# location of tracefile
self.tracefile = trace_file
self.trace_manager = tm
self.parser = StraceParser(self.tracefile, pickle_file)
def produce(self, thread_condition, backlog_size=None):
"""
<Purpose>
This method parses and adds syscalls to the list of syscalls in
TraceManager. It works like a sliding window that constantly updates the
list depending on where the slowest mutator is in that list
<Returns>
None
"""
if not backlog_size:
backlog_size = consts.BACKLOG_SIZE
with open(self.tracefile, 'r') as fh:
# Finding and ignoring everything before 'syscall_'
while True:
try:
          trace_line = next(fh)
except StopIteration:
logging.debug("Incomplete Trace. Trace ended without 'syscall_'")
          raise exceptions.ProducerError('Syscall_ not found in Trace. Incomplete Trace?')
syscall = self.parser.parse_line(trace_line)
if syscall and 'syscall_' in syscall.name:
break
      # Adding an initial amount of traces to the list so the indexes of mutators
      # can advance. This is required; otherwise a mutator would have no trace to
      # identify, and the next loop could spin forever because of the
      # "if backtrace_limit > 0" check below
for i in range(0, backlog_size * 4, 2):
with thread_condition:
try:
          event_line = next(fh)
          trace_line = next(fh)
except StopIteration:
self.trace_manager.producer_done()
thread_condition.notify_all()
return
syscall = self.parser.parse_line(trace_line)
self.trace_manager.syscall_objects.append(syscall)
self.trace_manager.trace.append((event_line, trace_line))
thread_condition.notify()
# This part is the updating window. It deletes everything before the
# backlog of the slowest mutator and adds the same number of syscalls to
# the end
while True:
with thread_condition:
backtrace_limit = self.trace_manager.mutators[0]['index'] - backlog_size
for mutator in self.trace_manager.mutators:
if mutator['index'] - backlog_size < backtrace_limit:
backtrace_limit = mutator['index'] - backlog_size
if backtrace_limit > 0:
for i in range(backtrace_limit):
self.trace_manager.pop_front()
try:
          event_line = next(fh)
          trace_line = next(fh)
except StopIteration:
self.trace_manager.producer_done()
thread_condition.notify_all()
return
self.trace_manager.syscall_objects.append(self.parser.parse_line(trace_line))
self.trace_manager.trace.append((event_line, trace_line))
thread_condition.notify()
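# Shape of the sliding window, independent of the parser:
#   1. seed the shared lists with ~2*backlog_size syscalls so mutators can start;
#   2. each iteration, pop everything more than backlog_size entries behind the
#      slowest mutator, append one new (event_line, trace_line) pair, and
#      notify waiting consumers through the shared condition variable.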
|
[
"prestonkmoore@pm.me"
] |
prestonkmoore@pm.me
|
b1e1bfe13e233dcfc6301cebbc179e883ccb6a3c
|
286d1b73634a5f0169b2e66501a3ba25a87cf921
|
/baidu/utils/files_provider.py
|
9a61536769c3722eefd558be416406a87a2861a0
|
[
"Apache-2.0"
] |
permissive
|
amatkivskiy/baidu
|
63497dbda1add3deecf366cafd98f2ffd9571953
|
d599f41fa3fda3428c38a0d05661aaac152806da
|
refs/heads/master
| 2016-09-05T20:02:00.864937
| 2015-04-22T13:56:47
| 2015-04-22T13:56:47
| 32,919,676
| 2
| 1
| null | 2015-07-29T16:16:38
| 2015-03-26T09:58:16
|
Python
|
UTF-8
|
Python
| false
| false
| 492
|
py
|
from string import Template
__author__ = 'maa'
templates_folder = 'file_templates_folder\\'
def create_and_full_fill_file(template_file_name, destination_file_path, kwargs):
    # read the template, substitute the placeholders, and write the result
    with open(template_file_name, 'r') as template_file:
        file_content = template_file.read()
    template = Template(file_content)
    final_content = template.substitute(kwargs)
    with open(destination_file_path, 'w') as final_file:
        final_file.write(final_content)
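# Usage sketch (template and output paths are hypothetical):
# create_and_full_fill_file(templates_folder + 'service.tpl',
#                           'out\\service.py',
#                           {'name': 'demo'})
# substitutes every $name placeholder in the template with 'demo'.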
|
[
"andriy.matkivskiy@gmail.com"
] |
andriy.matkivskiy@gmail.com
|
2244bcc7b98578156696e4b908e8c3edd4248bac
|
7a6013e19a2e963f537c381e11a916d6e0f096b0
|
/app/app/settings.py
|
92a237b50b06c0ef8d12c321b37341bf785c91bc
|
[
"MIT"
] |
permissive
|
camilalemos/todo-challenge
|
23dc3e5a99bfa10d5ba763e6d3b78903a4db1e19
|
2743a08b344284f39c9f8ff5724f673e7f720919
|
refs/heads/main
| 2023-09-01T19:17:08.920598
| 2021-10-22T20:13:14
| 2021-10-22T20:13:14
| 417,930,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,286
|
py
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-1=9y0=j$0n%kdt8t^$1j!*&+qa1^1a-*692lz7n6^^4m-&7wtt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ToDoList.apps.TodolistConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Argentina/Cordoba'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"cami.sole.lemos@gmail.com"
] |
cami.sole.lemos@gmail.com
|
7b5d355f6902bab25c1b13cf7ee0184dccc9a694
|
c6f936e59ec6c2ca3b84e540188013c040c91d9d
|
/07 project Euler/Problem 9.py
|
96d88274ef961bdce7a07ba0888f2f4452d3f85e
|
[] |
no_license
|
KubaBBB/Programming-Challenges
|
4f78923f8ed72950ee5e1867de26457c4d00751f
|
228572ecaf97d2f540f32648a8cbbf25bad6ce60
|
refs/heads/master
| 2021-01-19T17:27:28.374178
| 2017-09-05T11:42:23
| 2017-09-05T11:42:23
| 101,059,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
# a < b < c
# a^2 + b^2 = c^2
# a + b + c = 1000, hence c = 1000 - a - b
#
# find a*b*c
#
def func():
    stat = 1000
    for a in range(1, stat + 1):
        for b in range(1, stat + 1):
            c = stat - a - b
            if c ** 2 == a ** 2 + b ** 2:
                print("a: " + str(a) + " b: " + str(b) + " c: " + str(c))
                print(str(a * b * c))

if __name__ == '__main__':
    func()
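# Expected result: the unique triple is a = 200, b = 375, c = 425, giving
# a*b*c = 31875000. (The loops report it twice, once per ordering of a and b.)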
|
[
"jakubboliglowa1996@gmail.com"
] |
jakubboliglowa1996@gmail.com
|
40aa58f318a778e39072fb25a804f963f1d01eaf
|
29fdcdde4aae7502d8170cc44f4af635fbcce5b5
|
/Python_2021/asyncio_4.py
|
24cd8a9eaea87e02d16ac7ce77f1b7eca07f7f12
|
[] |
no_license
|
shivam0071/exploringPython
|
bcbd0d03f8f473077d3777d1396ef4638e5c6cee
|
ace9646e59ba20068e383704430efe4946833090
|
refs/heads/master
| 2022-05-05T00:34:24.642204
| 2022-04-01T06:13:38
| 2022-04-01T06:13:38
| 81,352,255
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
#!/usr/bin/env python3
# rand.py
import asyncio
import random
# ANSI colors
c = (
"\033[0m", # End of color
"\033[36m", # Cyan
"\033[91m", # Red
"\033[35m", # Magenta
)
async def makerandom(idx: int, threshold: int = 6) -> int:
print(c[idx + 1] + f"Initiated makerandom({idx}).")
i = random.randint(0, 10)
while i <= threshold:
print(c[idx + 1] + f"makerandom({idx}) == {i} too low; retrying.")
await asyncio.sleep(idx + 1)
i = random.randint(0, 10)
print(c[idx + 1] + f"---> Finished: makerandom({idx}) == {i}" + c[0])
return i
async def main():
res = await asyncio.gather(*(makerandom(i, 10 - i - 1) for i in range(3)))
return res
if __name__ == "__main__":
random.seed(444)
r1, r2, r3 = asyncio.run(main())
print()
print(f"r1: {r1}, r2: {r2}, r3: {r3}")
|
[
"shivamkoundal01@gmail.com"
] |
shivamkoundal01@gmail.com
|
227a97642cadf2fcd73851b1c2a4d5961068848c
|
29d9f0cf22a6a750a281f8922903a0742cae6d64
|
/mysite/settings.py
|
045879020c3512d8a3d0b2386269778960267ea5
|
[] |
no_license
|
NadzeyaBobrovnik/djangog
|
57741a559868c1a4020e5564780c5c714d0f76a4
|
68c1bc839bd2e03fe85067c7ac51bdbcb075e4ba
|
refs/heads/master
| 2020-07-29T19:53:27.917103
| 2019-09-21T15:59:52
| 2019-09-21T15:59:52
| 209,940,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,210
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'aznckgv!&b$2in%1^^qq3wjeh^&y81(k9w%c1v#952a=w+p7iu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"nadzeya.bobrovnik@gmail.com"
] |
nadzeya.bobrovnik@gmail.com
|
20f58429c97b7fdf709f27f84e6940302d752323
|
da01f5d47294fe9553d8dfbd0c5403bf3f46d22a
|
/final.py
|
755a9d3aae989d8e9532d94b4b60a7d424933db1
|
[] |
no_license
|
abdullahbm09/Blockchain
|
97e5c7d0bb0738938a5fcb37e064f578878b94b2
|
f7e6959583e4ae0a2ee9c6925142fcbdbc9d8214
|
refs/heads/master
| 2020-04-08T01:58:45.302024
| 2018-11-24T08:34:03
| 2018-11-24T08:34:03
| 158,917,374
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,975
|
py
|
# associated medium post: https://medium.com/@ethervolution/ethereum-create-raw-json-rpc-requests-with-python-for-deploying-and-transacting-with-a-smart-7ceafd6790d9
''' =========================== Defining Libraries ============================ '''
import requests
import json
import web3 # Release 4.0.0-beta.8
import pprint
import time
import serial
''' =========================== INTIALIZING AND DEFINING VARIABLES ============================ '''
# create persistent HTTP connection
session = requests.Session()
w3 = web3.Web3()
pp = pprint.PrettyPrinter(indent=2)
requestId = 0 # is automatically incremented at each request
URL = 'http://localhost:8501' # url of my geth node
PATH_GENESIS = '/home/abdullah/Downloads/devnet/genesis.json'
PATH_SC_TRUFFLE = '/home/abdullah/Downloads/devnet/workspace/' # smart contract path
# extracting data from the genesis file
genesisFile = json.load(open(PATH_GENESIS))
CHAINID = genesisFile['config']['chainId']
PERIOD = genesisFile['config']['clique']['period']
GASLIMIT = int(genesisFile['gasLimit'],0)
# compile your smart contract with truffle first
truffleFile = json.load(open(PATH_SC_TRUFFLE + '/build/contracts/AdditionContract.json'))
abi = truffleFile['abi']
bytecode = truffleFile['bytecode']
# Don't share your private key !
myAddress = '0x29a86118C1Ff89d474E9497D8B3FA890D9F7e30C' # address funded in genesis file
myPrivateKey = '0xff8c4769d2e1d6f7bee613c422a1f9243f189bf5f9764c15b19c6439c0f56cd9'
#Addition CONTRACT ADDRESS
contractAddress1 = '0x5422d0aad3c977baeb95b7351f10919f8110c4bd'
arduinoWrite = serial.Serial("/dev/ttyACM0",9600)
arduinoRead = serial.Serial("/dev/ttyACM0" ,timeout=1)
data_raw=[]
value1=[]
''' =========================== SOME FUNCTIONS ============================ '''
# see http://www.jsonrpc.org/specification
# and https://github.com/ethereum/wiki/wiki/JSON-RPC
def createJSONRPCRequestObject(_method, _params, _requestId):
return {"jsonrpc":"2.0",
"method":_method,
"params":_params, # must be an array [value1, value2, ..., valueN]
"id":_requestId}, _requestId+1
def postJSONRPCRequestObject(_HTTPEnpoint, _jsonRPCRequestObject):
response = session.post(_HTTPEnpoint,
json=_jsonRPCRequestObject,
headers={'Content-type': 'application/json'})
return response.json()
# ARDUINO FUNCTIONS
def led_on():
arduinoWrite.write(b'1')
def led_off():
arduinoWrite.write(b'0')
#addition contract
contractAddress = w3.toChecksumAddress(contractAddress1)
''' ================= SEND A TRANSACTION TO SMART CONTRACT ================'''
while True:
### get your nonce
requestObject, requestId = createJSONRPCRequestObject('eth_getTransactionCount', [myAddress, 'latest'], requestId)
responseObject = postJSONRPCRequestObject(URL, requestObject)
myNonce = w3.toInt(hexstr=responseObject['result'])
print('nonce of address {} is {}'.format(myAddress, myNonce))
#Reading Serial Data
print("Incoming Serial Data for Temperation Sensor")
print(arduinoRead.readline())
### prepare the data field of the transaction
# function selector and argument encoding
# https://solidity.readthedocs.io/en/develop/abi-spec.html#function-selector-and-argument-encoding
    value1 = int(arduinoRead.readline().strip())  # decode the serial bytes to an int for the contract call
function = 'add(uint256)' # from smart contract
methodId = w3.sha3(text=function)[0:4].hex()
param1 = (value1).to_bytes(32, byteorder='big').hex()
data = '0x' + methodId + param1
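    # ABI call data = 4-byte selector (first 4 bytes of keccak256 of the
    # function signature) followed by each argument left-padded to 32 bytes,
    # big-endian; hence value1.to_bytes(32, byteorder='big').hex() above.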
transaction_dict = {'from':myAddress,
'to':contractAddress,
'chainId':CHAINID,
'gasPrice':1, # careful with gas price, gas price below the threshold defined in the node config will cause all sorts of issues (tx not bieng broadcasted for example)
'gas':2000000, # rule of thumb / guess work
'nonce':myNonce,
'data':data}
### sign the transaction
signed_transaction_dict = w3.eth.account.signTransaction(transaction_dict, myPrivateKey)
params = [signed_transaction_dict.rawTransaction.hex()]
### send the transacton to your node
print('executing {} with value {}'.format(function, value1))
requestObject, requestId = createJSONRPCRequestObject('eth_sendRawTransaction', params, requestId)
responseObject = postJSONRPCRequestObject(URL, requestObject)
transactionHash = responseObject['result']
print('transaction hash {}'.format(transactionHash))
### wait for the transaction to be mined
while(True):
requestObject, requestId = createJSONRPCRequestObject('eth_getTransactionReceipt', [transactionHash], requestId)
responseObject = postJSONRPCRequestObject(URL, requestObject)
receipt = responseObject['result']
if(receipt is not None):
if(receipt['status'] == '0x1'):
print('transaction successfully mined')
else:
pp.pprint(responseObject)
raise ValueError('transaction status is "0x0", execution failed. Check gas and gasPrice first')
break
time.sleep(PERIOD/10)
''' ============= READ YOUR SMART CONTRACT STATE USING GETTER =============='''
# we don't need a nonce since this does not create a transaction but only asks
# our node to read its local database
### prepare the data field of the transaction
# function selector and argument encoding
# https://solidity.readthedocs.io/en/develop/abi-spec.html#function-selector-and-argument-encoding
# state is declared as public in the smart contract. This creates a getter function
methodId = w3.sha3(text='state()')[0:4].hex()
data = '0x' + methodId
transaction_dict = {'from':myAddress,
'to':contractAddress,
'chainId':CHAINID,
'data':data}
params = [transaction_dict, 'latest']
requestObject, requestId = createJSONRPCRequestObject('eth_call', params, requestId)
responseObject = postJSONRPCRequestObject(URL, requestObject)
state = w3.toInt(hexstr=responseObject['result'])
print('using getter for public variables: result is {}'.format(state))
''' ============= READ YOUR SMART CONTRACT STATE GET FUNCTIONS =============='''
# we don't need a nonce since this does not create a transaction but only asks
# our node to read its local database
### prepare the data field of the transaction
# function selector and argument encoding
# https://solidity.readthedocs.io/en/develop/abi-spec.html#function-selector-and-argument-encoding
# state is declared as public in the smart contract. This creates a getter function
methodId = w3.sha3(text='getState()')[0:4].hex()
data = '0x' + methodId
transaction_dict = {'from':myAddress,
'to':contractAddress,
'chainId':CHAINID,
'data':data}
params = [transaction_dict, 'latest']
requestObject, requestId = createJSONRPCRequestObject('eth_call', params, requestId)
responseObject = postJSONRPCRequestObject(URL, requestObject)
state = w3.toInt(hexstr=responseObject['result'])
print('using getState() function: result is {}'.format(state))
''' prints
nonce of address 0xF464A67CA59606f0fFE159092FF2F474d69FD675 is 4
contract submission hash 0x64fc8ce5cbb5cf822674b88b52563e89f9e98132691a4d838ebe091604215b25
newly deployed contract at address 0x7e99eaa36bedba49a7f0ea4096ab2717b40d3787
nonce of address 0xF464A67CA59606f0fFE159092FF2F474d69FD675 is 5
executing add(uint256,uint256) with value 10,32
transaction hash 0xcbe3883db957cf3b643567c078081343c0cbd1fdd669320d9de9d05125168926
transaction successfully mined
using getter for public variables: result is 42
using getState() function: result is 42
'''
|
[
"abdullahbm09@gmail.com"
] |
abdullahbm09@gmail.com
|
cd184fc6a59c48bcbf6c19506b222408dba49b47
|
2c0f7ac97dc004ac36ca65661c0bec862647508e
|
/financepeer/asgi.py
|
8b301e0f91b67cc7457726f9356455efd3f7b951
|
[] |
no_license
|
mit1911/FINANCEPEER
|
c2082cdb5929b3e6aae94017a9af547f0a5745c1
|
28d1507f978f7bdb50013acc5aeea7f0c1d31aa2
|
refs/heads/main
| 2023-07-31T05:14:28.768325
| 2021-09-21T12:14:24
| 2021-09-21T12:14:24
| 408,806,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
ASGI config for financepeer project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'financepeer.settings')
application = get_asgi_application()
|
[
"pk07788131@gmail.com"
] |
pk07788131@gmail.com
|
25a1970c77d8d6905b534f80bede9ab6ccccbd68
|
a30bf0899947c36b63bba3032935c404b4bde16e
|
/ws.py
|
97870589f6322afd24c15f833dee4d8c22705f5e
|
[] |
no_license
|
Riyo-Aloshyas/webscraping-python
|
a592a5ed996bca28e3c4a5e5e5c47cb7464e980c
|
fcab77ae3ddba96f6d6ce604e3ee02c27bf52d3e
|
refs/heads/master
| 2022-12-31T00:23:05.578302
| 2020-09-24T22:35:01
| 2020-09-24T22:35:01
| 278,694,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,536
|
py
|
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import re
my_url = 'https://www.newegg.com/p/pl?N=100252375&ActiveSearchResult=True&SrchInDesc=pokemon'
#opens connection & grabs html page
uClient = uReq(my_url)
#offloads webpage content into variable
page_html = uClient.read()
uClient.close()
#html parsing
page_soup= soup(page_html, "html.parser")
#page_soup.h1
#page_soup.p
#page_soup.body.span
#grabs each product
containers = page_soup.findAll("div", {"class": {"item-container"}})
#gets num of products
# len(containers)
#view html of nth product
container = containers[0]
#container.find("div","item-info").div
#filename = "products.csv"
#f = open(filename, "w")
#headers = "brand, product_name, price\n"
#f.write(headers)
for container in containers:
# grabs product's company name
###### ADD IF/ELSE FOR VALIDATION
brand = container.find("div","item-info").div.a.img["title"]
title_container = container.findAll("a",{"class":"item-title"})
#grabs product name
product_name = title_container[0].text
price_text = container.findAll("li",{"class":"price-current"})[0].text
price_nums = re.findall(r"[-+]?\d*\.\d+|\d+", price_text)
#grabs product price
product_price = price_nums[0]
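# e.g. a price_text of "$12.99 –" yields price_nums ['12.99'], so
# product_price keeps the first numeric token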
print("brand: " + brand)
print("product name: " + product_name)
print("product price: " + product_price)
# f.write(brand + "," + product_name.replace(",","|") + "," + product_price + "\n")
# f.close()  # the open() above is commented out, so there is nothing to close
|
[
"noreply@github.com"
] |
Riyo-Aloshyas.noreply@github.com
|
3d970c89462f7c18de8e742bb65e55b75f2dfb72
|
a5d943cab88d1e3f65b5706d12581c2998d40247
|
/tests/test_mas.py
|
012e430c3ba978a84746014689afe780163ebaf9
|
[
"Apache-2.0"
] |
permissive
|
copdai/copdai_core
|
bf6654db7e115dd2d225ad584609fd3cb2680222
|
d493bcb8ba34b2e98799c8ab4499177192e7080a
|
refs/heads/master
| 2021-01-01T17:17:50.281482
| 2017-12-18T07:57:09
| 2017-12-18T07:57:09
| 98,038,976
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
# -*- coding: utf-8 -*-
from .context import mas
import unittest
class MASTestSuite(unittest.TestCase):
"""COPDAI core test cases."""
def setUp(self):
self.agent = mas.Agent()
def test_agent_default_state(self):
self.assertEqual(self.agent.state, mas.AgentState.UNKNOWN)
def tearDown(self):
self.agent = None
if __name__ == '__main__':
unittest.main()
|
[
"mrabbah@gmail.com"
] |
mrabbah@gmail.com
|
cb9e250d22b73d85785aec488fb18895709a4295
|
f1689b52221d1118f905b841448c2ab1fd142817
|
/school/ningxia/NUN.py
|
8421459e218c190a57a36e4dd5a24b7fb3bd6ed8
|
[] |
no_license
|
littlemoon13/jiandan_job
|
950584ca62f431f10bd10a12a33bdffecbba2453
|
2232a8fb7559cb5180130c09345af5b2398eb9fc
|
refs/heads/master
| 2020-05-25T10:34:08.891561
| 2019-04-20T15:34:49
| 2019-04-20T15:34:49
| 187,762,485
| 1
| 0
| null | 2019-05-21T04:37:56
| 2019-05-21T04:37:55
| null |
UTF-8
|
Python
| false
| false
| 961
|
py
|
import tomxin.getInfo
def getNUN():
# North Minzu University
# if __name__ == '__main__':
url = "http://jyc.nun.edu.cn/eweb/jygl/zpfw.so?modcode=jygl_scfwzpxx&subsyscode=zpfw&type=searchZprd&sysType=TPZPFW&zpxxType=new"#高校就业网的网址
html = tomxin.getInfo.get_source(url,"utf-8")
info = tomxin.getInfo.get_info(html,'z_newsl','z_pages')
title = tomxin.getInfo.get_content(info,'viewZpxx.+?>','</a>')
url = tomxin.getInfo.get_content(info,"viewZpxx.+?'","'")
i=0
for u in url[:5]:
r_city="宁夏"
r_school="北方民族大学"
r_title=title[i]
r_trait = "NUN" + u#这里要自己写提取规则
r_url = "http://jyc.nun.edu.cn/eweb/jygl/zpfw.so?modcode=jygl_scfwzpxx&subsyscode=zpfw&type=viewZpxx&id=" + u
i += 1
r_content = tomxin.getInfo.get_url_content(r_url, "utf-8", '<div class="z_content">', '<script type="text/javascript')
print(r_title + "\n" + r_url)
|
[
"221360@nd.com"
] |
221360@nd.com
|
641de574211cdde44de54f9fa8765f1e1ac583fd
|
5d070e9cabaef1ac4ebdc30f9f855f94b49fdb0c
|
/PyShop/settings.py
|
578ea3b4276649273d86d94046c22621de96c660
|
[] |
no_license
|
Suruchi21/PyShop-Django
|
97e770ca55026cba87a63272e487289ab03f04d0
|
4411c05cf1b1744ed08a261a3b3e5a1ef6a251c9
|
refs/heads/main
| 2023-03-03T14:22:06.521542
| 2021-02-11T09:53:48
| 2021-02-11T09:53:48
| 337,985,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,252
|
py
|
"""
Django settings for PyShop project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x+c30$m#ihu)hj6w6^aj5aws64hxwz=r4-=p&wwbh@j@)y8jce'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'products.apps.ProductsConfig',
'accounts.apps.AccountsConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'PyShop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'PyShop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'Pyshop',
'USER': 'postgres',
'PASSWORD': '1234',
'HOST': 'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"suruchicsk@gmail.com"
] |
suruchicsk@gmail.com
|
b53d5dc4e19e7d59c999ae84949a65d79c3ea6c2
|
9b4fc226a1c2ade532f4e1e7a8cd588d77b81093
|
/SVMs/square.py
|
49ba82ec02e7fb49e55c08e6624336c22cab0b06
|
[
"MIT"
] |
permissive
|
wmaciel/flowerSpotter
|
1af29e4bcb4931120ff1ad437ff3c299fa2f2482
|
1f6223f5ab2b0956ea7c576e5d8aa8b0f32281f9
|
refs/heads/master
| 2021-01-12T19:24:49.084516
| 2015-12-06T19:31:11
| 2015-12-06T19:31:11
| 46,642,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,421
|
py
|
from __future__ import division
import math
import sys, glob, os
import Image
top_folder = sys.argv[1]
side_length = 256
input_image_dir = top_folder + '/jpg'
output_image_dir = top_folder + '/square_images'
#output_square_dir = 'square_images128'
if not os.path.isdir(output_image_dir):
os.mkdir(output_image_dir)
for infile in glob.glob(input_image_dir + '/*.jpg'):
filename, ext = os.path.splitext(infile)
basename = os.path.basename(filename)
im = Image.open(infile)
[width, height] = im.size
if width < height:
ratio = height / width
new_width = side_length
new_height = int(math.ceil(ratio * new_width))
else:
ratio = width / height
new_height = side_length
new_width = int(math.ceil(ratio * new_height))
resized_im = im.resize((new_width, new_height), Image.ANTIALIAS)
if new_width != side_length:
left_bound = int((new_width - side_length) / 2)
upper_bound = 0
right_bound = int((new_width + side_length) / 2)
lower_bound = side_length
else:
left_bound = 0
upper_bound = int((new_height - side_length) / 2)
right_bound = side_length
lower_bound = int((new_height + side_length) / 2)
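# Worked example: a 300x400 input resizes to 256x342, and this else
# branch crops rows (342-256)/2 = 43 .. (342+256)/2 = 299, leaving
# a 256x256 square.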
cropped_im = resized_im.crop((left_bound, upper_bound, right_bound, lower_bound))
cropped_im.save(output_image_dir + '/' + basename + '_square.jpg', 'JPEG')
|
[
"jdsarr@gmail.com"
] |
jdsarr@gmail.com
|
b7f54e96c4d3f8ae8c832c922c307e2c760362cc
|
a128963cfbb945a45e8136e55a5f4945e09bc7e8
|
/doppio.py
|
8b9bcb4c474946bbb41248ea5a7c932201922feb
|
[] |
no_license
|
Edivad1234/helloword
|
bf90c83febb69cbafe8a793cc176a14d5312336e
|
74f71312a958d14800ff2d4b3c4d801b6863066a
|
refs/heads/master
| 2020-07-03T18:48:10.887470
| 2017-01-13T10:52:55
| 2017-01-13T10:52:55
| 74,238,229
| 0
| 0
| null | 2016-11-19T22:12:00
| 2016-11-19T22:04:47
| null |
UTF-8
|
Python
| false
| false
| 82
|
py
|
numero = int(input("Inserisci un numero: "))
print("Il doppio e':", numero + numero)
|
[
"davide.antonucci5@gmail.com"
] |
davide.antonucci5@gmail.com
|
9fe946f4c2a1bb91c966856d49f60d1bf22614bb
|
f83a0bad55662d9214729234cff97d5d20823d37
|
/setup.py
|
9875f4bb030a428dfaa1528170bd3c71b2e5c18e
|
[
"MIT"
] |
permissive
|
wriazati/CarND-LaneLines-P1
|
be137c811ddadbebea7759e3d91d96b6f582ce94
|
37d9ba53e1ba2acc28795c87cb637d32898dded6
|
refs/heads/master
| 2020-03-26T01:27:31.611829
| 2018-08-11T08:36:10
| 2018-08-11T08:36:10
| 144,368,075
| 0
| 0
|
MIT
| 2018-08-11T08:18:10
| 2018-08-11T08:18:10
| null |
UTF-8
|
Python
| false
| false
| 1,221
|
py
|
#! /usr/bin/env python
from os.path import dirname, realpath, join
from setuptools import setup, find_packages
####
# Basic metadata.
####
project_name = 'CarND-LaneLines-P1'
package_name = project_name.replace('-', '_')
repo_name = project_name
description = 'CarND-LaneLines-P1'
url = 'https://github.com/wriazati/' + repo_name
author = 'wriazati'
author_email = author + '@yahoo.com'
####
# Requirements.
####
reqs = [
# Our packages.
'ipython',
'numpy',
'matplotlib',
'scipy',
'jupyter',
'pillow',
]
extras = {
'test' : [
'pytest'
],
'dev' : [
],
}
####
# Packages and scripts.
####
entry_points = {
'console_scripts': [
'run = mnist_cnn:main',
],
}
####
# Import __version__.
####
project_dir = dirname(realpath(__file__))
####
# Install.
####
setup(
name = project_name,
version = "1.0",
author = author,
author_email = author_email,
url = url,
description = description,
zip_safe = False,
install_requires = reqs,
tests_require = extras['test'],
extras_require = extras,
entry_points = entry_points,
)
|
[
"wriazati@apple.com"
] |
wriazati@apple.com
|
6e78cd3f2fb1c812c523391d20eda99cc2c103b0
|
ab6b315e323a9d3ae49d047b4aa75a6acbc25774
|
/sbf_120/02 CAC Next 20/04 Analyse pages_next20.py
|
8c12976334e465f6b650be6bada817c074b122e0
|
[] |
no_license
|
TchidahNdiaye/Web-scraping
|
0b73ef092b1e08a0b29d6b63acb0d5e2d5b99521
|
b11ef05901087140ea1f8bf60ea17753e4047a92
|
refs/heads/master
| 2023-05-02T14:37:07.814251
| 2021-05-13T22:03:11
| 2021-05-13T22:03:11
| 297,629,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,356
|
py
|
# -*- coding: utf8 -*-
import pdb
import re
import os
import pickle
import html
import csv
L=os.listdir('pages_next20')
Base=[['firme','price','market_cap','price_to_earnings','beta','earnings_per_share','dividend','shares_outstanding','sector','employees','revenue','indices']]
for k in L:
with open('pages_next20/'+k,'r',encoding='utf8') as output:
content = output.read()
content = html.unescape(content)
pattern1 = '<h1 class="float_lang_base_1 relativeAttr"\n\tdir="ltr" itemprop="name">(.+?(?=\t</h1>))'
firme = re.findall(pattern1,content)
pattern2 = 'Clôture précédente</span><span class="float_lang_base_2 bold">(.+?(?=</span></div>))'
price = re.findall(pattern2,content)
pattern3 = 'Cap. boursière</span><span class="float_lang_base_2 bold">(.+?(?=</span></div>))'
market_cap = re.findall(pattern3,content)
pattern4 = 'PER</span><span class="float_lang_base_2 bold">(.+?(?=</span></div>))'
price_to_earnings = re.findall(pattern4,content)
pattern5 = 'Bêta</span><span class="float_lang_base_2 bold">(.+?(?=</span></div>))'
beta = re.findall(pattern5,content)
pattern6 = 'BPA</span><span class="float_lang_base_2 bold">(.+?(?=</span></div>))'
earnings_per_share = re.findall(pattern6,content)
pattern7 = 'Dividende</span><span class="float_lang_base_2 bold">(.+?(?= .{4,9}</span></div>))'
dividend = re.findall(pattern7,content)
pattern8 = 'Act. en circulation</span><span class="float_lang_base_2 bold">(.+?(?=</span></div>))'
shares_outstanding = re.findall(pattern8,content)
pattern9 = re.escape('<div>Secteur<a href="/stock-screener/?sp=country::22|sector::9|industry::a|equityType::a<eq_market_cap;1">') + '(.+?(?=</a></div>))' # escape the literal prefix: '?' and '|' are regex metacharacters
sector = re.findall(pattern9,content)
pattern10 = '<div>Employés<p class="bold">(.+?(?=</p></div>))'
employees = re.findall(pattern10,content)
pattern11 = 'Performance</span><span class="float_lang_base_2 bold">(.+?(?=</span></div>))'
revenue = re.findall(pattern11,content)
indices = "cac next 20"
if len(firme)==1 and len(price)==1 and len(market_cap)==1 and len(price_to_earnings)==1 and len(beta)==1 and len(earnings_per_share)==1 and len(dividend)==1 and len(shares_outstanding)==1 and len(sector)==1 and len(employees)==1 and len(revenue)==1:
print("Extraction réaliser avec succès ",k)
else:
if len(firme)!=1:
pattern1 = '<h1 class="float_lang_base_1 relativeAttr" n/dir="ltr" itemprop="name">(.+?(?= </h1>))'
firme = re.findall(pattern1,content)
if len(price) !=1:
pattern2 = 'Prev. Close</span><span class="float_lang_base_2 bold">(.+?(?=</span></div>))'
price = re.findall(pattern2,content)
if len(market_cap) !=1:
pattern3 ='Market Cap</span><span class="float_lang_base_2 bold">(.+?(?=</span></div>))'
market_cap = re.findall(pattern3,content)
if len(price_to_earnings) !=1:
pattern4 = 'P/E Ratio</span><span class="float_lang_base_2 bold">(.+?(?=</span></div>))'
price_to_earnings = re.findall(pattern4,content)
if len(beta)!=1:
pattern5 = 'Beta</span><span class="float_lang_base_2 bold">(.+?(?=</span></div>))'
beta = re.findall(pattern5,content)
if len(earnings_per_share) !=1:
pattern6 = 'EPS</span><span class="float_lang_base_2 bold">(.+?(?=</span></div>))'
earnings_per_share = re.findall(pattern6,content)
if len(dividend) !=1:
dividend = "NaN"
if len(shares_outstanding) !=1:
pattern8 = 'Shares Outstanding</span><span class="float_lang_base_2 bold">(.+?(?=</span></div>))'
shares_outstanding = re.findall(pattern8,content)
if len(employees) !=1:
pattern10 ='Employees<p class="bold">(.+?(?=</p></div>))'
employees = re.findall(pattern10,content)
if len(revenue) !=1:
pattern11 = 'Revenue</span><span class="float_lang_base_2 bold">(.+?(?=</span></div>))'
revenue = re.findall(pattern11,content)
firme = firme[0]
price = price[0]
market_cap = market_cap[0]
price_to_earnings = price_to_earnings[0]
beta = beta[0]
earnings_per_share = earnings_per_share[0]
dividend = dividend[0]
shares_outstanding = shares_outstanding[0]
sector = sector[0]
employees = employees[0]
revenue = revenue[0]
# indices is already the plain string "cac next 20"; no indexing needed
Result = [firme,price,market_cap,price_to_earnings,beta,earnings_per_share,dividend,shares_outstanding,sector,employees,revenue,indices]
Base.append(Result)
with open("cac_next20_base.csv", "w",encoding='utf8') as outfile:
data=csv.writer(outfile,delimiter=',',lineterminator='\n')
for b in Base:
data.writerow(b)
|
[
"noreply@github.com"
] |
TchidahNdiaye.noreply@github.com
|
5118a73f1e1c300da7ba3f6c35bfd3e1b8208a71
|
e0629a85794027f39c20ab4d1d10e6cdacc0f438
|
/Problem Solving/Data Structures/Tree- Postorder Traversal.py
|
1117deec3b28988c63bde044dbfa24c96a91e166
|
[] |
no_license
|
harshitpoddar09/HackerRank-Solutions
|
ffa675179a732416f729f61015ba5292af9fa052
|
62dd5a059247833ff455efa905c17ef654c2abe3
|
refs/heads/main
| 2023-08-13T20:53:27.478883
| 2021-09-27T06:43:31
| 2021-09-27T06:43:31
| 319,535,195
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,475
|
py
|
class Node:
def __init__(self, info):
self.info = info
self.left = None
self.right = None
self.level = None
def __str__(self):
return str(self.info)
class BinarySearchTree:
def __init__(self):
self.root = None
def create(self, val):
if self.root == None:
self.root = Node(val)
else:
current = self.root
while True:
if val < current.info:
if current.left:
current = current.left
else:
current.left = Node(val)
break
elif val > current.info:
if current.right:
current = current.right
else:
current.right = Node(val)
break
else:
break
"""
Node is defined as
self.left (the left child of the node)
self.right (the right child of the node)
self.info (the value of the node)
"""
def postOrder(root):
#Write your code here
if not root:
return
postOrder(root.left)
postOrder(root.right)
print(root.info,end=' ')
tree = BinarySearchTree()
t = int(input())
arr = list(map(int, input().split()))
for i in range(t):
tree.create(arr[i])
postOrder(tree.root)
|
[
"noreply@github.com"
] |
harshitpoddar09.noreply@github.com
|
78273eeae20753e4cc0f9378fc1446e3f44bdc04
|
89aacee070fba2cc9e970a9a516c0c67b2605642
|
/SDN/RYU_ODL_LLDP/multi_ctrl_links_wsgi.py
|
5765d0368dd2d7ef6d6da265319be92430007b83
|
[] |
no_license
|
shheychen/Lab_code
|
abd9a5b619e4df8b036afc1a5096640164af46ea
|
004a816086126172f2dcf3bc53a249e6f95f14d3
|
refs/heads/master
| 2021-01-22T21:17:42.532348
| 2017-07-25T14:26:39
| 2017-07-25T14:26:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,294
|
py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# 2016.08.04 kshuang
from ryu.base import app_manager
from ryu.ofproto import ofproto_v1_3
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller import ofp_event
from ryu.ofproto import ofproto_v1_3_parser
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.lib.packet import lldp
from ryu.lib.packet import packet
from ryu import utils
from ryu.app.wsgi import ControllerBase, WSGIApplication, route
from ryu.lib import dpid as dpid_lib
from webob import Response
import json
import socket
import urllib2
import base64
myryu_instance_name = 'MyRyu'
rlinkurl = '/ryulink/{dpid}'
olinkurl= '/odllink'
clinkurl = '/crosslink/{dpid}'
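# Example queries against the REST routes below (8080 is ryu's default
# wsgi port; adjust host/port to your setup):
# curl http://<ryu-host>:8080/ryulink/0 -> all RYU-discovered links (dpid 0 = no filter)
# curl http://<ryu-host>:8080/crosslink/3 -> ODL-to-RYU cross links for ODL dpid 3
# curl http://<ryu-host>:8080/odllink -> links pulled from the ODL topology API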
class MyRyu(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
ryu_links = {}
ODL2RYU={}
_CONTEXTS = {'wsgi': WSGIApplication}
# register wsgi class
def __init__(self, *args, **kwargs):
super(MyRyu, self).__init__(*args, **kwargs)
wsgi = kwargs['wsgi']
wsgi.register(MyRyuAPI, {myryu_instance_name: self})
# get event when new switch connect to ryu and do openflow version negotiated
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
self.send_port_stats_request(datapath)
def send_port_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPPortDescStatsRequest(datapath, 0, ofp.OFPP_ANY)
datapath.send_msg(req)
@set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER)
def port_stats_reply_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# LLDP packet to controller
match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_LLDP)
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER)]
self.add_flow(datapath, 65535, match, actions)
for stat in ev.msg.body:
if stat.port_no < ofproto.OFPP_MAX:
self.send_lldp_packet(datapath, stat.port_no, stat.hw_addr)
def add_flow(self, datapath, priority, match, actions):
ofp = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]
mod = parser.OFPFlowMod(datapath=datapath, priority=priority, command=ofp.OFPFC_ADD, match=match, instructions=inst)
datapath.send_msg(mod)
def send_lldp_packet(self, datapath, port_no, hw_addr):
ofp = datapath.ofproto
pkt = packet.Packet()
pkt.add_protocol(ethernet.ethernet(ethertype=ether_types.ETH_TYPE_LLDP, src=hw_addr, dst=lldp.LLDP_MAC_NEAREST_BRIDGE))
tlv_chassis_id = lldp.ChassisID(subtype=lldp.ChassisID.SUB_LOCALLY_ASSIGNED, chassis_id=str(datapath.id))
tlv_port_id = lldp.PortID(subtype=lldp.PortID.SUB_LOCALLY_ASSIGNED, port_id=str(port_no))
tlv_ttl = lldp.TTL(ttl=10)
tlv_end = lldp.End()
tlvs = (tlv_chassis_id, tlv_port_id, tlv_ttl, tlv_end)
pkt.add_protocol(lldp.lldp(tlvs))
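# Resulting frame layout: Ethernet (LLDP ethertype, nearest-bridge dst)
# | ChassisID = dpid | PortID = port_no | TTL = 10s | End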
pkt.serialize()
data = pkt.data
parser = datapath.ofproto_parser
actions = [parser.OFPActionOutput(port=port_no)]
out = parser.OFPPacketOut(datapath=datapath, buffer_id=ofp.OFP_NO_BUFFER, in_port=ofp.OFPP_CONTROLLER, actions=actions, data=data)
datapath.send_msg(out)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
port = msg.match['in_port']
pkt = packet.Packet(data=msg.data)
pkt_ethernet = pkt.get_protocol(ethernet.ethernet)
if not pkt_ethernet:
return
pkt_lldp = pkt.get_protocol(lldp.lldp)
if pkt_lldp:
self.handle_lldp(datapath, port, pkt_ethernet, pkt_lldp)
def handle_lldp(self, datapath, port, pkt_ethernet, pkt_lldp):
# Our LLDP Packet
if(pkt_lldp.tlvs[0].subtype == lldp.ChassisID.SUB_LOCALLY_ASSIGNED):
ryu_src_dpid = int(pkt_lldp.tlvs[0].chassis_id)
ryu_src_port = int(pkt_lldp.tlvs[1].port_id)
ryu_dst_dpid = int(datapath.id)
ryu_dst_port = int(port)
self.ryu_links["openflow:"+str(ryu_src_dpid)+":"+str(ryu_src_port)]={
"src":{
"dpid":ryu_src_dpid,
"port_no":ryu_src_port
},
"dst":{
"dpid":ryu_dst_dpid,
"port_no":ryu_dst_port
}
}
print self.ryu_links
# OpenDayLight LLDP Packet
elif(pkt_lldp.tlvs[0].subtype == lldp.ChassisID.SUB_MAC_ADDRESS):
odl_dpid = pkt_lldp.tlvs[3].tlv_info
odl_port = pkt_lldp.tlvs[1].port_id
ryu_dpid = int(datapath.id)
ryu_port = int(port)
self.ODL2RYU[odl_dpid+":"+odl_port]={
"src":{
"source-node":odl_dpid,
"source-tp":odl_dpid+":"+odl_port
},
"dst":{
"dpid":ryu_dpid,
"port_no":ryu_port
}
}
print self.ODL2RYU
@set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
def error_msg_handler(self, ev):
msg = ev.msg
self.logger.debug('OFPErrorMsg received: type=0x%02x code=0x%02x message=%s', msg.type, msg.code, utils.hex_array(msg.data))
class MyRyuAPI(ControllerBase):
def __init__(self, req, link, data, **config):
super(MyRyuAPI, self).__init__(req, link, data, **config)
self.myryu_instance = data[myryu_instance_name]
@route('ryu_link', rlinkurl, methods=['GET'])
def list_ryu_connect(self, req, **kwargs):
myryu = self.myryu_instance
dpid = int(kwargs['dpid'])
if dpid == 0:
links = myryu.ryu_links.items()
body = json.dumps(links, indent=4, sort_keys=True) + '\n'
return Response(content_type='application/json', body=body)
re_key="openflow:"+str(dpid)+":"
links=[]
for key, value in myryu.ryu_links.iteritems():
if key.startswith(re_key):
links.append(value)
body = json.dumps(links, indent=4, sort_keys=True) + '\n'
return Response(content_type='application/json', body=body)
@route('cross_link', clinkurl, methods=['GET'])
def list_cross_connect(self, req, **kwargs):
myryu = self.myryu_instance
dpid = int(kwargs['dpid'])
if dpid == 0:
links = myryu.ODL2RYU.items()
body = json.dumps(links, indent=4, sort_keys=True) + '\n'
return Response(content_type='application/json', body=body)
re_key="openflow:"+str(dpid)+":"
links=[]
for key, value in myryu.ODL2RYU.iteritems():
if key.startswith(re_key):
links.append(value)
body = json.dumps(links, indent=4, sort_keys=True) + '\n'
return Response(content_type='application/json', body=body)
@route('odl_link', olinkurl, methods=['GET'])
def list_odl_connect(self, req, **kwargs):
url = "http://192.168.89.129:8181/restconf/operational/network-topology:network-topology"
user = "admin"
passwd = "admin"
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, url, user, passwd)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
pagehandle = urllib2.urlopen(url)
response = json.load(pagehandle)
links = response['network-topology']['topology'][0]['link']
body = json.dumps(links, indent=4, sort_keys=True) + '\n'
return Response(content_type='application/json', body=body)
|
[
"st505102000@gmail.com"
] |
st505102000@gmail.com
|
3846fbfeb7667930d321e066356737f76513a6e0
|
3e19b88639e12dcc49b24b030f9788eb77b22cf5
|
/event/tests/test_models.py
|
551a297b68543747f910eda1854d81bc2fecce61
|
[] |
no_license
|
MarcinP94/django-google-calendar
|
ffdb52b80bdf17ff8c61efd10f48863ead2bdf69
|
b73fe8f86f5b9e5abdf269470d7c4d77dc46a08c
|
refs/heads/master
| 2021-12-26T05:12:59.432989
| 2021-09-27T06:08:26
| 2021-09-27T06:08:26
| 237,622,056
| 4
| 1
| null | 2021-09-27T06:09:19
| 2020-02-01T13:53:52
|
Python
|
UTF-8
|
Python
| false
| false
| 357
|
py
|
from django.test import TestCase
from django.urls import reverse, resolve
from event.models import Post
from datetime import datetime, timedelta
class TestModels(TestCase):
def setUp(self):
pass
def test_title_string_representation(self):
event = Post(title="My entry title")
self.assertEqual(str(event), event.title)
|
[
"marcin.podolak1994@gmail.com"
] |
marcin.podolak1994@gmail.com
|
a94ef4c867d0db3ad0814d14df2d8ff57e9e987f
|
fcb3e07d953b29848b1844b684c502c297c8d4cb
|
/Stone-Paper-Scissors.py
|
b151e53a2b182d808c9fcdd385d6f173036edd4a
|
[] |
no_license
|
Arnav-17/Random-Problems
|
02617fde9ab52a7713cae4b9a63cbce74b3838bd
|
a6d106cf24dfa5445cbafd5c7310d26e0652edc9
|
refs/heads/main
| 2023-01-23T08:21:46.649528
| 2020-12-06T21:04:12
| 2020-12-06T21:04:12
| 319,130,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
import random
print('Welcome to Stone-Paper-Scissors Game')
b = ['St', 'Sc', 'P']
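# d tracks the player's score, e the computer's; St/Sc/P stand for stone/scissors/paper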
d = 0
e = 0
for i in range(10):
c = random.choice(b)
a = input('Enter Sc for Scissor, P for Paper, St for Stone\n')
if c == "St":
if a == 'P':
d += 1
elif a == "Sc":
e += 1
elif c == 'Sc':
if a == 'St':
d += 1
elif a == 'P':
e += 1
elif c == 'P':
if a == 'Sc':
d += 1
elif a == 'St':
e += 1
print(c)
print(f'Your Score is {d}')
print(f"Computer's Score is {e}")
if e > d:
print("Sorry, You Lost")
elif e == d:
print("Its a tie")
elif e < d:
print("Congratulations, You Won!!")
|
[
"noreply@github.com"
] |
Arnav-17.noreply@github.com
|
75ab5fbaf3a4b0722700d471b09dfcbe179e593a
|
25f8fe705786434fdc414c0315f43b097b50c652
|
/experiments/bubs/bubs/scores.py
|
927fa287c6fb1acb99afe857c0e92cfa42fab1de
|
[
"MIT"
] |
permissive
|
openstates/legacy-openstates.org
|
bc8b822eeca7d648b0bdeade70b5ba6b3b2fcf65
|
4cc573b06fa0736916122135ecffcf5a884f9a01
|
refs/heads/master
| 2021-10-09T04:09:18.709716
| 2018-12-20T23:16:22
| 2018-12-20T23:16:22
| 18,355,422
| 0
| 0
|
BSD-3-Clause
| 2018-12-20T16:21:13
| 2014-04-02T05:15:36
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 13,808
|
py
|
import sys
import json
import logging
import socket
from itertools import product, combinations
from collections import Counter, defaultdict
from operator import itemgetter
import rpy2
import networkx
import numpy as np
from pandas import DataFrame
from sunlight import response_cache, openstates
from pscl import Rollcall
from core import (
mongo, IterBills, IterLegislators,
TooFewBillsError, DictSetDefault)
response_cache.enable('mongo')
response_cache.logger.setLevel(100)
class DataQualityError(Exception):
'''Raised if calculation is aborted due to data quality issues.
'''
class ScoreCalculator(object):
'''Given a state, chamber, and term or session, calculate
the cosponsorship pagerank, effectiveness, and ideal point
scores for each legislator.
'''
def __init__(self, abbr, chamber, meta, term=None, session=None):
self.meta = meta
self.abbr = abbr
self.session = session
self.term = term
self.chamber = chamber
self.bills = IterBills(
abbr, chamber, session=session, term=term)
self.legislators = IterLegislators(abbr, chamber)
self.leg_deets = {}
def get_pagerank(self):
'''Create a co-sponsorship digraph based on the information from
the Open States API and calculate the pagerank of each legislator.
'''
ids = set()
G = networkx.DiGraph()
number_of_bills = 0
for bill in self.bills:
sponsors = bill['sponsors']
# if len(sponsors) < 2:
# continue
# Separate sponsors into primary, secondary.
primary = []
secondary = []
for sponsor in sponsors:
if sponsor['leg_id'] is None:
continue
if sponsor['type'] == 'primary':
primary.append(sponsor['leg_id'])
else:
secondary.append(sponsor['leg_id'])
ids.add(sponsor['leg_id'])
# Add them to the network.
if primary and secondary:
for primary, secondary in product(primary, secondary):
try:
G[secondary][primary]['weight'] += 1
except KeyError:
G.add_edge(secondary, primary, weight=1)
elif primary:
for edge in combinations(primary, r=2):
for p1, p2 in [edge, edge[::-1]]:
try:
G[p1][p2]['weight'] += 1
except KeyError:
G.add_edge(p1, p2, weight=1)
if not G.nodes():
# Known offenders: CO, AR, CT, ID, and others.
# Return all ones.
return dict.fromkeys(ids, 1)
data = dict(abbr=self.abbr, chamber=self.chamber)
msg = ("Can't generate PageRank scores due to lack of secondary "
"sponsorship data: %r.")
raise DataQualityError(msg % (data,))
return networkx.pagerank_numpy(G)
def get_effectiveness(self):
'''Create an effectiveness score for each legislator relative to
all the others based on the extent to which bills by each leg'r
are passed on the chamber of origin, the other chamber, or into law.
'''
# Multipliers used below.
multipliers = dict(
passed_own=1,
passed_other=2,
signed=20)
legislators = defaultdict(Counter)
number_of_bills = 0
chamber = self.chamber
# Calculate the scores.
for bill in self.bills:
sponsors = bill['sponsors']
# Separate sponsors into primary, secondary.
primary = []
secondary = []
for sponsor in sponsors:
if sponsor['type'] == 'primary':
primary.append(sponsor['leg_id'])
else:
secondary.append(sponsor['leg_id'])
for sponsor in primary:
if chamber == 'upper':
other_chamber = 'lower'
else:
other_chamber = 'upper'
if bill['action_dates']['passed_%s' % self.chamber]:
legislators[sponsor]['passed_own'] += 1
if bill['action_dates']['passed_%s' % other_chamber]:
legislators[sponsor]['passed_other'] += 1
if bill['action_dates']['signed']:
legislators[sponsor]['signed'] += 1
# Compute the scores.
vals = []
detail = self.legislators.detail
for leg_id, counter in legislators.items():
if leg_id is None:
continue
with DictSetDefault(self.leg_deets, leg_id, detail(leg_id)) as deets:
for key, multiplier in multipliers.items():
score = counter[key] * multiplier
vals.append(score)
deets['eff_stats'] = dict(counter, score=score)
if not vals:
raise DataQualityError('No effectiveness data available.')
scoresdict = {}
for key in multipliers:
scoresdict[key] = [d[key] for d in legislators.values()]
percentiles = defaultdict(dict)
for key, scores in scoresdict.items():
for n in range(1, 101):
percentiles[key][n] = np.percentile(scores, n)
# Normalize the scores.
vals = np.array(map(float, vals))
normed = (vals / sum(vals) * 250)
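# e.g. raw scores [10, 30, 60] normalize to [25.0, 75.0, 150.0],
# so normalized scores always sum to 250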
normed = dict(zip(vals, normed))
newvals = {}
for leg_id in legislators:
if leg_id is None:
continue
leg_deets = self.leg_deets[leg_id]
with DictSetDefault(leg_deets, 'eff_stats', {}) as eff_stats:
for key, percentiledict in percentiles.items():
score = eff_stats.get(key, 0)
eff_stats[key] = score
percentile = 0
if set(percentiledict.values()) == set([0.0]):
percentile = 0
else:
for n, val in percentiledict.items():
if score < val:
break
else:
percentile = n
eff_stats[key + '_percentile'] = percentile
newvals[leg_id] = normed.get(eff_stats.get('score', 0))
return newvals
def get_idealpoints(self):
'''Get ideal point for each legislator.
'''
YES = float(1)
NO = float(2)
OTHER = float(3)
votedata = defaultdict(dict)
vote_vals = dict(yes_votes=YES, no_votes=NO, other_votes=OTHER)
leg_ids = set()
chamber_ids = [leg['id'] for leg in self.legislators.metadata]
vote_keys = 'yes_votes, no_votes, other_votes'.split(', ')
for vote in self.bills.itervotes():
for k in vote_keys:
for voter in vote[k]:
leg_id = voter['leg_id']
if leg_id is None:
continue
if leg_id not in chamber_ids:
continue
leg_ids.add(leg_id)
votedata[vote['id']][leg_id] = vote_vals[k]
# Convert the dict into a pandas DataFrame.
dataframe = DataFrame(votedata, index=leg_ids)
dataframe.fillna(value=9)
# Create a rollcall object similar to pscl's.
rollcall = Rollcall.from_dataframe(dataframe,
yea=[YES],
nay=[NO],
missing=[OTHER],
not_in_legis=0.0,
legis_names=tuple(leg_ids))
# Here they are.
xbar = rollcall.ideal().xbar
# Now guess the polarity.
polarities = defaultdict(list)
parties = {}
polarity_parties = defaultdict(Counter)
for legislator in self.legislators:
leg_id = legislator['leg_id']
if leg_id not in xbar:
continue
parties[leg_id] = legislator.get('party', 'o')
sign = 0 < xbar[leg_id]
polarities[sign].append(leg_id)
for polarity, leg_ids in polarities.items():
for leg_id in leg_ids:
party = parties[leg_id]
letter = party.lower()[0]
if letter not in 'rd':
letter = 'o'
polarity_parties[polarity][letter] += 1
# If the parties are clustered on distinct sides use that,
# else on the side where most are clustered, assign that
# side to the most frequently occurring party.
polarity_results = {}
for polarity, partydict in polarity_parties.items():
most_frequent = max(partydict, key=partydict.get)
polarity_results[polarity] = most_frequent
# If the polarity appears to be backwards, reverse it.
if polarity_results[True] != 'r':
xbar = {leg_id: -n for (leg_id, n) in xbar.items()}
return xbar
def get_scores(self):
'''Helper function for ScoreCalculator monster.
'''
logging.info('Starting %r' % ([self.abbr, self.chamber, self.term],))
logging.info('Starting pagerank calculation...')
pageranks = self.get_pagerank()
logging.info('...done')
logging.info('Starting effectiveness calculation...')
effectiveness = self.get_effectiveness()
logging.info('...done')
logging.info('Starting ideal point calculation...')
idealpoints = self.get_idealpoints()
logging.info('...done')
return dict(
effectiveness=effectiveness,
pageranks=pageranks,
idealpoints=idealpoints)
def import_scores(self, meta):
'''Write the scores into mongo.
'''
keep_keys = (
'first_name', 'last_name', 'party', 'eff_stats',
'photo_url', 'district', 'full_name', 'id')
def party_letter(party):
parties = 'rd'
letter = party.lower()[0]
if letter in parties:
return letter
else:
return 'o'
scores = self.get_scores()
# Get a set of all ids.
ids = set(scores['idealpoints'].keys())
ids = filter(None, ids)
points = []
leg_deets = self.leg_deets
for leg_id in ids:
legislator = self.leg_deets.get(leg_id)
if legislator is None:
legislator = self.legislators.detail(leg_id)
party = party_letter(legislator.get('party', 'o'))
logging.debug('Party is %r' % party)
leg_keys = (
'first_name', 'last_name', 'district',
'photo_url', 'full_name', 'id', 'eff_stats')
for key in tuple(legislator):
if key not in leg_keys:
legislator.pop(key)
# Calculate the point data.
point = dict(
x=scores['idealpoints'][leg_id],
# If no effectiveness score, s/he got no bills passed.
y=scores['effectiveness'].get(leg_id, 0),
# If no PR score, s/he had no cosponsorships.
size=scores['pageranks'].get(leg_id, 0),
party=party,
legislator=legislator,
)
points.append(point)
report = dict(
name=self.meta['name'],
term=self.term,
term_name='%s Term' % self.term,
chamber_name=self.meta['chambers'][self.chamber]['name'],
abbr=self.abbr,
chamber=self.chamber,
points=points)
mongo.reports.save(report)
return report
def import_all(*abbrs):
for state in openstates.all_metadata():
if abbrs and state['abbreviation'] not in abbrs:
continue
abbr = state['abbreviation']
# if abbr in ('co', 'ar', 'ct', 'al', 'dc', 'id'):
# continue
meta = openstates.state_metadata(abbr)
for chamber in meta['chambers']:
latest_term = sorted(meta['terms'], key=itemgetter('start_year'))
term = latest_term.pop()
spec = dict(
term=term['name'],
abbr=abbr,
chamber=chamber)
if mongo.reports.find_one(spec):
logging.debug('Skipping %r' % spec)
continue
try:
calc = ScoreCalculator(abbr, chamber, meta, term=term['name'])
report = calc.import_scores(meta)
except DataQualityError as exc:
logging.exception(exc)
logging.error('No party data: skipping %r' % ([abbr, chamber, term],))
# import pdb; pdb.set_trace()
except TooFewBillsError as exc:
logging.exception(exc)
logging.error('Too few bills found: skipping %r' % ([abbr, chamber, term],))
# import pdb; pdb.set_trace()
except rpy2.rinterface.RRuntimeError as exc:
logging.exception(exc)
logging.error('R error: skipping %r' % ([abbr, chamber, term],))
# import pdb; pdb.set_trace()
if __name__ == '__main__':
# import_scores(*sys.argv[1:])
# import sunlight.services.openstates.service
# service_url = 'http://localhost:8000/api/v1'
logging.basicConfig(level=logging.ERROR)
socket.setdefaulttimeout(5)
#mongo.reports.drop()
# import sys.argv
import_all()
|
[
"twneale@gmail.com"
] |
twneale@gmail.com
|
4d341628c833a54549f509112ce07e540e9a4ce5
|
3c89602d36c7bc11a24537056dcd2ed1aaa40479
|
/build/cryptography/tests/hazmat/backends/test_multibackend.py
|
81a64ce0e075d266b6da30772422bd6ea92d5166
|
[
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
link39/205-pi
|
5a551c83b6f833bba80095e395a2177652f8f5d8
|
5f51d191ac472a76c76e7b34a7e29f5d596ef52b
|
refs/heads/master
| 2021-01-01T17:21:16.867641
| 2016-01-11T19:30:51
| 2016-01-11T19:30:51
| 28,787,160
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,826
|
py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.exceptions import (
UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import (
CMACBackend, CipherBackend, DERSerializationBackend, DSABackend,
EllipticCurveBackend, HMACBackend, HashBackend, PBKDF2HMACBackend,
PEMSerializationBackend, RSABackend, X509Backend
)
from cryptography.hazmat.backends.multibackend import MultiBackend
from cryptography.hazmat.primitives import cmac, hashes, hmac
from cryptography.hazmat.primitives.asymmetric import ec, padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from ...utils import raises_unsupported_algorithm
@utils.register_interface(CipherBackend)
class DummyCipherBackend(object):
def __init__(self, supported_ciphers):
self._ciphers = supported_ciphers
def cipher_supported(self, cipher, mode):
return (type(cipher), type(mode)) in self._ciphers
def create_symmetric_encryption_ctx(self, cipher, mode):
if not self.cipher_supported(cipher, mode):
raise UnsupportedAlgorithm("", _Reasons.UNSUPPORTED_CIPHER)
def create_symmetric_decryption_ctx(self, cipher, mode):
if not self.cipher_supported(cipher, mode):
raise UnsupportedAlgorithm("", _Reasons.UNSUPPORTED_CIPHER)
@utils.register_interface(HashBackend)
class DummyHashBackend(object):
def __init__(self, supported_algorithms):
self._algorithms = supported_algorithms
def hash_supported(self, algorithm):
return type(algorithm) in self._algorithms
def create_hash_ctx(self, algorithm):
if not self.hash_supported(algorithm):
raise UnsupportedAlgorithm("", _Reasons.UNSUPPORTED_HASH)
@utils.register_interface(HMACBackend)
class DummyHMACBackend(object):
def __init__(self, supported_algorithms):
self._algorithms = supported_algorithms
def hmac_supported(self, algorithm):
return type(algorithm) in self._algorithms
def create_hmac_ctx(self, key, algorithm):
if not self.hmac_supported(algorithm):
raise UnsupportedAlgorithm("", _Reasons.UNSUPPORTED_HASH)
@utils.register_interface(PBKDF2HMACBackend)
class DummyPBKDF2HMACBackend(object):
def __init__(self, supported_algorithms):
self._algorithms = supported_algorithms
def pbkdf2_hmac_supported(self, algorithm):
return type(algorithm) in self._algorithms
def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations,
key_material):
if not self.pbkdf2_hmac_supported(algorithm):
raise UnsupportedAlgorithm("", _Reasons.UNSUPPORTED_HASH)
@utils.register_interface(RSABackend)
class DummyRSABackend(object):
def generate_rsa_private_key(self, public_exponent, key_size):
pass
def rsa_padding_supported(self, padding):
pass
def generate_rsa_parameters_supported(self, public_exponent, key_size):
pass
def load_rsa_private_numbers(self, numbers):
pass
def load_rsa_public_numbers(self, numbers):
pass
@utils.register_interface(DSABackend)
class DummyDSABackend(object):
def generate_dsa_parameters(self, key_size):
pass
def generate_dsa_private_key(self, parameters):
pass
def generate_dsa_private_key_and_parameters(self, key_size):
pass
def dsa_hash_supported(self, algorithm):
pass
def dsa_parameters_supported(self, p, q, g):
pass
def load_dsa_private_numbers(self, numbers):
pass
def load_dsa_public_numbers(self, numbers):
pass
def load_dsa_parameter_numbers(self, numbers):
pass
@utils.register_interface(CMACBackend)
class DummyCMACBackend(object):
def __init__(self, supported_algorithms):
self._algorithms = supported_algorithms
def cmac_algorithm_supported(self, algorithm):
return type(algorithm) in self._algorithms
def create_cmac_ctx(self, algorithm):
if not self.cmac_algorithm_supported(algorithm):
raise UnsupportedAlgorithm("", _Reasons.UNSUPPORTED_CIPHER)
@utils.register_interface(EllipticCurveBackend)
class DummyEllipticCurveBackend(object):
def __init__(self, supported_curves):
self._curves = supported_curves
def elliptic_curve_supported(self, curve):
return any(
isinstance(curve, curve_type)
for curve_type in self._curves
)
def elliptic_curve_signature_algorithm_supported(
self, signature_algorithm, curve
):
return (
isinstance(signature_algorithm, ec.ECDSA) and
self.elliptic_curve_supported(curve)
)
def generate_elliptic_curve_private_key(self, curve):
if not self.elliptic_curve_supported(curve):
raise UnsupportedAlgorithm(_Reasons.UNSUPPORTED_ELLIPTIC_CURVE)
def load_elliptic_curve_private_numbers(self, numbers):
if not self.elliptic_curve_supported(numbers.public_numbers.curve):
raise UnsupportedAlgorithm(_Reasons.UNSUPPORTED_ELLIPTIC_CURVE)
def load_elliptic_curve_public_numbers(self, numbers):
if not self.elliptic_curve_supported(numbers.curve):
raise UnsupportedAlgorithm(_Reasons.UNSUPPORTED_ELLIPTIC_CURVE)
def elliptic_curve_exchange_algorithm_supported(self, algorithm, curve):
return (
isinstance(algorithm, ec.ECDH) and
self.elliptic_curve_supported(curve)
)
@utils.register_interface(PEMSerializationBackend)
class DummyPEMSerializationBackend(object):
def load_pem_private_key(self, data, password):
pass
def load_pem_public_key(self, data):
pass
@utils.register_interface(DERSerializationBackend)
class DummyDERSerializationBackend(object):
def load_der_private_key(self, data, password):
pass
def load_der_public_key(self, data):
pass
@utils.register_interface(X509Backend)
class DummyX509Backend(object):
def load_pem_x509_certificate(self, data):
pass
def load_der_x509_certificate(self, data):
pass
def load_pem_x509_crl(self, data):
pass
def load_der_x509_crl(self, data):
pass
def load_pem_x509_csr(self, data):
pass
def load_der_x509_csr(self, data):
pass
def create_x509_csr(self, builder, private_key, algorithm):
pass
def create_x509_certificate(self, builder, private_key, algorithm):
pass
class TestMultiBackend(object):
def test_ciphers(self):
backend = MultiBackend([
DummyHashBackend([]),
DummyCipherBackend([
(algorithms.AES, modes.CBC),
])
])
assert backend.cipher_supported(
algorithms.AES(b"\x00" * 16), modes.CBC(b"\x00" * 16)
)
assert not backend.cipher_supported(
algorithms.TripleDES(b"\x00" * 16), modes.CBC(b"\x00" * 16)
)
cipher = Cipher(
algorithms.AES(b"\x00" * 16),
modes.CBC(b"\x00" * 16),
backend=backend
)
cipher.encryptor()
cipher.decryptor()
cipher = Cipher(
algorithms.Camellia(b"\x00" * 16),
modes.CBC(b"\x00" * 16),
backend=backend
)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
cipher.encryptor()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
cipher.decryptor()
def test_hashes(self):
backend = MultiBackend([
DummyHashBackend([hashes.MD5])
])
assert backend.hash_supported(hashes.MD5())
assert not backend.hash_supported(hashes.SHA256())
hashes.Hash(hashes.MD5(), backend=backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
hashes.Hash(hashes.SHA1(), backend=backend)
def test_hmac(self):
backend = MultiBackend([
DummyHMACBackend([hashes.MD5])
])
assert backend.hmac_supported(hashes.MD5())
assert not backend.hmac_supported(hashes.SHA256())
hmac.HMAC(b"", hashes.MD5(), backend=backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
hmac.HMAC(b"", hashes.SHA1(), backend=backend)
def test_pbkdf2(self):
backend = MultiBackend([
DummyPBKDF2HMACBackend([hashes.MD5])
])
assert backend.pbkdf2_hmac_supported(hashes.MD5())
backend.derive_pbkdf2_hmac(hashes.MD5(), 10, b"", 10, b"")
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
backend.derive_pbkdf2_hmac(hashes.SHA1(), 10, b"", 10, b"")
def test_rsa(self):
backend = MultiBackend([
DummyRSABackend()
])
backend.generate_rsa_private_key(
key_size=1024, public_exponent=65537
)
backend.rsa_padding_supported(padding.PKCS1v15())
backend.generate_rsa_parameters_supported(65537, 1024)
backend.load_rsa_private_numbers("private_numbers")
backend.load_rsa_public_numbers("public_numbers")
backend = MultiBackend([])
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
backend.generate_rsa_private_key(key_size=1024, public_exponent=3)
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
backend.rsa_padding_supported(padding.PKCS1v15())
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
backend.generate_rsa_parameters_supported(65537, 1024)
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
backend.load_rsa_private_numbers("private_numbers")
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
backend.load_rsa_public_numbers("public_numbers")
def test_dsa(self):
backend = MultiBackend([
DummyDSABackend()
])
backend.generate_dsa_parameters(key_size=1024)
parameters = object()
backend.generate_dsa_private_key(parameters)
backend.generate_dsa_private_key_and_parameters(key_size=1024)
backend.dsa_hash_supported(hashes.SHA1())
backend.dsa_parameters_supported(1, 2, 3)
backend.load_dsa_private_numbers("numbers")
backend.load_dsa_public_numbers("numbers")
backend.load_dsa_parameter_numbers("numbers")
backend = MultiBackend([])
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
backend.generate_dsa_parameters(key_size=1024)
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
backend.generate_dsa_private_key(parameters)
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
backend.generate_dsa_private_key_and_parameters(key_size=1024)
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
backend.dsa_hash_supported(hashes.SHA1())
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
backend.dsa_parameters_supported('p', 'q', 'g')
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
backend.load_dsa_private_numbers("numbers")
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
backend.load_dsa_public_numbers("numbers")
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
backend.load_dsa_parameter_numbers("numbers")
def test_cmac(self):
backend = MultiBackend([
DummyCMACBackend([algorithms.AES])
])
fake_key = b"\x00" * 16
assert backend.cmac_algorithm_supported(algorithms.AES(fake_key))
assert not backend.cmac_algorithm_supported(
algorithms.TripleDES(fake_key)
)
cmac.CMAC(algorithms.AES(fake_key), backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
cmac.CMAC(algorithms.TripleDES(fake_key), backend)
def test_elliptic_curve(self):
backend = MultiBackend([
DummyEllipticCurveBackend([
ec.SECT283K1
])
])
assert backend.elliptic_curve_supported(ec.SECT283K1()) is True
assert backend.elliptic_curve_signature_algorithm_supported(
ec.ECDSA(hashes.SHA256()),
ec.SECT283K1()
) is True
backend.generate_elliptic_curve_private_key(ec.SECT283K1())
backend.load_elliptic_curve_private_numbers(
ec.EllipticCurvePrivateNumbers(
1,
ec.EllipticCurvePublicNumbers(
2,
3,
ec.SECT283K1()
)
)
)
backend.load_elliptic_curve_public_numbers(
ec.EllipticCurvePublicNumbers(
2,
3,
ec.SECT283K1()
)
)
assert backend.elliptic_curve_supported(ec.SECT163K1()) is False
assert backend.elliptic_curve_signature_algorithm_supported(
ec.ECDSA(hashes.SHA256()),
ec.SECT163K1()
) is False
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_ELLIPTIC_CURVE):
backend.generate_elliptic_curve_private_key(ec.SECT163K1())
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_ELLIPTIC_CURVE):
backend.load_elliptic_curve_private_numbers(
ec.EllipticCurvePrivateNumbers(
1,
ec.EllipticCurvePublicNumbers(
2,
3,
ec.SECT163K1()
)
)
)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_ELLIPTIC_CURVE):
backend.load_elliptic_curve_public_numbers(
ec.EllipticCurvePublicNumbers(
2,
3,
ec.SECT163K1()
)
)
assert backend.elliptic_curve_exchange_algorithm_supported(
ec.ECDH(), ec.SECT283K1()
)
backend2 = MultiBackend([DummyEllipticCurveBackend([])])
assert not backend2.elliptic_curve_exchange_algorithm_supported(
ec.ECDH(), ec.SECT163K1()
)
def test_pem_serialization_backend(self):
backend = MultiBackend([DummyPEMSerializationBackend()])
backend.load_pem_private_key(b"keydata", None)
backend.load_pem_public_key(b"keydata")
backend = MultiBackend([])
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_SERIALIZATION):
backend.load_pem_private_key(b"keydata", None)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_SERIALIZATION):
backend.load_pem_public_key(b"keydata")
def test_der_serialization_backend(self):
backend = MultiBackend([DummyDERSerializationBackend()])
backend.load_der_private_key(b"keydata", None)
backend.load_der_public_key(b"keydata")
backend = MultiBackend([])
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_SERIALIZATION):
backend.load_der_private_key(b"keydata", None)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_SERIALIZATION):
backend.load_der_public_key(b"keydata")
def test_x509_backend(self):
backend = MultiBackend([DummyX509Backend()])
backend.load_pem_x509_certificate(b"certdata")
backend.load_der_x509_certificate(b"certdata")
backend.load_pem_x509_crl(b"crldata")
backend.load_der_x509_crl(b"crldata")
backend.load_pem_x509_csr(b"reqdata")
backend.load_der_x509_csr(b"reqdata")
backend.create_x509_csr(object(), b"privatekey", hashes.SHA1())
backend.create_x509_certificate(object(), b"privatekey", hashes.SHA1())
backend = MultiBackend([])
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_X509):
backend.load_pem_x509_certificate(b"certdata")
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_X509):
backend.load_der_x509_certificate(b"certdata")
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_X509):
backend.load_pem_x509_crl(b"crldata")
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_X509):
backend.load_der_x509_crl(b"crldata")
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_X509):
backend.load_pem_x509_csr(b"reqdata")
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_X509):
backend.load_der_x509_csr(b"reqdata")
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_X509):
backend.create_x509_csr(object(), b"privatekey", hashes.SHA1())
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_X509):
backend.create_x509_certificate(
object(), b"privatekey", hashes.SHA1()
)
|
[
"root@raspberrypi.(none)"
] |
root@raspberrypi.(none)
|
dd83f7a24c846e9ba8608e182ee79032934c4025
|
2dd4cfdc8395b2bb840d91b2dd0050443635da86
|
/105-construct-binary-tree-from-preorder-and-inorder-traversal/ken/105.py
|
f0e39691b4b72c12219695a8432d499a1b13e76b
|
[] |
no_license
|
dennis2030/leetcodeStudyGroup
|
b10bde7e79cd86d6eb46409d0a3ba82d963de657
|
fbd42d8c0cc142aa56531b4fe127bf4bc2996abd
|
refs/heads/master
| 2021-01-14T08:10:55.432693
| 2018-05-18T15:55:18
| 2018-05-18T15:55:18
| 81,933,655
| 6
| 2
| null | 2017-04-02T13:43:00
| 2017-02-14T10:26:24
|
Python
|
UTF-8
|
Python
| false
| false
| 649
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def buildTree(self, preorder, inorder):
"""
:type preorder: List[int]
:type inorder: List[int]
:rtype: TreeNode
"""
if len(preorder) == 0:
return None
root = TreeNode(preorder[0])
idx = inorder.index(root.val)
root.left = self.buildTree(preorder[1:idx+1], inorder[0:idx])
root.right = self.buildTree(preorder[idx+1:], inorder[idx+1:])
return root
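# --- Usage sketch (not part of the original solution) ---
# A minimal self-check, assuming the TreeNode definition commented out above;
# the traversals are the classic LeetCode example tree.
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = Solution().buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
    print(root.val, root.left.val, root.right.val)  # -> 3 9 20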
|
[
"kenliang@synology.com"
] |
kenliang@synology.com
|
1e42208d7fd278e6169e82de05f03ad5d51d82c8
|
8f5033275b020f09026e628b07c89eea5dbff3b6
|
/crawl_modules/iter_class/base_condition_functions.py
|
f48ab7591f2697644f994c7453726dc59e84ca5b
|
[
"BSD-2-Clause"
] |
permissive
|
keonsunkim/python-easy-data-gatherer
|
6f3cbce839d56c01392674451f4bdc72d13aa2e3
|
bffce1fd6b6b5b93aa84cce2f2a4ad85a5fde434
|
refs/heads/main
| 2023-01-21T21:11:31.810770
| 2020-12-02T13:20:32
| 2020-12-02T13:20:32
| 311,241,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 41
|
py
|
# iter_class.base_condition_functions.py
|
[
"keonsunkim@gmail.com"
] |
keonsunkim@gmail.com
|
a4dfc25aacd790665eb99a9b7207ea3b03357252
|
f4ec4206d410c1179f8233ca37cd1f3dc9a1e8c0
|
/alpha.py
|
7c5c0dbb1ed4d0dadc0aab9762c8766c6b9c7123
|
[] |
no_license
|
kli512/tetris_cheating
|
a8cf2a71b887093d4098ebabc7346762119dfdbb
|
2657d85b0911978b2838a05b2a1b59bb13607917
|
refs/heads/main
| 2023-03-07T00:19:29.318391
| 2020-12-29T21:37:33
| 2020-12-29T21:37:33
| 340,754,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,530
|
py
|
from scipy.spatial import Delaunay
import numpy as np
def alpha_shape(points, alpha, only_outer=True):
"""
Compute the alpha shape (concave hull) of a set of points.
:param points: np.array of shape (n,2) points.
:param alpha: alpha value.
:param only_outer: boolean value to specify if we keep only the outer border
or also inner edges.
:return: set of (i,j) pairs representing edges of the alpha-shape. (i,j) are
the indices in the points array.
"""
assert points.shape[0] > 3, "Need at least four points"
def add_edge(edges, i, j):
"""
Add an edge between the i-th and j-th points,
if not in the list already
"""
if (i, j) in edges or (j, i) in edges:
# already added
assert (j, i) in edges, "Can't go twice over same directed edge right?"
if only_outer:
# if both neighboring triangles are in shape, it's not a boundary edge
edges.remove((j, i))
return
edges.add((i, j))
tri = Delaunay(points)
edges = set()
# Loop over triangles:
# ia, ib, ic = indices of corner points of the triangle
for ia, ib, ic in tri.vertices:
pa = points[ia]
pb = points[ib]
pc = points[ic]
# Computing radius of triangle circumcircle
# www.mathalino.com/reviewer/derivation-of-formulas/derivation-of-formula-for-radius-of-circumcircle
a = np.sqrt((pa[0] - pb[0]) ** 2 + (pa[1] - pb[1]) ** 2)
b = np.sqrt((pb[0] - pc[0]) ** 2 + (pb[1] - pc[1]) ** 2)
c = np.sqrt((pc[0] - pa[0]) ** 2 + (pc[1] - pa[1]) ** 2)
s = (a + b + c) / 2.0
area = np.sqrt(s * (s - a) * (s - b) * (s - c))
circum_r = a * b * c / (4.0 * area)
if circum_r < alpha:
add_edge(edges, ia, ib)
add_edge(edges, ib, ic)
add_edge(edges, ic, ia)
return edges
if __name__ == '__main__':
from matplotlib.pyplot import *
# Constructing the input point data
np.random.seed(0)
x = 3.0 * np.random.rand(2000)
y = 2.0 * np.random.rand(2000) - 1.0
inside = (x ** 2 + y ** 2 > 1.0) & ((x - 3) ** 2 + y ** 2 > 1.0)
points = np.vstack([x[inside], y[inside]]).T
# Computing the alpha shape
edges = alpha_shape(points, alpha=0.25, only_outer=True)
# Plotting the output
figure()
axis('equal')
plot(points[:, 0], points[:, 1], '.')
for i, j in edges:
plot(points[[i, j], 0], points[[i, j], 1])
show()
|
[
"li.kevin512@gmail.com"
] |
li.kevin512@gmail.com
|
2acf598735221ca3f6bdcec5e6c1cf0915da06a5
|
e2d23d749779ed79472a961d2ab529eeffa0b5b0
|
/pipeline/contrib/external_plugins/models/source.py
|
68da04c02bb907c4abb449d0282141b98ea14778
|
[
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
manlucas/atom
|
9fa026b3f914e53cd2d34aecdae580bda09adda7
|
94963fc6fdfd0568473ee68e9d1631f421265359
|
refs/heads/master
| 2022-09-30T06:19:53.828308
| 2020-01-21T14:08:36
| 2020-01-21T14:08:36
| 235,356,376
| 0
| 0
|
NOASSERTION
| 2022-09-16T18:17:08
| 2020-01-21T14:04:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,470
|
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from pipeline.conf import settings
from pipeline.contrib.external_plugins.utils.importer import (
GitRepoModuleImporter,
S3ModuleImporter,
FSModuleImporter
)
from pipeline.contrib.external_plugins.models.base import (
GIT,
S3,
FILE_SYSTEM,
package_source,
ExternalPackageSource
)
@package_source
class GitRepoSource(ExternalPackageSource):
repo_raw_address = models.TextField(_(u"file hosting repository URL"))
branch = models.CharField(_(u"branch name"), max_length=128)
@staticmethod
def type():
return GIT
def importer(self):
return GitRepoModuleImporter(name=self.name,
repo_raw_url=self.repo_raw_address,
branch=self.branch,
modules=self.packages.keys(),
proxy=settings.EXTERNAL_PLUGINS_SOURCE_PROXY,
secure_only=settings.EXTERNAL_PLUGINS_SOURCE_SECURE_RESTRICT)
def details(self):
return {
'repo_raw_address': self.repo_raw_address,
'branch': self.branch
}
@package_source
class S3Source(ExternalPackageSource):
service_address = models.TextField(_(u"object storage service address"))
bucket = models.TextField(_(u"bucket name"))
access_key = models.TextField(_(u"access key"))
secret_key = models.TextField(_(u"secret key"))
@staticmethod
def type():
return S3
def importer(self):
return S3ModuleImporter(name=self.name,
modules=self.packages.keys(),
service_address=self.service_address,
bucket=self.bucket,
access_key=self.access_key,
secret_key=self.secret_key,
secure_only=settings.EXTERNAL_PLUGINS_SOURCE_SECURE_RESTRICT)
def details(self):
return {
'service_address': self.service_address,
'bucket': self.bucket,
'access_key': self.access_key,
'secret_key': self.secret_key
}
@package_source
class FileSystemSource(ExternalPackageSource):
path = models.TextField(_(u"file system path"))
@staticmethod
def type():
return FILE_SYSTEM
def importer(self):
return FSModuleImporter(name=self.name,
modules=self.packages.keys(),
path=self.path)
def details(self):
return {
'path': self.path
}
|
[
"lucaswang@canway.net"
] |
lucaswang@canway.net
|
f1b34bc374aa7e84f9804d0aa40e0b25c02dd5c5
|
3522fd96405315d282dc9c242ab46f110a6c8537
|
/butter/deploy.py
|
02f77d271820122c565322465ea13e9067d936a8
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
ombu/butter
|
b76be7cb5a07557b1d28c4adedbf58d394ba7a32
|
aea16099bb34a2c6a6706ee6386fd90e9bd1e301
|
refs/heads/master
| 2021-01-17T07:40:12.651434
| 2018-12-13T23:47:44
| 2018-12-13T23:47:44
| 3,538,270
| 1
| 4
| null | 2015-01-13T20:01:45
| 2012-02-24T18:11:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,577
|
py
|
from __future__ import with_statement
from fabric.operations import run
from fabric.api import task, env, cd, hide
from fabric.contrib import files
from time import gmtime, strftime
import os
@task
def log():
"""
Tail the deployment log of a host
"""
print('+ Reading deployment log...')
with cd(env.host_site_path):
with hide('running', 'stdout'):
out = run('cat DEPLOYMENTS')
print(out)
def mark(parsed_ref):
"""
Mark a deployment
"""
from time import gmtime, strftime
print('+ Logging deployment')
with cd(env.host_site_path):
with hide('running', 'stdout'):
if not files.exists('DEPLOYMENTS'):
print('+ No DEPLOYMENTS file found. Creating one.')
run('touch DEPLOYMENTS')
date = strftime("%Y.%m.%d at %H:%M:%SUTC", gmtime())
run('echo "%s by %s: %s" >> DEPLOYMENTS' % (date, os.getlogin(), parsed_ref))
@task
def clean(age=15):
"""
Clean a `path` from files older than `age` days
"""
with hide('running', 'stdout'):
with cd('%s/changesets' % env.host_site_path):
# count how many we'll delete
count = run("""find . -maxdepth 1 -type d -mtime +%s ! -iname '\.'| wc -l""" %
age)
# delete
if count != '0':
print('+ Removing %s deployments older than %s days' % (count, age))
run("""find . -maxdepth 1 -type d -mtime +%s ! -iname '\.' -print0 \
| xargs -0 rm -rf""" % age)
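# --- Usage sketch (not part of the original file) ---
# With Fabric 1.x these tasks would typically be run from the project's
# fabfile; the host and the `deploy.` namespace are assumptions:
#   fab -H example.com deploy.log
#   fab -H example.com deploy.clean:age=30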
|
[
"martin@ombuweb.com"
] |
martin@ombuweb.com
|
0a007663043ed2e9283b8fef34002aa634b5823a
|
4116dc4681a9ea321d35f65a0588ef556e7e413d
|
/app/aplicaciones/preguntas/models.py
|
7462b74bff033c87d9998b0ab94af14480c24f24
|
[] |
no_license
|
MarcheloJacome/ingWeb
|
6eba0cdb172a3a46ab9b7fe5b3bf85e0878d11d0
|
1624ae59c9dea5eec741e664f275969db00e9f8d
|
refs/heads/main
| 2023-06-04T00:28:16.115738
| 2021-06-18T14:55:02
| 2021-06-18T14:55:02
| 374,448,949
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 964
|
py
|
from django.contrib.auth.models import User
from django.db import models
from django.db.models.deletion import CASCADE
from aplicaciones.tests.models import Test
# Create your models here.
class Pregunta(models.Model):
id = models.AutoField(primary_key=True)
texto = models.CharField(max_length=300)
test = models.ForeignKey(Test, on_delete=models.CASCADE)
fecha_creacion = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.texto)
def get_respuestas(self):
return self.respuesta_set.all()
class Respuesta(models.Model):
id = models.AutoField(primary_key=True)
texto = models.CharField(max_length=300)
valor = models.IntegerField()
pregunta = models.ForeignKey(Pregunta, on_delete=models.CASCADE)
fecha_creacion = models.DateTimeField(auto_now_add=True)
def __str__(self):
return f"pregunta: {self.pregunta.texto}, respuesta: {self.texto}, valor: {self.valor}"
|
[
"marcelo.jacome@hotmail.com"
] |
marcelo.jacome@hotmail.com
|
49594d8571f7f181dda69ef7441143a14b4dc198
|
880aa3abebcc42721471bb6622d887098ce70bbe
|
/tasks/workers.py
|
7e0e7a8f4bffa14c7493a26eeebb0efa79b48e07
|
[] |
no_license
|
GitPython225/celery_zhilian
|
1204a82f2235e6dd34d7c01414d96618aacbd753
|
f9eb291a66fc995bac52c4267f1ad6c9548c38f5
|
refs/heads/master
| 2020-03-22T23:29:16.385865
| 2018-07-13T08:15:06
| 2018-07-13T08:15:06
| 140,813,252
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,822
|
py
|
# coding:utf-8
"""Configuration for the celery workers."""
import os
from datetime import datetime
from celery import Celery, platforms
from kombu import Exchange, Queue
from datetime import timedelta
"""Kombu is a messaging library for Python. Its goal is to provide a foolproof high-level interface for the AMQP protocol, make message passing in Python as simple as possible, and also offer solutions to common messaging problems."""
# Load the config settings
# from config.conf import (
# get_broker_and_backend
# get_redis_master
# )
# Start celery in root mode (by default celery refuses to start as root)
platforms.C_FORCE_ROOT = True
# Resolve the log file paths
worker_log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)) + '/logs', 'celery.log')
beat_log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)) + '/logs', 'beat.log')
# broker_and_backend = get_broker_and_backend()
# tasks = ['tasks.area', 'tasks.zhilist', 'tasks.zhidetail']
tasks = ['tasks.listpage']
# 'tasks.area',
# Create the celery app instance
# if isinstance(broker_and_backend, list):
# broker, backend = broker_and_backend
app = Celery('zhi_task', include=tasks, broker='redis://:Btxrrvt.1@localhost:6379/7',
backend='redis://:Btxrrvt.1@localhost/8')
# CELERY_IMPORTS = ('tasks.listpage',)
app.conf.update(
CELERY_TIMEZONE='Asia/Shanghai',
CELERY_ENABLE_UTC=True,
CELERY_ACCEPT_CONTENT=['json'],
CELERY_TASK_SERIALIZER='json',
CELERY_RESULT_SERIALIZER='json',
CELERYBEAT_SCHEDULE={
'list_task': {
'task': 'tasks.listpage.excute_list_task',
'schedule': 1,  # crawl interval (seconds)
'options': {'queue': 'crawler_list_queue', 'routing_key': 'list_info'}
},
# 'detail_task': {
# 'task': 'tasks.zlpt.excute_detail_task',
# 'schedule': 1,  # crawl interval (seconds)
# 'options': {'queue': 'crawler_detail_queue', 'routing_key': 'detail_info'}
# },
# # # 'area_task': {
# # # 'task': 'tasks.area.excute_area_task',
# # # 'schedule': timedelta(hours=1),
# # # 'options': {'queue': 'crawler_area_queue', 'routing_key': 'area_info'}
# # # }
},
CELERY_QUEUES=(
Queue('crawler_list_queue', exchange=Exchange('crawler_list_queue', type='direct'), routing_key='list_info'),
#
# Queue('crawler_detail_queue', exchange=Exchange('crawler_detail_queue', type='direct'),
# routing_key='detail_info'),
# Queue('crawler_area_queue', exchange=Exchange('crawler_area_queue', type='direct'), routing_key='area_info'),
# Queue('crawler_town_queue', exchange=Exchange('crawler_town_queue', type='direct'), routing_key='road_info'),
# there may be an error here
),
)
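# --- Usage sketch (not part of the original file) ---
# How such a worker and its beat scheduler are typically started, assuming
# this module lives at tasks/workers.py on the import path:
#   celery -A tasks.workers worker -Q crawler_list_queue -l info
#   celery -A tasks.workers beat -l info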
|
[
"18368918916@163.com"
] |
18368918916@163.com
|
248023aeaca19bd25ed9dc8ecf23b7f9bdc0a885
|
26531c8c4716e28313a34eefaa0978952cec72a4
|
/calendall/calendall/urls.py
|
1b7cdead2581ef32b8d97cb5258999f71e68b130
|
[
"BSD-3-Clause"
] |
permissive
|
xala3pa/calendall
|
3e72cd2a7989a8fc8993c2b2d72683096d0bad96
|
9e5daf72dbbf93d988d14e3eee04f85d6326cf0a
|
refs/heads/master
| 2020-12-25T19:38:46.201038
| 2015-01-01T21:47:28
| 2015-01-01T21:47:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'calendall.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
[
"slok69@gmail.com"
] |
slok69@gmail.com
|
e0a2e0600318bfa3310f8f5a52aada349ecfbc18
|
9aad4e1b73608ef767552fd4ca1653997e425ec0
|
/lib/BRadar/rasterize.py
|
041b2ed85c6ef3c71b324e9a996b6e3484944bcf
|
[
"BSD-3-Clause"
] |
permissive
|
WeatherGod/BRadar
|
e7e3022c2bc7b59c63f343f844558ccca5ef0747
|
a0af2fffd324c18ea7df60323359f144d2858428
|
refs/heads/master
| 2021-01-18T21:33:47.604128
| 2014-08-13T03:22:57
| 2014-08-13T03:22:57
| 1,343,563
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,935
|
py
|
from __future__ import print_function
import numpy as np
# NB: matplotlib.nxutils was removed in later Matplotlib releases;
# matplotlib.path.Path.contains_points is the modern equivalent.
from matplotlib.nxutils import points_inside_poly
from maputils import sph2latlon, latlon2pix, makerefmat
from multiprocessing import Pool
def Rastify(statLat, statLon, origData, azimuths,
rangeGates, elevAngle, deltaAz, deltaR,
cellSize=None, lonAxis=None, latAxis=None,
mask=False) :
"""
RASTIFY Covert data in spherical domain into rectilinear
lat/lon domain
Rastify(...) takes the vector or matrix of data points and places
the points into a 2-D matrix organized by latitudes and longitudes.
The original data has a parallel vector or matrix of azimuths (in
degrees North) and rangeGates (in meters) as well as a scalar elevAngle
(in degrees). The origin of the spherical coordinates is given by
the statLat (in degrees North) and statLon (in degrees East). deltaAz
denotes the width of the beam in degrees while the deltaR scalar
denotes the width the of range gate in meters.
For the final grid, the user can specify the latitude and/or longitude
axes with the *lonAxis* and *latAxis* kwargs. For which ever axis not
specified, the user can specify a resultion (in degrees) with the
*cellSize* kwarg, and the axis will be automatically determined by
the limits of the supplied inputs.
Author: Benjamin Root
"""
if (latAxis is None or lonAxis is None) and cellSize is None :
raise ValueError("Must specify *cellSize* if *latAxis* and/or"
"*lonAxis* is not given")
goodVals = (~np.isnan(origData) | ~mask)
origData = origData[goodVals]
azimuths = azimuths[goodVals]
rangeGates = rangeGates[goodVals]
# These arrays are for creating the verticies of the resolution volume
# in 2-D.
deltaAzMult = np.array([-1, -1, 1, 1])
deltaRMult = np.array([-1, 1, 1, -1])
# Getting the lat/lon locations of all the verticies.
tmpLat, tmpLon = sph2latlon(statLat, statLon,
(azimuths[:, np.newaxis] +
(deltaAzMult[np.newaxis, :] * deltaAz)),
(rangeGates[:, np.newaxis] +
(deltaRMult[np.newaxis, :] * deltaR)),
elevAngle)
# Automatically determine the domain,
# note that this isn't friendly to crossing the prime-meridian.
if latAxis is None :
latlim = (tmpLat.min(), tmpLat.max())
latAxis = np.arange(latlim[0], latlim[1] + cellSize, cellSize)
else :
latlim = (latAxis.min(), latAxis.max())
latRes = np.abs(np.median(np.diff(latAxis)))
if lonAxis is None :
lonlim = (tmpLon.min(), tmpLon.max())
lonAxis = np.arange(lonlim[0], lonlim[1] + cellSize, cellSize)
else :
lonlim = (lonAxis.min(), lonAxis.max())
lonRes = np.abs(np.median(np.diff(lonAxis)))
# Automatically determine the grid size from the calculated axes.
gridShape = (len(latAxis), len(lonAxis))
# Reference matrix is used to perform the affine transformation from
# lat/lon to the x-y coordinates that we need.
# This can be adjusted later to allow for the user to specify a
# different resolution for x direction from the resolution in the y
# direction.
R = makerefmat(lonlim[0], latlim[0], lonRes, latRes)
# Getting the x and y locations for each and every verticies.
(tmpys, tmpxs) = latlon2pix(R, tmpLat, tmpLon)
# I wonder if it is computationally significant to get the min/max's of
# each polygon's coordinates in one shot. What about storage
# requirements?
# Initializing the data matrix.
rastData = np.empty(gridShape)
rastData[:] = np.nan
#p = Pool(6)
#
#results = [p.apply_async(_raster_points,
# (tmpx,tmpy,gridShape)) for
# tmpx, tmpy in zip(tmpxs, tmpys)]
#p.close()
#p.join()
# Take the original data value, and assign it to each rasterized
# gridpoint that fall within its voxel.
#for tmpx, tmpy, val in zip(tmpxs, tmpys, origData) :
for tmpx, tmpy, val in zip(tmpxs, tmpys, origData) :
pts = _raster_points(tmpx, tmpy, gridShape)
#pts = res.get()
# Assign values to the appropriate locations (the grid points that
# were inside the polygon), given that the data value that might
# already be there is less-than the value to-be-assigned, or if
# there hasn't been a data-value assigned yet (NAN).
# This method corresponds with the method used by NEXRAD.
for containedPoint in zip(*pts) :
if (np.isnan(rastData[containedPoint])
or (rastData[containedPoint] < val)) :
rastData[containedPoint] = val
return (rastData, latAxis, lonAxis)
def _raster_points(tmpx, tmpy, gridShape) :
"""
Find the raster grid points that lie within the voxel
"""
if (max(tmpx) < 0 or max(tmpy) < 0 or
min(tmpx) >= gridShape[1] or min(tmpy) >= gridShape[0]) :
# points lie outside the rasterization grid
# so, none of them are good.
return ([], [])
resVol = zip(tmpx[[0, 1, 2, 3, 0]],
tmpy[[0, 1, 2, 3, 0]])
# Getting all of the points that the polygon has, and then some.
# This meshed grid is bounded by the domain.
bbox = ((int(max(np.floor(min(tmpy)), 0)),
int(min(np.ceil(max(tmpy)), gridShape[0] - 1))),
(int(max(np.floor(min(tmpx)), 0)),
int(min(np.ceil(max(tmpx)), gridShape[1] - 1))))
(ygrid, xgrid) = np.meshgrid(np.arange(bbox[0][0], bbox[0][1] + 1),
np.arange(bbox[1][0], bbox[1][1] + 1))
gridPoints = zip(xgrid.flat, ygrid.flat)
if len(gridPoints) == 0 :
print("Bad situation...:", bbox, gridShape, min(tmpy), max(tmpy), \
min(tmpx), max(tmpx))
gridPoints = np.zeros((0, 2), dtype='i')
# Determines which points fall within the resolution volume. These
# points will be the ones that will be assigned the value of the
# original data point that the resolution volume represents.
goodPoints = points_inside_poly(gridPoints, resVol)
return (ygrid.flat[goodPoints], xgrid.flat[goodPoints])
def point_inside_polygon(pnts, poly):
n = len(poly)
pnts = np.asanyarray(pnts)
inside = np.zeros(pnts.shape[0], dtype=bool)
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
locs = ((pnts[:, 1] > min(p1y,p2y)) &
(pnts[:, 1] <= max(p1y, p2y)) &
(pnts[:, 0] <= max(p1x, p2x)) &
((p1x == p2x) | (pnts[:, 0] <= ((pnts[:, 1] - p1y)*(p2x-p1x)/(p2y-p1y)+p1x))))
inside[locs] = ~inside[locs]
p1x,p1y = p2x,p2y
return inside
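# --- Usage sketch (not part of the original file; all values hypothetical) ---
# Rasterizing one sweep from a station at (35.33 N, -97.28 E) with
# half-degree beams (deltaAz/deltaR are half-widths, per the +/- multipliers
# above) and 250 m gates, onto an automatically sized 0.01-degree grid:
#   rast, lats, lons = Rastify(35.33, -97.28, vals, azims, ranges,
#                              elevAngle=0.5, deltaAz=0.25, deltaR=125.0,
#                              cellSize=0.01)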
|
[
"ben.v.root@gmail.com"
] |
ben.v.root@gmail.com
|
1964e30ae17774848a190dc3d8225c1f8d552b47
|
fce300b6c85ab7f4111597b47873f9dc59adc6f9
|
/lib/datasets/pascal_voc.py
|
85fb1dda9326044842eaef13160b852444d58417
|
[
"MIT"
] |
permissive
|
Lynkzhang/tf-fast-rcnn-BCCD
|
047a9de71ac7fddb1b69a9994ec7f6b878807c09
|
fd4d15823817ee83344f1e4945f4e7a4969516ce
|
refs/heads/master
| 2020-04-01T19:29:03.192103
| 2018-02-02T09:27:33
| 2018-02-02T09:27:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,422
|
py
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from datasets.imdb import imdb
import datasets.ds_utils as ds_utils
import xml.etree.ElementTree as ET
import numpy as np
import scipy.sparse
import scipy.io as sio
import utils.cython_bbox
import pickle
import subprocess
import uuid
from .voc_eval import voc_eval
from model.config import cfg
class pascal_voc(imdb):
def __init__(self, image_set, year, use_diff=False):
name = 'voc_' + year + '_' + image_set
if use_diff:
name += '_diff'
imdb.__init__(self, name)
self._year = year
self._image_set = image_set
self._devkit_path = self._get_default_path()
self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
self._classes = ('__background__', # always index 0
# 'aeroplane', 'bicycle', 'bird', 'boat',
# 'bottle', 'bus', 'car', 'cat', 'chair',
# 'cow', 'diningtable', 'dog', 'horse',
# 'motorbike', 'person', 'pottedplant',
# 'sheep', 'sofa', 'train', 'tvmonitor')
'rbc', 'wbc', 'platelets')
self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index()
# Default to roidb handler
self._roidb_handler = self.gt_roidb
self._salt = str(uuid.uuid4())
self._comp_id = 'comp4'
# PASCAL specific config options
self.config = {'cleanup': True,
'use_salt': True,
'use_diff': use_diff,
'matlab_eval': False,
'rpn_file': None}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
if index[-1] == 'g':
image_path = os.path.join(self._data_path, 'JPEGImages',
index)
else:
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
try:
roidb = pickle.load(fid)
except:
roidb = pickle.load(fid, encoding='bytes')
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_pascal_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def rpn_roidb(self):
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
rpn_roidb = self._load_rpn_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
else:
roidb = self._load_rpn_roidb(None)
return roidb
def _load_rpn_roidb(self, gt_roidb):
filename = self.config['rpn_file']
print('loading {}'.format(filename))
assert os.path.exists(filename), \
'rpn data not found at: {}'.format(filename)
with open(filename, 'rb') as f:
box_list = pickle.load(f)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
if not self.config['use_diff']:
# Exclude the samples labeled as difficult
non_diff_objs = [
obj for obj in objs if int(obj.find('difficult').text) == 0]
# if len(non_diff_objs) != len(objs):
# print 'Removed {} difficult objects'.format(
# len(objs) - len(non_diff_objs))
objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': seg_areas}
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
def _get_voc_results_file_template(self):
# VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
path = os.path.join(
self._devkit_path,
'results',
'VOC' + self._year,
'Main',
filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print('Writing {} VOC results file'.format(cls))
filename = self._get_voc_results_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
self._devkit_path,
'VOC' + self._year,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._devkit_path,
'VOC' + self._year,
'ImageSets',
'Main',
self._image_set + '.txt')
cachedir = os.path.join(self._devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = True if int(self._year) < 2010 else False
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
rec, prec, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric, use_diff=self.config['use_diff'])
aps += [ap]
print(('AP for {} = {:.4f}'.format(cls, ap)))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print(('Mean AP = {:.4f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print(('{:.3f}'.format(ap)))
print(('{:.3f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
def _do_matlab_eval(self, output_dir='output'):
print('-----------------------------------------------------')
print('Computing results with the official MATLAB eval code.')
print('-----------------------------------------------------')
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
.format(self._devkit_path, self._get_comp_id(),
self._image_set, output_dir)
print(('Running:\n{}'.format(cmd)))
status = subprocess.call(cmd, shell=True)
def evaluate_detections(self, all_boxes, output_dir):
self._write_voc_results_file(all_boxes)
self._do_python_eval(output_dir)
if self.config['matlab_eval']:
self._do_matlab_eval(output_dir)
if self.config['cleanup']:
for cls in self._classes:
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
os.remove(filename)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
from datasets.pascal_voc import pascal_voc
d = pascal_voc('trainval', '2007')
res = d.roidb
from IPython import embed;
embed()
|
[
"csg19971016@gmail.com"
] |
csg19971016@gmail.com
|
e472c0bf287069bc6529421719d18beb9f0514e5
|
b3b31138caf891b80eb337c95ae24ba33f08d4d5
|
/instance/number_remember.py
|
044df6cb37917a78e9e1722336f3241eb3ea60f6
|
[] |
no_license
|
caiyunz/learning
|
3a2d39e3baf8d5d7d8f415ba4554b6d7d8007ccb
|
fe81ae166ca63caad71d236b2818e3b3939984c1
|
refs/heads/master
| 2021-01-19T23:24:33.643409
| 2018-09-13T09:00:44
| 2018-09-13T09:00:44
| 101,265,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 495
|
py
|
#!/usr/bin/python3
import json
def input_number():
    number = input("Input the number you like: ")
    number_file = 'numbers.txt'
    with open(number_file, 'w') as f_obj:
        json.dump(number, f_obj)
def output_number():
    number_file = 'numbers.txt'
    try:
        with open(number_file) as f_obj:
            number = json.load(f_obj)
    except FileNotFoundError:
        input_number()
    else:
        print("I know your favorite number! It's " + number)
#input_number()
output_number()
|
[
"noreply@github.com"
] |
caiyunz.noreply@github.com
|
6795158aabbfec57bf84cb3efa1e1e4b62c14bd5
|
af2da79da5d7a1fa5597db7264df8dd99700720b
|
/sql_queries.py
|
b6d45d1034b14aff3b5541c25bbf09378263b308
|
[] |
no_license
|
jrwils/sparkifydb_redshift
|
1e8547d554a8f88fe8f145396749698f270dc1b6
|
4a8e93c5873713532ad44597db489a8c92049dab
|
refs/heads/master
| 2023-07-16T03:21:32.155361
| 2021-08-06T17:59:16
| 2021-08-06T17:59:16
| 393,460,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,518
|
py
|
import configparser
# CONFIG
config = configparser.ConfigParser()
config.read('dwh.cfg')
# DROP TABLES
staging_events_table_drop = "DROP TABLE IF EXISTS staging_events;"
staging_songs_table_drop = "DROP TABLE IF EXISTS staging_songs;"
songplay_table_drop = "DROP TABLE IF EXISTS songplays;"
user_table_drop = "DROP TABLE IF EXISTS users;"
song_table_drop = "DROP TABLE IF EXISTS songs;"
artist_table_drop = "DROP TABLE IF EXISTS artists;"
time_table_drop = "DROP TABLE IF EXISTS time;"
# CREATE TABLES
staging_events_table_create = (
"""
CREATE TABLE IF NOT EXISTS staging_events (
artist VARCHAR(128),
auth VARCHAR(16),
firstName VARCHAR(128),
gender VARCHAR(1),
itemInSession INTEGER,
lastName VARCHAR(128),
length NUMERIC(10, 5),
level VARCHAR(4),
location VARCHAR(256),
method VARCHAR(5),
page VARCHAR(16),
registration NUMERIC,
sessionId INTEGER,
song VARCHAR(256),
status INTEGER,
ts BIGINT,
userAgent TEXT,
userId INTEGER
);
"""
)
staging_songs_table_create = (
"""
CREATE TABLE IF NOT EXISTS staging_songs (
num_songs INTEGER,
artist_id VARCHAR(18),
artist_latitude NUMERIC(10, 5),
artist_longitude NUMERIC(10, 5),
artist_location VARCHAR(256),
artist_name VARCHAR(256),
song_id VARCHAR(18),
title VARCHAR(256),
duration NUMERIC(10, 5),
year INTEGER
);
"""
)
user_table_create = (
"""
CREATE TABLE IF NOT EXISTS users (
user_id INTEGER NOT NULL,
first_name VARCHAR(128),
last_name VARCHAR(128),
gender VARCHAR(1),
level VARCHAR(4),
PRIMARY KEY(user_id)
);
"""
)
artist_table_create = (
"""
CREATE TABLE IF NOT EXISTS artists (
artist_id VARCHAR(18) NOT NULL,
name VARCHAR(256),
location VARCHAR(256),
latitude NUMERIC(10, 5),
longitude NUMERIC(10, 5),
PRIMARY KEY(artist_id)
) DISTKEY(artist_id);
"""
)
song_table_create = (
"""
CREATE TABLE IF NOT EXISTS songs (
song_id VARCHAR(18) NOT NULL,
title VARCHAR(256) NOT NULL,
artist_id VARCHAR(18) NOT NULL,
year INT,
duration NUMERIC(10, 5),
PRIMARY KEY(song_id),
FOREIGN KEY(artist_id) REFERENCES artists(artist_id)
) DISTKEY(song_id);
"""
)
time_table_create = (
"""
CREATE TABLE IF NOT EXISTS time (
start_time TIMESTAMP NOT NULL UNIQUE,
hour INTEGER NOT NULL,
day INTEGER NOT NULL,
week INTEGER NOT NULL,
month INTEGER NOT NULL,
year INTEGER NOT NULL,
weekday INTEGER NOT NULL,
PRIMARY KEY(start_time)
);
"""
)
songplay_table_create = (
"""
CREATE TABLE IF NOT EXISTS songplays (
songplay_id INTEGER IDENTITY(0, 1) NOT NULL,
start_time TIMESTAMP NOT NULL,
user_id INTEGER NOT NULL,
level VARCHAR(4),
song_id VARCHAR(18) NOT NULL,
artist_id VARCHAR(18) NOT NULL,
session_id INTEGER NOT NULL,
location VARCHAR(256),
user_agent TEXT,
PRIMARY KEY(songplay_id),
FOREIGN KEY(user_id) REFERENCES users(user_id),
FOREIGN KEY(song_id) REFERENCES songs(song_id),
FOREIGN KEY(artist_id) REFERENCES artists(artist_id),
FOREIGN KEY(start_time) REFERENCES time(start_time)
) DISTKEY(song_id) SORTKEY(start_time);
"""
)
# STAGING TABLES
staging_events_copy = (
"""
COPY staging_events FROM '{}'
iam_role '{}'
json 'auto ignorecase';
""").format(
config.get('S3', 'LOG_DATA'),
config.get('IAM_ROLE', 'ROLE_ARN')
)
staging_songs_copy = (
"""
COPY staging_songs FROM '{}'
iam_role '{}'
json 'auto ignorecase';
""").format(
config.get('S3', 'SONG_DATA'),
config.get('IAM_ROLE', 'ROLE_ARN')
)
# FINAL TABLES
user_table_insert = (
"""
INSERT INTO users (
SELECT se.userId, firstName, lastName, gender, uts_levels.level
FROM staging_events se
-- joined subqueries are here
-- in order to get the most recent 'level' value
INNER JOIN (
SELECT userId, max(ts) as mts from staging_events
group by userId
) max_ts on (max_ts.userId = se.userId)
INNER JOIN (
SELECT userId, ts, level from staging_events
) uts_levels on (
max_ts.userId = uts_levels.userId
and max_ts.mts = uts_levels.ts
)
GROUP BY se.userId, firstName, lastName, gender, uts_levels.level
ORDER by se.userId
);
"""
)
artist_table_insert = (
"""
INSERT into artists (
SELECT
artist_id,
artist_name,
artist_location,
artist_latitude,
artist_longitude
FROM staging_songs
GROUP BY
artist_id,
artist_name,
artist_location,
artist_latitude,
artist_longitude
);
"""
)
song_table_insert = (
"""
INSERT into songs (
SELECT song_id, title, artist_id, year, duration
FROM staging_songs
GROUP BY song_id, title, artist_id, year, duration
);
"""
)
time_table_insert = (
"""
INSERT into time (
SELECT (timestamp 'epoch' + ts * interval '.001 seconds') as start_time,
EXTRACT(
HOUR from (timestamp 'epoch' + ts * interval '.001 seconds')
) as hour,
EXTRACT(
DAY from (timestamp 'epoch' + ts * interval '.001 seconds')
) as day,
EXTRACT(
WEEK from (timestamp 'epoch' + ts * interval '.001 seconds')
) as week,
EXTRACT(
MONTH from (timestamp 'epoch' + ts * interval '.001 seconds')
) as month,
EXTRACT(
YEAR from (timestamp 'epoch' + ts * interval '.001 seconds')
) as year,
EXTRACT(
DOW from (timestamp 'epoch' + ts * interval '.001 seconds')
) as weekday
FROM staging_events
GROUP BY start_time, hour, day, week, month, year, weekday
);
"""
)
songplay_table_insert = (
"""
INSERT into songplays (
start_time,
user_id,
level,
song_id,
artist_id,
session_id,
location,
user_agent
) (
SELECT (timestamp 'epoch' + ts * interval '.001 seconds') as start_time,
userId,
level,
sng.song_id,
art.artist_id,
sessionId,
se.location,
userAgent
FROM staging_events se
INNER JOIN artists art on (se.artist = art.name)
INNER JOIN songs sng on (art.artist_id = sng.artist_id)
WHERE se.page = 'NextSong'
);
"""
)
# QUERY LISTS
create_table_queries = [
staging_events_table_create,
staging_songs_table_create,
user_table_create,
artist_table_create,
song_table_create,
time_table_create,
songplay_table_create
]
drop_table_queries = [
staging_events_table_drop,
staging_songs_table_drop,
songplay_table_drop,
time_table_drop,
song_table_drop,
artist_table_drop,
user_table_drop
]
copy_table_queries = [
staging_events_copy,
staging_songs_copy
]
insert_table_queries = [
user_table_insert,
artist_table_insert,
song_table_insert,
time_table_insert,
songplay_table_insert,
]
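# --- Usage sketch (not part of the original file) ---
# These query lists are typically consumed by companion scripts (e.g. a
# create_tables.py / etl.py pair in this kind of project):
#   for query in drop_table_queries + create_table_queries:
#       cur.execute(query)
#       conn.commit()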
|
[
"joe.wlsn@gmail.com"
] |
joe.wlsn@gmail.com
|
1063535556d6dffc047f65f250e9d2745bdbd1a1
|
53a9a98aef2fabd21f1577911c147f417a4e2282
|
/CS/Profile/views.py
|
c4c9c72f01596a84869160b78542f1acfce44238
|
[] |
no_license
|
edwin141999/Django
|
3037789052700ef36d093a0e87aa8fe0b946bb8c
|
dd8e50c53850965dc1b4a7b9efa76b26ab5f2c02
|
refs/heads/master
| 2021-09-25T09:37:48.855734
| 2020-02-14T04:02:47
| 2020-02-14T04:02:47
| 237,686,224
| 0
| 0
| null | 2021-09-22T18:32:26
| 2020-02-01T22:23:04
|
Python
|
UTF-8
|
Python
| false
| false
| 5,288
|
py
|
#from django.shortcuts import render
# Create your views here.
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.authtoken.models import Token
from django.shortcuts import get_object_or_404
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import generics
from Profile.models import Profile
from Profile.models import modelOcupacion
from Profile.models import modelCiudad
from Profile.models import modelEstado
from Profile.models import modelEstado_Civil
from Profile.models import modelGenero
from Profile.serializer import ProfileSerializers
from Profile.serializer import CiudadSerializers
from Profile.serializer import Estado_CivilSerializers
from Profile.serializer import EstadoSerializers
from Profile.serializer import GeneroSerializers
from Profile.serializer import OcupacionSerializers
import coreapi
from rest_framework.schemas import AutoSchema
class ProfileLisViewSchema(AutoSchema):
def get_manual_fields(self,path,method):
extra_fields = []
if method.lower() in ('post','get'):
extra_fields = [
coreapi.Field('nombre')
]
manual_fields =super().get_manual_fields(path,method)
return manual_fields + extra_fields
class ProfileList(APIView):
permission_classes = []
schema = ProfileLisViewSchema()
def get(self,request,format=None):
print("Metodo get filter")
queryset = Profile.objects.filter(delete = False)
serializer = ProfileSerializers(queryset,many=True)
return Response(serializer.data)
def post(self,request,format=None):
serializer = ProfileSerializers(data = request.data)
if serializer.is_valid():
serializer.save()
datas =serializer.data
return Response(datas)
return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST)
class ProfileOcupacion(APIView):
permission_classes = []
schema = ProfileLisViewSchema()
def get(self,request,format=None):
print("Metodo get filter")
queryset = modelOcupacion.objects.filter(delete = False)
serializer = OcupacionSerializers(queryset,many=True)
return Response(serializer.data)
def post(self,request,format=None):
serializer = OcupacionSerializers(data = request.data)
if serializer.is_valid():
serializer.save()
datas =serializer.data
return Response(datas)
return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST)
class ProfileCiudad(APIView):
permission_classes = []
schema = ProfileLisViewSchema()
def get(self,request,format=None):
print("Metodo get filter")
queryset = modelCiudad.objects.filter(delete = False)
serializer = CiudadSerializers(queryset,many=True)
return Response(serializer.data)
def post(self,request,format=None):
serializer = CiudadSerializers(data = request.data)
if serializer.is_valid():
serializer.save()
datas =serializer.data
return Response(datas)
return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST)
class ProfileEstado(APIView):
permission_classes = []
schema = ProfileLisViewSchema()
def get(self,request,format=None):
print("Metodo get filter")
queryset = modelEstado.objects.filter(delete = False)
serializer = EstadoSerializers(queryset,many=True)
return Response(serializer.data)
def post(self,request,format=None):
serializer = EstadoSerializers(data = request.data)
if serializer.is_valid():
serializer.save()
datas =serializer.data
return Response(datas)
return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST)
class ProfileEstado_Civil(APIView):
permission_classes = []
schema = ProfileLisViewSchema()
def get(self,request,format=None):
print("Metodo get filter")
queryset = modelEstado_Civil.objects.filter(delete = False)
serializer = Estado_CivilSerializers(queryset,many=True)
return Response(serializer.data)
def post(self,request,format=None):
serializer = Estado_CivilSerializers(data = request.data)
if serializer.is_valid():
serializer.save()
datas =serializer.data
return Response(datas)
return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST)
class ProfileGenero(APIView):
permission_classes = []
schema = ProfileLisViewSchema()
def get(self,request,format=None):
print("Metodo get filter")
queryset = modelGenero.objects.filter(delete = False)
serializer = GeneroSerializers(queryset,many=True)
return Response(serializer.data)
def post(self,request,format=None):
serializer = GeneroSerializers(data = request.data)
if serializer.is_valid():
serializer.save()
datas =serializer.data
return Response(datas)
return Response(serializer.errors, status = status.HTTP_400_BAD_REQUEST)
|
[
"183433@ids.upchiapas.edu.mx"
] |
183433@ids.upchiapas.edu.mx
|
dd6eb7278fb8de6248cdbc2cf3ad00132a3e9d08
|
886453bda7757f75b45c6c3b9d93e1a684bf227f
|
/DjangoXAdminDRF/settings.py
|
f680fe01dac534389dfe9ea094571d7df0d01fe4
|
[] |
no_license
|
Lockeysama/DjangoXAdminDRF
|
e445d013d432cc472b5e6bb73433b945355e479b
|
c056909a7473f1c585db831c3cab0eb6cc8209e7
|
refs/heads/master
| 2022-12-17T17:17:14.235361
| 2020-09-14T01:18:51
| 2020-09-14T01:18:51
| 294,709,811
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,049
|
py
|
"""
Django settings for DjangoXAdminDRF project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import datetime
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from django.conf.locale.zh_Hans import formats
version = '1.1.0'
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, BASE_DIR)
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
sys.path.insert(0, os.path.join(BASE_DIR, 'extra_apps'))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-yp=mz-cttem-ap*d2x=pi2!8n$r4yqkfv+v2qf_c*-)!-*fdm'
if SECRET_KEY == '-yp=mz-cttem-ap*d2x=pi2!8n$r4yqkfv+v2qf_c*-)!-*fdm':
    # The SECRET_KEY must be kept safe: hard-coding it in source risks exposure
    # once the code is published; load it from an environment variable or similar
raise Exception('SECURITY WARNING: keep the secret key used in production secret! ')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DXDRF_DEBUG', False) == 'true'
DEBUG = True
DATETIME_FORMAT = 'y-m-d HH:MM:SS'
if not os.path.exists('./logs'):
os.makedirs('./logs')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S",
},
'simple': {'format': '%(levelname)s %(message)s'},
},
'handlers': {
'file': {
'level': 'DEBUG' if DEBUG else 'INFO',
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': './logs/dxdrf.log',
'when': 'D',
'interval': 1,
'backupCount': 7,
'formatter': 'verbose',
},
'console': {'class': 'logging.StreamHandler', 'formatter': 'verbose'},
},
'loggers': {
'': {'handlers': ['file', 'console'], 'level': 'DEBUG' if DEBUG else 'INFO'},
'django': {
'handlers': ['console', 'file'],
'level': 'DEBUG' if DEBUG else 'INFO',
'propagate': False,
},
},
}
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'xadmin',
'crispy_forms',
'rest_framework',
'apps.users',
'apps.test_app',
]
LOGIN_URL = '/xadmin/login/'
LOGOUT_URL = '/xadmin/logout/'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
AUTH_USER_MODEL = 'users.Account'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DjangoXAdminDRF.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
]
},
}
]
WSGI_APPLICATION = 'DjangoXAdminDRF.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ.get('DXDRF_MYSQL_NAME', 'dxdrf'),
'USER': os.environ.get('DXDRF_MYSQL_USER', 'root'),
'PASSWORD': os.environ.get('DXDRF_MYSQL_PASSWD', 'secret'),
'HOST': os.environ.get('DXDRF_MYSQL_HOST', '127.0.0.1'),
'PORT': os.environ.get('DXDRF_MYSQL_PORT', 3306),
'OPTIONS': {'init_command': 'SET default_storage_engine=INNODB;'},
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'
},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator'},
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.AutoSchema',
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',
'DEFAULT_VERSION': 'v1',
'ALLOWED_VERSIONS': ['v1'],
'VERSION_PARAM': 'version',
}
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
JWT_AUTH = {'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=86400)}
formats.DATETIME_FORMAT = 'Y-n-j H:i:s'
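# --- Usage sketch (not part of the original file; values are placeholders) ---
# The settings above read their configuration from DXDRF_* environment
# variables, so the server might be launched like:
#   DXDRF_DEBUG=true DXDRF_MYSQL_PASSWD=secret python manage.py runserver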
|
[
"196349143@qq.com"
] |
196349143@qq.com
|
5563ce2c97673ab3096d4abfb84a18b5d593e3e8
|
7463a66dfa00572f4e4d8ef4349309531f0105ae
|
/TrainerDL/Projects/FLL_flaw_detection/segmentation/Unet/eval.py
|
f1fed7ca781068c51b7d895a0f9ffd33b008e59d
|
[] |
no_license
|
fx19940824/DetectionModel
|
f2e380fd21f4b31a17fd175a6dea1067b8f0d5cc
|
edc0d2f9eea481d2bc6f3abb2f222b59fdc25538
|
refs/heads/master
| 2022-12-20T19:58:32.224829
| 2019-05-30T01:16:05
| 2019-05-30T01:16:05
| 188,800,679
| 2
| 0
| null | 2022-11-22T02:39:23
| 2019-05-27T08:13:38
|
Python
|
UTF-8
|
Python
| false
| false
| 609
|
py
|
import torch
import torch.nn.functional as F
from Unet.dice_loss import dice_coeff
def eval_net(net, dataset):
"""Evaluation without the densecrf with the dice coefficient"""
net.eval()
tot = 0
for i, b in enumerate(dataset):
img = b[0]
true_mask = b[1]
img = torch.from_numpy(img).unsqueeze(0)
true_mask = torch.from_numpy(true_mask)
img = img.cuda()
true_mask = true_mask.cuda()
mask_pred = net(img)[0]
mask_pred = (mask_pred > 0.5).float()
tot += dice_coeff(mask_pred, true_mask).item()
return tot / (i + 1)
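# --- Usage sketch (not part of the original file) ---
# Assuming a trained U-Net `net` already on the GPU and a validation set
# that yields (image, mask) numpy pairs, as iterated above:
#   val_dice = eval_net(net, zip(val_imgs, val_masks))
#   print('Validation Dice coeff: {}'.format(val_dice))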
|
[
"you@example.com"
] |
you@example.com
|
f3c2d4de3b88eb36470e5a5068ab46e67554748c
|
62d22863789f737c03851c184ebfc9e987f9dced
|
/src/main/python/handlers/gauth.py
|
bc306d2dce88b7e6a645e7470df63e1cd14ff876
|
[] |
no_license
|
mattrjacobs/Prdict
|
4c160d31c63b5573fe502e3a4a1ffe872cfe2f41
|
bf4fd3fbad30dbddcbf09f6a38b12dd2284cdaf0
|
refs/heads/master
| 2016-09-05T14:27:26.367260
| 2012-03-07T12:21:06
| 2012-03-07T12:21:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,631
|
py
|
#Taken from : http://code.activestate.com/recipes/577217-routines-for-programmatically-authenticating-with-/
"""
Routines for programmatically authenticating with the Google Accounts system at
Google App-Engine.
This takes two calls, one to the ClientLogin service of Google Accounts,
and then a second to the login frontend of App Engine.
User credentials are provided to the first, which responds with a token.
Passing that token to the _ah/login GAE endpoint then gives the cookie that can
be used to make further authenticated requests.
Give the ACSID cookie to the client so it stays logged in with the GAE integrated users
system.
One last issue, after succesful authentication the current user's ID is still
missing; User(email).user_id() won't work. Here I think a HTTP redirect
should make the client re-request (using the cookie) and login, but the client
would need to support that. Alternatively the ID can be fetched within the
current request by a r/w round trip to the datastore, see:
http://stackoverflow.com/questions/816372/how-can-i-determine-a-user-id-based-on-an-email-address-in-app-engine
See also: http://markmail.org/thread/tgth5vmdqjacaxbx
"""
import logging, md5, urllib, urllib2
def do_auth(appname, hostname, user, password, dev=False, admin=False):
"This is taken from bits of appcfg, specifically: "
" google/appengine/tools/appengine_rpc.py "
"It returns the cookie send by the App Engine Login "
"front-end after authenticating with Google Accounts. "
if dev:
return do_auth_dev_appserver(user, admin)
# get the token
try:
auth_token = get_google_authtoken(appname, user, password)
except AuthError, e:
if e.reason == "BadAuthentication":
logging.error( "Invalid username or password." )
if e.reason == "CaptchaRequired":
logging.error(
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
if e.reason == "NotVerified":
logging.error( "Account not verified.")
if e.reason == "TermsNotAgreed":
logging.error( "User has not agreed to TOS.")
if e.reason == "AccountDeleted":
logging.error( "The user account has been deleted.")
if e.reason == "AccountDisabled":
logging.error( "The user account has been disabled.")
if e.reason == "ServiceDisabled":
logging.error( "The user's access to the service has been "
"disabled.")
if e.reason == "ServiceUnavailable":
logging.error( "The service is not available; try again later.")
raise
# now get the cookie
cookie = get_gae_cookie(appname, hostname, auth_token)
assert cookie
return cookie
def do_auth_dev_appserver(email, admin):
"""Creates cookie payload data.
Args:
email, admin: Parameters to incorporate into the cookie.
Returns:
String containing the cookie payload.
"""
admin_string = 'False'
if admin:
admin_string = 'True'
if email:
user_id_digest = md5.new(email.lower()).digest()
user_id = '1' + ''.join(['%02d' % ord(x) for x in user_id_digest])[:20]
else:
user_id = ''
return 'dev_appserver_login="%s:%s:%s"; Path=/;' % (email, admin_string, user_id)
def get_gae_cookie(appname, hostname, auth_token):
"""
Send a token to the App Engine login, again stating the name of the
application to gain authentication for. Returned is a cookie that may be used
to authenticate HTTP traffic to the application at App Engine.
"""
continue_location = "http://%s/" % hostname
args = {"continue": continue_location, "auth": auth_token}
url = "http://%s/_ah/login?%s" % (hostname,
urllib.urlencode(args))
opener = get_opener() # no redirect handler!
req = urllib2.Request(url)
try:
response = opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code,
response.msg, response.headers, response.fp)
cookie = response.headers.get('set-cookie')
assert cookie and cookie.startswith('ACSID')
return cookie.replace('; HttpOnly', '')
def get_google_authtoken(appname, email_address, password):
"""
Make secure connection to Google Accounts and retrieve an authorisation
token for the stated appname.
The token can be send to the login front-end at appengine using
get_gae_cookie(), which will return a cookie to use for the user session.
"""
opener = get_opener()
# get an AuthToken from Google accounts
auth_uri = 'https://www.google.com/accounts/ClientLogin'
authreq_data = urllib.urlencode({ "Email": email_address,
"Passwd": password,
"service": "ah",
"source": appname,
"accountType": "HOSTED_OR_GOOGLE" })
req = urllib2.Request(auth_uri, data=authreq_data)
try:
response = opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise AuthError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
class AuthError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self.reason = args["Error"]
def get_opener(cookiejar=None):
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
opener.add_handler(urllib2.HTTPSHandler())
if cookiejar:
opener.add_handler(urllib2.HTTPCookieProcessor(cookiejar))
return opener
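# --- Usage sketch (not part of the original file; app name and credentials
# are hypothetical) ---
#   cookie = do_auth('myapp', 'myapp.appspot.com', 'user@gmail.com', 'secret')
#   # ...then send `cookie` in the Cookie header of subsequent requests.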
|
[
"mattrjacobs@gmail.com"
] |
mattrjacobs@gmail.com
|
d942280782cce1058e4b42c396d37dd3f131ff78
|
96f259173a1857ab3164b80fc791c7057be8c8ee
|
/RFiles/adTrainer.py
|
ea8d2572f11a8e240039daeb7d902450a518e6f2
|
[] |
no_license
|
BilalZQ/Face-Recognition-Snippet
|
a720c2957a9ee8b4c0e6badf1d1cf8f1c9b43cd5
|
fa111407ca5f8fdd99be6eb0cce35ffb1221f19c
|
refs/heads/master
| 2020-03-23T21:41:23.903575
| 2018-07-24T08:04:18
| 2018-07-24T08:04:18
| 142,121,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,077
|
py
|
import os
import numpy as np
import cv2
from PIL import Image
# For face recognition we will use the LBPH Face Recognizer
# (Python 2 / OpenCV 2.x API; newer OpenCV builds expose this as
# cv2.face.LBPHFaceRecognizer_create())
recognizer = cv2.createLBPHFaceRecognizer()
path="C:\\Users\\Bilal\\Desktop\\face recognition\\dataSet"
def getImagesWithID(path):
imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
# print image_path
#getImagesWithID(path)
faces = []
IDs = []
for imagePath in imagePaths:
# Read the image and convert to grayscale
facesImg = Image.open(imagePath).convert('L')
faceNP = np.array(facesImg, 'uint8')
# Get the label of the image
ID= int(os.path.split(imagePath)[1].split(".")[1])
        print(ID)
        # Add the face image and its label to the training set
faces.append(faceNP)
IDs.append(ID)
cv2.imshow("Adding faces for traning",faceNP)
cv2.waitKey(10)
return np.array(IDs), faces
Ids,faces = getImagesWithID(path)
recognizer.train(faces,Ids)
recognizer.save("C:\\Users\\Bilal\\Desktop\\face recognition\\recognizer\\trainingData.yml")
cv2.destroyAllWindows()
|
[
"bilalqb095@gmail.com"
] |
bilalqb095@gmail.com
|
d3f7862c32fbab6e246ad1a1bb504ec88a13de6e
|
c2447fa5b69cccfab8ba302fe385e87bae8e8473
|
/examples/advanced-user-guide/settings_and_env_variables.py
|
29a613e2ead31c21a0596ed2599e54771dd3de9f
|
[] |
no_license
|
srujanprophet/Learn-FastAPI
|
173f3fba2aeff0dc3b2fb232f2c510121a9cbaa0
|
f54fecbba05af8e4bd3da5a4a293eea730038143
|
refs/heads/main
| 2023-07-15T12:39:05.356215
| 2021-08-15T15:53:44
| 2021-08-15T15:53:44
| 396,410,094
| 1
| 0
| null | 2021-08-15T16:35:35
| 2021-08-15T16:12:07
| null |
UTF-8
|
Python
| false
| false
| 1,425
|
py
|
"""
Import `BaseSettings` from Pydantic and create a sub-class, very much like with a Pydantic model.
The same way as with Pydantic models, we declare class attributes with type annotations, and possibly default values.
We can use all the same validation features and tools we use for Pydantic models, like different data types and additional validations with `Field()`.
Then, when we create an instance of that `Settings` class (in this case, in the `settings` object), Pydantic will read the environment variables in a case-insensitive way, so an upper-case variable `APP_NAME` will still be read for the attribute `app_name`.
Next it will convert and validate the data. So, when we use that `settings` object, we will have data of the types we declared (e.g. `items_per_user` will be an `int`).
Next, we would run the server passing the configurations as environment variables, for example we could set an `ADMIN_EMAIL` and `APP_NAME`.
"""
from fastapi import FastAPI
from pydantic import BaseSettings
class Settings(BaseSettings):
app_name: str = "Awesome API"
admin_email: str
items_per_user: int = 50
settings = Settings()
app = FastAPI()
@app.get("/info")
async def info():
"""Using the new `settings` object in our application
"""
return {
"app_name": settings.app_name,
"admin_email": settings.admin_email,
"items_per_user": settings.items_per_user
}
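
# A hedged usage sketch (not part of the original file): environment variables
# set before constructing Settings() override the class defaults. The variable
# values below are hypothetical, chosen only for illustration.
import os

os.environ["APP_NAME"] = "Demo API"              # matched case-insensitively to app_name
os.environ["ADMIN_EMAIL"] = "admin@example.com"  # required field with no default
demo_settings = Settings()
assert demo_settings.app_name == "Demo API"
assert demo_settings.admin_email == "admin@example.com"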
|
[
"srujanprophet@gmail.com"
] |
srujanprophet@gmail.com
|
c79bfe905b142bd559c466fa6407bd3e215e80d8
|
be0a3aa7b83b87c5d2c257b538545bdded39c051
|
/Chatbot_KG/common/_404_view.py
|
134402933a7afae8bf704a70387c41bf31ad6ae2
|
[
"Apache-2.0"
] |
permissive
|
water123li/Chatbot_CN
|
480e3bc6d6c0d8b6b0823452556acef14df1c2c3
|
e63808030c6cc516020075cdcd0c332120a998fc
|
refs/heads/master
| 2022-01-25T10:34:34.726243
| 2019-06-13T10:44:44
| 2019-06-13T10:44:44
| 192,504,292
| 1
| 0
|
Apache-2.0
| 2019-06-18T09:01:55
| 2019-06-18T09:01:55
| null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
# -*- coding: utf-8 -*-
#from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators import csrf
def _404_(request): # 404页面
context = {}
return render(request, '404.html', context)
|
[
"charlesxu86@163.com"
] |
charlesxu86@163.com
|
3bccafa57028ebc40f4d76552548b4b2d24e0dba
|
590ad70ace0734cb73356aafb0098035144840ac
|
/Book/myproject/myapp/migrations/0001_initial.py
|
1215e30bfc4a151005566d46d283d4e43805de03
|
[] |
no_license
|
sujithasrajan/Book-Application
|
79cdcfe98b9e027c902145544bec817502e92edb
|
7587ddb24dea158816677e05f2d7aa560c332879
|
refs/heads/master
| 2021-02-18T21:15:33.576025
| 2020-03-05T18:27:58
| 2020-03-05T18:27:58
| 245,238,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,024
|
py
|
# Generated by Django 2.1.3 on 2018-11-28 20:10
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.CharField(max_length=200, unique=True, verbose_name='URL: ')),
('name', models.CharField(max_length=200, verbose_name='Name: ')),
('isbn', models.CharField(max_length=500, verbose_name='ISBN: ')),
('publisher_year', models.IntegerField(verbose_name='Year of publication: ')),
('publisher_name', models.CharField(max_length=300, verbose_name='Publisher: ')),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_modified', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"sujitha@terpmail.umd.edu"
] |
sujitha@terpmail.umd.edu
|
d0eb1adf8565bdef7765fe8687ae73189ee23590
|
58ac6b21074198f02c47cc0984545915ac05e023
|
/venv/bin/django-admin.py
|
b79aafade74c053bd2944340a96d399bd32c9891
|
[] |
no_license
|
ProxyServices/ecommerce
|
a386d6dab23b3055928a53914a0d1180b60ce75b
|
313f8cc279387ebe64c4e4d72d7a66497b494a96
|
refs/heads/master
| 2022-12-13T01:21:19.956469
| 2019-08-02T05:12:18
| 2019-08-02T05:12:18
| 200,083,712
| 0
| 0
| null | 2022-05-25T02:13:44
| 2019-08-01T16:21:42
|
Python
|
UTF-8
|
Python
| false
| false
| 157
|
py
|
#!/home/dnyt/Desktop/django/merce/venv/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"enockomondike@gmail.com"
] |
enockomondike@gmail.com
|
f25bf38bb511ac729dcceab59bbe2ed92792e28a
|
b0392d704af841622b8cc78788d8d852044d8afd
|
/2019_3_Cooper_Type/RoboFont/draw_with_pen.py
|
f93fa63157d379aae69edcca19c69397e7a6a70b
|
[
"MIT"
] |
permissive
|
benkiel/python_workshops
|
0ad7d52123f71a1c4e44bb8d9bf1eefa863a9eb5
|
9483c1fd5f7dd87e595289efb7376e1b81ff5ede
|
refs/heads/master
| 2021-04-15T07:24:11.265604
| 2020-11-14T20:57:56
| 2020-11-14T20:57:56
| 126,245,708
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
font = CurrentFont()
glyph = font.newGlyph("a")
glyph.width = 500
pen = glyph.getPen()
pen.moveTo((100, 100))
pen.lineTo((800, 100))
pen.curveTo((1000, 300), (1000, 600), (800, 800))
pen.lineTo((100, 800))
pen.lineTo((100, 100))
pen.closePath()
glyph.update()
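
# Pen protocol recap (comment only): moveTo() opens a contour, lineTo() adds a
# straight segment, curveTo() adds a cubic segment given two off-curve points
# and the on-curve end point, and closePath() closes the contour back to the
# starting point before glyph.update() refreshes the glyph.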
|
[
"ben@typefounding.com"
] |
ben@typefounding.com
|
099d4caac895f0192fef64dafe1128023b434a40
|
6cbb3824c59f9b3ea9a55682b086744925895f47
|
/tests/differ.py
|
5e53854294d6ee21bd6f8fb78c264a1459fec09a
|
[
"BSD-2-Clause"
] |
permissive
|
rbock/kiss-templates
|
e984317ad305fa828291beb1f9e7bab6ef6bb111
|
20fd9df05331d5dc8fa79c4145b165f4286c863f
|
refs/heads/develop
| 2020-03-26T16:16:30.638095
| 2017-03-16T05:56:03
| 2017-03-16T05:56:03
| 40,734,928
| 46
| 9
| null | 2017-03-16T05:49:24
| 2015-08-14T20:34:23
|
C++
|
UTF-8
|
Python
| false
| false
| 1,142
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import sys
def normalize(content):
# Ignore newline differences (e.g. on Windows, std::endl is "\r\n") and any extra whitespace
# at beginning/end of content
return content.replace('\r\n', '\n').strip()
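
# Illustrative example (comment only):
#   normalize('line one\r\nline two\r\n') == 'line one\nline two'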
if __name__ == '__main__':
assert len(sys.argv) == 4
test_name = sys.argv[1]
a_file_path = sys.argv[2]
b_file_path = sys.argv[3]
assert a_file_path != b_file_path
with open(a_file_path, 'rb') as a:
with open(b_file_path, 'rb') as b:
a_content = normalize(a.read())
b_content = normalize(b.read())
if a_content == b_content:
exit(0)
import difflib
diff = difflib.ndiff(a_content.splitlines(), b_content.splitlines(),
charjunk=lambda c: False)
diff = list(diff)
if diff:
print('Output headers of test %s differ:' % test_name, file=sys.stderr)
for line in diff:
print(line, file=sys.stderr)
exit(1)
|
[
"andreas.sommer@ppro.com"
] |
andreas.sommer@ppro.com
|
4897cc5f53a6f7791247a903bedaec8d90b7b3fb
|
d9918f70c67cfcb12716515110b9a47cf11495c3
|
/courses/urls.py
|
2dd69a82b0185ea3c8949ad835f16bb0f74abbc8
|
[] |
no_license
|
richieyrich/edu_site
|
31e68fc5c66509602cf37d26b113e34a61bc786b
|
847548cfcc2a798bee7ddccdda3863d4eb45bbb0
|
refs/heads/main
| 2023-06-22T20:51:40.841282
| 2021-07-23T09:29:29
| 2021-07-23T09:29:29
| 388,749,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 708
|
py
|
from django.urls import path
from .views import Home, CourseView, Signup, Login, signout, CheckoutView, PaymentSuccess
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', Home.as_view() , name='home' ),
path('login', Login.as_view() , name='login' ),
path('signup', Signup.as_view() , name='signup' ),
path('logout', signout , name='logout' ),
path('course/<str:slug>', CourseView.as_view() , name='course_page' ),
path('checkout/<str:slug>', CheckoutView.as_view() , name='checkout_page' ),
path('payment_success', PaymentSuccess.as_view() , name='payment' ),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"richram79@gmail.com"
] |
richram79@gmail.com
|
3afe83be9197a3e0e23a22dbb67e9ef0d9f3cd12
|
79797e982b04c443eba25b563837d45479fc5188
|
/outline/outline/spiders/events_spider.py
|
0acfbe7642bbe8f57bd11541507e8b884cb2e89f
|
[] |
no_license
|
JavaDevVictoria/OutlineWebsiteScraper
|
7e80f704c9aaa2c321c7ddc8def31c6096e0be57
|
6df30a62bcd4d8fec77682b61e3fc5ff0ae53c19
|
refs/heads/master
| 2021-01-20T13:58:49.471500
| 2017-02-21T20:50:53
| 2017-02-21T20:50:53
| 82,725,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
import scrapy
class EventsSpider(scrapy.Spider):
name = "events"
    def start_requests(self):
        start_urls = [
            'http://www.outlineonline.co.uk/events',
        ]
        for url in start_urls:
            yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
page = response.url.split("/")[-2]
filename = 'events-%s.html' % page
with open(filename, 'wb') as f:
f.write(response.body)
|
[
"noreply@github.com"
] |
JavaDevVictoria.noreply@github.com
|
a832406ce534f7a5f5814b9ffbe6137f646767cd
|
091f397adba3eeb36557ff84f6b0845e2952ba48
|
/roles/.venv/bin/pre-commit-validate-manifest
|
e472204bced643efd07bd0e262c664adbb563eb5
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
Alex-Mbulankende/testingshit
|
0d6d07d0754704abb8a76cb6fe5e620ff96b47d5
|
6cbe5c8da2c030d1e3c989b86adf7b740ac65418
|
refs/heads/master
| 2022-03-12T04:11:00.763799
| 2019-12-01T05:31:19
| 2019-12-01T05:31:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
#!/Users/tjblogumas/blogumas/development/testingshit/roles/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pre_commit.clientlib import validate_manifest_main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(validate_manifest_main())
|
[
"tj.blogumas@codeblogkc.com"
] |
tj.blogumas@codeblogkc.com
|
|
922b3e60b603750b8da23e2765690a8d370e140e
|
e912af291e1457c61606642f1c7700e678c77a27
|
/python/345_reverse_vowels_of_a_string.py
|
4c20425cacb9377f87bd830ce08937932298232f
|
[] |
no_license
|
MakrisHuang/LeetCode
|
325be680f8f67b0f34527914c6bd0a5a9e62e9c9
|
7609fbd164e3dbedc11308fdc24b57b5097ade81
|
refs/heads/master
| 2022-08-13T12:13:35.003830
| 2022-07-31T23:03:03
| 2022-07-31T23:03:03
| 128,767,837
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
class Solution:
def reverseVowels(self, s: str) -> str:
vowels = set(["a", "e", "i", "o", "u", "A", "E", "I", "O", "U"])
left, right = 0, len(s) - 1
arr = list(s)
while left < right:
while left < right and arr[left] not in vowels: left += 1
while left < right and arr[right] not in vowels: right -= 1
if left < right:
arr[left], arr[right] = arr[right], arr[left]
left += 1
right -= 1
return "".join(arr)
|
[
"tai-chia.huang@sv.cmu.edu"
] |
tai-chia.huang@sv.cmu.edu
|
f7f90a42c57e1322931e26f9cd9aae5ab71f6070
|
9c6e055fc8c15d5a2e1dbec40cf2ab6534b49793
|
/nourisher/collects/collector.py
|
e0479a36dadb162e8b569030d632aa9469aff3b5
|
[] |
no_license
|
hnykda/nourisher
|
0d70bac6a19ed95656e7ef51a4bb18e8b4aae9d2
|
6049d5b47c9004b3badfee89bcf7c259c84c4442
|
refs/heads/master
| 2021-01-20T16:12:04.940861
| 2015-07-27T16:42:04
| 2015-07-27T16:42:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,712
|
py
|
import logging
log = logging.getLogger(__name__)
from .feeder import feed_that_all
import time
from utiliser import scraper_prep, get_webdriver
class Collector():
""" Wrapper for collecting
"""
def __init__(self, wdriver_name, maternal_scrapers = ["urlm", "websiteout", "ranks", "alexa"]):
self.wdriver_name = wdriver_name
#self.driver = get_webdriver(wdriver_name)
self.load_scrappers(maternal_scrapers)
def load_scrappers(self, scrapers_names):
for name in scrapers_names:
setattr(self, name, scraper_prep(name, self.wdriver_name))
def collect_maternal(self, finUrls, origUrl):
total = {}
# alexa must be first, because she returns the
# true address
log.debug("Nechavama alexu uhadnout adresu.")
if finUrls != []:
article_url = finUrls[0] # url of first article
else:
article_url = origUrl # if no articles present, try the original one
self.alexa.get_maternal(article_url)
maternal_url = self.alexa.guessed_maternal_url
try:
log.debug("Alexa sbira data.")
self.alexa.collect_that_all()
total.update({"alexa": self.alexa.scrapedData})
except RuntimeError:
log.debug("Alexa nic nema.")
total.update({"alexa" : None})
rest = {"urlm" : self.urlm,
"websiteout": self.websiteout,
"ranks" : self.ranks}
for sname, scrpr in rest.items():
            log.debug(sname + " is collecting data.")
try:
scrpr.get_maternal(maternal_url)
scrpr.collect_that_all()
total.update({sname: scrpr.scrapedData})
log.debug("Succeded.")
#sleep(ST)
except RuntimeError:
log.debug("Scrapper neuspel")
total.update({sname: None})
scrpr.driver.quit()
return total, maternal_url
def collect_for_orig(self, orig_url):
startTime = time.time()
total = {}
feedInfo, finUrls = feed_that_all(orig_url)
maternalInfo, maternal_url = self.collect_maternal(finUrls, orig_url)
total.update({"feedInfo": feedInfo})
total.update(maternalInfo)
total.update({"origURL": orig_url})
total.update({"maternalURL": maternal_url})
total.update({"datetime_of_collection" : time.strftime("%Y-%m-%d %H:%M:%S")})
log.info("Collecting data took: {0}".format(time.time() - startTime) + " seconds")
return total
def restart_driver(self):
self.driver.quit()
self.driver = get_webdriver(self.wdriver_name)
|
[
"kotrfa@gmail.com"
] |
kotrfa@gmail.com
|
64e92c8157fc4b2dcc35b653dbef2a372a6f61cf
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_016/ch45_2020_04_12_05_06_07_036297.py
|
ec7433dee7f927c217ddb1c6613998b97ea50d59
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
lista1 = []
lista2 = []
exercicio = True
while exercicio:
    a = int(input('What is the number? '))
if a > 0:
lista1.append(a)
else:
break
i = 1
exercicio2 = True
while exercicio2:
    lista2.append(lista1[len(lista1) - i])
    i = i + 1
    if len(lista1) == len(lista2):
break
print(lista2)
|
[
"you@example.com"
] |
you@example.com
|
7907cf31a5c324d467a40e8790b57d1d15d3eb7d
|
f6abf7c6d32549d09af60546be50b2438ba3c1f6
|
/finalproject1.0/pyscript/runtest.py
|
696798d84bf0b6f84cfe541c0e687e91b4d1586c
|
[] |
no_license
|
Guo-Dong-Zhang/junitserver
|
f041c6376aa62754b947d19eecfcaa0ff0645587
|
1e9bb51f3d2e7a5ecee8da1e106d6816e1ce310d
|
refs/heads/master
| 2022-11-13T11:20:28.595011
| 2020-07-04T02:47:21
| 2020-07-04T02:47:21
| 270,307,601
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,824
|
py
|
import os
import re
import listdir
javadic = listdir.javadic
junitdic = listdir.junitdic
projectpath = listdir.projectpath
junitPrefix = "java org.junit.runner.JUnitCore";
filePrefix = "com.junit."
for k,v in junitdic.items():
allfiles = ""
v = [filePrefix+re.sub(".java","",x) for x in v]
for filename in v:
allfiles = allfiles + " "+filename
junitdic[k]=allfiles
def run():
os.system("rm -rf "+projectpath+"/testcontainer/com/*") #reset testconainer/com folder
for k,v in junitdic.items():
address1 = projectpath+"/compileresult/"+k+"/com/java/ "
address3 = projectpath+"/testcontainer/com"
os.system("cp -r "+ address1+address3)
for stid in junitdic.keys():
address2 = projectpath+"/compileresult/"+stid+"/com/junit/ "
os.system("cp -r "+ address2+address3)
os.chdir(projectpath+"/testcontainer/")
os.system(junitPrefix + junitdic[stid])
os.system("rm -rf "+projectpath+"/testcontainer/com/junit")
print("\n\n\n"+k+"&&"+stid+"test already!\n\n\n")
os.system("rm -rf "+projectpath+"/testcontainer/com/*")
run()
'''
def run():
for k,v in junitdic.items():
os.chdir(projectpath+"/compileresult/"+k)
os.system(junitPrefix+v)
print(junitPrefix+v)
print(k+" is done!!!!!!!!!!!!\n\t")
print(junitdic)
run()
'''
'''
{'s1': ' com.junit.calculatetest com.junit.calculatetest2', 's2': ' com.junit.calculatetest', 's3': ' com.junit.calculatetest com.junit.calculatetest2 com.junit.calculatetest3'}
'''
'''
{'s1': ['com.trustie.test.calculatetest2', 'com.trustie.test.calculatetest'], 's2': ['com.trustie.test.calculatetest'], 's3': ['com.trustie.test.calculatetest', 'com.trustie.test.calculatetest2', 'com.trustie.test.calculatetest3']}
'''
|
[
"zhangguodong627@gmail.com"
] |
zhangguodong627@gmail.com
|
6b3683b6b1666261f589bd5180f2941e0203c566
|
81774da9e72d723fd8f80a44f9900852cf24ba03
|
/Expense.py
|
fc0dbd496ecb3bb5df5d7a70cfde7cc69914642d
|
[] |
no_license
|
Catalin-David/Expenses
|
010071d7c8e07d87de3530ef83ae420a2251aa98
|
368770edf6fd90766ef3b1a684cc61e2cc38dbd9
|
refs/heads/master
| 2020-09-04T09:19:51.561041
| 2019-11-05T09:01:28
| 2019-11-05T09:01:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,211
|
py
|
class Expense:
    def __init__(self, day=1, amount=0, tip=""):
'''
Function initializes a new Expense
params: day (default:1) - day of the expense (integer between 1-30)
amount (default:0) - amount that is paid (integer)
tip - type of expense/object that was purchased
'''
        try:
            self._day = int(day)
        except (TypeError, ValueError):
            raise ValueError("Day should be an integer")
        if self._day < 1 or self._day > 30:
            raise ValueError("Day should be between 1 and 30")
        try:
            self._amount = int(amount)
        except (TypeError, ValueError):
            raise ValueError("Amount of expense should be an integer")
        if self._amount < 0:
            raise ValueError("Amount of expense should be positive")
self._type = tip
@property
def Day(self):
'''
Property that returns the day of an expense
'''
return self._day
@Day.setter
def Day(self, value):
'''
Function is a setter for parameter day of an expense
'''
try:
value = int(value)
except:
raise ValueError("Day should be an integer")
if value < 1 or value > 30:
raise ValueError("Day should be between 1 and 30")
self._day = value
@property
def Amount(self):
'''
Property that returns the amount of an expense
'''
return self._amount
@Amount.setter
def Amount(self, value):
'''
Function is a setter for parameter amount of an expense
'''
try:
value = int(value)
except:
raise ValueError("Amount of expense should be an integer")
if value < 0:
raise ValueError("Amount of expense should be positive")
self._amount = value
@property
def Type(self):
'''
Property that returns the type of an expense
'''
return self._type
@Type.setter
def Type(self, value):
'''
Function is a setter for parameter type of an expense
'''
self._type = value
def __str__(self):
'''
        Function creates a model for printing an object of type Expense
'''
return "(Day: " +str(self.Day) + ", Amount: " + str(self.Amount) + ", Type: " + self.Type + ")"
def __lt__(self, other):
'''
Function creates a model for comparing two objects of type Expense
'''
return self._day < other._day
def tests():
try:
expense = Expense("32", "100", "Food")
assert False
except ValueError:
assert True
try:
expense = Expense("10.5", "100", "Food")
assert False
except ValueError:
assert True
try:
expense = Expense("30", "100.5", "Food")
assert False
except ValueError:
assert True
try:
expense = Expense("1", "-100", "Food")
assert False
except ValueError:
assert True
expense1 = Expense("25", "100", "Food")
expense2 = Expense("18", "2500", "Gucci")
assert expense2 < expense1
tests()
|
[
"cata02dav@yahoo.com"
] |
cata02dav@yahoo.com
|
0495efaa9c6dc1693907ddc055b5150e11a8b084
|
d313e6ae0e59903338cb669fa5126a831b8a7e06
|
/venv/lib/python3.8/site-packages/aws_cdk/aws_codepipeline/_jsii/__init__.py
|
6547e161efccdee80ac426b94fb29add46f30e1a
|
[
"MIT-0"
] |
permissive
|
harun-vit/aws-cdk-pipelines-demo
|
9b0ec4550f3cc03bd364872433495cced980cc81
|
7e7faeee112c3dca718613fa8a1fba80d2116bac
|
refs/heads/main
| 2023-08-11T10:55:14.449259
| 2021-09-14T10:34:21
| 2021-09-14T10:34:21
| 406,144,409
| 0
| 0
|
MIT-0
| 2021-09-13T22:12:26
| 2021-09-13T22:12:25
| null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
import aws_cdk.aws_codestarnotifications._jsii
import aws_cdk.aws_events._jsii
import aws_cdk.aws_iam._jsii
import aws_cdk.aws_kms._jsii
import aws_cdk.aws_s3._jsii
import aws_cdk.core._jsii
import constructs._jsii
__jsii_assembly__ = jsii.JSIIAssembly.load(
"@aws-cdk/aws-codepipeline",
"1.118.0",
__name__[0:-6],
"aws-codepipeline@1.118.0.jsii.tgz",
)
__all__ = [
"__jsii_assembly__",
]
publication.publish()
|
[
"harun.celjo@visma.com"
] |
harun.celjo@visma.com
|
395d25132aa4014d7562592fe7071faaef12978b
|
39ca6855e3224c1637fae5b4a6406d76afab7745
|
/notifiers/providers/slack.py
|
2ae222793dbdfa6c427b89773479fdca89d3abc0
|
[
"MIT"
] |
permissive
|
Ro9ueAdmin/notifiers
|
3bb8c4c8942c2ba5eabc19832b62bd656a7f0b38
|
7060fab24c83d495298ea3b82adc91b9f6ece93a
|
refs/heads/master
| 2021-10-23T15:00:26.707375
| 2018-08-07T16:28:39
| 2018-08-07T16:28:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,221
|
py
|
from ..core import Provider, Response
from ..utils import requests
class Slack(Provider):
"""Send Slack webhook notifications"""
base_url = 'https://hooks.slack.com/services/'
site_url = 'https://api.slack.com/incoming-webhooks'
name = 'slack'
__fields = {
'type': 'array',
'title': 'Fields are displayed in a table on the message',
'minItems': 1,
'items': {
'type': 'object',
'properties': {
'title': {
'type': 'string',
'title': 'Required Field Title'
},
'value': {
'type': 'string',
'title': 'Text value of the field. May contain standard message markup and must'
' be escaped as normal. May be multi-line'
},
'short': {
'type': 'boolean',
'title': 'Optional flag indicating whether the `value` is short enough to be displayed'
' side-by-side with other values'
}
},
'required': ['title'],
'additionalProperties': False
}
}
__attachments = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'title': {
'type': 'string',
'title': 'Attachment title'
},
'author_name': {
'type': 'string',
'title': "Small text used to display the author's name"
},
'author_link': {
'type': 'string',
'title': 'A valid URL that will hyperlink the author_name text mentioned above. '
'Will only work if author_name is present'
},
'author_icon': {
'type': 'string',
'title': 'A valid URL that displays a small 16x16px image to the left of the author_name text. '
'Will only work if author_name is present'
},
'title_link': {
'type': 'string',
'title': 'Attachment title URL'
},
'image_url': {
'type': 'string',
'format': 'uri',
'title': 'Image URL'
},
'thumb_url': {
'type': 'string',
'format': 'uri',
'title': 'Thumbnail URL'
},
'footer': {
'type': 'string',
'title': 'Footer text'
},
'footer_icon': {
'type': 'string',
'format': 'uri',
'title': 'Footer icon URL'
},
'ts': {
'type': ['integer', 'string'],
'format': 'timestamp',
'title': 'Provided timestamp (epoch)'
},
'fallback': {
'type': 'string',
'title': "A plain-text summary of the attachment. This text will be used in clients that don't"
" show formatted text (eg. IRC, mobile notifications) and should not contain any markup."
},
'text': {
'type': 'string',
'title': 'Optional text that should appear within the attachment'
},
'pretext': {
'type': 'string',
'title': 'Optional text that should appear above the formatted data'
},
'color': {
'type': 'string',
'title': "Can either be one of 'good', 'warning', 'danger', or any hex color code"
},
'fields': __fields
},
'required': ['fallback'],
'additionalProperties': False
}
}
_required = {
'required':
[
'webhook_url',
'message'
]
}
_schema = {
'type': 'object',
'properties': {
'webhook_url': {
'type': 'string',
'format': 'uri',
'title': 'the webhook URL to use. Register one at https://my.slack.com/services/new/incoming-webhook/'
},
'icon_url': {
'type': 'string',
'format': 'uri',
'title': 'override bot icon with image URL'
},
'icon_emoji': {
'type': 'string',
'title': 'override bot icon with emoji name.'
},
'username': {
'type': 'string',
'title': 'override the displayed bot name'
},
'channel': {
'type': 'string',
'title': 'override default channel or private message'
},
'unfurl_links': {
'type': 'boolean',
'title': 'avoid automatic attachment creation from URLs'
},
'message': {
'type': 'string',
'title': 'This is the text that will be posted to the channel'
},
'attachments': __attachments
},
'additionalProperties': False
}
def _prepare_data(self, data: dict) -> dict:
text = data.pop('message')
data['text'] = text
if data.get('icon_emoji'):
icon_emoji = data['icon_emoji']
if not icon_emoji.startswith(':'):
icon_emoji = f':{icon_emoji}'
if not icon_emoji.endswith(':'):
icon_emoji += ':'
data['icon_emoji'] = icon_emoji
return data
def _send_notification(self, data: dict) -> Response:
url = data.pop('webhook_url')
response, errors = requests.post(url, json=data)
return self.create_response(data, response, errors)
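
    # Hedged usage sketch (comment only; the webhook URL is a placeholder and
    # `get_notifier` is assumed to be the package's public lookup helper):
    #   from notifiers import get_notifier
    #   slack = get_notifier('slack')
    #   slack.notify(webhook_url='https://hooks.slack.com/services/T000/B000/XXX',
    #                message='Build finished')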
|
[
"noreply@github.com"
] |
Ro9ueAdmin.noreply@github.com
|
bdb680ea9f7555e585150ee6db22432adc4fad97
|
953617ae07da4a4aac36cabe6bb1cd2af39d84ad
|
/Simple_Calculator/Release/Calculator.py
|
feee220bfc18d7d80c86a607abebbb8432c1c589
|
[] |
no_license
|
PreetiAP/Simple-Calculator-version2
|
c8091cb1be900bad8e59f533efe31e2f2bbeabe3
|
5953172cd19c9739fcbb7863fb79e1223f7728d9
|
refs/heads/master
| 2021-08-19T07:56:19.623716
| 2017-11-25T10:15:02
| 2017-11-25T10:15:02
| 111,994,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,157
|
py
|
#About: This uses the Calculator functions that are exposed from C++
import Calculator
def GetOperation():
#Valid operations are +, -, *, /
toRepeat = True
while(toRepeat):
#Get user input - operation, numbers
print "\nPress + for addition\nPress - for subtraction\nPress * for multiplication\nPress / for division\n\n"
        inputOperation = raw_input("Please enter your choice: ")
if inputOperation in ['+','-','*','/']:
#Valid operation
toRepeat = False
return inputOperation
else:
print "Invalid input! Please enter valid operation"
toRepeat = True
def GetNumber(msg):
#Get float number
toRepeat = True
while(toRepeat):
try:
number = float(raw_input(msg))
return number
except ValueError, e:
print "Invalid input! Please enter number ..."
def Calculate():
#Get user input and perform opration
inputOperation = GetOperation()
number1 = GetNumber("Enter 1st number:")
number2 = GetNumber("Enter 2nd number:")
#Create calculator object and perform operation
obj = Calculator.MyCalculator(number1, number2)
print
if inputOperation == '+':
obj.Add()
elif inputOperation == '-':
obj.Subtract()
elif inputOperation == '*':
obj.Multiply()
elif inputOperation == '/':
obj.Divide()
def Execute():
toRepeat = True
Calculator.SayAboutApplication() #Print application details
while(toRepeat):
try:
Calculate()
print "Do you want to continue ?"
toContinue = raw_input("Press y/Y to continue ..\n")
if not toContinue in ['y','Y']:
toRepeat = False
except Exception, e:
print"Exception occured"
print "Exception: %s" %str(e)
print "\nThank you for using this application"
raw_input("Press any key to exit ...")
if __name__ == "__main__":
Execute()
|
[
"sandisk@8"
] |
sandisk@8
|
81f97fd89319182e8e2b55ef7ca9d2c430091a8d
|
e7fde1b443d8641456d76d501285afebe5b688f9
|
/app/db_record_example.py
|
1beb1d5e85d76e7d03068b38aaa348932d702d79
|
[] |
no_license
|
youaresourcecode/lp_webportal
|
f71745104b80a5a9ce8b82ff7c9a306856e5b476
|
6f5fdfdefa5235ac60ed5f6c333ec8ef4ecc35b4
|
refs/heads/master
| 2020-12-19T13:29:44.603635
| 2019-12-24T17:07:46
| 2019-12-24T17:07:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 855
|
py
|
import datetime
from config import MONGO_URI
from mongoengine import connect
from posts.models import Post
from flask_mongoengine import MongoEngine
db = MongoEngine()
'''
class Post(db.Document):
title = db.StringField(required=True)
tag = db.StringField(max_length=25)
text = db.StringField()
urls = db.ListField(db.StringField())
posted = db.DateTimeField(default=datetime.datetime.now)
def __repr__(self):
return f"Post {self.title}"
'''
connect(db='testdb',host=MONGO_URI)
for post in Post.objects():
print(post.text)
'''
Post(
title='Engine test',
tag='pytest',
text='Hello Everyone!',
urls= ['https://sun6-14.userapi.com/c855128/v855128189/10f4b2/J977kYyDEag.jpg',
'https://sun9-45.userapi.com/c855128/v855128189/10f478/yH9fX5eBFOA.jpg'],
).save()
'''
|
[
"1____1@bk.ru"
] |
1____1@bk.ru
|
cd63da0c193c3425a22c274b0ee7cba4c5909957
|
ce0c2b2a6132729862a9ccc7012939d020dab46e
|
/LinkedListDesign.py
|
4b0b03e7e553115c68739ff0218f157fe805ae9a
|
[] |
no_license
|
ConnorNusser/LeetCodePreparation
|
8138343c65384d50891bdb324045408b31dd4605
|
fa474358e852a4c470a4617fa4a4ba0689b90721
|
refs/heads/main
| 2023-06-03T03:48:44.390144
| 2021-06-21T00:46:03
| 2021-06-21T00:46:03
| 378,058,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,232
|
py
|
class Node:
def __init__(self, val=None, nex=None):
self.val = val
self.nex = nex
class MyLinkedList(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.head = None
def get(self, index):
"""
Get the value of the index-th node in the linked list. If the index is invalid, return -1.
:type index: int
:rtype: int
"""
        if self.head is None:
            return -1
        ct = 0
        var = self.head
        while ct < index and var.nex is not None:
            var = var.nex
            ct = ct + 1
        if ct < index:
            return -1
        return var.val
def addAtHead(self, val):
"""
Add a node of value val before the first element of the linked list. After the insertion, the new node will be the first node of the linked list.
:type val: int
:rtype: None
"""
if self.head == None:
self.head = Node(val)
else:
var1 = self.head
self.head = Node(val)
self.head.nex = var1
def addAtTail(self, val):
"""
Append a node of value val to the last element of the linked list.
:type val: int
:rtype: None
"""
last_element = self.head
while(last_element.nex != None):
last_element = last_element.nex
last_element.nex = Node(val)
def addAtIndex(self, index, val):
"""
Add a node of value val before the index-th node in the linked list. If index equals to the length of linked list, the node will be appended to the end of linked list. If index is greater than the length, the node will not be inserted.
:type index: int
:type val: int
:rtype: None
"""
        if index <= 0:
            self.addAtHead(val)
            return
        head = self.head
        ct = 0
        while ct < index - 1 and head.nex is not None:
            head = head.nex
            ct = ct + 1
        if ct < index - 1:
            return  # index is greater than the length: do not insert
        head.nex = Node(val, head.nex)
def deleteAtIndex(self, index):
"""
Delete the index-th node in the linked list, if the index is valid.
:type index: int
:rtype: None
"""
        if index == 0:
            self.head = self.head.nex
            return
curr = self.head
ct = 0
while(index - 1 > ct):
ct = ct + 1
curr = curr.nex
if curr.nex == None:
return
beforeCurr = curr
deleteCurr = beforeCurr.nex
curr = deleteCurr.nex
beforeCurr.nex = curr
# Your MyLinkedList object will be instantiated and called as such:
obj = MyLinkedList()
obj.addAtHead(1)
obj.addAtTail(3)
obj.addAtIndex(1,2)
print(obj.get(0))
print(obj.get(1))
print(obj.get(2))
print("--------")
obj.deleteAtIndex(1)
obj.get(1)
car = obj.head
while(car != None):
print(car.val)
car=car.nex
|
[
"connornusser@gmail.com"
] |
connornusser@gmail.com
|
8221bde2d2eb1e3bcedf585dfda5b07f7d4c9f00
|
ecb16f6b318b0ecd731a9e2dd117aab47069a878
|
/ikpdb_client.py
|
2bd7dfe6318067e527feb687d1110cc08d0bb466
|
[
"MIT"
] |
permissive
|
cmorisse/ikpxdb_tests
|
54513e03f0a2b7f883d8fb7872dfdd1649cbe247
|
b49d468e75aedfcc183f165e502e81ed275a4039
|
refs/heads/master
| 2020-03-17T13:09:41.516217
| 2018-08-18T07:33:45
| 2018-08-18T07:33:45
| 133,619,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,883
|
py
|
# coding: utf-8
import socket
import logging
import json
_logger = logging.getLogger(__file__)
_logger.addHandler(logging.StreamHandler())
_logger.setLevel(logging.INFO)
class IKPdbConnectionError(Exception):
pass
class IKPdbClientError(Exception):
pass
class IKPdbClient(object):
MAGIC_CODE = "LLADpcdtbdpac"
MESSAGE_TEMPLATE = "length=%%s%s%%s" % MAGIC_CODE
SOCKET_BUFFER_SIZE = 4096 # Maximum size of a packet received from client
def __init__(self, host, port=15470, debug=False):
""" Create a client and connect to IKPdbClient
"""
self._host = host
self._port = port
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((self._host, self._port))
self._received_data = ''
self._msg_id = 100
if not debug:
obj = self.receive()
assert obj['info_messages'][0] == u"Welcome to", ("'start'/'Welcome"
"to...' message not received")
def encode(self, obj):
json_obj = json.dumps(obj)
return self.MESSAGE_TEMPLATE % (len(json_obj), json_obj,)
def decode(self, message):
json_obj = message.split(self.MAGIC_CODE)[1]
obj = json.loads(json_obj)
return obj
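
    # Illustrative round-trip (comment only; the length prefix counts the JSON
    # payload, here 21 characters):
    #   encode({'command': 'resume'})
    #     -> 'length=21LLADpcdtbdpac{"command": "resume"}'
    #   decode() of that string returns the original dict.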
def log_received(self, msg):
_logger.debug("Received %s bytes >>>%s<<<", len(msg), msg)
def log_sent(self, msg):
_logger.debug("Sent %s bytes >>>%s<<<", len(msg), msg)
def receive(self, timeout=None):
"""
"""
if timeout:
self._socket.settimeout(timeout)
else:
self._socket.settimeout(None)
skip_recv_switch = True if self._received_data else False
while True:
try:
if skip_recv_switch:
data = ''
skip_recv_switch = False
else:
data = self._socket.recv(self.SOCKET_BUFFER_SIZE)
except socket.timeout:
_logger.debug("socket.timeout waiting for ikpdb message.")
raise
except socket.error as socket_err:
return {'command': '_InternalQuit',
'args':{'socket_error_number': socket_err.errno,
'socket_error_str': socket_err.strerror}}
self._received_data += data
# Do we have received a MAGIC_CODE
try:
magic_code_idx = self._received_data.index(self.MAGIC_CODE)
except ValueError:
continue
# Do we have we received a length=
try:
length_idx = self._received_data.index('length=')
except ValueError:
continue
# extract length content from received data
json_length = int(self._received_data[length_idx + 7:magic_code_idx])
message_length = magic_code_idx + len(self.MAGIC_CODE) + json_length
if len(self._received_data) >= message_length:
full_message = self._received_data[:message_length]
self._received_data = self._received_data[message_length:]
break
else:
self.SOCKET_BUFFER_SIZE = message_length - len(self._received_data)
self.log_received(full_message)
obj = self.decode(full_message)
return obj
def send(self, command, **kwargs):
""" Build a message from parameters and send it to debugger.
:param command: The command sent to the debugger
:type command: str
:param _id: Unique id of the sent message. It is generated by the client.
but it can be forced.Right now, it's always `None`
for messages from debugger to client.
:type _id: int
"""
msg = self.encode({
'_id': self._msg_id,
'command': command,
'args': kwargs
})
self._msg_id += 1
if self._socket:
send_bytes_count = self._socket.sendall(msg)
self.log_sent(msg)
return self._msg_id - 1
raise IKPdbConnectionError("Connection lost!")
##### here are high level method #####
def run_script(self):
msg_id = self.send('runScript')
reply_msg = self.receive()
assert reply_msg['_id'] == msg_id, "Unexpected reply message to runScript command."
assert reply_msg['commandExecStatus'] == "ok", "IKPdb failed to start debugged program."
return reply_msg
def resume(self):
msg_id = self.send('resume')
reply_msg = self.receive()
assert reply_msg['_id'] == msg_id, "Unexpected reply message to resume command."
assert reply_msg['commandExecStatus'] == "ok", "IKPdb failed to resume debugged program."
assert reply_msg['result'].get('executionStatus') == 'running', "IKPdb failed to resume debugged program."
return reply_msg
def suspend(self):
msg_id = self.send('suspend')
reply_msg = self.receive()
assert reply_msg['_id'] == msg_id, "Unexpected reply message to 'suspend' command."
assert reply_msg['commandExecStatus'] == "ok", "IKPdb failed to resume debugged program."
assert reply_msg['result'].get('executionStatus') == 'running', "IKPdb failed to resume debugged program."
return reply_msg
def set_breakpoint(self, file_name, line_number, enabled=True, condition=None):
msg_id = self.send('setBreakpoint',
file_name=file_name,
line_number=line_number,
enabled=enabled,
condition=condition)
reply_msg = self.receive()
assert reply_msg['_id'] == msg_id, "Unexpected reply to setBreakpoint."
assert reply_msg['commandExecStatus'] == 'ok', "Failed to setBreakpoint."
return reply_msg
def get_breakpoints(self):
msg_id = self.send('getBreakpoints')
reply_msg = self.receive()
assert reply_msg['_id']==msg_id, "Unexpected reply to getBreakpoints."
assert reply_msg['commandExecStatus']=="ok", "getBreakpoints failed."
return reply_msg['result']
def change_breakpoint_state(self, breakpoint_number, enabled, condition):
msg_id = self.send('changeBreakpointState',
breakpoint_number=breakpoint_number,
enabled=enabled,
condition=condition)
reply_msg = self.receive()
if reply_msg['_id'] != msg_id:
raise IKPdbClientError("Unexpected reply message to 'changeBreakpointState'.")
if reply_msg['commandExecStatus'] != "ok":
raise IKPdbClientError("'changeBreakpointState' command failed.")
return reply_msg
def clear_breakpoint(self, breakpoint_number):
msg_id = self.send('clearBreakpoint',
breakpoint_number=breakpoint_number)
reply_msg = self.receive()
if reply_msg['_id'] != msg_id:
raise IKPdbClientError("Unexpected reply message to 'changeBreakpointState'.")
if reply_msg['commandExecStatus'] != "ok":
raise IKPdbClientError("'clearBreakpoint' command failed.")
return reply_msg
def evaluate(self, frame_id, expression, global_context=False, disableBreak=True):
kw_args = {
'frame': frame_id,
'expression': expression,
'global': global_context,
'disableBreak': disableBreak
}
msg_id = self.send('evaluate',
**kw_args)
reply_msg = self.receive()
if reply_msg['_id'] != msg_id:
raise IKPdbClientError("Unexpected reply message to 'evaluate'.")
if reply_msg['commandExecStatus'] != "ok":
raise IKPdbClientError("'evaluate' command failed.")
return reply_msg
def get_threads(self):
msg_id = self.send('getThreads')
reply_msg = self.receive()
assert reply_msg['_id'] == msg_id, "Unexpected reply to getThreads."
assert reply_msg['commandExecStatus'] == 'ok', "Failed to getThreads list."
return reply_msg
def set_debugged_thread(self, ident):
msg_id = self.send('setDebuggedThread',
ident=ident)
reply_msg = self.receive()
assert reply_msg['_id'] == msg_id, "Unexpected reply to setDebuggedThread."
#assert reply_msg['commandExecStatus'] == 'ok', "setDebuggedThread failed "\
# "with error: '%s'." % (reply_msg['error_messages'][0]
return reply_msg
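
    # Hedged usage sketch (comment only; assumes an IKPdb-instrumented program
    # listening on localhost:15470, with hypothetical file name/line number):
    #   client = IKPdbClient('127.0.0.1')
    #   client.set_breakpoint('debugged_script.py', 12)
    #   client.run_script()
    #   msg = client.receive()  # blocks until e.g. a breakpoint is hit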
|
[
"cmorisse@boxes3.net"
] |
cmorisse@boxes3.net
|
db9d65a1d6f1f296c401bc7aef8b40d8792e2d0c
|
2b9702bd13fc5338eada200de47e40bcb6f58366
|
/Contest_Problems/CCC_2021_J1.py
|
581c1c6aa4687e61a807bff135a813efadf8d1eb
|
[] |
no_license
|
alimulyukov/Course_Work_2021
|
028151347a68482f147545b1729d3dc6b71e06b4
|
fca92a1b02d7d987f7a8b4e161c70bb37c0ba849
|
refs/heads/main
| 2023-02-21T16:02:12.223839
| 2021-01-25T19:31:41
| 2021-01-25T19:31:41
| 311,425,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
g1 = input()
g2 = input()
g3 = input()
g4 = input()
g5 = input()
g6 = input()
w = 0
l = 0
if g1 == "w":
w = w + 1
print("1")
|
[
"mulukovali@Mulukovs-MacBook-Air.local"
] |
mulukovali@Mulukovs-MacBook-Air.local
|
052857dbf165f903d441bffd1f3c46eab4b347be
|
2550f5d78cc334863b979ef1265c712205a2fba1
|
/backend/app/app/crud/crud_user.py
|
edcebaaca4f5bdd8a7b85c91c07682917ea4eaaa
|
[] |
no_license
|
JohnJiangLA/ClassManager_backend_service
|
9dbd051bda143833a0e31f2b1fed4ffe07e0cc2e
|
1156e32e31c6068dd917f3c9dabb2807b42f1ad9
|
refs/heads/main
| 2023-07-14T11:30:56.984177
| 2021-08-26T06:44:56
| 2021-08-26T06:44:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,396
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date: 2021/7/13
# @Author: gray
"""
CRUD module - user-related CRUD for non-complex business logic
"""
from sqlalchemy.engine.row import Row
from sqlalchemy.orm import Session
from app.crud.base import CRUDBase
from app.models import User
from app.schemas.user import UserCreate
class CRUDUser(CRUDBase[User, UserCreate, User]):
"""
    User-related CRUD for non-complex business logic
    Model class: User
    Table: user
"""
def is_openid_exists(self, db: Session, openid: str) -> User:
"""
        Check whether the user for the given openid exists; if so, return the id
"""
return (
db.query(self.model.id)
.filter(User.openid == openid)
.first()
)
def get_basic_info(self, db: Session, user_id: int) -> Row:
"""
        Get the user's basic information
"""
return (
db.query(self.model.current_member_id, self.model.is_delete)
.filter(User.id == user_id)
.first()
)
def update_current_member(self, db: Session, user_id: int, member_id: int):
"""
        Update the class the user currently belongs to
"""
res = (
db.query(self.model)
.filter(User.id == user_id)
.update({User.current_member_id: member_id})
)
db.commit()
return res
user = CRUDUser(User)
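
# Hedged usage sketch (comment only; assumes a SQLAlchemy Session `db` from the
# project's session factory, with a hypothetical openid value):
#   row = user.is_openid_exists(db, openid="wx_openid_example")
#   if row:
#       info = user.get_basic_info(db, user_id=row.id)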
|
[
"l87894741@gmail.com"
] |
l87894741@gmail.com
|
c2a1c58a8ed69f96d2c2772938a0b7c64b5a1c7c
|
7a6c5e84c93f93cfc8462af1db02cc5c0a734177
|
/app.py
|
826d2a5baf56d570ab247dc414e138276eab86ce
|
[] |
no_license
|
kike4815/Flask-MySQL
|
64f8aa488c0510c446b6b0a018e9a879bb4aaca1
|
4bfe8f0a39a0259cca25c257b437e5e0bf85ea90
|
refs/heads/main
| 2023-06-09T01:16:23.494474
| 2021-06-29T16:14:35
| 2021-06-29T16:14:35
| 381,426,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,759
|
py
|
from flask import Flask, render_template, flash, redirect, url_for, session, logging, request
import flask
from data import Articles
from flask_mysqldb import MySQL
from wtforms import Form, StringField, TextAreaField, PasswordField, validators
from passlib.hash import sha256_crypt
from functools import wraps
app = Flask(__name__)
Articles = Articles()
# config mysql
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'kike'
app.config['MYSQL_PASSWORD'] = '10128291e'
app.config['MYSQL_DB'] = 'myflaskapp'
app.config['MYSQL_CURSORCLASS'] = 'DictCursor'
# init mysql
mysql = MySQL(app)
@app.route('/')
def index():
return render_template('home.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/articles')
def articles():
return render_template('articles.html', articles=Articles)
@app.route('/article/<string:id>')
def article(id):
return render_template('article.html', id=id)
class RegisterForm(Form):
name = StringField('Name', [validators.length(min=1, max=50)])
username = StringField('userName', [validators.length(min=4, max=25)])
email = StringField('Email', [validators.length(min=6, max=50)])
    password = PasswordField('Password', [validators.DataRequired(),
                                          validators.EqualTo('confirm', message='Passwords do not match')])
confirm = PasswordField('Confirm password')
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form)
if request.method == 'POST' and form.validate():
name = form.name.data
email = form.email.data
username = form.username.data
password = sha256_crypt.encrypt(str(form.password.data))
curs = mysql.connection.cursor()
curs.execute(
" INSERT INTO users(name, email, username, password) VALUES (%s, %s, %s, %s)", (name, email, username, password))
mysql.connection.commit()
curs.close()
flash('You are now registered and can log in', 'success')
redirect(url_for('login'))
return render_template('register.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
username = request.form['username']
password_candidate = request.form['password']
cur = mysql.connection.cursor()
result = cur.execute(
"SELECT * FROM users WHERE username= %s", [username])
if result > 0:
data = cur.fetchone()
password = data['password']
if sha256_crypt.verify(password_candidate, password):
session['logged_in'] = True
session['username'] = username
flash('You are now logged in', 'success')
return redirect(url_for('dashboard'))
else:
error = 'Invalid login'
return render_template('login.html', error=error)
cur.close()
else:
error = 'User not found'
return render_template('login.html', error=error)
return render_template('login.html')
def is_logged_in(f):
@wraps(f)
def wrap(*args, **kwargs):
        if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash('Unauthorized, Please Login', 'danger')
return redirect(url_for('login'))
return wrap
@app.route('/logout')
def logout():
session.clear()
flash('You are now logged out', 'success')
return redirect(url_for('login'))
@app.route('/dashboard')
@is_logged_in
def dashboard():
return render_template('dashboard.html')
if __name__ == '__main__':
app.secret_key = 'secret'
app.run(debug=True)
|
[
"e.pedros.sanchez@gmail.com"
] |
e.pedros.sanchez@gmail.com
|